blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c38a872e6e6ae5b1d7edc0e1731ec393f49a556d | a0241ddd6feb1236740c4b57f86b4e2b9c812ffe | /userdata/models/beirat.py | ee4cada6ca3852e198c2996ce9b2dd941e739f94 | [] | no_license | patta42/website | 0a08d4b11c49020acefaf8f8cca30982ca2afa50 | 72d2e136272e0ed23f74080697d16eb9bc692ac3 | refs/heads/master | 2021-06-10T16:30:51.292679 | 2021-04-24T06:27:02 | 2021-04-24T06:27:02 | 151,085,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | from django.db import models
from django.utils.translation import gettext as _
from website.models import TranslatedField
from userdata.models import StaffUser
class BeiratGroups(models.Model):
    """A group (section) within the Beirat (advisory board).

    Carries a German and an English title plus a display position; the
    ``title`` TranslatedField picks the right language at render time.
    """
    # German title of the group.
    title_de = models.CharField(
        max_length = 128,
        verbose_name = 'Gruppe innerhalb des Beirats (deutsch)'
    )
    # English title of the group.
    title_en = models.CharField(
        max_length = 128,
        verbose_name = 'Gruppe innerhalb des Beirats (english)'
    )
    # Position of this group when listing the whole Beirat.
    order = models.IntegerField(
        verbose_name = 'Anzeigeposition in der Beiratsliste'
    )
    # Whether this group is further split into faculty sub-groups
    # (see Beirat2StaffRelation.faculty_group).
    has_sub_groups = models.BooleanField(
        default = False,
        help_text = _('Ist diese Gruppe in Untergruppen nach Bereichen unterteilt?')
    )
    # Language-aware accessor combining title_en / title_de.
    title = TranslatedField('title_en', 'title_de')
    def __str__(self):
        # Renders via the TranslatedField, i.e. in the active language.
        return self.title
class Beirat2StaffRelation(models.Model):
    """Membership of a StaffUser in a Beirat group.

    Encodes the role (regular member, surrogate, head) and — for groups
    split by faculty — which faculty the member represents.
    """
    beirat_group = models.ForeignKey(
        BeiratGroups,
        on_delete = models.CASCADE
    )
    # Nullable so a seat can exist before a person is assigned to it
    # (presumably; confirm against the admin workflow).
    member = models.ForeignKey(
        StaffUser,
        on_delete = models.CASCADE,
        null = True,
        blank = True
    )
    # True when this person is a deputy rather than a full member.
    is_surrogate = models.BooleanField(
        default = False,
        help_text = _('Is this Beitrat member surrogate?')
    )
    # True for the chairperson of the Beirat.
    is_head = models.BooleanField(
        default = False,
        help_text = _('Is the member head of the Beirat')
    )
    # NOTE(review): null=True on a CharField is a Django anti-pattern
    # (two "empty" states: NULL and ''); changing it would need a
    # migration, so it is only flagged here.
    faculty_group = models.CharField(
        max_length = 64,
        choices = (
            ('natural', _('Natural Sciences')),
            ('engineering', _('Engineering')),
            ('medicine', _('Medicine'))
        ),
        blank = True,
        null = True
    )
    def __str__(self):
        return '{} ({})'.format(str(self.member), str(self.beirat_group))
| [
"patrick.happel@rub.de"
] | patrick.happel@rub.de |
6b85244c9a072334ead551a420b5939053c349c0 | 5ad839ea5a8b7acff149a7e620baf132d60ef655 | /PythonFiles/Miniconda2/pkgs/python-2.7.16-hcb6e200_0/info/recipe/run_test.py | f060bd4d4208c8e15097a32acdad1af1765ba963 | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause"
] | permissive | manos-mark/WebCamera-Head-and-Gaze-tracking | 816ad2804528a53d515380078d7aed2adb8ec4f2 | 5001196f2e9ef31a653ee66efed979b3d10452e8 | refs/heads/master | 2022-11-07T15:47:10.225791 | 2019-07-18T08:51:30 | 2019-07-18T08:51:30 | 197,543,675 | 0 | 1 | MIT | 2022-10-26T10:54:00 | 2019-07-18T08:17:32 | Python | UTF-8 | Python | false | false | 3,200 | py | # make sure Grammar pickle files are present
# Conda run_test for the python-2.7.16 package: verifies the interpreter
# and its stdlib extension modules were built correctly.
# (Indentation below restored conventionally; the source dump lost it.)
import os
from os.path import dirname, isfile, join
# The lib2to3 Grammar pickles must ship with the package; they must be
# checked BEFORE 2to3 is ever run, because running 2to3 creates them.
for fn in ('Grammar2.7.16.final.0.pickle',
           'PatternGrammar2.7.16.final.0.pickle'):
    assert isfile(join(dirname(os.__file__), 'lib2to3', fn))
import platform
import sys
import subprocess
from pprint import pprint
# it is important to run the test for the 2to3 command *after* the existance
# of the Grammar pickle files has been checked (because running 2to3) will
# create them
subprocess.check_call([join(sys.prefix,
       'Scripts/2to3.exe' if sys.platform == 'win32' else 'bin/2to3'), '-h'])
# Platform flags used further below to skip modules unavailable on
# ARM / ppc64le builds.
armv7l = bool(platform.machine() == 'armv7l')
ppc64le = bool(platform.machine() == 'ppc64le')
debug = int(os.getenv('DEBUG', 0))
print('Python version:', platform.python_version())
# Pin the exact interpreter version this package claims to provide.
assert platform.python_version() == '2.7.16'
assert sys.version_info[:3] == (2, 7, 16)
if sys.platform == 'win32':
    # Windows builds of 2.7 must use the VS2008 toolchain (MSC v.1500).
    assert 'MSC v.1500' in sys.version
print('max unicode:', sys.maxunicode)
print('architecture:', platform.architecture())
print('sys.version:', sys.version)
print('platform.machine():', platform.machine())
print('DEBUG:', debug)
# gettotalrefcount only exists in --with-pydebug interpreters.
assert hasattr(sys, 'gettotalrefcount') == bool(debug)
if debug:
    print('sys.gettotalrefcount:', sys.gettotalrefcount())
import _bisect
import _codecs_cn
import _codecs_hk
import _codecs_iso2022
import _codecs_jp
import _codecs_kr
import _codecs_tw
import _collections
import _csv
import _ctypes
import _ctypes_test
import _elementtree
import _functools
import _hashlib
import _heapq
import _hotshot
import _io
import _json
import _locale
import _lsprof
import _multibytecodec
import _multiprocessing
import _random
import _socket
import _sqlite3
import _ssl
import _struct
import _testcapi
import array
import audioop
import binascii
import bz2
import cPickle
import cStringIO
import cmath
import datetime
import future_builtins
import itertools
import math
import mmap
import operator
import parser
import pyexpat
import select
import ssl
import strop
import time
import test
import unicodedata
import zlib
import gzip
from os import urandom
import os
# Smoke-test zlib/gzip round-tripping with a known compressed payload.
# (Indentation below restored conventionally; the source dump lost it.)
a = 20 * 'Ilan'
b = 'x\x9c\xf3\xccI\xcc\xf3\xa4"\x06\x00\xc8L\x1eQ'
assert zlib.compress(a) == b
assert zlib.decompress(b) == a
with gzip.open('x.gz', 'wb') as fo:
    fo.write(a)
with open('x.gz', 'rb') as fi:
    # Header + compressed body of the 80-byte payload is 29 bytes.
    assert len(fi.read()) == 29
if sys.platform != 'win32':
    # Unix-only extension modules.  curses is skipped on ppc64le/armv7l
    # builds (assumed nesting -- the dump lost the original indentation).
    if not (ppc64le or armv7l):
        import _curses
        import _curses_panel
    import crypt
    import fcntl
    import grp
    import nis
    import readline
    import resource
    import syslog
    import termios
    readline.clear_history()
if not (armv7l or ppc64le):
    # Tk stack is not built for ARM/ppc64le.
    import _tkinter
    import Tkinter
    import turtle
    print('TK_VERSION:', _tkinter.TK_VERSION)
    print('TCL_VERSION:', _tkinter.TCL_VERSION)
    if sys.platform == 'win32':
        TCLTK_VER = '8.5'
    else:
        TCLTK_VER = os.getenv("tk")
    assert _tkinter.TK_VERSION == _tkinter.TCL_VERSION == TCLTK_VER
print('OPENSSL_VERSION:', ssl.OPENSSL_VERSION)
if sys.platform != 'win32':
    # The linked OpenSSL must match the version pinned by the recipe.
    assert os.getenv("openssl") in ssl.OPENSSL_VERSION
pprint(platform._sys_version())
# Only exercise the Tk GUI when explicitly requested (needs a display).
if int(os.getenv('GUI_TEST', 0)):
    turtle.forward(100)
| [
"manos-mark@hotmail.com"
] | manos-mark@hotmail.com |
f9ef28ee724dadfa8e3aac1696e9d0c261ca91c1 | a5fd6b06cfeed486e3729de3be2202140656eed6 | /accounts/views.py | 4c974a455ccead5f6c91aa3157dc34d03bff1d51 | [] | no_license | Sevikus/btre_project | ca723bc1372d7a717d62d3efc430b245384b6d37 | 729b7e6e118a6a409e6ffdbd7f44f10a09d95a81 | refs/heads/master | 2020-09-30T04:02:43.627869 | 2019-12-10T19:25:29 | 2019-12-10T19:25:29 | 227,197,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
def register(request):
    """Handle user sign-up.

    GET renders the empty registration form; POST validates the submitted
    fields, creates the account and redirects to the login page.  Every
    validation failure flashes a message and returns to the form.
    """
    if request.method != 'POST':
        return render(request, 'accounts/register.html')

    # Pull the submitted form values.
    form = request.POST
    first_name = form['first_name']
    last_name = form['last_name']
    username = form['username']
    email = form['email']
    password = form['password']
    password2 = form['password2']

    # Guard clauses: bail out on the first validation failure.
    if password != password2:
        messages.error(request, 'Passwords do not match')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.error(request, 'That username is taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.error(request, 'That email is being used')
        return redirect('register')

    # All checks passed: create the account and send the user to login.
    new_user = User.objects.create_user(username=username, first_name=first_name,
                                        last_name=last_name, email=email,
                                        password=password)
    new_user.save()
    messages.success(request, 'You are now registered')
    return redirect('login')
def login(request):
    """Authenticate the posted credentials and open a session.

    GET renders the login form; POST either logs the user in and sends
    them to the dashboard, or flashes an error and returns to the form.
    """
    if request.method != 'POST':
        return render(request, 'accounts/login.html')

    user = auth.authenticate(
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        messages.error(request, 'Invalid credentials')
        return redirect('login')

    auth.login(request, user)
    messages.success(request, 'You are now logged in')
    return redirect('dashboard')
def logout(request):
    """End the current session and return to the index page.

    Only POST is honoured so that link-prefetching GET requests cannot
    log users out.
    """
    if request.method == 'POST':
        auth.logout(request)
        messages.success(request, 'You are now logged out')
        return redirect('index')
    # NOTE(review): non-POST requests fall through and return None,
    # which Django turns into a server error -- presumably unreachable
    # from the UI; confirm.
def dashboard(request):
    """Render the signed-in user's contact inquiries, newest first."""
    user_contacts = (Contact.objects
                     .order_by('-contact_date')
                     .filter(user_id=request.user.id))
    return render(request, 'accounts/dashboard.html',
                  {'contacts': user_contacts})
| [
"dragansevo.ds@gmail.com"
] | dragansevo.ds@gmail.com |
7bc22ff7aaf4908fbac56962aad200eb123b3427 | 59522e46a73630181f19251b8bfef90e497c2f82 | /coop_cms/management/commands/create_db_password.py | 5e1a63ab5f5a28fa874530f70fdb8939143577be | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ljean/coop_cms | 9befe74edda007686007f8566cd2555856099ae8 | 9e6c70afb61b57dc0326fbb64f9d6b19c04f48a1 | refs/heads/master | 2023-07-11T16:02:35.945029 | 2023-06-30T12:16:26 | 2023-06-30T12:16:26 | 5,846,409 | 3 | 5 | NOASSERTION | 2019-08-30T10:55:02 | 2012-09-17T19:53:56 | Python | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
from getpass import getpass
from django.core.management.base import BaseCommand
from django.conf import settings
from ...secrets import set_db_password
class Command(BaseCommand):
    """Management command: prompt for a DB password and store it encrypted.

    The password is read interactively (never echoed, never passed on the
    command line) and persisted via coop_cms' secrets helper using the
    project's SECRET_KEY.
    """
    help = 'Generates a password DB'
    def handle(self, *args, **options):
        # getpass keeps the password out of shell history and process args.
        db_password = getpass("password?")
        set_db_password(settings.BASE_DIR, settings.SECRET_KEY, db_password)
| [
"ljean@apidev.fr"
] | ljean@apidev.fr |
46028ac27300218928133b6ea7f6ab6a92c51374 | 57579a07e4dc2144518a8aa48824916a97df13a4 | /Think_Python/tmp/scratch_12.py | 3e36926d8b17227cf7c77592b3880a0bcbcbf405 | [] | no_license | ekc0106/python | c49a7ef482d1009a8dfdf939fa82049adc64471d | dd82596e22d132ea04e9e88f6b1bc3eec23220d8 | refs/heads/master | 2020-03-08T04:16:17.538144 | 2019-12-27T05:28:44 | 2019-12-27T05:28:44 | 127,916,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,141 | py | ## time
class Time(object):
    """Represents the time of day.

    Attributes (assigned by callers after instantiation; no __init__):
        hour, minute, second
    """
time = Time()
time.hour = 11
time.minute = 59
time.second = 30
time.__dict__
## pure functions
def add_time(t1, t2):
    """Field-wise sum of two Time objects (pure function, first draft).

    NOTE: performs no carry, so minute/second may come out >= 60.
    """
    sum = Time()
    sum.hour = t1.hour + t2.hour
    sum.minute = t1.minute + t2.minute
    sum.second = t1.second + t2.second
    return sum
start = Time()
start.hour = 9
start.minute = 45
start.second = 0
duration = Time()
duration.hour = 1
duration.minute = 35
duration.second = 0
done = add_time(start, duration)
done.__dict__ # ....๋ถ์ด 60๋ถ์ด ๋์ด๊ฐ๋ฒ๋ฆผ.
def print_time(t):
    """Print a Time as h:m:s.  %g does not zero-pad (9:5:0, not 09:05:00)."""
    print('%g:%g:%g' % (t.hour, t.minute, t.second))
print_time(done)
#pure function.. ๊ธฐ์กด์๊ฒ์ ๊ฑด๋๋ฆฌ์ง ์์
def add_time(t1, t2):
    """Pure-function sum of two Times with a single-step carry.

    Handles at most one overflow per field, which is enough when both
    operands are valid times (minute/second < 60).
    """
    sum = Time()
    sum.hour = t1.hour + t2.hour
    sum.minute = t1.minute + t2.minute
    sum.second = t1.second + t2.second
    # Carry seconds into minutes, then minutes into hours.
    if sum.second >= 60:
        sum.second -= 60
        sum.minute += 1
    if sum.minute >= 60:
        sum.minute -= 60
        sum.hour += 1
    return sum
done = add_time(start, duration)
print_time(done)
## modifiers ๊ธฐ์กด์ ๊ฒ์..๊ฑด๋๋ฆฌ๋๊ฑฐ
def increment(time, seconds):
    """Add *seconds* to *time* in place (a modifier, not a pure function).

    Uses divmod so arbitrarily large additions carry correctly; the
    original single `if`-based carry broke whenever a field overflowed by
    more than one step (e.g. adding 90 s to h:59:30 produced second == 60).
    Assumes *seconds* is non-negative.  Returns None.
    """
    # Carry seconds into minutes, then minutes into hours.
    carry_minutes, time.second = divmod(time.second + seconds, 60)
    carry_hours, time.minute = divmod(time.minute + carry_minutes, 60)
    time.hour += carry_hours
#์ด๊ฑฐ๋...์์ ์๋ ํธ๋ฆฌํ์ง๋ง ์ค๋ฅ ๋ฐ์๋๊ธฐ ์ฌ์. ๋๋๋ก ์ฌ์ฉ ์๋๊ฒ์ด ์ข์..
print_time(time)
increment(time,30)
print_time(time)
## prototyping and planning
def time_to_int(time):
    """Convert a Time object to the number of seconds since 0:0:0."""
    return (time.hour * 60 + time.minute) * 60 + time.second
#second ๊ฐ์ minute์ผ๋ก, minute์ hour๋ก.. ..
def int_to_time(seconds):
    """Convert a count of seconds to a Time object (inverse of time_to_int)."""
    time = Time()
    # Split seconds -> (minutes, second), then minutes -> (hour, minute).
    minutes, time.second = divmod(seconds, 60)
    time.hour, time.minute = divmod(minutes, 60)
    return time
def add_time(t1, t2):
    """Sum two Times by converting to seconds and back (pure function)."""
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
print_time(start)
print_time(duration)
done = add_time(start,duration)
print_time(done)
## debugging
def valid_time(time):
    """Return True when all fields are non-negative and minute/second < 60.

    Fixes the original ``time.minutes`` typo (the attribute is ``minute``),
    which raised AttributeError whenever the second check was reached.
    """
    if time.hour < 0 or time.minute < 0 or time.second < 0:
        return False
    if time.minute >= 60 or time.second >= 60:
        return False
    return True
time= Time()
time.hour = -1
time.minute = 10
time.second=5
valid_time(time)
def add_time(t1, t2):
    """Defensive variant: reject malformed Time objects with ValueError."""
    if not valid_time(t1) or not valid_time(t2):
        raise ValueError('invalid Time object in add_time')
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
add_time(time,start) #์๋ฌ๋ธ
# assert ๋ฌธ์ ์ด์ฉํ ์๋ ์์(์กฐ๊ฑด์ ๋ง์กฑํ์ง ์์ ๋ ์์ธ ๋ฐ์)
def add_time(t1, t2):
    """Assert-based variant of the defensive add_time.

    NOTE: asserts are stripped under ``python -O``, so this check is
    for development only.
    """
    assert valid_time(t1) and valid_time(t2)
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
add_time(time,start) #์๋ฌ์ ํ์
์ด ๋ค๋ฅด๊ธดํ๋ฐ ์ฝ๋ฉ์ด ์๋ณด๋ค ๊ฐ๊ฒฐํ๋๊น..
#16.6 mul_time(์ผ์ข
์ ํจ์ด๋ปฅ์
)
# ์๊ท๋จผํธ๋ time, object, ์ซ์.. return value: time*์ซ์
# ๊ทธ๊ฑธ ์ด์ฉํด์ ๊ทธ.. ๋๋ค๋ฅธ ๋ปฅ์
์ ์์ฑํด๋ผ. ์๊ท๋จผํธ๋ time obj (finishing time in a race)
# return : time object(average pace time per mile)
#Time ๊ฐ์ฒด์ ์ซ์๋ฅผ ๋ฐ์์ Time ๊ณผ ์ซ์์ ๊ณฑ์ ํฌํจํ๋ ์ Time ๊ฐ์ฒด๋ฅผ ๋๋ ค์ฃผ๋ ํจ์ mul_time ๋ฅผ ์์ฑํ์ธ์.
#๊ทธ๋ฐ ๋ค์ ๊ฒฝ์ฃผ์์ ์ฃผํ์๊ฐ์ ๋ํ๋ด๋ Time ๊ฐ์ฒด์ ๊ฑฐ๋ฆฌ๋ฅผ ๋ํ๋ด๋ ์ซ์๋ฅผ ๋ฐ์์ ํ๊ท ํจ์ด์ค(๋ง์ผ๋น ์๊ฐ)๋ฅผ ๋ํ๋ด๋ Time ๊ฐ์ฒด๋ฅผ ๋๋ ค์ฃผ๋ ํจ์๋ฅผ ์์ฑํ๋๋ฐ mul_time๋ฅผ ์ฌ์ฉํ์ธ์.
def mul_time(time, dist):
    """Return a Time giving the average pace (time per mile).

    time: total race duration as a Time object; dist: distance in miles.
    """
    # int() truncates any fractional second from the true division.
    time_per_mile =int(time_to_int(time)/dist)
    return (int_to_time(time_per_mile))
times = Time()
times.hour = 1
times.minute = 30
times.second = 30
times_per_mile = mul_time(times, 3)
print_time(times_per_mile) # ์ฆ 3๋ง์ผ์ 1์๊ฐ 30๋ถ 30์ด๊ฐ ๊ฑธ๋ฆฐ๋ค๋ฉด, 1๋ง์ผ์๋ 30๋ถ 10์ด๊ฐ ๊ฑธ๋ฆฐ๋ค.
##
# 16.7 datetime.... date time..๊ต์ฌ์ ๋งํฌ๋ค๊ฐ์ 3.๋ฒ์ ์ผ๋ก ๋ค๊ฐ๋ผ.
# 1. current date (2018๋
4์23์ผ) day of week (์์์ผ) ..๋ฑ๋ฑ ๋งค์จ๋๊ฐ ์์๊ฑฐ์.
import time
from datetime import datetime
today = datetime.today()
today.weekday()
def weekday_fun(weekday):
    """Print the Korean name of the weekday number (0 = Monday).

    Any value outside 0-5 prints Sunday, matching the final ``else``
    branch of the original if/elif chain.
    """
    day_names = {
        0: '월요일',
        1: '화요일',
        2: '수요일',
        3: '목요일',
        4: '금요일',
        5: '토요일',
    }
    print(day_names.get(weekday, '일요일'))
# 2. birthday ->input ... age # of days times muinutes and second.. untile next birthday->output..
# ์ฆ ๋ ์์ผ ๋ ์ง? ์ฐ๊ณ ๊ทธ๋ฌ๋ฉด ์์ํ์ผ๋ก ๋ค์ ์์ผ๊น์ง ๋ช์ผ ๋ช์๊ฐ ๋ช๋ถ ๋ช์ด ๋จ์๋์ง ๊ณ์ฐํ๋ ๋ปฅ์
์ ์์ฑํ๋ผ.
# datetime ๋ชจ๋์ ์ด ์ฅ์ ๋์ค๋ Date ์ Time ๊ฐ์ฒด์ ์ ์ฌํ date ์ time ๊ฐ์ฒด๋ฅผ ์ ๊ณตํ๋๋ฐ, ๋ ํ๋ถํ ๋ฉ์๋์ ์ฐ์ฐ๋ค์ ์ ๊ณตํฉ๋๋ค. http://docs.python.org/2/library/datetime.html์์ ์ค๋ช
์๋ฅผ ์ฝ์ผ์ธ์.
# datetime ๋ชจ๋์ ์จ์, ํ์ฌ ๋ ์ง๋ฅผ ์ป์ด์ ์์ผ์ ์ธ์ํ๋ ํ๋ก๊ทธ๋จ์ ์์ฑํ์ธ์.
# ์
๋ ฅ์ผ๋ก ์์ผ์ ๋ฐ์์ ์ฌ์ฉ์์ ๋์ด์ ๋ค์ ์์ผ๊น์ง ๋จ์ ์ผ, ์, ๋ถ, ์ด๋ฅผ ์ธ์ํ๋ ํ๋ก๊ทธ๋จ์ ์์ฑํ์ธ์.
#(2)
import time
from datetime import datetime
today = datetime.today()
my_birthday = datetime(1995, 1, 6)
def time_to_birth(my_birth):
    """Print the person's age and the time remaining until their next birthday.

    Bug fixes versus the original:
    * the "birthday still ahead this year" comparison used the un-shifted
      birth date (e.g. year 1995), so that branch was unreachable and the
      countdown overshot by a year when the birthday was still upcoming;
    * the age was computed from years alone, over-counting by one before
      the birthday has occurred in the current year.
    Caveat: a Feb-29 birth date raises ValueError in non-leap years.
    Returns None (output goes to stdout).
    """
    today = datetime.today()  # evaluated per call instead of a module global
    next_birthday = my_birth.replace(year=today.year)
    if next_birthday < today:
        # This year's birthday already passed -- look at next year's.
        next_birthday = next_birthday.replace(year=today.year + 1)
    # Age on the current date = age reached at the next birthday, minus one.
    age = next_birthday.year - my_birth.year - 1
    until_birth = next_birthday - today
    print('나이 : ', age, '생일까지남은기간 : ', until_birth)
time_to_birth(my_birthday)
| [
"noreply@github.com"
] | ekc0106.noreply@github.com |
a3abb973df7c49c23f085d85ce66b4a6df1246d2 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/gigasecond/f02e9fb37a7446a1af9ca9e795c77365.py | e490967c96d68ba8fa7ac4318031d539f832a649 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 135 | py | from datetime import timedelta
def add_gigasecond(dateTimeObj):
    """Return the moment exactly one gigasecond (10**9 s) after the input."""
    return dateTimeObj + timedelta(seconds=10 ** 9)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
df514b9511c6fc36b842a7f6ad97a00e5cef44a3 | 17207061de7d0ebbca88b551283eaecac70fc552 | /main_AL.py | dd93ef8cb75b185e260fbb799117dd21a30cd0f4 | [] | no_license | Abagena/Python | 127934f56d1e9a23bf5f58f32ec56c972886f65a | 976d24898269f2e946d9e55ebd94649b85e66abd | refs/heads/main | 2023-03-30T12:17:46.246390 | 2021-03-24T12:15:07 | 2021-03-24T12:15:07 | 351,063,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,197 | py | import re, sys, time, random, requests, json, hashlib, datetime
# -*- coding: utf-8 -*-
class Handler:
reload(sys)
sys.setdefaultencoding('UTF8')
#
# new message handler
#
    def new_message_handler(self, input_data, c2d):
        """Route an incoming client message.

        For a message that opens a new dialog (no dialog id yet) while the
        company schedule is "online": look up the client's routing tags,
        map them to an operator group, pick the least-loaded online
        operator and transfer the message there.  Outside working hours a
        new dialog goes to the bot; messages inside an existing dialog are
        only echoed.  Returns the (possibly None) dialog id.
        """
        schedule = c2d.get_company_info()
        messageid = input_data['message']['id']
        dialogid = input_data['message']['dialogID']
        try:
            idnovaint = input_data['client']['id']
            # New dialog during working hours: route to a human operator.
            if (dialogid is None) and (schedule['online'] == True):
                # The send_message calls below are debug traces on the
                # 'system' channel.
                c2d.send_message(idnovaint, str(idnovaint) + ' idnovaint','system')
                tags = self.get_tags(idnovaint)
                c2d.send_message(idnovaint, str(tags) + ' tags_id','system')
                group = self.get_group(tags)
                c2d.send_message(idnovaint, str(group) + ' group', 'system')
                operators = self.get_operators(group)
                online_operator = self.get_online_operators(idnovaint, operators, c2d, messageid, dialogid)
                c2d.send_message(idnovaint, str(online_operator) + ' online_operator','system')
                self.transfer_to_operator(messageid, dialogid, c2d, online_operator)
            # New dialog outside working hours: hand over to the bot.
            elif (dialogid is None) and (schedule['online'] == False):
                c2d.transfer_message(messageid, bot_id)
            else:
                c2d.send_message(idnovaint, str(dialogid) + ' dialogid','system')
        # NOTE(review): this blanket except silently hides every routing
        # error -- at minimum the exception should be logged.
        except Exception:
            pass
        return dialogid
    def get_tags(self, idnovaint):
        """Return the client's routing-relevant chat24 tag ids.

        Fetches the client record from the chat24 API and keeps only the
        three tags used for routing (tag_paid / tag_after / tag_markng).
        """
        tagid = requests.get('https://api.chat24.io/v1/clients/' + str(idnovaint),
            headers={
                'Authorization': api_token,
                'Content-Type': 'application/json'},
            timeout=2
            )
        tags = json.loads(tagid.text)['data']['tags']
        proper_tags = []
        for t in tags:
            if t['id'] == tag_paid or t['id'] == tag_after or t['id'] == tag_markng:
                proper_tags.append(t['id'])
        return proper_tags
    def get_group(self, tags):
        """Map a client's tag combination to an operator group id.

        NOTE(review): in conditions like
        ``tag_paid in tags and tag_after and tag_markng not in tags`` the
        bare ``tag_after`` is a truthy module constant, so it is a no-op;
        the author presumably meant ``tag_after not in tags``.  For single
        and double tag combinations the outcomes still coincide with the
        apparent intent, so the code is left unchanged -- confirm the
        intended routing before refactoring.
        """
        if tag_paid in tags and tag_after and tag_markng not in tags:
            return group_paid
        elif tag_after in tags and tag_paid and tag_markng not in tags:
            return group_calls
        elif tag_markng in tags and tag_after and tag_paid not in tags:
            return group_ad
        elif tag_paid and tag_markng in tags and tag_after not in tags:
            return group_ad
        elif tag_paid and tag_after in tags and tag_markng not in tags:
            return group_paid
        elif tag_after and tag_markng in tags and tag_paid not in tags:
            return group_ad
        else:
            # No recognised combination: default group.
            return group_attendants
    def get_operators(self, group):
        """Return the operator ids belonging to the given operator group.

        Queries the chat24 operators_groups endpoint and collects the
        operator_ids of every group record whose id matches *group*.
        """
        operators_groups_id = requests.get('https://api.chat24.io/v1/operators_groups',
            headers={
                'Authorization': api_token,
                'Content-Type': 'application/json'},
            timeout=2
            )
        group_data_all = json.loads(operators_groups_id.text)['data']
        group_operator_ids = []
        for g in group_data_all:
            if g['id'] == group:
                group_operator_ids.extend(g['operator_ids'])
        return group_operator_ids
def getKeysByValue(self, dictOfElements, valueToFind):
listOfItems = dictOfElements.items()
for item in listOfItems:
if item[1] == valueToFind:
Key = item[0]
return Key
    def get_online_operators(self, idnovaint, operators, c2d, messageid, dialogid):
        """Pick the least-loaded online operator among *operators*.

        When nobody in the group is online: messageid == 777 (the sentinel
        used by auto_checking_handler) falls back to the attendants group
        via the "night" lookup and transfers the dialog there; any other
        message is transferred to the attendants group directly.  Returns
        the chosen operator id, or None implicitly when none is online.
        """
        operators_ids = requests.get('https://api.chat24.io/v1/operators/?limit=100',
            headers={
                'Authorization': api_token,
                'Content-Type': 'application/json'},
            timeout=2
            )
        operator_data_all = json.loads(operators_ids.text)['data']
        online_ops = []
        dialog_num = []
        # Record, per requested operator, whether they are online and how
        # many dialogs they currently have open.
        for o in operators:
            for each_op in operator_data_all:
                if (each_op['id'] == o) and (each_op['online'] == 1) and (each_op['offline_type'] == None):
                    online_ops.append(o)
                    dialog_num.append(each_op['opened_dialogs'])
                    break
        if online_ops == [] and messageid == 777:
            operators = self.get_operators(group_attendants)
            c2d.send_message(log_id, 'operators_night ' + str(operators),'system')
            online_operator = self.get_online_operators_night(operators, c2d)
            c2d.send_message(log_id, 'online_operator_night ' + str(online_operator),'system')
            self.transfer_dialog(dialogid, online_operator, c2d)
        elif online_ops == []:
            c2d.send_message(idnovaint, 'no operators online','system')
            time.sleep(1)  # TODO: remove this delay (original comment, translated)
            c2d.transfer_message_to_group(messageid, group_attendants)
        zip_op = zip(online_ops, dialog_num)
        dict_op = dict(zip_op)
        if len(online_ops) > 0:
            # Linear scan for the minimum open-dialog count, then map it
            # back to the owning operator id.
            free_operator = dialog_num[0]
            for n in dialog_num:
                if n < free_operator:
                    free_operator = n
            available_op = self.getKeysByValue(dict_op,free_operator)
            return available_op
    def transfer_to_operator(self, messageid, dialogid, c2d, online_operator):
        """Transfer the message to *online_operator*, but only for a new
        dialog (dialogid is None); existing dialogs are left untouched."""
        if dialogid is None:
            c2d.transfer_message(messageid, online_operator)
        else:
            pass
    #
    # before sending message handler
    #
    def before_sending_message_handler(self, input_data, c2d):
        """Webhook stub: invoked before a message is sent."""
        return '[before_sending_message] do logic here'
    #
    # after closing dialog handler
    #
    def after_closing_dialog_handler(self, input_data, c2d):
        """Webhook stub: invoked after a dialog is closed."""
        return '[after_closing_dialog] do logic here'
    #
    # before closing dialog handler
    #
    def before_closing_dialog_handler(self, input_data, c2d):
        """Webhook stub: invoked before a dialog is closed.

        NOTE(review): returns the '[after_closing_dialog]' tag -- looks
        like a copy-paste slip; confirm nothing parses this string before
        correcting it.
        """
        return '[after_closing_dialog] do logic here'
#
# auto checking handler
#
    def auto_checking_handler(self, input_data, c2d):
        """Periodic sweep: re-route dialogs parked on the bot to humans.

        Within working hours (08:04-22:00, compared lexicographically on
        zero-padded "%H:%M" strings, which is order-correct) it fetches
        every dialog currently assigned to the bot operator (id 38154),
        resolves each client's tags to an operator group, picks the
        least-loaded online operator and transfers the dialog.  Outside
        that window it does nothing.
        """
        # 777 is the sentinel message id that tells get_online_operators
        # to use the night-time fallback path.
        messageid = 777
        timeisnow = datetime.datetime.now().time().strftime("%H:%M")
        info = c2d.get_company_info()
        if (info['online'] == True) and (timeisnow > "08:04" and timeisnow < "22:00"):
            all_bot_chats = requests.get('https://api.chats.novait.com.ua/v1/dialogs?operator_id=38154&limit=100',
                headers={
                    'Authorization': api_token,
                    'Content-Type': 'application/json'},
                timeout=2
                )
            bot_data = json.loads(all_bot_chats.text)['data']
            all_bot_chats_ids = []
            all_bot_clients_ids = []
            all_bot_message_ids = []
            for g in bot_data:
                all_bot_chats_ids.append(g['last_message']['dialog_id'])
            for c in bot_data:
                all_bot_clients_ids.append(c['last_message']['client_id'])
            # Collected but unused below -- kept for parity with the
            # commented-out experiments that once lived here.
            for m in bot_data:
                all_bot_message_ids.append(m['last_message']['id'])
            # Map dialog id -> client id for the routing loop.
            zip_info = zip(all_bot_chats_ids, all_bot_clients_ids)
            dict_info = dict(zip_info)
            # iteritems() is Python-2 only (this module targets Python 2).
            for key, value in dict_info.iteritems():
                tags = self.get_tags(value)
                group = self.get_group(tags)
                operators = self.get_operators(group)
                online_operator = self.get_online_operators(log_id, operators, c2d, messageid, key)
                if key is not None:
                    try:
                        self.transfer_dialog(key, online_operator, c2d)
                    # NOTE(review): failures are swallowed silently; the
                    # dialog simply stays on the bot until the next sweep.
                    except Exception:
                        pass
                else:
                    pass
            # (A large block of commented-out experiments for bulk-closing
            # and re-routing admin-operator dialogs was removed here; see
            # version control history if it is ever needed again.)
        else:
            pass
    def transfer_dialog(self, dialogid, online_operator, c2d):
        """Thin wrapper: hand the whole dialog over to *online_operator*."""
        c2d.transfer_dialog(dialogid, online_operator)
    def get_online_operators_night(self, operators, c2d):
        """Least-loaded online operator lookup without the fallback logic.

        Same selection as get_online_operators, but with no re-routing
        when nobody is online (returns None implicitly in that case);
        used for the night/attendants fallback itself to avoid recursion.
        """
        operators_ids = requests.get('https://api.chat24.io/v1/operators/?limit=100',
            headers={
                'Authorization': api_token,
                'Content-Type': 'application/json'},
            timeout=2
            )
        operator_data_all = json.loads(operators_ids.text)['data']
        online_ops = []
        dialog_num = []
        # Keep only the requested operators that are currently online.
        for o in operators:
            for each_op in operator_data_all:
                if (each_op['id'] == o) and (each_op['online'] == 1) and (each_op['offline_type'] == None):
                    online_ops.append(o)
                    dialog_num.append(each_op['opened_dialogs'])
                    break
        zip_op = zip(online_ops, dialog_num)
        dict_op = dict(zip_op)
        if len(online_ops) > 0:
            # Minimum open-dialog count, mapped back to its operator id.
            free_operator = dialog_num[0]
            for n in dialog_num:
                if n < free_operator:
                    free_operator = n
            available_op = self.getKeysByValue(dict_op,free_operator)
            return available_op
    #
    # after scanning QR-code handler
    #
    def qr_code_result_handler(self, input_data, c2d):
        """Webhook stub: invoked after a QR code is scanned."""
        return '[qr_code_result] do logic here'
    #
    # after manually call
    #
    def manually_handler(self, input_data, c2d):
        """Webhook stub: invoked on a manual trigger."""
        return '[manually] do logic here'
    #
    # after chat bot don't triggered
    #
    def chat_bot_not_triggered_handler(self, input_data, c2d):
        """Webhook stub: invoked when the chat bot did not trigger.

        NOTE(review): returns the '[manually]' tag -- looks like a
        copy-paste slip; confirm before correcting the string.
        """
        return '[manually] do logic here'
    #
    # dialog transfer handler
    #
    def dialog_transfer_handler(self, input_data, c2d):
        """Webhook stub: invoked when a dialog is transferred."""
        return '[dialog_transfer] do logic here'
    #
    # new request handler
    #
    def new_request_handler(self, input_data, c2d):
        """Webhook stub: invoked on a new request."""
        return '[new_request] do logic here'
    #
    # client updated handler
    #
    def client_updated_handler(self, input_data, c2d):
        """Webhook stub: invoked when a client record changes."""
        return '[client_updated] do logic here'
api_token = 'token'
tag_paid = 333
tag_after = 444
tag_markng = 555
group_paid = 666
group_calls = 777
group_ad = 888
group_attendants = 999
log_id = 2432
bot_id = '0000'
# examples
# send message
#response = c2d.get_unanswered_dialogs()
# send question
#response = c2d.send_question(94212, 4321)
# get client info
#response = c2d.get_client_info(94212)
# get operators
#response = c2d.get_operators()
# get online operators
#response = c2d.get_online_operators()
# get list of question
#response = c2d.get_questions(5369, '10-10-2015', '10-10-2016')
# get last question
# response = c2d.get_last_question(5369)
# get unanswered dialogs
#response = c2d.get_unanswered_dialogs(18000)
# transfer dialog
#response = c2d.transfer_dialog(81984, 1899)
# get last message id in dialog
# dialog_id = 100
# type = 2 (1-client, 2-operator, 3-auto, 4-system)
# 2*24*60*60 time ago
#response = c2d.get_last_message_id(100, 2, 2*24*60*60)
# operator groups_ids
# operator_id = 81984
#response = c2d.get_operator_group_ids(81984)
# check if operator in group
# operator_id = 81984
# group_id = 81984
#response = c2d.operator_in_group(81984, 100)
# not send menu in new_message_handler add
# print 'not send menu'
| [
"noreply@github.com"
] | Abagena.noreply@github.com |
b89aa1714d2572de6e214b9b1642e5621db8fb30 | 159b561d7ce4ed467576458e0b308bb5cd861b00 | /linkedlist.py | 767c4589693d02edd21c88c87ec5b09a27e724d9 | [] | no_license | bjnish8/python_scripts | 4219a88af0620c56902f0099ddff529c65f56c89 | addb91c18deb402d2ec45d147f6b6a70c9dcc788 | refs/heads/master | 2021-07-13T12:18:15.035028 | 2020-09-24T21:13:01 | 2020-09-24T21:13:01 | 210,206,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | class Node:
    def __init__(self, val, nextnode = None):
        """Singly-linked-list node: payload *val* plus a next-node link."""
        self.val = val
        self.nextnode = nextnode
class LinkedList:
    """Singly linked list keeping head and tail pointers.

    Fixes over the original implementation:
    * append clears the incoming node's stale ``nextnode`` link;
    * insert stops after the first matching value (the original kept
      scanning and could re-link the same node) and updates ``tail``
      when the node lands after the last element;
    * delete keeps ``tail`` consistent and tolerates an empty list or a
      missing value instead of raising AttributeError.
    """

    def __init__(self, head=None, tail=None):
        self.head = head
        self.tail = tail

    def append(self, node):
        """Attach *node* at the end of the list."""
        node.nextnode = None  # defend against a stale link on the node
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.tail.nextnode = node
            self.tail = node

    def display(self):
        """Print the list as ``v1-->v2-->...-->None``."""
        curr = self.head
        while curr is not None:
            print(curr.val, end = "-->")
            curr = curr.nextnode
        print(None)

    def getfirst(self):
        """Return the head node (None when the list is empty)."""
        return self.head

    def getlast(self):
        """Return the tail node (None when the list is empty)."""
        return self.tail

    def search(self, val):
        """Return True when some node carries *val*."""
        curr = self.head
        while curr:
            if curr.val == val:
                return True
            curr = curr.nextnode
        return False

    def insert(self, node, value):
        """Insert *node* right after the first node whose val == *value*.

        Prints a diagnostic and returns None when *value* is absent,
        matching the original behaviour.
        """
        if not self.search(value):
            print ("Could not be added")
            return None
        curr = self.head
        while curr:
            if curr.val == value:
                node.nextnode = curr.nextnode
                curr.nextnode = node
                if curr is self.tail:  # inserted after the old tail
                    self.tail = node
                return
            curr = curr.nextnode

    def delete(self, value):
        """Unlink the first node whose val == *value*; no-op when absent."""
        if self.head is None:
            return None
        if self.head.val == value:
            self.head = self.head.nextnode
            if self.head is None:  # the list became empty
                self.tail = None
            return None
        curr = self.head
        while curr.nextnode:
            if curr.nextnode.val == value:
                curr.nextnode = curr.nextnode.nextnode
                if curr.nextnode is None:  # removed the old tail
                    self.tail = curr
                return None
            curr = curr.nextnode
# Demo driver exercising the LinkedList operations above.
linked = LinkedList()
a = Node(10)
b = Node(20)
c = Node(1)
linked.append(a)
linked.append(b)
linked.append(c)
linked.display()
print(linked.search(2))
d = Node(31)
e = Node(2)
f = Node(91)
# Insert 31 after the node holding 20, then remove it again.
linked.insert(d, 20)
linked.display()
linked.delete(31)
linked.display()
# Re-insert the same node and add two more at other positions.
linked.insert(d, 20)
linked.display()
linked.insert(e, 1)
linked.display()
linked.insert(f, 10)
linked.display()
print(linked.search(2))
| [
"binishk@bgsu.edu"
] | binishk@bgsu.edu |
478b775070fc031d634ff2c05787e5d2a3d74da5 | 1b7e65a2b8ff8350db0541c20c29ffe10d510fc6 | /custom_components/tautulli/sensor.py | 82fd9bdd86bf0307edfc28dfaf737c6eb4e434d0 | [] | no_license | torn8o/Home-AssistantConfig | 3084181d64c8c62ce1c48d35102ba252f5b3de9d | 7cd42c49d5b05f1c0008d284b63a6d4f2cce4d39 | refs/heads/master | 2023-04-27T19:24:42.319264 | 2023-04-14T21:29:58 | 2023-04-14T21:29:58 | 86,635,902 | 28 | 3 | null | 2017-07-14T03:34:19 | 2017-03-29T22:42:34 | HTML | UTF-8 | Python | false | false | 6,195 | py | """
Support for getting statistical data from a Tautulli system.
"""
import logging
import json
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_SSL, CONF_VERIFY_SSL, CONF_TOKEN, CONF_MONITORED_CONDITIONS)
_LOGGER = logging.getLogger(__name__)
# Tautulli API v2 endpoint; the API key is appended as a query parameter.
_ENDPOINT = '/api/v2'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Tautulli'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
SCAN_INTERVAL = timedelta(minutes=1)
# sensor key -> [friendly name suffix, unit of measurement, icon]
MONITORED_CONDITIONS = {
    'stream_count': ['Total',
                     'streams', 'mdi:basket-unfill'],
    'stream_count_transcode': ['Transcode',
                               'streams', 'mdi:basket-unfill'],
    'stream_count_direct_play': ['Direct Play',
                                 'streams', 'mdi:basket-unfill'],
    'stream_count_direct_stream': ['Direct Stream',
                                   'streams', 'mdi:basket-unfill'],
    'total_bandwidth': ['Total Bandwidth',
                        'Mbps', 'mdi:basket-unfill'],
}
# YAML configuration schema; by default every condition above is monitored.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
    vol.Optional(CONF_TOKEN): cv.string,
    vol.Optional(CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS):
        vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Tautulli sensor platform from the YAML configuration."""
    # One shared API object polls the server for all sensors.
    api = TautulliAPI(
        '{}'.format(config.get(CONF_HOST)),
        config.get(CONF_SSL),
        config.get(CONF_VERIFY_SSL),
        config.get(CONF_TOKEN),
    )
    base_name = config.get(CONF_NAME)
    entities = []
    for condition in config[CONF_MONITORED_CONDITIONS]:
        entities.append(TautulliSensor(hass, api, base_name, condition))
    add_devices(entities, True)
class TautulliSensor(Entity):
    """Representation of a single Tautulli statistic as a sensor.

    ``variable`` selects one entry of MONITORED_CONDITIONS, which supplies
    the display-name suffix, the unit and the icon.
    """

    def __init__(self, hass, api, name, variable):
        """Initialize a Tautulli sensor.

        api: shared TautulliAPI instance that polls the server.
        name: platform name prefix; variable: key into MONITORED_CONDITIONS.
        """
        self._hass = hass
        self._api = api
        self._name = name
        self._var_id = variable
        variable_info = MONITORED_CONDITIONS[variable]
        self._var_name = variable_info[0]
        self._var_units = variable_info[1]
        self._var_icon = variable_info[2]

    @property
    def name(self):
        """Return the name of the sensor."""
        return "{} {}".format(self._name, self._var_name)

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._var_icon

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._var_units

    # pylint: disable=no-member
    @property
    def state(self):
        """Return the state of the device.

        Bandwidth values come from Tautulli in kbps and are converted to
        Mbps.  Bug fix: the old ``except TypeError`` branch re-evaluated the
        exact same subscript expression, so a missing/None payload raised
        anyway; return None (unknown state) instead.
        """
        try:
            return_value = self._api.data['response']['data'][self._var_id]
        except TypeError:
            return None
        if self._var_id == 'total_bandwidth':
            return_value = round((return_value / 1000), 2)
        return return_value

    # pylint: disable=no-member
    @property
    def device_state_attributes(self):
        """Return per-sensor attributes: bandwidth split or active sessions."""
        attributes = {}
        if self._var_id == 'total_bandwidth':
            # Break the total down into WAN and LAN shares (kbps -> Mbps).
            attributes['wan_bandwidth'] = round(
                (self._api.data['response']['data']['wan_bandwidth'] / 1000), 2)
            attributes['lan_bandwidth'] = round(
                (self._api.data['response']['data']['lan_bandwidth'] / 1000), 2)
        else:
            # List the sessions matching this sensor's stream category,
            # keyed by the user's friendly name.
            for session in self._api.data['response']['data']['sessions']:
                if self._var_id == 'stream_count':
                    attributes[session['friendly_name']] = session['full_title']
                elif self._var_id == 'stream_count_transcode' and session['transcode_decision'] == "transcode":
                    attributes[session['friendly_name']] = session['full_title']
                elif self._var_id == 'stream_count_direct_stream' and session['transcode_decision'] == "copy":
                    attributes[session['friendly_name']] = session['full_title']
                elif self._var_id == 'stream_count_direct_play' and session['transcode_decision'] == "direct play":
                    attributes[session['friendly_name']] = session['full_title']
        return attributes

    @property
    def available(self):
        """Could the device be accessed during the last update call."""
        return self._api.available

    def update(self):
        """Get the latest data from the Tautulli API."""
        self._api.update()
class TautulliAPI(object):
    """Fetch activity data from the Tautulli HTTP API and cache it."""

    def __init__(self, host, use_ssl, verify_ssl, token):
        """Build the REST resource URL and perform an initial fetch."""
        from homeassistant.components.sensor.rest import RestData
        scheme = 'https://' if use_ssl else 'http://'
        url = "{}{}{}?cmd=get_activity&apikey={}".format(
            scheme, host, _ENDPOINT, token)
        self._rest = RestData('GET', url, None, None, None, verify_ssl)
        self.data = None
        self.available = True
        self.update()

    def update(self):
        """Refresh ``self.data`` from Tautulli; flag availability on failure."""
        try:
            self._rest.update()
            self.data = json.loads(self._rest.data)
            self.available = True
        except TypeError:
            # RestData.data is None when the request failed.
            _LOGGER.error("Unable to fetch data from Tautulli")
            self.available = False
| [
"icharliebrown@gmail.com"
] | icharliebrown@gmail.com |
f10f5f5e2fae16dfd47bfe080d7e78398a19a6a6 | dddaa82f5f8b9d96c2177009c987e6d3bfe9ed72 | /coupon.py | 9484008936e984bfc127d1aab846204edf389149 | [] | no_license | matthewparkbusiness/Coupon-Generator | 2320d9f44b495a44a901b211ffe311ca314b85e0 | ad538c9f2ffefdf0f25462c03336f22b39526565 | refs/heads/master | 2020-08-17T12:54:10.666323 | 2019-10-17T00:39:39 | 2019-10-17T00:39:39 | 215,669,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from coupon_rule import CouponRule
from typing import Type
class Coupon:
    """A single-use coupon identified by ``code`` and governed by ``rule``."""

    def __init__(self, rule: Type[CouponRule], code: str) -> None:
        self.rule = rule
        self.code = code
        self.used = False  # flips to True on the first use()

    def use(self) -> bool:
        """Mark the coupon as used; return True only on the first call."""
        result = not self.used
        self.used = True
        return result

    def __repr__(self) -> str:
        return f"Coupon {self.code}{' [USED]' if self.used else ''}: {self.rule.description()}"

    def __eq__(self, other: object) -> bool:
        # Bug fix: __hash__ was defined on the code alone while equality was
        # left as identity, so equal-hash objects never compared equal.  The
        # data model requires __eq__ and __hash__ to be consistent.
        if not isinstance(other, Coupon):
            return NotImplemented
        return self.code == other.code

    def __hash__(self) -> int:
        return hash(self.code)
"42101816+matthewpark@users.noreply.github.com"
] | 42101816+matthewpark@users.noreply.github.com |
0d5a7edd04f421f3cceee2e108c13b1740c89b0a | c3a24f00c916a8747cf3875a5f3377e14caae786 | /location_coord_checker.py | cb474f63984a735d5e48a2cda619d79c6b51d9e0 | [] | no_license | friendslab20/lab_ensino_20 | 1e55dcd175a7e88f31062872b60258964b775082 | 5991d773a853b0944688b1ef70363d7ac9d453b8 | refs/heads/main | 2023-02-02T07:06:47.838448 | 2020-12-21T19:21:08 | 2020-12-21T19:21:08 | 323,419,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,766 | py | # !/usr/bin/env python
# coding: utf-8
# Sistema Geodรฉsico โ SIRGAS2000
# pip install --upgrade fiona
# conda install -c intel fiona geopandas
import fiona
# Even-odd (ray casting) point-in-polygon test.
# ``poly`` is a list of (x, y) pairs; works for convex and concave polygons
# (classic geometric-modelling routine, cf. Rogers).
def Point_Inside_Polygon(x, y, poly):
    """Return True when the point (x, y) lies inside ``poly``."""
    inside = False
    count = len(poly)
    ax, ay = poly[0]
    for idx in range(1, count + 1):
        bx, by = poly[idx % count]
        # Count crossings of a horizontal ray from (x, y) towards +infinity.
        if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
            if ay != by:
                crossing_x = (y - ay) * (bx - ax) / (by - ay) + ax
                if ax == bx or x <= crossing_x:
                    inside = not inside
        ax, ay = bx, by
    return inside
# Apenas variรกveis globais
lstStates = ['ac', 'al', 'am', 'ap', 'ba', 'ce', 'df', 'es', 'go', 'ma', 'mg', 'ms', 'mt', 'pa',
'pb', 'pe', 'pi', 'pr', 'rj', 'rn', 'ro', 'rr', 'rs', 'sc', 'se', 'sp', 'to']
lstGeodeticSystem = ['sirgas']
# Interface para a biblioteca de verificação
def IsInside(path, latitude, longitude, geodetic_system, state):
    """Check whether (latitude, longitude) falls inside a municipality of ``state``.

    Returns (inside, where): ``inside`` is True when a containing polygon was
    found and ``where`` is then the municipality name; otherwise ``where`` is
    'Fora do Estado <uf>' ("outside the state").

    NOTE(review): the ``path`` parameter is never used -- the shapefile
    location is hard-coded below; confirm whether it should prefix SHP_file.
    """
    inside = False
    where = 'Fora do Estado '
    # Validate the state abbreviation.
    if not (state in lstStates):
        print('state: escolha a correta designação de estado.')
        print(lstStates)
        return inside, where
    SHP_file = 'MalhaMunicipios/' + state + '/municipios.shp'
    where = where + state
    # Validate the geodetic-system designation.
    if not (geodetic_system in lstGeodeticSystem):
        print('geodetic_system: escolha a correta designação de sistema geodésico.')
        print(lstGeodeticSystem)
        return inside, where
    # Coordinates must be convertible to float.
    # NOTE(review): on ValueError this only prints and falls through with the
    # original values -- confirm whether it should return instead.
    try:
        latitude = float(latitude)
        longitude = float(longitude)
    except ValueError:
        print('latitude,longitude: valores devem ser float.')
    # All checks done; scan every municipality polygon in the shapefile.
    # The loop keeps scanning after a hit, so the last matching polygon wins.
    shapes = fiona.open(SHP_file)
    for s in shapes:
        if s['type'] == 'Feature':
            if s['geometry']['type'] == 'Polygon':
                # A Polygon is a list of rings, each ring a list of
                # (x, y) = (Long, Lat) tuples.
                for ring in s['geometry']['coordinates']:
                    if Point_Inside_Polygon(longitude, latitude, ring):
                        if state == 'mt':
                            # The MT shapefile stores names in ISO-8859-1.
                            where = s['properties']['NM_MUNICIP'].encode('iso-8859-1').decode('utf-8')
                        else:
                            where = s['properties']['NM_MUNICIP']
                        inside = True
    shapes.close()
    return inside, where
# Exemplo de chamada
# dentro,municipio = IsInside(-22.9518018,-43.1844011,'sirgas','rj')
# dentro,municipio = IsInside(-22.9132525,-43.7261797,'sirgas','rj')
# dentro, municipio = IsInside(-10.781331, -36.993735, 'sirgas', 'se')
# dentro,municipio = IsInside(-7.1464332,-34.9516385,'sirgas','pb')
# print(dentro)
# print(municipio)
# Interface para a biblioteca de verificação usando List de Estados
def IsInsideByList(path, latitude, longitude, geodetic_system, state_list):
    """Run IsInside() for each state in ``state_list``.

    Returns two parallel lists: the boolean results and the ``where`` names.

    Bug fix: the previous version passed the hard-coded state 'rj' to
    IsInside instead of the loop variable, so every entry of ``state_list``
    was actually checked against Rio de Janeiro.
    """
    lstIsInside = []
    lstWhere = []
    for state in state_list:
        bInside, strWhere = IsInside(path, latitude, longitude, geodetic_system, state)
        lstIsInside.append(bInside)
        lstWhere.append(strWhere)
    return lstIsInside, lstWhere
# Exemplo de chamada
#estados = ['pb','se','rj']
#dentro,municipio = IsInsideByList('input',-7.1464332,-34.9516385,'sirgas',estados)
#print(dentro)
#print(municipio) | [
"friendslab20@gmail.com"
] | friendslab20@gmail.com |
a2bfd5334ad964b8fe2f5c3addb9d88084f31e5b | 0d9b75fee49b37038a10e467c39cf75c9cf4d5ae | /OpenCV_learn/code_030/opencv_030.py | d4195aea4d2a004c17e47d6166072a6ffa2427bc | [] | no_license | MachineLP/OpenCV- | 743a5fcfc3f300ccb135f869e2f048cb5fdcd02a | f3da4feb71c20d2e8bc426eb5a4e2e61a2fd4a75 | refs/heads/master | 2023-03-23T15:31:22.985413 | 2023-03-08T09:33:28 | 2023-03-08T09:33:28 | 178,887,816 | 104 | 51 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import cv2 as cv
import numpy as np
# Demonstrate cv.filter2D with three hand-built kernels:
# a 5x5 box blur, a 3x3 sharpen, and a 2x2 diagonal gradient.
src = cv.imread("./test.png")
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)
# Normalized 5x5 averaging (box-blur) kernel.
mean_kernel = np.ones([5, 5], dtype=np.float32) / 25.
# Classic sharpening kernel: centre-weighted Laplacian.
sharpen_kernel = np.array([[0, -1, 0],
                           [-1, 5, -1],
                           [0, -1, 0]], np.float32)
# 2x2 diagonal-difference gradient kernel.
gradient_kernel = np.array([[1, 0], [0, -1]], dtype=np.float32)
blurred = cv.filter2D(src, -1, mean_kernel)
sharpened = cv.filter2D(src, -1, sharpen_kernel)
# Gradients can be negative, so filter in float and take the absolute value.
gradient = cv.convertScaleAbs(cv.filter2D(src, cv.CV_32F, gradient_kernel))
cv.imshow("blur=5x5", blurred)
cv.imshow("shape=3x3", sharpened)
cv.imshow("gradient=2x2", gradient)
cv.waitKey(0)
cv.destroyAllWindows()
| [
"noreply@github.com"
] | MachineLP.noreply@github.com |
9f2fb10d39746c5c6f7122d0b4aa30e0eca352c9 | ae4c386efb1a0ed55f2c88d7194edcecf1b35ed4 | /nixui/graphics/diff_widget.py | 170a6f5dc4aa925ee4e83f84691fb3622ffaf404 | [
"MIT"
] | permissive | tusharsadhwani/nix-gui | 71738951965865fae88c3896e387c615711190e1 | e7d1137ff85ef6bfe33ccc7cd8eee944475d4217 | refs/heads/master | 2023-08-02T10:51:02.707574 | 2021-10-09T03:38:22 | 2021-10-09T03:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | from PyQt5 import QtWidgets
import difflib
from nixui.graphics import generic_widgets
class DiffedOptionListSelector(generic_widgets.ScrollListStackSelector):
    """List of changed options; selecting one shows its unified diff.

    ``updates`` items carry an option plus old/new definitions; they are
    indexed by option so the diff text can be rebuilt on each selection.
    """
    ItemCls = generic_widgets.OptionListItem # TODO: remove break dependency with generic_widgets.py
    def __init__(self, updates, *args, **kwargs):
        # option -> (old expression string, new expression string)
        self.updates_map = {
            u.option: (
                u.old_definition.expression_string,
                u.new_definition.expression_string
            )
            for u in updates
        }
        super().__init__(*args, **kwargs)
        # hack: make text box 3x the width of the list view
        self.stack.setMinimumWidth(self.item_list.width() * 3)
    def insert_items(self):
        # One selectable row per changed option.
        for option in self.updates_map:
            it = self.ItemCls(option)
            self.item_list.addItem(it)
    def change_selected_item(self):
        """Rebuild the read-only diff view for the newly selected option."""
        option = self.item_list.currentItem().option
        old_value, new_value = self.updates_map[option]
        diff = difflib.unified_diff(
            old_value.splitlines(1),
            new_value.splitlines(1),
            lineterm=''
        )
        # blank lines and control lines
        # ([3:] drops the '---'/'+++' file headers and the first '@@' hunk
        # marker emitted by unified_diff)
        diff = [line.strip() for line in diff][3:]
        diff_str = '\n'.join(diff)
        view = QtWidgets.QPlainTextEdit(diff_str)
        view.setReadOnly(True)
        # monospace
        font = view.document().defaultFont()
        font.setFamily("Courier New")
        view.document().setDefaultFont(font)
        # Swap the new view into the stack before removing the old one
        # (order matters: keeps the stack populated and the view current).
        old_widget = self.current_widget
        self.stack.addWidget(view)
        self.stack.setCurrentWidget(view)
        self.stack.removeWidget(old_widget)
        self.current_widget = view
class DiffDialogBase(QtWidgets.QDialog):
    """Base dialog showing the pending option updates as a diff list.

    Subclasses supply the button box via ``init_btn_box()``.
    """
    def __init__(self, statemodel, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.statemodel = statemodel
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.addWidget(DiffedOptionListSelector(statemodel.get_update_set()))
        main_layout.addWidget(self.init_btn_box())
        self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        self.setLayout(main_layout)
class DiffDialog(DiffDialogBase):
    """Read-only diff viewer: a single OK button that closes the dialog."""
    def init_btn_box(self):
        buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok)
        buttons.accepted.connect(self.accept)
        return buttons
class SaveDialog(DiffDialogBase):
    """Diff viewer with Save/Cancel; saving persists the pending updates."""
    def init_btn_box(self):
        buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Save
        )
        buttons.accepted.connect(self.save)
        buttons.rejected.connect(self.reject)
        return buttons
    def save(self):
        """Write the queued updates to disk, then close with Accepted."""
        self.statemodel.persist_updates()
        self.accept()
| [
"andrew"
] | andrew |
f24fdaada3d2c828fe26947ec27984466814cd1e | b70ec5f64ba9ee1317da1cab0dfc5955910c5a4f | /manage.py | d403d35b999e174c6a554342fc7ffac9bc35a89c | [] | no_license | wittymindstech/iot-dashboard-djangoweb | f2f862d910d687a16a64801b01a869e2ef65231b | 75fc0bc831b3b2a83ad36fe9043d6c61e487c2a2 | refs/heads/main | 2023-06-27T04:44:14.166102 | 2021-07-13T11:33:47 | 2021-07-13T11:33:47 | 380,271,956 | 1 | 2 | null | 2021-07-13T11:33:47 | 2021-06-25T14:59:03 | JavaScript | UTF-8 | Python | false | false | 683 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WTIOT.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a hint about the most common causes.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
if __name__ == '__main__':
    main()
| [
"gaurav.2897@gmail.com"
] | gaurav.2897@gmail.com |
9803cad3f1b6b91ba7c59d74fbcc65e29ef64f80 | 4094cdd4f6034dd082c6f97187819c5d88c3199f | /public/common/my_unit.py | 8b22abcad8d36f86afd5ba605d5d61524797a353 | [] | no_license | destinationnn/auto_ui_test | 691a6b85ba8242727d5a7c1b0a6efa6fd8282a11 | 59882bdac80ff5fe04228588e763ca819982cf1b | refs/heads/master | 2021-07-01T23:45:35.830969 | 2017-09-22T02:59:10 | 2017-09-22T02:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | import os
import unittest
# Project root: three levels up from this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
conf_path = BASE_DIR + '/config/config.ini'
# Force Windows-style path separators.
# NOTE(review): this makes the module Windows-only; confirm intent.
conf_path = conf_path.replace('/', '\\')
from public.common.get_config import r_config
from public.common.get_log import Log
from public.common.get_images import browser, insert_img
# Directory where per-test screenshots are written (from config.ini).
img_path = r_config(conf_path, 'image', 'img_path')
class MyTest(unittest.TestCase):
    """Base TestCase that opens a browser per test and saves a screenshot.

    Two module-level counters are kept so the generated report can show a
    sequential case id and a matching screenshot file name.
    """
    global case_count
    case_count = 0
    global image_count
    image_count = 0

    @staticmethod
    def _format_count(count):
        """Zero-pad ``count`` to three characters ('001', '012', '123').

        Extracted to remove the duplicated padding logic that case_id() and
        image_id() previously carried.
        """
        return str(count).zfill(3)

    def case_id(self):
        """Advance and return the test-case counter (shown in the report)."""
        global case_count
        case_count += 1
        return self._format_count(case_count)

    def image_id(self):
        """Advance and return the screenshot counter (used as a file name)."""
        global image_count
        image_count += 1
        return self._format_count(image_count)

    def setUp(self):
        """Open the browser, start logging, and announce the case id."""
        self.logger = Log(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), 'conf/conf.ini'))
        self.logger.info('############################### START ###############################')
        self.driver = browser()
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
        print("case " + str(self.case_id()))

    def tearDown(self):
        """Capture a screenshot, close the browser, and end the log section."""
        img_id = self.image_id()
        file_name = img_path + img_id + ".jpg"
        print(file_name)
        insert_img(self.driver, file_name)
        self.driver.quit()
        self.logger.info('############################### End ###############################')
| [
"onlyccie@live.cn"
] | onlyccie@live.cn |
02bf8fdde14c3aac9ce51f353f4387eeb79001e6 | e627d47d5102bd68c2012501aa120833b9271da7 | /aws_api/core/models.py | 89508d58bf7297df0d5b42ee5c50cbd82b4d8508 | [] | no_license | aayushgupta97/django-km | 5ba275d1f85eaaf8bc052e47d2b6b6f1a5e4cf90 | d34cd4f8637718044832d9baeecee86df5e821a5 | refs/heads/master | 2023-01-02T18:12:31.384634 | 2020-10-24T09:21:50 | 2020-10-24T09:21:50 | 298,391,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | from django.db import models
from django.contrib.auth.models import User
from jsonfield import JSONField
# Create your models here.
class AWSCredentials(models.Model):
    """AWS API credentials stored per Django user."""
    # AWS access/secret key pair used to authenticate API calls.
    access_key = models.CharField(max_length=128)
    secret_key = models.CharField(max_length=512)
    # AWS account identifier -- NOTE(review): presumably the 12-digit id;
    # confirm whether it should be unique per user.
    account_id = models.CharField(max_length=40)
    default_region = models.CharField(max_length=32, default='us-east-1')
    # Owning user; credentials are removed together with the user.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # Identify the record by its AWS account id (e.g. in the admin).
        return self.account_id
class EC2(models.Model):
    """A discovered EC2 instance tied to a set of AWS credentials."""
    # Instance id and type as reported by AWS.
    instance_id = models.CharField(max_length=255, blank=False)
    instance_type = models.CharField(max_length=255, blank=False)
    # Boolean run state -- NOTE(review): presumably True == running; confirm
    # which AWS states map to True.
    state = models.BooleanField()
    # Raw instance description payload from the AWS API.
    instance_data = JSONField(null=True)
    # Credentials used to discover this instance; rows cascade with them.
    credentials = models.ForeignKey(AWSCredentials, null=False, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.instance_id} {self.credentials}"
| [
"aayushgupta2097@gmail.com"
] | aayushgupta2097@gmail.com |
81ea85c94df6be769b91ca66567cb99c500486e8 | ae231d793f80966513e225dafd127c25772c2e6b | /nuitka/tree/ReformulationClasses.py | 95e73f0da69e65ac532dc10c0ba890fe779bd1d6 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | awesome-python/Nuitka | 41bbc3cbd8a0e025ac5f59e6d3f7bcca3efe1522 | ad0ed473eb4a919d758c72cfc6cfbd5977998c5b | refs/heads/master | 2021-01-18T06:23:06.493913 | 2016-05-26T12:58:27 | 2016-05-26T12:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,489 | py | # Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of class statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
ExpressionTargetTempVariableRef,
ExpressionTargetVariableRef,
StatementAssignmentVariable,
StatementReleaseVariable
)
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeLookup,
ExpressionBuiltinHasattr
)
from nuitka.nodes.BuiltinRefNodes import ExpressionBuiltinRef
from nuitka.nodes.CallNodes import ExpressionCall, ExpressionCallNoKeywords
from nuitka.nodes.ClassNodes import (
ExpressionClassBody,
ExpressionSelectMetaclass
)
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ComparisonNodes import ExpressionComparisonIn
from nuitka.nodes.ConditionalNodes import (
ExpressionConditional,
StatementConditional
)
from nuitka.nodes.ConstantRefNodes import ExpressionConstantRef
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionDictOperationGet,
StatementDictOperationRemove
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionQualnameRef,
ExpressionFunctionRef
)
from nuitka.nodes.GlobalsLocalsNodes import (
ExpressionBuiltinLocals,
StatementSetLocals
)
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.SubscriptNodes import ExpressionSubscriptLookup
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from .Helpers import (
buildNode,
buildNodeList,
buildStatementsNode,
extractDocFromBody,
getKind,
makeDictCreationOrConstant,
makeSequenceCreationOrConstant,
makeStatementsSequence,
makeStatementsSequenceFromStatement
)
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
def _buildClassNode3(provider, node, source_ref):
    """Re-formulate a Python3 "class" statement into explicit Nuitka nodes.

    Builds the class body as a separate function, selects the metaclass,
    calls its "__prepare__", invokes the metaclass with (name, bases,
    locals) and finally binds the (possibly decorated) result to the class
    name, releasing all temporaries in a try/finally.
    """
    # Many variables, due to the huge re-formulation that is going on here,
    # which just has the complexity, pylint: disable=R0914
    # This function is the Python3 special case with special re-formulation as
    # according to developer manual.
    class_statement_nodes, class_doc = extractDocFromBody(node)
    # We need a scope for the temporary variables, and they might be closured.
    temp_scope = provider.allocateTempScope(
        name          = "class_creation",
        allow_closure = True
    )
    # Temporaries carrying the evaluated bases, the class keyword arguments,
    # the chosen metaclass and the "__prepare__" result across statements.
    tmp_bases = provider.allocateTempVariable(
        temp_scope = temp_scope,
        name       = "bases"
    )
    tmp_class_decl_dict = provider.allocateTempVariable(
        temp_scope = temp_scope,
        name       = "class_decl_dict"
    )
    tmp_metaclass = provider.allocateTempVariable(
        temp_scope = temp_scope,
        name       = "metaclass"
    )
    tmp_prepared = provider.allocateTempVariable(
        temp_scope = temp_scope,
        name       = "prepared"
    )
    # The class body is compiled as a function of its own; calling it later
    # creates the class object.
    class_creation_function = ExpressionClassBody(
        provider   = provider,
        name       = node.name,
        doc        = class_doc,
        flags      = set(),
        source_ref = source_ref
    )
    # NOTE: this branch is intentionally disabled ("and False"); the 3.4
    # "__class__" cell handling was temporarily reverted, see the TODO.
    if python_version >= 340 and False: # TODO: Temporarily reverted:
        tmp_class = class_creation_function.allocateTempVariable(
            temp_scope = None,
            name       = "__class__"
        )
        class_target_variable_ref = ExpressionTargetTempVariableRef(
            variable   = tmp_class,
            source_ref = source_ref
        )
        class_variable_ref = ExpressionTempVariableRef(
            variable   = tmp_class,
            source_ref = source_ref
        )
    else:
        class_variable = class_creation_function.getVariableForAssignment(
            "__class__"
        )
        class_target_variable_ref = ExpressionTargetVariableRef(
            variable_name = "__class__",
            variable      = class_variable,
            source_ref    = source_ref
        )
        class_variable_ref = ExpressionVariableRef(
            variable_name = "__class__",
            variable      = class_variable,
            source_ref    = source_ref
        )
    code_object = CodeObjectSpec(
        code_name     = node.name,
        code_kind     = "Class",
        arg_names     = (),
        kw_only_count = 0,
        has_starlist  = False,
        has_stardict  = False
    )
    body = buildStatementsNode(
        provider    = class_creation_function,
        nodes       = class_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    source_ref_orig = source_ref
    if body is not None:
        # The frame guard has nothing to tell its line number to.
        body.source_ref = source_ref
    module_variable = class_creation_function.getVariableForAssignment(
        "__module__"
    )
    # Statements inside the class body function: install the prepared
    # mapping as locals, then set __module__ (and below __doc__/__qualname__).
    statements = [
        StatementSetLocals(
            new_locals = ExpressionTempVariableRef(
                variable   = tmp_prepared,
                source_ref = source_ref
            ),
            source_ref = source_ref
        ),
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetVariableRef(
                variable_name = "__module__",
                variable      = module_variable,
                source_ref    = source_ref
            ),
            source       = ExpressionConstantRef(
                constant      = provider.getParentModule().getFullName(),
                source_ref    = source_ref,
                user_provided = True
            ),
            source_ref   = source_ref
        )
    ]
    if class_doc is not None:
        doc_variable = class_creation_function.getVariableForAssignment(
            "__doc__"
        )
        statements.append(
            StatementAssignmentVariable(
                variable_ref = ExpressionTargetVariableRef(
                    variable_name = "__doc__",
                    variable      = doc_variable,
                    source_ref    = source_ref
                ),
                source       = ExpressionConstantRef(
                    constant      = class_doc,
                    source_ref    = source_ref,
                    user_provided = True
                ),
                source_ref   = source_ref
            )
        )
    # The "__qualname__" attribute is new in Python 3.3.
    if python_version >= 330:
        qualname = class_creation_function.getFunctionQualname()
        qualname_variable = class_creation_function.getVariableForAssignment(
            "__qualname__"
        )
        if python_version < 340:
            qualname_ref = ExpressionConstantRef(
                constant      = qualname,
                source_ref    = source_ref,
                user_provided = True
            )
        else:
            qualname_ref = ExpressionFunctionQualnameRef(
                function_body = class_creation_function,
                source_ref    = source_ref,
            )
        statements.append(
            StatementAssignmentVariable(
                variable_ref = ExpressionTargetVariableRef(
                    variable_name = "__qualname__",
                    variable      = qualname_variable,
                    source_ref    = source_ref
                ),
                source       = qualname_ref,
                source_ref   = source_ref
            )
        )
        if python_version >= 340:
            qualname_assign = statements[-1]
    # Append the user statements, the metaclass call that actually builds
    # the class, and the return of the resulting class object.
    statements += [
        body,
        StatementAssignmentVariable(
            variable_ref = class_target_variable_ref,
            source       = ExpressionCall(
                called     = ExpressionTempVariableRef(
                    variable   = tmp_metaclass,
                    source_ref = source_ref
                ),
                args       = makeSequenceCreationOrConstant(
                    sequence_kind = "tuple",
                    elements      = (
                        ExpressionConstantRef(
                            constant      = node.name,
                            source_ref    = source_ref,
                            user_provided = True
                        ),
                        ExpressionTempVariableRef(
                            variable   = tmp_bases,
                            source_ref = source_ref
                        ),
                        ExpressionBuiltinLocals(
                            source_ref = source_ref
                        )
                    ),
                    source_ref    = source_ref
                ),
                kw         = ExpressionTempVariableRef(
                    variable   = tmp_class_decl_dict,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            ),
            source_ref   = source_ref
        ),
        StatementReturn(
            expression = class_variable_ref,
            source_ref = source_ref
        )
    ]
    body = makeStatementsSequence(
        statements = statements,
        allow_none = True,
        source_ref = source_ref
    )
    # The class body is basically a function that implicitly, at the end
    # returns its locals and cannot have other return statements contained.
    class_creation_function.setBody(body)
    class_creation_function.registerProvidedVariable(tmp_bases)
    class_creation_function.registerProvidedVariable(tmp_class_decl_dict)
    class_creation_function.registerProvidedVariable(tmp_metaclass)
    class_creation_function.registerProvidedVariable(tmp_prepared)
    # The class body is basically a function that implicitly, at the end
    # returns its created class and cannot have other return statements
    # contained.
    decorated_body = ExpressionFunctionCall(
        function = ExpressionFunctionCreation(
            function_ref = ExpressionFunctionRef(
                function_body = class_creation_function,
                source_ref    = source_ref
            ),
            code_object  = code_object,
            defaults     = (),
            kw_defaults  = None,
            annotations  = None,
            source_ref   = source_ref
        ),
        values   = (),
        source_ref = source_ref
    )
    # Wrap the class creation in the decorator calls, innermost first.
    for decorator in buildNodeList(
        provider,
        reversed(node.decorator_list),
        source_ref
    ):
        decorated_body = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (
                    decorated_body,
                ),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    # Statements in the providing scope: evaluate bases and the keyword
    # dict, select the metaclass, call its __prepare__, then assign the
    # class name.
    statements = (
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_bases,
                source_ref = source_ref
            ),
            source       = makeSequenceCreationOrConstant(
                sequence_kind = "tuple",
                elements      = buildNodeList(
                    provider, node.bases, source_ref
                ),
                source_ref    = source_ref
            ),
            source_ref   = source_ref
        ),
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_class_decl_dict,
                source_ref = source_ref
            ),
            source       = makeDictCreationOrConstant(
                keys       = [
                    ExpressionConstantRef(
                        constant      = keyword.arg,
                        source_ref    = source_ref,
                        user_provided = True
                    )
                    for keyword in
                    node.keywords
                ],
                values     = [
                    buildNode(provider, keyword.value, source_ref)
                    for keyword in
                    node.keywords
                ],
                source_ref = source_ref
            ),
            source_ref   = source_ref
        ),
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_metaclass,
                source_ref = source_ref
            ),
            source       = ExpressionSelectMetaclass(
                metaclass  = ExpressionConditional(
                    condition      = ExpressionComparisonIn(
                        left       = ExpressionConstantRef(
                            constant      = "metaclass",
                            source_ref    = source_ref,
                            user_provided = True
                        ),
                        right      = ExpressionTempVariableRef(
                            variable   = tmp_class_decl_dict,
                            source_ref = source_ref
                        ),
                        source_ref = source_ref
                    ),
                    expression_yes = ExpressionDictOperationGet(
                        dict_arg   = ExpressionTempVariableRef(
                            variable   = tmp_class_decl_dict,
                            source_ref = source_ref
                        ),
                        key        = ExpressionConstantRef(
                            constant      = "metaclass",
                            source_ref    = source_ref,
                            user_provided = True
                        ),
                        source_ref = source_ref
                    ),
                    expression_no  = ExpressionConditional(
                        condition      = ExpressionTempVariableRef(
                            variable   = tmp_bases,
                            source_ref = source_ref
                        ),
                        expression_no  = ExpressionBuiltinRef(
                            builtin_name = "type",
                            source_ref   = source_ref
                        ),
                        expression_yes = ExpressionBuiltinType1(
                            value      = ExpressionSubscriptLookup(
                                subscribed = ExpressionTempVariableRef(
                                    variable   = tmp_bases,
                                    source_ref = source_ref
                                ),
                                subscript  = ExpressionConstantRef(
                                    constant      = 0,
                                    source_ref    = source_ref,
                                    user_provided = True
                                ),
                                source_ref = source_ref
                            ),
                            source_ref = source_ref
                        ),
                        source_ref     = source_ref
                    ),
                    source_ref     = source_ref
                ),
                bases      = ExpressionTempVariableRef(
                    variable   = tmp_bases,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            ),
            source_ref   = source_ref_orig
        ),
        StatementConditional(
            condition  = ExpressionComparisonIn(
                left       = ExpressionConstantRef(
                    constant      = "metaclass",
                    source_ref    = source_ref,
                    user_provided = True
                ),
                right      = ExpressionTempVariableRef(
                    variable   = tmp_class_decl_dict,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            ),
            no_branch  = None,
            yes_branch = makeStatementsSequenceFromStatement(
                statement = StatementDictOperationRemove(
                    dict_arg   = ExpressionTempVariableRef(
                        variable   = tmp_class_decl_dict,
                        source_ref = source_ref
                    ),
                    key        = ExpressionConstantRef(
                        constant      = "metaclass",
                        source_ref    = source_ref,
                        user_provided = True
                    ),
                    source_ref = source_ref
                )
            ),
            source_ref = source_ref
        ),
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_prepared,
                source_ref = source_ref
            ),
            source       = ExpressionConditional(
                condition      = ExpressionBuiltinHasattr( # pylint: disable=E1120,E1123
                    object     = ExpressionTempVariableRef(
                        variable   = tmp_metaclass,
                        source_ref = source_ref
                    ),
                    name       = ExpressionConstantRef(
                        constant      = "__prepare__",
                        source_ref    = source_ref,
                        user_provided = True
                    ),
                    source_ref = source_ref
                ),
                expression_no  = ExpressionConstantRef(
                    constant      = {},
                    source_ref    = source_ref,
                    user_provided = True
                ),
                expression_yes = ExpressionCall(
                    called     = ExpressionAttributeLookup(
                        source         = ExpressionTempVariableRef(
                            variable   = tmp_metaclass,
                            source_ref = source_ref
                        ),
                        attribute_name = "__prepare__",
                        source_ref     = source_ref
                    ),
                    args       = ExpressionMakeTuple(
                        elements   = (
                            ExpressionConstantRef(
                                constant      = node.name,
                                source_ref    = source_ref,
                                user_provided = True
                            ),
                            ExpressionTempVariableRef(
                                variable   = tmp_bases,
                                source_ref = source_ref
                            )
                        ),
                        source_ref = source_ref
                    ),
                    kw         = ExpressionTempVariableRef(
                        variable   = tmp_class_decl_dict,
                        source_ref = source_ref
                    ),
                    source_ref = source_ref
                ),
                source_ref     = source_ref
            ),
            source_ref   = source_ref
        ),
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetVariableRef(
                variable_name = node.name,
                source_ref    = source_ref
            ),
            source       = decorated_body,
            source_ref   = source_ref
        ),
    )
    if python_version >= 340:
        class_assign = statements[-1]
        class_creation_function.qualname_setup = class_assign, qualname_assign
    # Always release the temporaries, even when an exception is raised.
    final = (
        StatementReleaseVariable(
            variable   = tmp_bases,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_class_decl_dict,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_metaclass,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_prepared,
            source_ref = source_ref
        )
    )
    return makeTryFinallyStatement(
        provider   = provider,
        tried      = statements,
        final      = final,
        source_ref = source_ref
    )
def _buildClassNode2(provider, node, source_ref):
    """Re-formulate a Python2 "ClassDef" AST node into Nuitka's node tree.

    The class body becomes a function-like node that implicitly returns its
    locals(); the class object is then created by calling the metaclass
    (an explicit "__metaclass__" dictionary entry, or one selected from the
    bases) with (name, bases, dict).  Decorators are applied afterwards and
    the result is bound to the class name.  All temporaries are released in
    a surrounding try/finally.
    """
    # This function is the Python2 special case with special re-formulation as
    # according to developer manual, and it's very detailed, pylint: disable=R0914
    class_statement_nodes, class_doc = extractDocFromBody(node)

    # Function-like container that will hold the class body statements.
    function_body = ExpressionClassBody(
        provider   = provider,
        name       = node.name,
        doc        = class_doc,
        flags      = set(),
        source_ref = source_ref
    )

    # Code object metadata for the class body; a class body takes no
    # arguments of any kind.
    code_object = CodeObjectSpec(
        code_name     = node.name,
        code_kind     = "Class",
        arg_names     = (),
        kw_only_count = 0,
        has_starlist  = False,
        has_stardict  = False
    )

    body = buildStatementsNode(
        provider    = function_body,
        nodes       = class_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    if body is not None:
        # The frame guard has nothing to tell its line number to.
        body.source_ref = source_ref.atInternal()

    # The class body is basically a function that implicitly, at the end
    # returns its locals and cannot have other return statements contained, and
    # starts out with a variables "__module__" and potentially "__doc__" set.
    statements = [
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetVariableRef(
                variable_name = "__module__",
                source_ref    = source_ref
            ),
            source = ExpressionConstantRef(
                constant      = provider.getParentModule().getFullName(),
                source_ref    = source_ref,
                user_provided = True
            ),
            source_ref = source_ref.atInternal()
        )
    ]

    if class_doc is not None:
        # Only set "__doc__" when the class actually has a docstring.
        statements.append(
            StatementAssignmentVariable(
                variable_ref = ExpressionTargetVariableRef(
                    variable_name = "__doc__",
                    source_ref    = source_ref
                ),
                source = ExpressionConstantRef(
                    constant      = class_doc,
                    source_ref    = source_ref,
                    user_provided = True
                ),
                source_ref = source_ref.atInternal()
            )
        )

    # Body statements, then the implicit "return locals()".
    statements += [
        body,
        StatementReturn(
            expression = ExpressionBuiltinLocals(
                source_ref = source_ref
            ),
            source_ref = source_ref.atInternal()
        )
    ]

    body = makeStatementsSequence(
        statements = statements,
        allow_none = True,
        source_ref = source_ref
    )

    # The class body is basically a function that implicitly, at the end
    # returns its locals and cannot have other return statements contained.
    function_body.setBody(body)

    # Temporary variables used by the class creation re-formulation below.
    temp_scope = provider.allocateTempScope("class_creation")

    tmp_bases = provider.allocateTempVariable(temp_scope, "bases")
    tmp_class_dict = provider.allocateTempVariable(temp_scope, "class_dict")
    tmp_metaclass = provider.allocateTempVariable(temp_scope, "metaclass")
    tmp_class = provider.allocateTempVariable(temp_scope, "class")

    statements = [
        # tmp_bases = (<base class expressions>,)
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_bases,
                source_ref = source_ref
            ),
            source = makeSequenceCreationOrConstant(
                sequence_kind = "tuple",
                elements      = buildNodeList(
                    provider, node.bases, source_ref
                ),
                source_ref    = source_ref
            ),
            source_ref = source_ref
        ),
        # tmp_class_dict = <call of the class body function, no arguments>
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_class_dict,
                source_ref = source_ref
            ),
            source = ExpressionFunctionCall(
                function = ExpressionFunctionCreation(
                    function_ref = ExpressionFunctionRef(
                        function_body = function_body,
                        source_ref    = source_ref
                    ),
                    code_object = None,
                    defaults    = (),
                    kw_defaults = None,
                    annotations = None,
                    source_ref  = source_ref
                ),
                values     = (),
                source_ref = source_ref
            ),
            source_ref = source_ref
        ),
        # tmp_metaclass = tmp_class_dict["__metaclass__"] if present,
        # otherwise a metaclass selected from the bases tuple.
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_metaclass,
                source_ref = source_ref
            ),
            source = ExpressionConditional(
                condition = ExpressionComparisonIn(
                    left = ExpressionConstantRef(
                        constant      = "__metaclass__",
                        source_ref    = source_ref,
                        user_provided = True
                    ),
                    right = ExpressionTempVariableRef(
                        variable   = tmp_class_dict,
                        source_ref = source_ref
                    ),
                    source_ref = source_ref
                ),
                expression_yes = ExpressionDictOperationGet(
                    dict_arg = ExpressionTempVariableRef(
                        variable   = tmp_class_dict,
                        source_ref = source_ref
                    ),
                    key = ExpressionConstantRef(
                        constant      = "__metaclass__",
                        source_ref    = source_ref,
                        user_provided = True
                    ),
                    source_ref = source_ref
                ),
                expression_no = ExpressionSelectMetaclass(
                    metaclass = None,
                    bases = ExpressionTempVariableRef(
                        variable   = tmp_bases,
                        source_ref = source_ref
                    ),
                    source_ref = source_ref
                ),
                source_ref = source_ref
            ),
            source_ref = source_ref
        ),
        # tmp_class = tmp_metaclass(name, tmp_bases, tmp_class_dict)
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetTempVariableRef(
                variable   = tmp_class,
                source_ref = source_ref
            ),
            source = ExpressionCallNoKeywords(
                called = ExpressionTempVariableRef(
                    variable   = tmp_metaclass,
                    source_ref = source_ref
                ),
                args = ExpressionMakeTuple(
                    elements = (
                        ExpressionConstantRef(
                            constant      = node.name,
                            source_ref    = source_ref,
                            user_provided = True
                        ),
                        ExpressionTempVariableRef(
                            variable   = tmp_bases,
                            source_ref = source_ref
                        ),
                        ExpressionTempVariableRef(
                            variable   = tmp_class_dict,
                            source_ref = source_ref
                        )
                    ),
                    source_ref = source_ref
                ),
                source_ref = source_ref
            ),
            source_ref = source_ref
        ),
    ]

    # Apply decorators; the reversed source order means the decorator
    # closest to the class is called first, matching CPython semantics.
    for decorator in buildNodeList(
            provider,
            reversed(node.decorator_list),
            source_ref
        ):
        statements.append(
            StatementAssignmentVariable(
                variable_ref = ExpressionTargetTempVariableRef(
                    variable   = tmp_class,
                    source_ref = source_ref
                ),
                source = ExpressionCallNoKeywords(
                    called = decorator,
                    args = ExpressionMakeTuple(
                        elements = (
                            ExpressionTempVariableRef(
                                variable   = tmp_class,
                                source_ref = source_ref
                            ),
                        ),
                        source_ref = source_ref
                    ),
                    source_ref = decorator.getSourceReference()
                ),
                source_ref = decorator.getSourceReference()
            )
        )

    # Finally bind the finished class object to the class name.
    statements.append(
        StatementAssignmentVariable(
            variable_ref = ExpressionTargetVariableRef(
                variable_name = node.name,
                source_ref    = source_ref
            ),
            source = ExpressionTempVariableRef(
                variable   = tmp_class,
                source_ref = source_ref
            ),
            source_ref = source_ref
        )
    )

    # Release every temporary, even when an exception is raised.
    final = (
        StatementReleaseVariable(
            variable   = tmp_class,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_bases,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_class_dict,
            source_ref = source_ref
        ),
        StatementReleaseVariable(
            variable   = tmp_metaclass,
            source_ref = source_ref
        )
    )

    return makeTryFinallyStatement(
        provider   = function_body,
        tried      = statements,
        final      = final,
        source_ref = source_ref
    )
def buildClassNode(provider, node, source_ref):
    """Re-formulate a "ClassDef" AST node into Nuitka's internal tree.

    Dispatches to the Python2 or Python3 specific re-formulation, since
    class creation differs fundamentally between the two.
    """
    assert getKind(node) == "ClassDef"

    # There appears to be a inconsistency with the top level line number
    # not being the one really the class has, if there are bases, and a
    # decorator: anchor the source reference on the last base instead.
    if node.bases:
        source_ref = source_ref.atLineNumber(node.bases[-1].lineno)

    builder = _buildClassNode2 if python_version < 300 else _buildClassNode3
    return builder(provider, node, source_ref)
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
75fe5a8f200efb8a375fdb2be70b50fa02ee2375 | 2e78cd226a3c1fd2a27e60cea28dd77ba68a8630 | /preprocessing.py | 10bd1674b81b1a6f1eb1766fc3d510a37ca8b474 | [
"MIT"
] | permissive | Steve-YJ/Steve-SageMaker-Tutorials | 86a39b4da8b0aca3f54f40a32f536c043ceede56 | 5b43228eeb1c8a5846fb1a8860b839df157e2619 | refs/heads/main | 2023-04-23T09:14:41.976900 | 2021-04-29T00:34:17 | 2021-04-29T00:34:17 | 362,421,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | import argparse
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.exceptions import DataConversionWarning
from sklearn.compose import make_column_transformer
# sklearn emits DataConversionWarning for dtype coercions; irrelevant here.
warnings.filterwarnings(action='ignore', category=DataConversionWarning)

if __name__=='__main__':
    # Arguments supplied by the SageMaker Processing job launcher.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-test-split-ratio', type=float, default=0.3)
    parser.add_argument('--random-split', type=int, default=0)
    args, _ = parser.parse_known_args()

    print('Received arguments {}'.format(args))

    # Input channel is mounted at the fixed /opt/ml/processing/input path
    # (SageMaker Processing convention).
    input_data_path = os.path.join('/opt/ml/processing/input', 'rawdata.csv')

    print('Reading input data from {}'.format(input_data_path))
    df = pd.read_csv(input_data_path)

    split_ratio = args.train_test_split_ratio
    random_state = args.random_split

    # BUG FIX: the original called df.sample(frac=1) without assigning the
    # result; DataFrame.sample returns a shuffled *copy*, so the data was
    # never actually shuffled.  Assign it back, seeded for reproducibility.
    df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)

    # Column order produced by the ColumnTransformer below: the two scaled
    # columns first, then the passthrough remainder.  The slices drop the
    # 'Label' column (COLS[0]) and the original positions of PAY_AMT1 /
    # BILL_AMT1 -- assumes those sit at raw positions 12 and 18 of the CSV;
    # TODO confirm against rawdata.csv.
    COLS = df.columns
    newcolorder = ['PAY_AMT1','BILL_AMT1'] + list(COLS[1:])[:11] + list(COLS[1:])[12:17] + list(COLS[1:])[18:]

    X_train, X_test, y_train, y_test = train_test_split(
        df.drop('Label', axis=1), df['Label'],
        test_size=split_ratio, random_state=random_state)

    # Standardize PAY_AMT1, min-max scale BILL_AMT1, pass everything else
    # through unchanged.
    preprocess = make_column_transformer(
        (['PAY_AMT1'], StandardScaler()),
        (['BILL_AMT1'], MinMaxScaler()),
        remainder='passthrough')

    print('Running preprocessing and feature engineering transformations')
    # Fit the scalers on the training split only; re-use the fitted
    # transform on the test split to avoid leakage.
    train_features = pd.DataFrame(preprocess.fit_transform(X_train), columns = newcolorder)
    test_features = pd.DataFrame(preprocess.transform(X_test), columns = newcolorder)

    # concat to ensure Label column is the first column in dataframe
    train_full = pd.concat([pd.DataFrame(y_train.values, columns=['Label']), train_features], axis=1)
    test_full = pd.concat([pd.DataFrame(y_test.values, columns=['Label']), test_features], axis=1)

    print('Train data shape after preprocessing: {}'.format(train_features.shape))
    print('Test data shape after preprocessing: {}'.format(test_features.shape))

    # Output channels; SageMaker uploads these directories after the job.
    train_features_headers_output_path = os.path.join('/opt/ml/processing/train_headers', 'train_data_with_headers.csv')
    train_features_output_path = os.path.join('/opt/ml/processing/train', 'train_data.csv')
    test_features_output_path = os.path.join('/opt/ml/processing/test', 'test_data.csv')

    print('Saving training features to {}'.format(train_features_output_path))
    train_full.to_csv(train_features_output_path, header=False, index=False)
    print("Complete")

    print("Save training data with headers to {}".format(train_features_headers_output_path))
    train_full.to_csv(train_features_headers_output_path, index=False)

    print('Saving test features to {}'.format(test_features_output_path))
    test_full.to_csv(test_features_output_path, header=False, index=False)
    print("Complete")
| [
"stevelee@mz.co.kr"
] | stevelee@mz.co.kr |
d17ce8802fa93c39c6b0c878618591c8e9e54804 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/list_workflow_statistic_request.py | 15c6ff0b1d99414e34d6ff1f3f4a86a7a12c7a08 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,669 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListWorkflowStatisticRequest:
    """Request model for listing workflow statistics (auto-generated SDK model).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values to_dict() masks as "****"; none here.
    sensitive_list = []

    openapi_types = {
        'eihealth_project_id': 'str'
    }

    attribute_map = {
        'eihealth_project_id': 'eihealth_project_id'
    }

    def __init__(self, eihealth_project_id=None):
        """ListWorkflowStatisticRequest

        The model defined in huaweicloud sdk

        :param eihealth_project_id: EIHealth project ID.  (The original
            docstring was mojibake; presumably it said the ID can be found
            on the project settings page of the EIHealth console --
            TODO confirm against upstream SDK documentation.)
        :type eihealth_project_id: str
        """
        self._eihealth_project_id = None
        # Polymorphism discriminator used by the SDK machinery; unused here.
        self.discriminator = None
        self.eihealth_project_id = eihealth_project_id

    @property
    def eihealth_project_id(self):
        """Gets the eihealth_project_id of this ListWorkflowStatisticRequest.

        EIHealth project ID (original description was mojibake; see
        ``__init__``).

        :return: The eihealth_project_id of this ListWorkflowStatisticRequest.
        :rtype: str
        """
        return self._eihealth_project_id

    @eihealth_project_id.setter
    def eihealth_project_id(self, eihealth_project_id):
        """Sets the eihealth_project_id of this ListWorkflowStatisticRequest.

        EIHealth project ID (original description was mojibake; see
        ``__init__``).

        :param eihealth_project_id: The eihealth_project_id of this ListWorkflowStatisticRequest.
        :type eihealth_project_id: str
        """
        self._eihealth_project_id = eihealth_project_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert element-wise; nested models via their own to_dict.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive; copy the rest as-is.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python2-only hack: force utf-8 as the default encoding so
            # non-ASCII model content serializes without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListWorkflowStatisticRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
8ce185c295da02d35794c2f261b5426e43b51655 | b431b7566427faec22d97f5032de8b78f2ce7b3c | /venv/Lib/site-packages/utilpy/files.py | 2f995631950c60811adf0087b3c6fc8fc3c9c8de | [] | no_license | Emilianopp/pygame | 735f030780ddf799a83270b92fb29da9692e1dd9 | f5f5eaff72fb8e3c677f66f28667c70b91b6a2bd | refs/heads/main | 2023-07-23T13:30:40.699820 | 2021-08-24T02:06:40 | 2021-08-24T02:06:40 | 336,358,437 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,217 | py | import zipfile
import os
import shutil
import patoolib
from .utils import walk_directory, format_filename
def _zipdir(dir_path, ziph, exclude=None, include=None):
    """
    write file to zip object

    Args:
        dir_path:``str``
            parent directory to zip
        ziph:``Zipfile``
            Zipfile from zipfile
        exclude:``list``
            list of file patterns; paths containing any of them are skipped
        include:``list``
            list of file patterns; only paths containing one are written
    """
    # ziph is zipfile handle
    files = walk_directory(dir_path)
    for p in files:
        if exclude:
            # BUG FIX: the original wrote ``p`` once for *every* exclude
            # pattern that did not match, creating duplicate archive entries
            # and still writing paths that matched another pattern.  A path
            # is written only when no exclude pattern occurs in it.
            if all(f not in p for f in exclude):
                ziph.write(p)
        elif include:
            # BUG FIX: likewise, write each matching path exactly once.
            if any(f in p for f in include):
                ziph.write(p)
        else:
            ziph.write(p)
def zip_folder(dir_path, filename=None, exclude=None, include=None):
    """
    zip all file in folder,

    Args:
        dir_path:``str``
            parent directory to zip
        filename:``str``
            filename of zip file; defaults to ``format_filename(dir_path) + '.zip'``
        exclude:``list``
            list of file to exclude
        include:``list``
            list of file to include
    """
    archive_name = filename if filename else format_filename(dir_path) + '.zip'
    # The context manager closes the archive even if _zipdir raises.
    with zipfile.ZipFile(archive_name, 'w', zipfile.ZIP_DEFLATED) as archive:
        _zipdir(dir_path, archive, exclude, include)
def unzip_file(filename, save_dir='data'):
    """
    unzip all file

    Args:
        filename:``str``
            filename of zip file
        save_dir:``str``
            directory to extract into (created if missing)

    Raises:
        ValueError: if the archive is neither ``.zip`` nor ``.rar``.

    .. warning:: for ``rar`` file type, install ``rar`` and ``unrar``

    .. code-block:: sh

        apt install rar && apt install unrar
    """
    # check if save_dir exists
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    # BUG FIX: the original tested filename[-3:] == 'zip', which broke for
    # very short paths and upper-case suffixes; endswith on the lowered
    # name is robust.  The ZipFile is also closed via a context manager now.
    lowered = filename.lower()
    if lowered.endswith('.zip'):
        with zipfile.ZipFile(filename, 'r') as zip_ref:
            zip_ref.extractall(save_dir)
    elif lowered.endswith('.rar'):
        patoolib.extract_archive(filename, outdir=save_dir)
    else:
        # The original silently did nothing for other extensions; fail
        # loudly instead so callers notice nothing was extracted.
        raise ValueError("Unsupported archive type: %s" % filename)
def parse_path(path, fn_only=False, ext=False, al=True):
    """get the directory from filename,

    Args:
        path:``str``
            path of file
        fn_only:``bol``
            get the file name only from path
        ext:``bol``
            split file name into name and extension
        al: ``bol``
            get list of dir and file name

    Returns:
        filename, (name, extension), (dir, name, extension) or dir,
        depending on which flag is set (checked in that order).
    """
    dir_ = os.path.dirname(path)
    filename = os.path.basename(path)
    # BUG FIX: the original assigned the split result to a local named
    # "ext", shadowing the boolean flag parameter.  "elif ext:" therefore
    # tested the extension *string*, so the "al" branch was unreachable for
    # any file that had an extension.  Use a distinct local name.
    name, extension = os.path.splitext(filename)
    if fn_only:
        return filename
    elif ext:
        return name, extension
    elif al:
        return dir_, name, extension
    else:
        return dir_
def move_file(filename, out_dir):
    """
    move file/dir

    Args:
        filename:``str``
            filename of file to be moved or name of dir to be moved
        out_dir:``str``
            output directory (created first when it does not exist)
    """
    destination_missing = not os.path.isdir(out_dir)
    if destination_missing:
        os.makedirs(out_dir)
    shutil.move(filename, out_dir)
def delete_dirs(folder_names):
    """
    delete folder and all its contents, then recreate it empty

    Args: `
        folder_names:`str`` or ``list``
            string or list of folders
    """
    if isinstance(folder_names, str):
        targets = [folder_names]
    elif isinstance(folder_names, list):
        targets = folder_names
    else:
        raise TypeError("Input should be str or list ")
    for target in targets:
        shutil.rmtree(target)
        os.makedirs(target)
def make_dirs(folder_names):
    """
    make folder if it doesnot exist

    Args:
        folder_names:``str`` or ``list``
            string or list of folders
    """
    if isinstance(folder_names, str):
        folders = [folder_names]
    elif isinstance(folder_names, list):
        folders = folder_names
    else:
        raise TypeError("Input should be str or list ")
    for folder in folders:
        if not os.path.exists(folder):
            os.makedirs(folder)
| [
"epenaloz@uwo.ca"
] | epenaloz@uwo.ca |
9e8d73095336a9aec74a9c50d0d0e8418c3ee1fa | 72863e7278f4be8b5d63d999144f9eaec3e7ec48 | /venv/lib/python2.7/site-packages/libmproxy/console/window.py | 69d5e242cd0f379d95e9b2ce08e66ce6ea7d6a88 | [
"MIT"
] | permissive | sravani-m/Web-Application-Security-Framework | 6e484b6c8642f47dac94e67b657a92fd0dbb6412 | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | refs/heads/master | 2020-04-26T11:54:01.334566 | 2019-05-03T19:17:30 | 2019-05-03T19:17:30 | 173,532,718 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | import urwid
from . import signals
class Window(urwid.Frame):
    """Top-level frame of the console UI.

    Wraps the body (and optional header/footer) widgets in the
    "background" display attribute, and layers global mouse and keyboard
    handling on top of urwid.Frame, dispatching actions to ``self.master``.
    """

    def __init__(self, master, body, header, footer, helpctx):
        # header/footer are optional; wrap only when present.
        urwid.Frame.__init__(
            self,
            urwid.AttrWrap(body, "background"),
            header = urwid.AttrWrap(header, "background") if header else None,
            footer = urwid.AttrWrap(footer, "background") if footer else None
        )
        self.master = master
        # Help context shown when "?" is pressed.
        self.helpctx = helpctx
        signals.focus.connect(self.sig_focus)

    def sig_focus(self, sender, section):
        """Signal handler: move the frame focus to the requested section."""
        self.focus_position = section

    def mouse_event(self, *args, **kwargs):
        # args: (size, event, button, col, row)
        k = super(self.__class__, self).mouse_event(*args, **kwargs)
        if not k:
            if args[1] == "mouse drag":
                # Dragging is not used for selection; hint at the
                # terminal's native selection modifiers instead.
                signals.status_message.send(
                    message = "Hold down shift, alt or ctrl to select text.",
                    expire = 1
                )
            elif args[1] == "mouse press" and args[2] == 4:
                # Buttons 4/5 (conventionally the scroll wheel) are mapped
                # to cursor up/down key presses.
                self.keypress(args[0], "up")
            elif args[1] == "mouse press" and args[2] == 5:
                self.keypress(args[0], "down")
            else:
                return False
            return True

    def keypress(self, size, k):
        """Handle global key bindings after the focused widget declined."""
        k = super(self.__class__, self).keypress(size, k)
        if k == "?":
            # Show help for the current context.
            self.master.view_help(self.helpctx)
        elif k == "c":
            # Client replay: prompt for a replay path, or offer to stop a
            # replay that is already running.
            if not self.master.client_playback:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Client replay",
                    callback = self.master.client_playback_path
                )
            else:
                signals.status_prompt_onekey.send(
                    self,
                    prompt = "Stop current client replay?",
                    keys = (
                        ("yes", "y"),
                        ("no", "n"),
                    ),
                    callback = self.master.stop_client_playback_prompt,
                )
        elif k == "i":
            # Edit the intercept filter expression.
            signals.status_prompt.send(
                self,
                prompt = "Intercept filter",
                text = self.master.state.intercept_txt,
                callback = self.master.set_intercept
            )
        elif k == "o":
            self.master.view_options()
        elif k == "Q":
            # Hard quit of the urwid main loop.
            raise urwid.ExitMainLoop
        elif k == "q":
            # Leave the current view.
            signals.pop_view_state.send(self)
        elif k == "S":
            # Server replay, symmetric to the "c" binding above.
            if not self.master.server_playback:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Server replay path",
                    callback = self.master.server_playback_path
                )
            else:
                signals.status_prompt_onekey.send(
                    self,
                    prompt = "Stop current server replay?",
                    keys = (
                        ("yes", "y"),
                        ("no", "n"),
                    ),
                    callback = self.master.stop_server_playback_prompt,
                )
        else:
            # Unhandled: return the key so urwid can propagate it further.
            return k
| [
"sravani.manukonda7@gmail.com"
] | sravani.manukonda7@gmail.com |
72a814435d4159ba19a2c548b890a43020e942c8 | f68cd225b050d11616ad9542dda60288f6eeccff | /testscripts/RDKB/component/PAM/TS_PAM_GetProcessNumberOfEntries.py | 173f02eb2b3d59f3694789afc506cc744abebd56 | [
"Apache-2.0"
] | permissive | cablelabs/tools-tdkb | 18fb98fadcd169fa9000db8865285fbf6ff8dc9d | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | refs/heads/master | 2020-03-28T03:06:50.595160 | 2018-09-04T11:11:00 | 2018-09-05T00:24:38 | 147,621,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,419 | py | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>5</version>
<name>TS_PAM_GetProcessNumberOfEntries</name>
<primitive_test_id/>
<primitive_test_name>pam_GetParameterValues</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>This testcase returns the no: of processes running in the device</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>RPI</box_type>
<box_type>Emulator</box_type>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_PAM_89</test_case_id>
<test_objective>To get the no: of processes running in the device</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,XB3</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
pam_GetParameterValues
Input:
ParamName -
Device.DeviceInfo.ProcessStatus.ProcessNumberOfEntries</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with provided arguments in configure page.
3.TM will load the PAM library via Test agent
4.From python script, invoke pam_GetParameterValues() stub function to get the number of processes running.
5.pam stub function will call the ssp_getParameterValue() function of tdk component.
6.Responses from the pam stub function will be logged in Agent Console log.
7.pam stub will validate the actual result with the expected result and send the result status to Test Manager.
8.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from pam stub.</automation_approch>
<except_output>CheckPoint 1:
The output should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_PAM_GetProcessNumberOfEntries</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
#import statement
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");

#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
# NOTE(review): "<ipaddress>" / "<port>" are template placeholders that the
# Test Manager substitutes before execution; the file is not runnable as-is.
# The script uses Python2 print statements throughout.
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_PAM_GetProcessNumberOfEntries');

#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS]  : %s" %loadmodulestatus ;

if "SUCCESS" in loadmodulestatus.upper():
    #Set the result status of execution
    obj.setLoadModuleStatus("SUCCESS");
    # Query the TR-181 parameter that holds the number of running processes.
    tdkTestObj = obj.createTestStep('pam_GetParameterValues');
    tdkTestObj.addParameter("ParamName","Device.DeviceInfo.ProcessStatus.ProcessNumberOfEntries");
    expectedresult="SUCCESS";
    #Execute the test case in STB
    tdkTestObj.executeTestCase(expectedresult);
    actualresult = tdkTestObj.getResult();
    details = tdkTestObj.getResultDetails();

    if expectedresult in actualresult:
        #Set the result status of execution
        tdkTestObj.setResultStatus("SUCCESS");
        print "TEST STEP 1: Get the number of processes";
        print "EXPECTED RESULT 1: Should get the number of processes";
        print "ACTUAL RESULT 1: No of Processes %s" %details;
        #Get the result of execution
        print "[TEST EXECUTION RESULT] : SUCCESS"
    else:
        tdkTestObj.setResultStatus("FAILURE");
        print "TEST STEP 1: Get the number of processes";
        print "EXPECTED RESULT 1: Should get the number of processes";
        print "ACTUAL RESULT 1: Failure in getting the number of processes. Details : %s" %details;
        print "[TEST EXECUTION RESULT] : FAILURE";
    # Unload the component library regardless of the step outcome.
    obj.unloadModule("pam");
else:
    print "Failed to load pam module";
    obj.setLoadModuleStatus("FAILURE");
    print "Module loading failed";
| [
"jim.lawton@accenture.com"
] | jim.lawton@accenture.com |
cca1b0900bb1ccb36f71339ce1474d3bf34c967b | 0b6dbf85c169622e4da4cabeb799ed8ec5eeb109 | /djangorest/settings.py | 91ee2a91489a3f472a01b6cb84260a8f6fc81dc7 | [] | no_license | rochimo2/api_tutorial | 88e1aa542222211a7bc4b0c47ed759cca129f14e | 194109383339b9fba0252726ea1019e251eb52f2 | refs/heads/master | 2020-03-07T07:31:20.028560 | 2018-03-29T21:57:25 | 2018-03-29T21:57:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,462 | py | """
Django settings for djangorest project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'j(om&bo)4zd$4v4ucoq#zi!+f^m5pz-523$f6tk5-=##c_*%*-'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Django REST framework: every endpoint requires authentication by default;
# clients may use HTTP Basic auth or a DRF token (the
# 'rest_framework.authtoken' app below provides the token model).
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    )
}

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'apiapp',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangorest.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangorest.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
| [
"rmoyano@nomades.com.ar"
] | rmoyano@nomades.com.ar |
a3896f595a20d91502cbd6e39182b02c88162ce0 | c8a84cdced4d408a273a99a82432e41fc6a9ab55 | /src/neprojects/settings/prod.py | f0bd684ef104bfaafcf743399bf9f4088f514eba | [] | no_license | poudel/goodmandu | 584bb652ef4f51ee4ca336f674e37d38a782e3c9 | 64177d7fd95002dd06ad9b99817fff6095a9166c | refs/heads/master | 2021-06-12T14:56:18.802948 | 2019-10-09T19:15:34 | 2019-10-09T19:47:16 | 128,678,514 | 1 | 0 | null | 2020-06-05T18:07:43 | 2018-04-08T20:18:13 | Python | UTF-8 | Python | false | false | 96 | py | from .base import *
DEBUG = False
try:
from .local import *
except ImportError:
pass
| [
"self@keshab.net"
] | self@keshab.net |
1a22c6f4d145dfc0e9a39fdb60db7ac9c0a5883a | 162dbc3ab6a46402d32461aceda18091b59f01a5 | /fb37/lib/python3.7/site-packages/facebook_business/adobjects/customconversion.py | 109346d17b38e2e23274dc0c1ae35ba43b966bf3 | [] | no_license | justineshaw/fbv37 | 2895b8fc5d6f5efbe5ff4e487025ac8d403b78aa | e5b0c46f76adfec74899adb782321335cbeaf1a3 | refs/heads/master | 2022-12-13T10:19:25.113429 | 2019-08-19T13:03:22 | 2019-08-19T13:03:22 | 199,038,644 | 2 | 1 | null | 2022-12-08T05:19:51 | 2019-07-26T15:15:56 | Python | UTF-8 | Python | false | false | 14,700 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class CustomConversion(
    AbstractCrudObject,
):
    """Graph API node wrapper for a Facebook Custom Conversion.

    Auto-generated CRUD class: each api_*/get_*/create_*/delete_* method
    builds a FacebookRequest against this node (or one of its edges) and
    either executes it immediately, returns it unexecuted (``pending=True``),
    or appends it to a batch (``batch=...``). Per the header of this file,
    do not hand-edit; changes belong in the codegen framework.
    """

    def __init__(self, fbid=None, parent_id=None, api=None):
        # Marker attribute used elsewhere in the SDK for cheap type checks.
        self._isCustomConversion = True
        super(CustomConversion, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        # Graph API field names exposed by this node.
        account_id = 'account_id'
        aggregation_rule = 'aggregation_rule'
        business = 'business'
        creation_time = 'creation_time'
        custom_event_type = 'custom_event_type'
        data_sources = 'data_sources'
        default_conversion_value = 'default_conversion_value'
        description = 'description'
        event_source_type = 'event_source_type'
        first_fired_time = 'first_fired_time'
        id = 'id'
        is_archived = 'is_archived'
        last_fired_time = 'last_fired_time'
        name = 'name'
        offline_conversion_data_set = 'offline_conversion_data_set'
        pixel = 'pixel'
        retention_days = 'retention_days'
        rule = 'rule'
        event_source_id = 'event_source_id'
        advanced_rule = 'advanced_rule'
        custom_conversion_id = 'custom_conversion_id'

    class CustomEventType:
        # Allowed enum values for the custom_event_type field.
        add_payment_info = 'ADD_PAYMENT_INFO'
        add_to_cart = 'ADD_TO_CART'
        add_to_wishlist = 'ADD_TO_WISHLIST'
        complete_registration = 'COMPLETE_REGISTRATION'
        contact = 'CONTACT'
        content_view = 'CONTENT_VIEW'
        customize_product = 'CUSTOMIZE_PRODUCT'
        donate = 'DONATE'
        find_location = 'FIND_LOCATION'
        initiated_checkout = 'INITIATED_CHECKOUT'
        lead = 'LEAD'
        listing_interaction = 'LISTING_INTERACTION'
        other = 'OTHER'
        purchase = 'PURCHASE'
        schedule = 'SCHEDULE'
        search = 'SEARCH'
        start_trial = 'START_TRIAL'
        submit_application = 'SUBMIT_APPLICATION'
        subscribe = 'SUBSCRIBE'

    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        """Edge name of this node under its parent (deprecated helper)."""
        return 'customconversions'

    def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Create a custom conversion under the ad account ``parent_id``.

        Delegates to AdAccount.create_custom_conversion.
        """
        from facebook_business.adobjects.adaccount import AdAccount
        return AdAccount(api=self._api, fbid=parent_id).create_custom_conversion(fields, params, batch, success, failure, pending)

    def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """DELETE this node (DELETE /<id>)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            # Callbacks are only invoked by the batch machinery.
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='DELETE',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Read this node's fields (GET /<id>); the result is parsed back into self."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=CustomConversion,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Update this node (POST /<id>); mutable params: name, default_conversion_value, description."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'name': 'string',
            'default_conversion_value': 'float',
            'description': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=CustomConversion,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def get_activities(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Fetch the /activities edge, optionally filtered by time range and event type."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.customconversionactivities import CustomConversionActivities
        param_types = {
            'start_time': 'datetime',
            'end_time': 'datetime',
            'event_type': 'event_type_enum',
        }
        enums = {
            'event_type_enum': CustomConversionActivities.EventType.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/activities',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=CustomConversionActivities,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=CustomConversionActivities, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def delete_ad_accounts(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Remove ad accounts from the /adaccounts edge (DELETE /<id>/adaccounts)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'account_id': 'string',
            'business': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='DELETE',
            endpoint='/adaccounts',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def get_ad_accounts(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """List ad accounts on the /adaccounts edge (GET /<id>/adaccounts)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.adaccount import AdAccount
        param_types = {
            'business': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/adaccounts',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AdAccount,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=AdAccount, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def create_ad_account(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Attach an ad account to the /adaccounts edge (POST /<id>/adaccounts)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'account_id': 'string',
            'business': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/adaccounts',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=CustomConversion,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=CustomConversion, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def get_stats(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Fetch aggregated stats from the /stats edge for an optional time range."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        from facebook_business.adobjects.customconversionstatsresult import CustomConversionStatsResult
        param_types = {
            'start_time': 'datetime',
            'end_time': 'datetime',
            'aggregation': 'aggregation_enum',
        }
        enums = {
            'aggregation_enum': CustomConversionStatsResult.Aggregation.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/stats',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=CustomConversionStatsResult,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=CustomConversionStatsResult, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    # Declared Graph API type of every field, used by the SDK's parsers.
    _field_types = {
        'account_id': 'string',
        'aggregation_rule': 'string',
        'business': 'Business',
        'creation_time': 'datetime',
        'custom_event_type': 'CustomEventType',
        'data_sources': 'list<ExternalEventSource>',
        'default_conversion_value': 'int',
        'description': 'string',
        'event_source_type': 'string',
        'first_fired_time': 'datetime',
        'id': 'string',
        'is_archived': 'bool',
        'last_fired_time': 'datetime',
        'name': 'string',
        'offline_conversion_data_set': 'OfflineConversionDataSet',
        'pixel': 'AdsPixel',
        'retention_days': 'unsigned int',
        'rule': 'string',
        'event_source_id': 'string',
        'advanced_rule': 'string',
        'custom_conversion_id': 'string',
    }

    @classmethod
    def _get_field_enum_info(cls):
        """Map enum-typed field names to their allowed values."""
        field_enum_info = {}
        field_enum_info['CustomEventType'] = CustomConversion.CustomEventType.__dict__.values()
        return field_enum_info
| [
"easyworkemail@gmail.com"
] | easyworkemail@gmail.com |
1e3202d244ebfff9dd6bcecadaa71c32fb40e2fd | 709bd5f2ecc69a340da85f6aed67af4d0603177e | /saleor/product/migrations/0073_auto_20181010_0729.py | b6435a6d48c72a8b3723f876f1995998d63fc51b | [
"BSD-3-Clause"
] | permissive | Kenstogram/opensale | 41c869ee004d195bd191a1a28bf582cc6fbb3c00 | 5102f461fa90f2eeb13b9a0a94ef9cb86bd3a3ba | refs/heads/master | 2022-12-15T02:48:48.810025 | 2020-03-10T02:55:10 | 2020-03-10T02:55:10 | 163,656,395 | 8 | 0 | BSD-3-Clause | 2022-12-08T01:31:09 | 2018-12-31T09:30:41 | Python | UTF-8 | Python | false | false | 1,129 | py | # Generated by Django 2.1.2 on 2018-10-10 12:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.1.2, 2018-10-10).

    Adds two temporary, nullable FKs from Attribute to ProductType
    (used for a product/variant attribute split) and tightens the
    Attribute name/slug field definitions. Applied migrations must not
    be edited by hand.
    """

    dependencies = [
        ('product', '0072_auto_20180925_1048'),
    ]

    operations = [
        migrations.AddField(
            model_name='attribute',
            name='product_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='temp_product_attributes', to='product.ProductType'),
        ),
        migrations.AddField(
            model_name='attribute',
            name='product_variant_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='temp_variant_attributes', to='product.ProductType'),
        ),
        migrations.AlterField(
            model_name='attribute',
            name='name',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='attribute',
            name='slug',
            field=models.SlugField(),
        ),
    ]
| [
"Kenstogram@gmail.com"
] | Kenstogram@gmail.com |
8b0c025a56778453f144197d06f980ed00737458 | af8f0d50bb11279c9ff0b81fae97f754df98c350 | /src/book/api/views/category.py | 3037b5486bcde77e4a473f50e746cb978cc99220 | [
"Apache-2.0"
] | permissive | DmytroKaminskiy/ltt | 592ed061efe3cae169a4e01f21d2e112e58714a1 | d08df4d102e678651cd42928e2343733c3308d71 | refs/heads/master | 2022-12-18T09:56:36.077545 | 2020-09-20T15:57:35 | 2020-09-20T15:57:35 | 292,520,616 | 0 | 0 | Apache-2.0 | 2020-09-20T15:49:58 | 2020-09-03T09:09:26 | HTML | UTF-8 | Python | false | false | 513 | py | from book.api.serializers.category import CategorySerializer
from book.models import Category
from rest_framework import generics
# Explicit public API of this views module (what `from ... import *` exposes).
__all__ = [
    'ListCreateCategoryView',
    'RetrieveCategoryView',
]
class ListCreateCategoryView(generics.ListCreateAPIView):
    """GET: list all categories, newest first. POST: create a new category."""

    queryset = Category.objects.all().order_by('-id')
    serializer_class = CategorySerializer
class RetrieveCategoryView(generics.RetrieveAPIView):
    """GET: retrieve a single category by its primary key."""

    serializer_class = CategorySerializer
    queryset = Category.objects.all().order_by('-id')
| [
"dmytro.kaminskyi92@gmail.com"
] | dmytro.kaminskyi92@gmail.com |
3eebded3e51926fff9c1f76a81b7786c011c7547 | 8aa1b94626402c0c614128d6061edb771dad05cf | /e100/e017.py | b24dd8c62dd7fb877ccffbdbd147ff7d80e27ed6 | [] | no_license | netfj/Project_Stu02 | 31e76c1b656ee74c54cae2185821dec7ccf50401 | afc1b26b7c586fd6979ab574c7d357a6b9ef4d29 | refs/heads/master | 2023-03-13T22:24:40.364167 | 2021-02-23T09:53:31 | 2021-02-23T09:53:31 | 341,506,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #coding:utf-8
"""
@info: ้ข็ฎ๏ผ่พๅ
ฅไธ่กๅญ็ฌฆ๏ผๅๅซ็ป่ฎกๅบๅ
ถไธญ่ฑๆๅญๆฏใ็ฉบๆ ผใๆฐๅญๅๅ
ถๅฎๅญ็ฌฆ็ไธชๆฐใ
@author:NetFj @software:PyCharm @file:e017.py @time:2018/11/1.16:45
"""
# c='0'
# while c != '':
# c = input('Input a string:')
c='abc1 2 3 4 5 6[(@#$)]'
y,k,s,q =0,0,0,0
for x in c:
if x.isalpha():y+=1
elif x.isspace():k+=1
elif x.isdigit(): s += 1
else: q+=1
print(y,k,s,q)
y,k,s,q =0,0,0,0
for n in range(0,len(c)):
if c[n].isalpha():
y += 1
elif c[n].isspace():
k += 1
elif c[n].isdigit():
s += 1
else:
q += 1
print(y, k, s, q) | [
"netfj@sina.com"
] | netfj@sina.com |
f591d4de1b9eed87d9957639e078e2d3f1771f12 | 058095044273a31b63e9075874cc6c930966b248 | /pickapub_api/settings.py | bb95b30879ef175c46a8cb202b7230cd9dc74208 | [] | no_license | ishankyadav92/pickapub_api | ecb8db2771a85e900f6c72c25bac5eb065decc6f | f921ed255fab955ed3d5c4dabf98b1fdb24caa05 | refs/heads/master | 2022-12-09T02:52:55.643909 | 2017-05-29T18:32:32 | 2017-05-29T18:32:32 | 91,780,329 | 0 | 1 | null | 2022-12-07T23:57:10 | 2017-05-19T07:48:54 | Python | UTF-8 | Python | false | false | 3,918 | py | """
Django settings for pickapub_api project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import json
import os
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
BASE_DIR = Path(__file__).ancestor(2)  # project root: two levels above this settings file

# Load deployment secrets (DB credentials etc.) from an untracked JSON file
# kept next to the project root; consumed by get_secret() below.
with open(BASE_DIR.child("secrets.json")) as secret:
    secrets = json.loads(secret.read())
def get_secret(setting, secrets=secrets):
    """Look up *setting* in the secrets mapping.

    Raises ImproperlyConfigured with an explanatory message when the key
    is absent, so missing configuration fails loudly at startup.
    """
    if setting in secrets:
        return secrets[setting]
    error_msg = "Set the {0} environment variable".format(setting)
    raise ImproperlyConfigured(error_msg)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded even though a secrets.json mechanism
# exists above — consider moving it to get_secret('SECRET_KEY').
SECRET_KEY = '^)v%(g-qy&d2m%)=ss9=cu^zvt=qyu!gj5$*5a@igp86!euxk8'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party
    'rest_framework',
    'rest_framework_swagger',
    # project apps
    'restaurants',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'pickapub_api.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pickapub_api.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Credentials come from secrets.json via get_secret() (defined above).

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': get_secret('DB_HOST'),
        'USER': get_secret('DB_USER'),
        'PASSWORD': get_secret('DB_PASSWORD'),
        'NAME': get_secret('DB_NAME'),
        'OPTIONS': {
            # Strict/traditional SQL mode: make MySQL error instead of
            # silently truncating bad data.
            'sql_mode': 'traditional',
        }
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
"joshi.shubham82@gmail.com"
] | joshi.shubham82@gmail.com |
1c514da4809f069dea6bd00c236b1d993bd44ffb | d59a8dfbdb33e5e039601df23f45dab0ce083361 | /users/models.py | 3edda378495739be1a5e505b795a15a21233bd27 | [] | no_license | bagafoot/django | 16f6ed8cd9944ad7e6892d7c9924bd1623268696 | 68a2c51ff9d3211e68b8508883f470066f383738 | refs/heads/master | 2020-07-29T11:04:50.200209 | 2019-09-20T11:25:11 | 2019-09-20T11:25:11 | 209,773,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
    """One-to-one extension of the auth User holding an avatar image."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(default='default.jpeg', upload_to='profile_pics/')

    def __str__(self):
        return self.user.username

    def save(self, *args, **kwargs):
        """Save the profile, then downscale the avatar to at most 300x300 px.

        Fix: the original signature was ``save(self, **kwargs)`` and called
        ``super().save()`` with no arguments, silently discarding Django
        options such as ``force_insert``, ``using`` and ``update_fields``.
        They are now forwarded.
        """
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            # thumbnail() resizes in place, preserving aspect ratio.
            img.thumbnail(output_size)
            img.save(self.image.path)
"bagafoot@hotmail.com"
] | bagafoot@hotmail.com |
88ae10543f5e8553ed3e462607d89c072b024aac | 81e0c610f487cd54e3596c0a13942158038fd7ca | /analysis/performance.py | a1075e60bc9501b86a33e39589afd9528ea080cc | [] | no_license | santacruzlab/bmi_tasks_analysis | fbb8aeec03d215951f941cf6f0b7cb953d5ffb46 | 232eeeffa82e5d3e174ae5a5acdf9b7b15f5c53a | refs/heads/master | 2022-05-05T02:03:55.808391 | 2022-04-06T18:13:17 | 2022-04-06T18:13:17 | 168,071,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 44,029 | py | #!/usr/bin/python
'''
Task-dependent performance measures (primarily for BMI tasks currently)
'''
from db import dbfunctions
from db import dbfunctions as dbfn
import numpy as np
from scipy.stats import circmean
import matplotlib.pyplot as plt
# import plotutil
from collections import OrderedDict, defaultdict
from db.tracker import models
import os
import tables
from itertools import izip
from riglib.bmi import robot_arms, train, kfdecoder, ppfdecoder
# Unit-conversion constants and shared paths used throughout this module.
min_per_sec = 1./60  # minutes per second
seconds_per_min = 60  # seconds per minute
sec_per_min = 60  # alias of seconds_per_min; both names are used below
pi = np.pi  # shorthand for numpy's pi
plot_dir = '/storage/plots'  # directory where generated plots are saved
from performance_metrics import get_task_axis_error_measures
## Calculate trials per min
def trials_per_min(task_entries):
    """Average rewarded trials per minute across one or more task entries.

    Accepts a single entry (or database ID) or an iterable of them; integer
    arguments are resolved to task-entry objects via _get_te. The rate is
    total rewards divided by total task duration, scaled to minutes.
    """
    if not np.iterable(task_entries):
        task_entries = (task_entries,)

    total_rewards = 0
    total_seconds = 0.0
    for entry in task_entries:
        te = _get_te(entry) if isinstance(entry, int) else entry
        total_rewards += te.n_rewards
        total_seconds += float(len(te.hdf.root.task)) / te.update_rate

    return float(total_rewards) / total_seconds * seconds_per_min
class Trials(object):
    """A collection of (start, end) index intervals within a signal.

    `length` is the total number of samples in the parent signal; `bool`
    exposes the intervals as a cached boolean mask over that range.
    """

    def __init__(self, inds, length):
        self.inds = inds        # sequence of (start, end) index pairs
        self.length = length    # total sample count of the parent signal

    def __iter__(self):
        """Iterate over the (start, end) pairs."""
        return iter(self.inds)

    @property
    def bool(self):
        """Boolean mask of shape (length,): True inside any interval. Cached."""
        try:
            return self._full_inds
        except AttributeError:
            mask = np.zeros(self.length, dtype=bool)
            for start, end in self.inds:
                mask[start:end] = True
            self._full_inds = mask
            return self._full_inds
def get_kf_blocks_after(id, **kwargs):
    """Blocks recorded after `id` whose decoder is a Kalman filter ('KF')."""
    candidates = dbfn.get_blocks_after(id, **kwargs)
    return filter(lambda block: _get_te(block).decoder_type == 'KF', candidates)
def get_ppf_blocks_after(id, **kwargs):
    """Blocks recorded after `id` whose decoder is a point-process filter ('PPF')."""
    candidates = dbfn.get_blocks_after(id, **kwargs)
    return filter(lambda block: _get_te(block).decoder_type == 'PPF', candidates)
def bits_per_sec(workspace_radius, target_radius):
    '''
    Fitts-style index of difficulty for the BMI task, in bits
    (Gilja et al 2012, Nature Neuroscience):

                 Distance + target radius
    bits = log2 --------------------------
                    2 * target radius

    where Distance is the center-to-center separation of origin and target.
    (As noted in the original, the numerator uses the target *radius* while
    the denominator uses its *diameter*.)
    '''
    ratio = (float(workspace_radius) + target_radius) / (2 * target_radius)
    return np.log2(ratio)
def plot_targets(ax=None, targets=None, facecolor='none', radius=2, **kwargs):
    """Draw each target as a circle patch on `ax`.

    A new figure/axes is created when `ax` is None. `targets` is an iterable
    of 3D points; the circle is placed at the (x, z) components. Extra
    keyword args are forwarded to matplotlib's Circle.
    """
    if ax is None:
        plt.figure()
        ax = plt.subplot(111)

    from pylab import Circle
    circles = []
    for center in targets:
        circ = Circle(center[[0, 2]], radius=radius, facecolor=facecolor, **kwargs)
        circles.append(circ)
        ax.add_patch(circ)
from riglib.bmi import kfdecoder
from scipy.stats import pearsonr
def sliding_average(data, window_size):
    """Boxcar-average `data` over `window_size` samples, downsampled by the
    same factor (one output value per non-overlapping window position)."""
    boxcar = np.ones(window_size)
    summed = np.convolve(data, boxcar, 'valid')
    return 1. / window_size * summed[::window_size]
def reward_time_between_blocks(id0, id1):
    """Sum the total reward time of every task entry strictly between the
    database IDs `id0` and `id1`.

    IDs in the range that do not exist in the database are skipped.

    Fixes: the original used a bare ``except:`` (which also swallows
    SystemExit/KeyboardInterrupt) together with a redundant ``stuff``
    sentinel flag; the lookup now catches Exception and skips via EAFP.
    """
    reward_time = 0
    for k in range(id0 + 1, id1):
        try:
            models.TaskEntry.objects.get(id=k)
        except Exception:
            # No record with this ID — skip it.
            continue
        te = _get_te(k)
        reward_time += te.total_reward_time
    return reward_time
def task_type(te):
    """Classify a task entry by its decoder configuration.

    Returns 'KF' for Kalman-filter blocks. For PPF blocks: 'PPF' when no
    feedback_rate attribute is present, otherwise 'LC' at a 10 Hz task
    update rate or 'LF' at 60 Hz. Returns None for anything else.
    """
    if te.decoder_type == 'KF':
        return 'KF'
    if te.decoder_type == 'PPF':
        if not hasattr(te, 'feedback_rate'):
            return 'PPF'
        rate_labels = {10: 'LC', 60: 'LF'}
        return rate_labels.get(te.task_update_rate)
    return None
class ManualControlMultiTaskEntry(dbfunctions.TaskEntry):
    '''
    Extension of dbfunctions TaskEntry class to calculate performance
    measures for the generic "target capture" task.

    Python 2 code (print statements, izip/xrange, list-returning filter).
    On construction it parses the HDF task_msgs table into an extended
    record array (msg, time, target_index) and splits it into per-trial
    message lists.
    '''
    def __init__(self, *args, **kwargs):
        # 'fixed' selects whether analyses are restricted to the portion of
        # the block after the decoder stopped adapting (see
        # get_fixed_decoder_task_msgs).
        self.fixed = kwargs.pop('fixed', True)
        super(ManualControlMultiTaskEntry, self).__init__(*args, **kwargs)
        try:
            task_msgs = self.hdf.root.task_msgs[:]

            # Ignore the last message if it's the "None" transition used to stop the task
            if task_msgs[-1]['msg'] == 'None':
                task_msgs = task_msgs[:-1]

            # ignore "update_bmi" messages. These have been removed in later datasets
            task_msgs = task_msgs[task_msgs['msg'] != 'update_bmi']

            target_index = self.hdf.root.task[:]['target_index'].ravel()

            # Extended message record: original msg/time plus the value of
            # target_index at the message's timestamp (NaN when out of range).
            task_msg_dtype = np.dtype([('msg', '|S256'), ('time', '<u4'), ('target_index', 'f8')])
            task_msgs_ext = np.zeros(len(task_msgs), dtype=task_msg_dtype)
            for k in range(len(task_msgs)):
                task_msgs_ext[k]['msg'] = task_msgs[k]['msg']
                task_msgs_ext[k]['time'] = task_msgs[k]['time']
                try:
                    task_msgs_ext[k]['target_index'] = target_index[task_msgs[k]['time']]
                except:
                    task_msgs_ext[k]['target_index'] = np.nan

            self.task_msgs = task_msgs_ext

            ## Split the task messages into separate trials
            # A new trial starts in either the 'wait' state or when 'targ_transition' has a target_index of -1
            trial_start = np.logical_or(self.task_msgs['msg'] == 'wait', np.logical_and(self.task_msgs['msg'] == 'targ_transition', self.task_msgs['target_index'] == -1))
            trial_start_inds, = np.nonzero(trial_start)
            trial_end_inds = np.hstack([trial_start_inds[1:], len(trial_start)])
            self.trial_msgs = []
            for trial_st, trial_end in izip(trial_start_inds, trial_end_inds):
                self.trial_msgs.append(self.task_msgs[trial_st:trial_end])
        except:
            # Parsing is best-effort: leave the object usable even when the
            # HDF file is absent or incomplete.
            print "Couldn't process HDF file. Is it copied?"
            import traceback
            traceback.print_exc()

        # Defaults for blocks recorded before these parameters were saved.
        if 'target_radius' not in self.params:
            self.target_radius = 2.
        if 'cursor_radius' not in self.params:
            self.cursor_radius = 0.4

        ### Update rate of task (Hz)
        self.update_rate = 60.

    @property
    def reach_origin(self):
        """Per-sample reach origin (previous target), cached under 'origin'."""
        return self.get_cached_attr('origin', self.calc_reach_origin)

    def calc_reach_origin(self):
        """For each task sample, the position of the previous target.

        Samples before the first target change get NaN.
        """
        target = self.hdf.root.task[:]['target']
        origin = np.zeros_like(target)
        first_target_change = False  # NOTE(review): assigned but never used
        prev_target = target[0]
        curr_origin = np.ones(3) * np.nan
        for t in range(len(target)):
            curr_target = target[t]
            if not np.array_equal(curr_target, prev_target):
                # Target changed; the old target becomes the new origin.
                curr_origin = prev_target.copy()
                prev_target = curr_target
            origin[t] = curr_origin
        return origin

    @property
    def angular_error(self):
        """Per-sample angular error (see calc_angular_error), cached."""
        return self.get_cached_attr('angular_error', self.calc_angular_error)

    def calc_angular_error(self):
        '''
        Compute the angular error between the cursor movement from one
        task loop iteration to the next (typically at 60 Hz). Angular error
        is with reference to the straight line between the cursor and the target
        '''
        # compute angles for each trial
        cursor = self.hdf.root.task[:]['cursor']
        target = self.hdf.root.task[:]['target']
        cursor_vel = np.diff(cursor, axis=0)
        int_dir = target - cursor
        dist_to_targ = np.array(map(np.linalg.norm, int_dir))
        # Angle subtended by the target window at the current distance;
        # movements within this cone count as zero error.
        window_angle = np.arctan2(self.target_radius, dist_to_targ)
        import geometry
        angles = geometry.angle(int_dir[:-1], cursor_vel, axis=0)
        angles = angles - window_angle[:-1]
        angles[angles < 0] = 0
        # Pad with NaN so the result has the same length as the task table.
        angles = np.hstack([angles, np.nan])
        return angles

    def get_targets(self):
        """Unique target positions used in this block, stacked row-wise."""
        all_targets = self.hdf.root.task[:]['target']
        s = set(map(tuple, all_targets))
        return np.vstack(s)

    def plot_targets(self, ax=None, targets=None, facecolor='none', **kwargs):
        """Draw each target as a circle of self.target_radius on `ax`.

        Creates a new figure/axes when ax is None and uses this block's
        targets when targets is None. Returns (ax, patches).
        """
        if ax == None:
            plt.figure()
            ax = plt.subplot(111)

        if targets == None:
            targets = self.get_targets()

        from pylab import Circle
        target_radius = self.target_radius
        patches = []
        for target in targets:
            # Circles are placed at the (x, z) components of the 3D target.
            c = Circle(target[[0,2]], radius=target_radius, facecolor=facecolor, **kwargs)
            patches.append(c)
            ax.add_patch(c)
        return ax, patches

    def get_fixed_decoder_task_msgs(self):
        """Task messages from the portion of the block where the decoder was
        fixed (after the last 'update_bmi' message, or after the assist
        level reached 0 when no updates were recorded).

        Returns (task_msgs, fixed_start_index); result is cached on the
        instance. Returns an empty array and np.inf start when the
        assist-level lookup fails unexpectedly.
        """
        try:
            # Cached from a previous call.
            return self._fixed_decoder_task_msgs, self._fixed_start
        except:
            hdf = self.hdf
            task_msgs = hdf.root.task_msgs[:]
            update_bmi_msgs = np.nonzero(task_msgs['msg'] == 'update_bmi')[0]
            if len(update_bmi_msgs) > 0:
                # Fixed portion starts right after the last decoder update.
                fixed_start = update_bmi_msgs[-1] + 1
            else:
                try:
                    assist_off = np.nonzero(hdf.root.task[:]['assist_level'] == 0)[0][0]
                except ValueError:
                    assist_off = 0
                except:
                    return np.zeros((0,), dtype=task_msgs.dtype), np.inf
                # First message index after the assist turned off.
                assist_off = filter(lambda k: task_msgs['time'][k] > assist_off, xrange(len(task_msgs)))[0]
                fixed_start = max(assist_off, 0)

            task_msgs = task_msgs[fixed_start:]
            self._fixed_decoder_task_msgs = task_msgs
            self._fixed_start = fixed_start
            return self._fixed_decoder_task_msgs, self._fixed_start
            #return task_msgs, fixed_start

    def get_plot_fnames(self):
        """Filenames in /storage/plots whose name contains this block's name."""
        files = os.popen('ls /storage/plots | grep %s' % self.name)
        files = [f.rstrip() for f in files]
        return files

    def _from_hdf_get_trial_end_types(self, fixed=True):
        """Tally trial outcomes from the HDF task messages.

        Returns dict(success=<#rewards>, hold_error=<per-target counts>,
        timeout=<#timeout penalties>). NOTE: the `fixed` parameter is
        immediately overwritten by self.fixed below, so it has no effect.
        """
        hdf = self.hdf
        target_index = hdf.root.task[:]['target_index']
        fixed = self.fixed
        if fixed:
            task_msgs, fixed_start = self.get_fixed_decoder_task_msgs()
        else:
            task_msgs = hdf.root.task_msgs[:]
            task_msgs = task_msgs[~(task_msgs['msg'] == 'update_bmi')]

        # Count the number of reward trials
        n_rewards = len(np.nonzero(task_msgs['msg'] == 'reward')[0])
        n_timeouts = len(np.nonzero(task_msgs['msg'] == 'timeout_penalty')[0])

        # TODO Number of trials
        hold_inds = np.nonzero(task_msgs['msg'] == 'hold')[0]
        target_seq_length = max(target_index) + 1
        hold_penalty_by_target = np.zeros(target_seq_length)
        for msg_ind in hold_inds:
            # A 'hold' immediately followed by 'hold_penalty' is a hold error;
            # attribute it to the target index active at the hold time.
            if task_msgs[msg_ind+1]['msg'] == 'hold_penalty':
                trial_targ_idx = target_index[task_msgs[msg_ind]['time']]
                hold_penalty_by_target[trial_targ_idx] += 1

        # Count the number of hold errors at each of the types of targets
        return dict(success=n_rewards, hold_error=hold_penalty_by_target, timeout=n_timeouts)

    def get_trial_end_types(self):
        """Trial outcome tallies (see _from_hdf_get_trial_end_types)."""
        # self.save()
        # hdf = tables.openFile('/storage/plots/fixed_bmi_performance.hdf', mode='r')
        # hdf.close()
        return self._from_hdf_get_trial_end_types()

    def get_rewards_per_min(self, window_size_mins=1.):
        '''
        Estimates rewards per minute. New estimates are made every 1./60 seconds
        using the # of rewards observed in the previous 'window_size_mins' minutes.
        Returns (tvec_seconds, rewards_per_min).
        '''
        hdf = self.hdf
        task_msgs = hdf.root.task_msgs[:]
        reward_msgs = filter(lambda m: m[0] == 'reward', task_msgs)
        # Impulse train: 1 at every task sample where a reward occurred.
        reward_on = np.zeros(hdf.root.task.shape)
        for reward_msg in reward_msgs:
            reward_on[reward_msg[1]] = 1

        # Window length in task samples (Hz * seconds).
        # NOTE(review): this is a float; newer numpy versions reject a float
        # size in np.ones — may need an int() cast.
        window_size_updates = window_size_mins * seconds_per_min * self.update_rate
        conv = np.ones(window_size_updates) * 1./window_size_mins
        rewards_per_min = np.convolve(reward_on, conv, 'valid')
        tvec = np.arange(len(rewards_per_min)) * 1./self.update_rate + window_size_mins * seconds_per_min
        return tvec, rewards_per_min

    @property
    def clda_stop_time(self):
        """Time (minutes) of the last 'update_bmi' message; 0 when none exists."""
        try:
            task_msgs = self.hdf.root.task_msgs[:]
            last_update_msg_ind = np.nonzero(task_msgs['msg'] == 'update_bmi')[0][-1]
            last_update_msg = task_msgs[last_update_msg_ind]
            clda_stop = last_update_msg['time'] * 1./self.update_rate * min_per_sec
        except:
            clda_stop = 0
        return clda_stop

    @property
    def clda_stop_ind(self):
        """Task-sample index of the last 'update_bmi' message.

        Unlike clda_stop_time, raises when no update messages exist.
        """
        task_msgs = self.hdf.root.task_msgs[:]
        last_update_msg_ind = np.nonzero(task_msgs['msg'] == 'update_bmi')[0][-1]
        last_update_msg = task_msgs[last_update_msg_ind]
        clda_stop = last_update_msg['time']
        return clda_stop

    def plot_rewards_per_min(self, ax=None, show=False, max_ylim=None, save=True, **kwargs):
        '''
        Make a plot of the rewards per minute, with vertical markers at the
        times when the assist and CLDA turned off. Saves the figure unless
        an existing axis is supplied.
        '''
        import plotutil
        tvec, rewards_per_min = self.get_rewards_per_min(**kwargs)
        # Downsample by 900 samples (15 s at the 60 Hz task rate) for plotting.
        rewards_per_min = rewards_per_min[::900]
        tvec = tvec[::900]

        # find the time when CLDA turns off
        task_msgs = self.hdf.root.task_msgs[:]
        clda_stop = self.clda_stop_time

        if ax == None:
            plt.figure(figsize=(4,3))
            axes = plotutil.subplots(1, 1, return_flat=True, hold=True, left_offset=0.1)
            ax = axes[0]
        else:
            # Caller owns the figure; don't save it from here.
            save = False

        try:
            # find the time when the assist turns off
            assist_level = self.hdf.root.task[:]['assist_level'].ravel()
            assist_stop = np.nonzero(assist_level == 0)[0][0]
            assist_stop *= min_per_sec * 1./self.update_rate # convert to min
            ax.axvline(assist_stop, label='Assist off', color='green', linewidth=2)
        except:
            # No assist data, or the assist never reached 0: skip the marker.
            pass
        ax.axvline(clda_stop, label='CLDA off', color='blue', linewidth=2, linestyle='--')

        ax.plot(tvec * min_per_sec, rewards_per_min, color='black', linewidth=2)
        if max_ylim == None:
            max_ylim = int(max(15, int(np.ceil(max(rewards_per_min)))))
        max_xlim = int(np.ceil(max(tvec * min_per_sec)))
        # plotutil.set_axlim(ax, [0, max_ylim], labels=range(max_ylim+1), axis='y')
        # plotutil.set_axlim(ax, [0, max_ylim], labels=range(0, max_ylim+1), axis='y')
        plotutil.set_xlim(ax, [0, max_xlim])
        plotutil.ylabel(ax, 'Rewards/min', offset=-0.08)
        plotutil.xlabel(ax, 'Time during block (min)')
        plotutil.legend(ax)
        ax.grid()

        if save: self.save_plot('rewards_per_min')
        if show:
            plt.show()

    @property
    def trials_per_min(self):
        """Successful trials per minute over the (fixed-portion) block length."""
        return self.get_trial_end_types()['success']/self.length * sec_per_min

    @property
    def n_trials(self):
        # NOTE(review): relies on self.trial_end_types, which is not defined
        # in this class — presumably provided by the parent TaskEntry.
        return self.trial_end_types['success']

    @property
    def start_time(self):
        ''' Define the start time of the block, in minutes. For a block with a
        fixed decoder this is 0; for a block where the BMI changes it is the
        time of the first message after the decoder became fixed.
        '''
        task_msgs = self.hdf.root.task_msgs[:]
        if 'update_bmi' in task_msgs['msg']:
            task_msgs, _ = self.get_fixed_decoder_task_msgs()
            return task_msgs[0]['time'] * min_per_sec
        else:
            return 0.0

    @property
    def length(self):
        '''
        Length of session (seconds, up to the last reward). Changes based on
        whether it was a 'fixed' block: fixed blocks subtract start_time.
        Returns 0.0 when no rewards occurred.
        '''
        task_msgs, _ = self.get_fixed_decoder_task_msgs()
        rewardtimes = [r['time'] for r in task_msgs if r['msg']=='reward']
        if len(rewardtimes)>0:
            if self.fixed:
                return (rewardtimes[-1] * 1./self.update_rate - self.start_time)
            else:
                return rewardtimes[-1] * 1./self.update_rate
        else:
            return 0.0

    def label_trying(self, ds_factor=6):
        """Boolean mask (downsampled by ds_factor) marking samples where the
        subject was plausibly 'trying': excludes the span from the last
        'hold' before each timeout to the timeout, and from each timeout to
        the first 'hold' after it.

        Note: py2 integer division is used for the downsampled indices.
        """
        T = len(self.hdf.root.task) / ds_factor
        task_msgs = self.hdf.root.task_msgs[:]
        timeout_penalty_msg_inds = np.array(filter(lambda k: task_msgs[k]['msg'] == 'timeout_penalty', range(len(task_msgs))))

        #### exclude the last trial before a timeout
        labels = np.ones(T)
        for ind in timeout_penalty_msg_inds:
            # find the first 'hold' state before the timeout
            hold_ind = ind
            while not task_msgs[hold_ind]['msg'] == 'hold':
                hold_ind -= 1

            timeout_time = task_msgs[ind]['time'] / ds_factor
            hold_time = task_msgs[hold_ind]['time'] / ds_factor
            labels[hold_time : timeout_time] = 0

        ### Exclude the first 'target' state (return to center) after the timeout
        for ind in timeout_penalty_msg_inds:
            # find the first 'hold' state after the timeout
            hold_ind = ind
            while hold_ind < len(task_msgs) and not task_msgs[hold_ind]['msg'] == 'hold':
                hold_ind += 1

            if hold_ind < len(task_msgs):
                timeout_time = task_msgs[ind]['time'] / ds_factor
                hold_time = task_msgs[hold_ind]['time'] / ds_factor
                labels[timeout_time : hold_time] = 0
            else:
                # NOTE(review): in this branch timeout_time was not assigned
                # in the current iteration — it reuses a stale value from a
                # previous iteration (latent bug in the original).
                labels[timeout_time:] = 0
        return labels.astype(bool)
class BMIControlMultiTaskEntry(ManualControlMultiTaskEntry):
    """Analysis wrapper for a BMI-controlled block (DB record + HDF data + decoder)."""
    def __str__(self):
        return str(self.record) + '\nDecoder: %s' % (self.decoder.name)
    def __repr__(self):
        return self.__str__()
    def get_firing_rate_stats(self):
        # Per-unit mean and std of the binned spike counts over the whole block
        mFR = np.mean(self.hdf.root.task[:]['spike_counts'], axis=0)
        sdFR = np.std(self.hdf.root.task[:]['spike_counts'], axis=0)
        return mFR, sdFR
    @property
    def assist_off_ind(self):
        # Index of the first task sample where the machine assist reached 0
        assist_level = self.hdf.root.task[:]['assist_level'].ravel()
        try:
            assist_off_ind = np.nonzero(assist_level == 0)[0][0]
        except:
            # assist level never gets to 0
            assist_off_ind = np.nan
        return assist_off_ind
    def plot_loop_times(self, intended_update_rate=60.):
        # Histogram of measured per-iteration loop times vs. the intended frame period
        loop_times = self.hdf.root.task[:]['loop_time'].ravel()
        plt.figure()
        axes = plotutil.subplots(1, 1, return_flat=True)
        plotutil.histogram_line(axes[0], loop_times, np.arange(0, 0.050, 0.0005))
        axes[0].axvline(1./intended_update_rate, color='black', linestyle='--')
        self.save_plot('loop_times')
    @property
    def perc_correct(self):
        # Fraction of trials ending in reward; hold errors at the first (center)
        # target in the sequence are excluded from the denominator
        trial_end_types = self.trial_end_types
        return float(trial_end_types['success']) / (trial_end_types['success'] + trial_end_types['timeout'] + sum(trial_end_types['hold_error'][1:]))
    def get_perc_correct(self, n_trials=None):
        """Percent correct; optionally restricted to the first n_trials successes."""
        if n_trials == None or n_trials == self.n_trials:
            return self.perc_correct
        else:
            # return the % correct within the first n_trials successful trials
            task_msgs, _ = self.get_fixed_decoder_task_msgs()
            n_rewards = 0
            n_timeouts = 0
            n_hold_errors = 0
            length = self.length
            target_index = self.hdf.root.task[:]['target_index']
            for msg in task_msgs:
                if n_rewards >= n_trials:
                    break
                elif msg['msg'] == 'reward':
                    n_rewards += 1
                elif msg['msg'] == 'timeout_penalty':
                    n_timeouts += 1
                elif msg['msg'] == 'hold_penalty':
                    trial_targ_idx = target_index[msg['time']-1]
                    if trial_targ_idx > 0: # ignore center hold errors
                        n_hold_errors += 1
            return float(n_rewards) / (n_rewards + n_timeouts + n_hold_errors)
    @property
    def decoder_type(self):
        # 'PPF', 'KF', or 'unk' depending on the decoder class
        from riglib.bmi import ppfdecoder, kfdecoder
        if isinstance(self.decoder, ppfdecoder.PPFDecoder):
            return 'PPF'
        elif isinstance(self.decoder, kfdecoder.KFDecoder):
            return 'KF'
        else:
            return 'unk'
    @property
    def training_tau(self):
        # 'tau' parameter of the block the decoder was trained from; nan if unavailable
        try:
            return dbfn.TaskEntry(self.decoder_record.entry).params['tau']
        except:
            return np.nan
    def cursor_speed(self, sl=slice(None)):
        # Frame-to-frame cursor speed (norm of the velocity) over the slice sl
        cursor_pos = self.hdf.root.task[sl]['cursor']
        step_size = 1 if sl.step == None else sl.step
        cursor_vel = np.diff(cursor_pos, axis=0) * (self.update_rate/step_size)
        # NOTE(review): Python 2 map() idiom -- under Python 3 this would need
        # list(map(...)) to build the array correctly
        cursor_speed = np.array(map(np.linalg.norm, cursor_vel))
        return cursor_speed
    def get_ctrl_vecs(self):
        """Cached K*u: steady-state Kalman gain applied to the binned spike counts."""
        # get K
        if not hasattr(self, 'Ku'):
            F, K = self.decoder.filt.get_sskf()
            u = self.get_spike_counts()
            Ku = np.dot(K, u)
            self.Ku = Ku
        return self.Ku
    def get_decoder_state(self):
        # Cached decoder-state trajectory, (n_states, T); storage layout depends on
        # whether the decoder is a KF or a PPF
        if not hasattr(self, 'x_t'):
            if isinstance(self.decoder, kfdecoder.KFDecoder):
                self.x_t = np.mat(self.hdf.root.task[5::6]['decoder_state'][:,:,0].T)
            elif isinstance(self.decoder, ppfdecoder.PPFDecoder):
                try:
                    self.x_t = np.mat(np.hstack(self.hdf.root.task[:]['internal_decoder_state']))
                except:
                    self.x_t = np.mat(np.hstack(self.hdf.root.task[:]['decoder_state']))
            else:
                raise ValueError("decoder type?!?")
        return self.x_t
    def get_KF_active_BMI_motor_commands(self):
        '''
        KF Dynamics model: x_{t+1} = Ax_t + w_t
        KF update equation: x_{t+1|t+1} = Ax_{t|t} + K_t (y_{t+1} - CAx_{t|t})
        Therefore,
        w_{t+1|t+1} = K_t (y_{t+1} - CAx_{t|t})
                    = x_{t+1|t+1} - Ax_{t|t}
        (simultaneously estimate the newest motor command while refining the previous state estimate)
        '''
        if not hasattr(self, 'w'):
            y = np.mat(self.get_spike_counts())
            x = self.get_decoder_state()
            F, K = self.decoder.filt.get_sskf()
            C = np.mat(self.decoder.filt.C)
            A = np.mat(self.decoder.filt.A)
            self.w = y[:,1:] - C*A*x[:,:-1]
        return self.w
    def calc_Kyt(self):
        '''
        steady state kalman gain times obs
        '''
        y = np.mat(self.get_spike_counts())
        F, K = self.decoder.filt.get_sskf()
        K = np.mat(K)
        Kyt = K*y
        return Kyt
    @property
    def Kyt(self):
        # Cached version of calc_Kyt
        return self.get_cached_attr('Kyt', self.calc_Kyt)
    def get_BMI_motor_commands(self):
        '''
        Innovation sequence of the decoder state: w_t = x_{t+1} - A x_t, computed
        directly from the saved state trajectory (see get_KF_active_BMI_motor_commands
        for the derivation).
        '''
        try:
            A = self.decoder.filt.A
        except:
            # Fall back to the non-SSKF version of the decoder stored in the DB
            from db.tracker import models
            d = models.Decoder.objects.using(self.record._state.db).get(name=self.decoder_record.name.rstrip('_sskf'))
            A = d.load().filt.A
        if not hasattr(self, 'w_t'):
            x = self.get_decoder_state()
            A = np.mat(A)
            w_t = np.mat(np.zeros_like(x))
            w_t[:,:-1] = x[:,1:] - A*x[:,:-1]
            self.w_t = w_t
        return self.w_t
    def get_spike_counts(self, start=None, stop=None, binlen=None):
        """Binned spike counts, (n_units, n_bins), rebinned from the 60 Hz task
        table to the decoder bin length. Cached on first call.

        NOTE(review): `start` and `stop` are currently ignored, and on failure the
        except-branch silently falls back to LFP power features -- confirm intended.
        """
        if binlen == None:
            binlen = self.decoder.binlen
        if binlen > 1./self.update_rate: # Default bin lengths for graphics-driven tasks
            step = binlen/(1./self.update_rate)
        if not hasattr(self, 'u'):
            try:
                u_60hz = self.hdf.root.task[slice(None, None)]['spike_counts'][:,:,0]
                T = len(u_60hz)
                u = []
                for k in range(int(np.floor(T/step))):
                    u.append(np.sum(u_60hz[step*k: step*(k+1), :], axis=0))
                u = np.vstack(u).T
                self.u = u
            except:
                self.u = self.hdf.root.task[5::6]['lfp_power'][:,:,0].T
        return self.u
class BMIManipulatedFeedbackTaskEntry(BMIControlMultiTaskEntry):
    """BMI block in which the visual feedback rate differs from the control rate."""
    def __str__(self):
        base = super(BMIManipulatedFeedbackTaskEntry, self).__str__()
        rates = '\nfeedback rate = %d, control rate = %d' % (self.feedback_rate, self.task_update_rate)
        return base + rates
class CLDAControlMultiTaskEntry(BMIControlMultiTaskEntry):
    """Analysis wrapper for a CLDA (closed-loop decoder adaptation) block."""
    def __str__(self):
        try:
            decoder = self.get_decoders_trained_in_block() #dbfn.get_decoders_trained_in_block(self.record, dbname=self.dbname)
            if isinstance(decoder, list):
                decoder = decoder[0]
            return str(self.record) + '\nDecoder: %s' % decoder.name
        except:
            # No decoder trained in this block -- fall back to the parent repr
            return super(CLDAControlMultiTaskEntry, self).__str__()
    def label_trying(self, *args, **kwargs):
        # Samples before CLDA stopped are never counted as 'trying'
        clda_stop_ind = self.clda_stop_ind / 6 #### #TODO REMOVE 60 Hz hardcoding!
        trying = super(CLDAControlMultiTaskEntry, self).label_trying(*args, **kwargs)
        trying[:clda_stop_ind] = 0
        return trying
    def gen_summary_plots(self):
        self.plot_rewards_per_min()
    def get_matching_state_transition_seq(self, seq):
        """Find all occurrences of the exact state-name sequence `seq` in the
        fixed-decoder task messages.

        Returns (start indices, matching message sub-arrays, (start, end) time pairs).
        """
        task_msgs = self.get_fixed_decoder_task_msgs()# self.hdf.root.task_msgs[:]
        seq = np.array(seq, dtype='|S256')
        msg_list_inds = []
        trial_msgs = []
        epochs = []
        for k in range(len(task_msgs)-len(seq)):
            if np.all(task_msgs[k:k+len(seq)]['msg'] == seq):
                msg_list_inds.append(k)
                trial_msgs.append(task_msgs[k:k+len(seq)])
                epochs.append((task_msgs[k]['time'], task_msgs[k+len(seq)-1]['time']))
        return msg_list_inds, trial_msgs, epochs
    def plot_C_hist(self, param_fns=[lambda C_hist: C_hist[:,:,3], lambda C_hist: C_hist[:,:,5], lambda C_hist: C_hist[:,:,6], lambda C_hist: np.sqrt(C_hist[:, :, 3]**2 + C_hist[:,:,5]**2)],
                    labels=['Change in x-vel tuning', 'Change in z-vel tuning', 'Change in baseline', 'Change in mod. depth']):
        '''
        Plot parameter trajectories for C (one column per param_fn; units grouped
        7 per subplot row). Each trajectory is plotted relative to its first value.
        '''
        C_hist = self.hdf.root.task[1:]['filt_C']
        n_units = C_hist.shape[1]
        n_blocks = int(np.ceil(float(n_units)/7))
        fig = plt.figure(facecolor='w', figsize=(8./3*len(param_fns), 2*n_blocks))
        axes = plotutil.subplots(n_blocks, len(param_fns), y=0.01)
        #, bottom_offset=0.01)
        #fig = plt.figure(figsize=(8, 2*n_units), facecolor='w')
        #axes = plotutil.subplots(n_units, len(param_fns), y=0.01) #, bottom_offset=0.01)
        for m, fn in enumerate(param_fns):
            for k in range(n_blocks):
                sl = slice(k*7, (k+1)*7, None)
                param_hist = fn(C_hist)[:,sl]
                param_hist_diff = param_hist - param_hist[0,:]
                axes[k,m].plot(param_hist_diff)
                axes[k,m].set_xticklabels([])
                if m == 0:
                    plotutil.ylabel(axes[k,m], 'Units %d-%d' % (sl.start, sl.stop-1))
                if k == n_blocks - 1:
                    plotutil.xlabel(axes[k,m], labels[m])
            # Share a common y-limit across all rows of this column
            lims = np.vstack(map(lambda ax: ax.get_ylim(), axes[:,m]))
            ylim = min(lims[:,0]), max(lims[:,1])
            plotutil.set_axlim(axes[:,m], ylim, axis='y')
        self.save_plot('clda_param_hist')
    def plot_C_hist_pds(self):
        # Preferred-direction snapshots of C, sampled once per minute
        C_hist_plot = self.hdf.root.task[1:10000:sec_per_min*self.update_rate]['filt_C']
        n_plots = C_hist_plot.shape[0]
        plt.figure(figsize=(3, 3*n_plots))
        axes = plotutil.subplots(n_plots, 1, return_flat=True, hold=True, aspect=1)
        for k in range(n_plots):
            self.decoder.plot_pds(C_hist_plot[k,:,:], ax=axes[k])
        self.save_plot('clda_param_hist_pds')
    def get_npz_param_hist(self, key, glue_fn=np.hstack):
        # Concatenate one key across all saved CLDA parameter-history records
        return np.array(glue_fn([x[key] for x in self.clda_param_hist]))
    @property
    def intended_kin(self):
        # Cached concatenation of the CLDA 'intended kinematics' history
        if not hasattr(self, '_intended_kin'):
            self._intended_kin = self.get_npz_param_hist('intended_kin', np.hstack)
        return self._intended_kin
    def intended_kin_norm(self, sl=slice(None, None)):
        # NOTE(review): Python 2 map() idiom; Python 3 would need list(map(...))
        return np.array(map(np.linalg.norm, self.intended_kin[sl, :].T))
    def cursor_speed(self, sl=None):
        # Default: one sample per decoder update; 'assist_off' starts after assist ends
        if sl == None:
            sl = slice(None, None, self.update_rate)
        elif sl == 'assist_off':
            sl = slice(self.assist_off_ind, None, self.update_rate)
        return super(CLDAControlMultiTaskEntry, self).cursor_speed(sl)
    def plot_before_and_after_C(self):
        # Side-by-side C matrices: seed decoder vs. the decoder trained in this block
        dec_before = self.decoder
        dec_after = dbfn.get_decoders_trained_in_block(self.id)
        plt.figure()
        axes = plotutil.subplots(1,2,return_flat=True, hold=True)
        dec_before.plot_C(ax=axes[0])
        dec_after.plot_C(ax=axes[1])
    @property
    def decoder(self):
        # For CLDA blocks, 'the decoder' means the one trained *in* this block
        decoders = self.get_decoders_trained_in_block()
        if isinstance(decoders, list):
            return decoders[0]
        else:
            return decoders
    @property
    def seed_decoder(self):
        # The decoder this block started from (before adaptation)
        return dbfn.get_decoder(self.record)
class CLDAControlKFCG(CLDAControlMultiTaskEntry):
    """CLDA block run with the KF computed-gain (CG) adaptation scheme."""
    def get_N_trajectory(self):
        """Trajectory of the effective closed-loop pole n = a/(1 + d*g) over the
        CLDA history, using the x-velocity (index 3) diagonal entries.
        """
        d = self.hdf.root.clda[:]['kf_C_xpose_Q_inv_C'][:,3,3]
        a = self.decoder.filt.A[3,3]
        w = self.decoder.filt.W[3,3]
        # g: positive root of the scalar steady-state Riccati equation
        g = (-(1 - a**2 - w*d) + np.sqrt((1 - a**2 - w*d)**2 + 4*d*w)) / (2*d)
        n = a/(1 + d*g)
        return n
    def get_ctrl_vecs(self):
        """Rebin 60 Hz spike counts to the decoder bin length and apply the
        steady-state Kalman gain K. Returns K*u, shape (n_states, n_bins).
        """
        # get K
        F, K = self.decoder.filt.get_sskf()
        # get u
        u_60hz = self.hdf.root.task[:]['spike_counts'][:,:,0]
        T = len(u_60hz)
        u = []
        # Frames per decoder bin. The original computed binlen / update_rate,
        # i.e. (seconds/bin) / (frames/second), which is always << 1 so the
        # assert below could never pass; match the sibling implementation in
        # BMIControlMultiTaskEntry.get_spike_counts, which uses
        # binlen / (1. / update_rate). int() also keeps the slice indices integral.
        stepsize = int(round(self.decoder.binlen * self.update_rate))
        assert stepsize >= 1
        for k in range(int(np.floor(T/stepsize))):
            u.append(np.sum(u_60hz[stepsize*k: stepsize*(k+1), :], axis=0))
        u = np.vstack(u).T
        return np.dot(K, u)
class CLDAControlPPFTaskEntry(CLDAControlMultiTaskEntry):
    """CLDA block run with the continuously adapted PPF decoder."""
    def conv_param_hist_to_mat(self, concat_fns={}):
        """Export the CLDA parameter history (spike counts, intended kinematics,
        C-matrix trajectory) to a MATLAB .mat file named after this block.

        NOTE(review): `concat_fns` is currently unused, and the output path is
        hard-coded -- confirm /storage/bmi_params exists on the analysis machine.
        """
        data = dict()
        data['spike_counts'] = np.hstack([x['spike_counts_batch'] for x in self.clda_param_hist])
        data['intended_kin'] = np.hstack([x['intended_kin'] for x in self.clda_param_hist])
        data['filt_C'] = np.dstack([np.asarray(x['filt.C']) for x in self.clda_param_hist])
        # loadmat was imported here but never used
        from scipy.io import savemat
        savemat('/storage/bmi_params/%s.mat' % self.name, data)
# Registry mapping task names -> specialized TaskEntry analysis classes, used by
# _get_te() below. Every entry is currently commented out, so the dict is empty
# and _get_te() always falls back to the generic dbfn.TaskEntry; uncomment a
# line to re-enable its specialization.
tasks = dict(
    # manual_control=dbfn.TaskEntry,
    # clda_control=dbfn.TaskEntry,
    # manual_control_2 = dbfn.TaskEntry,
    # visual_feedback = dbfn.TaskEntry,
    # visual_feedback_multi = ManualControlMultiTaskEntry,
    # machine_control = ManualControlMultiTaskEntry,
    # clda_auto_assist = dbfn.TaskEntry,
    # clda_constrained_sskf = dbfn.TaskEntry,
    # clda_constrained_sskf_multi = dbfn.TaskEntry,
    # manual_control_multi=ManualControlMultiTaskEntry,
    # joystick_multi=ManualControlMultiTaskEntry,
    # joystick_leaky_vel=ManualControlMultiTaskEntry,
    # bmi_control_multi=BMIControlTentacleTaskEntry,
    # bmi_cursor_bias=BMIControlTentacleTaskEntry,
    # bmi_manipulated_feedback = BMIManipulatedFeedbackTaskEntry,
    # clda_control_multi = CLDAControlMultiTaskEntry,
    # clda_rml_kf = CLDAControlMultiTaskEntry,
    # bmi_control_tentacle_attractor = BMIControlTentacleTaskEntry,
    # clda_kf_cg_rml = CLDAControlMultiTaskEntry,
    # clda_kf_cg_rml_ivc_trial = CLDAControlMultiTaskEntry,
    # clda_rml_kf_ofc= CLDAControlMultiTaskEntry,
    # clda_cont_ppf=CLDAControlPPFTaskEntry,
    # clda_kf_cg_joint_rml = CLDAControlMultiTaskEntry,
    # clda_kf_ofc_tentacle_rml = CLDATentacleTaskEntry,
    # clda_kf_ofc_tentacle_rml_base = dbfn.TaskEntry,
    # clda_kf_ofc_tentacle_rml_trial = CLDATentacleTaskEntry,
    # bmi_baseline = BMIControlMultiTaskEntry,
    # bmi_joint_perturb = BMIControlTentacleTaskEntry,
    # tentacle_multi_config = BMIControlTentacleTaskEntry,
    # clda_kf_cg_sb=CLDAControlKFCG,
    # joystick_ops=BMIControlTentacleTaskEntry,
    # joystick_ops_bias=BMIControlTentacleTaskEntry,
    # passive_exo=ManualControlMultiTaskEntry,
    #joystick_freechoice = ManualControlMultiTaskEntry,
    #joystick_freechoice_pilot = ManualControlMultiTaskEntry,
    #joystick_instructedchoice_pilot = ManualControlMultiTaskEntry,
    #joystick_freechoice_with_reversal = ManualControlMultiTaskEntry,
    # clda_tentacle_rl = BMIControlTentacleTaskEntry,
    # bmi_resetting = BMIControlMultiTaskEntry,
    # bmi_control_targ_jump=BMIControlMultiTaskEntry,
    # tentacle_center_out_obstacle=BMIControlTentacleTaskEntry,
    # mpc_test=BMIControlTentacleTaskEntry,
)
def _get_te(te, **kwargs):
    """Instantiate the most specific TaskEntry subclass registered for this task.

    Falls back to the generic dbfn.TaskEntry when the task name has no entry in
    `tasks` (or when the specialized constructor fails for any reason).
    """
    # dbname = kwargs.pop('dbname', 'default')
    te = dbfn.TaskEntry(te, **kwargs)
    try:
        return tasks[te.record.task.name](te.record.id, **kwargs)
    except:
        # NOTE(review): deliberately broad -- a missing mapping (KeyError) or a
        # constructor failure both degrade to the generic TaskEntry rather than raise.
        return te
def summarize_bmi_performance(date, **kwargs):
    ''' For a given date, print out a summary of the BMI performance.

    kwargs are forwarded to dbfn.get_bmi_blocks and the TaskEntry constructors.
    '''
    for block in dbfn.get_bmi_blocks(date, **kwargs):
        te = _get_te(block)
        # print() form is valid in both Python 2 (single argument) and Python 3;
        # the original used Python 2-only print statements
        print(te)
        print(te.summary())
def summarize_performance(blocks, **kwargs):
    ''' For each block ID in `blocks`, print out a summary of the performance.

    kwargs are forwarded to the TaskEntry constructors.
    '''
    for block in blocks:
        te = _get_te(block)
        # print() form is valid in both Python 2 (single argument) and Python 3;
        # the original used Python 2-only print statements
        print(te)
        print(te.summary())
def compare_perc_correct(te1, te2):
    """Chi-square contingency test comparing (success, hold-error) counts of two
    blocks.

    Returns the result of scipy.stats.chi2_contingency on the 2x2 table
    [[success1, hold_err1], [success2, hold_err2]].

    The original referenced an undefined `end_types` and its helper ignored its
    argument in favor of a closure variable; both are fixed by computing the
    hold-error count per block inside the helper.
    """
    from scipy import stats
    end_types1 = te1.get_trial_end_types()
    end_types2 = te2.get_trial_end_types()

    def fn(end_types):
        # No. of hold errors, excluding the first target in the trial target sequence
        n_hold_errors = np.sum(end_types['hold_error'][1:])
        return (end_types['success'], n_hold_errors)
    # (a stray Python 2 debug print of fn(end_types1) was removed)
    return stats.chi2_contingency(np.array([fn(end_types1), fn(end_types2)]))
def dir_change(hdf, step=6):
    """Angular change of the cursor velocity direction within each center-out reach.

    Returns (list of per-trial arrays of successive velocity-angle differences,
    circular mean of their absolute values). `step` is the downsampling factor
    in task-table frames.
    """
    boundaries = dbfunctions.get_center_out_reach_inds(hdf)
    cursor = hdf.root.task[:]['cursor']
    vel_angle_diff = []
    for st, end in boundaries:
        # x-z plane trajectory, downsampled by `step` frames
        cursor_pos_tr = cursor[st:end:step, [0,2]]
        vel = np.diff(cursor_pos_tr, axis=0)
        vel_angle = np.arctan2(vel[:,1], vel[:,0])
        vel_angle_diff.append(np.diff(vel_angle))
    vel_angle_diff_concat = np.hstack(vel_angle_diff)
    # NOTE(review): |diff| lies in [0, 2*pi); the (low, high) wrap range spans
    # 4*pi as in the original -- confirm this is the intended circmean convention
    mean = circmean(np.abs(vel_angle_diff_concat), high=2*np.pi, low=-2*np.pi)
    # print() is Python 2/3 compatible (the original used a print statement)
    print(mean)
    return vel_angle_diff, mean
def edge_detect(vec, edge_type='pos'):
    """Detect rising ('pos') or falling ('neg') edges in a 1D binary sequence.

    Example:
        vec = [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, ...]
                        ^           ^
                       pos         neg
                       edge        edge

    Returns a float array the same length as `vec` with 1 at each edge sample.
    """
    if np.ndim(vec) > 1:
        vec = vec.reshape(-1)
    n = len(vec)
    edges = np.zeros(n)
    # Walk consecutive (previous, current) pairs; index t is the 'current' position
    for t, (prev, cur) in enumerate(zip(vec[:-1], vec[1:]), start=1):
        if edge_type == 'pos' and cur and not prev:
            edges[t] = 1
        elif edge_type == 'neg' and prev and not cur:
            edges[t] = 1
    return edges
def _count_switches(vec):
    """Count transitions between 0 and 1 in the binary sequence `vec`
    (rising plus falling edges).
    """
    n_rising = len(np.nonzero(edge_detect(vec, 'pos'))[0])
    n_falling = len(np.nonzero(edge_detect(vec, 'neg'))[0])
    return n_rising + n_falling
def get_trial_end_types(entry):
    """Count how trials ended in the fixed-decoder portion of a block.

    Returns (n_success_trials, n_terminus_hold_errors, n_timeout_trials,
    n_origin_hold_errors). Hold errors are classified by the state immediately
    preceding the 'hold_penalty' message.

    The original used Python 2-only len(filter(...)) / np.array(filter(...))
    constructions; boolean masks and np.nonzero are used instead.
    """
    entry = lookup_task_entries(entry)
    hdf = get_hdf(entry)
    task_msgs = get_fixed_decoder_task_msgs(hdf)

    # number of successful trials
    n_success_trials = int(np.sum(task_msgs['msg'] == 'reward'))

    # number of hold errors, split by the state that preceded the penalty
    hold_penalty_inds = np.nonzero(task_msgs['msg'] == 'hold_penalty')[0]
    msg_before_hold_penalty = task_msgs[hold_penalty_inds - 1]
    n_terminus_hold_errors = int(np.sum(msg_before_hold_penalty['msg'] == 'terminus_hold'))
    n_origin_hold_errors = int(np.sum(msg_before_hold_penalty['msg'] == 'origin_hold'))

    # number of timeout trials
    n_timeout_trials = int(np.sum(task_msgs['msg'] == 'timeout_penalty'))
    return n_success_trials, n_terminus_hold_errors, n_timeout_trials, n_origin_hold_errors
def get_hold_error_rate(task_entry):
    """Terminus hold errors per successful trial.

    The original referenced undefined locals (`n_terminus_hold_errors`,
    `n_success_trials`) and always raised NameError; derive both counts from
    get_trial_end_types instead.
    """
    n_success_trials, n_terminus_hold_errors, _, _ = get_trial_end_types(task_entry)
    hold_error_rate = float(n_terminus_hold_errors) / n_success_trials
    return hold_error_rate
def get_fixed_decoder_task_msgs(hdf):
    """Return the task messages recorded after the decoder was last fixed.

    If any 'update_bmi' (CLDA) messages are present, everything up to and
    including the last one is discarded; otherwise all messages are returned.
    """
    msgs = hdf.root.task_msgs[:]
    update_inds = np.nonzero(msgs['msg'] == 'update_bmi')[0]
    start = (update_inds[-1] + 1) if len(update_inds) > 0 else 0
    return msgs[start:]
def get_center_out_reach_inds(hdf, fixed=True):
    """Start/end frame indices of each reach that entered the 'terminus_hold' state.

    Each row of the returned (n_trials, 2) array is (time entering the preceding
    'terminus' state, time entering 'terminus_hold'). With fixed=True only the
    fixed-decoder portion of the block is considered.

    Raises IndexError if no reach ever reached terminus_hold (as the original did).
    """
    if fixed:
        task_msgs = get_fixed_decoder_task_msgs(hdf)
    else:
        task_msgs = hdf.root.task_msgs[:]
    # np.nonzero replaces the Python 2-only np.array(filter(...)) construction
    terminus_hold_msg_inds = np.nonzero(task_msgs['msg'] == 'terminus_hold')[0]
    if terminus_hold_msg_inds[0] == 0: # HACK mid-trial start due to CLDA
        terminus_hold_msg_inds = terminus_hold_msg_inds[1:]
    terminus_msg_inds = terminus_hold_msg_inds - 1
    boundaries = np.vstack([task_msgs[terminus_msg_inds]['time'],
                            task_msgs[terminus_hold_msg_inds]['time']]).T
    return boundaries
def get_movement_durations(task_entry, update_rate=60.):
    '''
    Movement duration (in seconds) of each trial which enters the
    'terminus_hold' state.

    The original multiplied by an undefined `self.update_rate` (a NameError in
    this module-level function). Frame counts are converted to seconds by
    dividing by the screen update rate, which defaults to the 60 Hz value used
    elsewhere in this module -- TODO confirm the intended units.
    '''
    hdf = get_hdf(task_entry)
    boundaries = get_center_out_reach_inds(hdf)
    return np.diff(boundaries, axis=1) * 1. / update_rate
def get_movement_error(task_entry):
    '''
    Movement error: mean (ME) and variability (MV) of the absolute off-axis
    cursor position for each counter-rotated reach trajectory, downsampled by
    a factor of 6.

    (The original computed an unused `n_trials` local, removed here.)
    '''
    task_entry = lookup_task_entries(task_entry)
    reach_trajectories = get_reach_trajectories(task_entry)
    ME = np.array([np.mean(np.abs(x[1, ::6])) for x in reach_trajectories])
    MV = np.array([np.std(np.abs(x[1, ::6])) for x in reach_trajectories])
    return ME, MV
def get_total_movement_error(task_entry):
    """Summed absolute off-axis deviation per reach trajectory (downsampled x6)."""
    task_entry = lookup_task_entries(task_entry)
    trajectories = get_reach_trajectories(task_entry)
    return np.array([np.abs(traj[1, ::6]).sum() for traj in trajectories])
# NOTE(review): this duplicates the edge_detect defined earlier in this module;
# at import time this second definition silently shadows the first.
def edge_detect(vec, edge_type='pos'):
    """ Edge detector for a 1D array

    Example:
    vec = [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, ...]
                    ^           ^
                    ^           ^
                    pos         neg
                    edge        edge

    vec : 1D array
    edge_type : {'pos', 'neg'}
    """
    if np.ndim(vec) > 1:
        vec = vec.reshape(-1)
    T = len(vec)
    edges = np.zeros(T)
    for t in range(1,T):
        if edge_type == 'pos':
            # rising edge: current sample truthy, previous falsy
            if vec[t] and not vec[t-1]:
                edges[t] = 1
        elif edge_type == 'neg':
            # falling edge: previous sample truthy, current falsy
            if vec[t-1] and not vec[t]:
                edges[t] = 1
    return edges
# NOTE(review): duplicate of the _count_switches defined earlier in this module;
# this second definition silently shadows the first at import time.
def _count_switches(vec):
    """ vec is an array of binary variables (0,1). The number of switches
    between 1's and 0's is counted
    """
    return len(np.nonzero(edge_detect(vec, 'pos'))[0]) + len(np.nonzero(edge_detect(vec, 'neg'))[0])
def get_direction_change_counts(entry):
    '''Movement- (MDC) and orthogonal- (ODC) direction change counts per reach.

    The sign of each downsampled velocity component is mapped onto {0, 1} and the
    0 <-> 1 switches are counted. (An unused `n_trials` local was removed.)
    '''
    entry = lookup_task_entries(entry)
    reach_trajectories = get_reach_trajectories(entry)
    ODCs = np.array([_count_switches( 0.5*(np.sign(np.diff(x[0,::6])) + 1) ) for x in reach_trajectories])
    MDCs = np.array([_count_switches( 0.5*(np.sign(np.diff(x[1,::6])) + 1) ) for x in reach_trajectories])
    return MDCs, ODCs
def plot_trajectories(task_entry, ax=None, show=False, **kwargs):
    """Plot every center-out reach trajectory, counter-rotated so each trial's
    target lies along the +x axis.

    kwargs are forwarded to ax.plot. A new figure/axis is created when ax is None.
    """
    hdf = get_hdf(task_entry)
    boundaries = get_center_out_reach_inds(hdf)
    targets = hdf.root.task[:]['target']
    cursor = hdf.root.task[:]['cursor']
    if ax is None:
        plt.figure()
        ax = plt.subplot(111)
    for st, end in boundaries:
        trial_target = targets[st][[0, 2]]
        angle = -np.arctan2(trial_target[1], trial_target[0])
        # counter-rotate trajectory (x-z plane); R is a 2D rotation matrix
        cursor_pos_tr = cursor[st:end, [0, 2]]
        R = np.array([[np.cos(angle), -np.sin(angle)],
                      [np.sin(angle),  np.cos(angle)]])
        # Row-wise rotation: (R @ p)^T == p^T @ R^T. The original built this with a
        # list comprehension whose loop variable shadowed the outer loop's `k`.
        cursor_pos_tr_rot = cursor_pos_tr.dot(R.T)
        ax.plot(cursor_pos_tr_rot[:, 0], cursor_pos_tr_rot[:, 1], **kwargs)
    if show:
        plt.show()
def get_workspace_size(task_entry):
    '''
    Print (and return) the per-axis minimum and maximum of the target positions,
    i.e. the extent of the workspace used in this block.

    The original docstring ('Get movement error') was a copy-paste mistake, the
    prints were Python 2-only statements, and nothing was returned.
    '''
    hdf = get_hdf(task_entry)
    targets = hdf.root.task[:]['target']
    lo = targets.min(axis=0)
    hi = targets.max(axis=0)
    print(lo)
    print(hi)
    return lo, hi
def plot_dist_to_targ(task_entry, reach_trajectories=None, targ_dist=10., plot_all=False, ax=None, target=None, update_rate=60., decoder_rate=10., **kwargs):
    """Plot the distance to target over time for each reach (mean across trials,
    or every trial when plot_all=True).

    Fixes relative to the original: scipy.stats.nanmean/nanstd (removed from
    modern scipy) are replaced by np.nanmean/np.nanstd; the downsampling step is
    made integral (a float slice step raises under Python 3); the Python 2
    map() idiom is replaced by a comprehension; `== None` -> `is None`.
    """
    task_entry = dbfn.lookup_task_entries(task_entry)
    if reach_trajectories is None:
        reach_trajectories = task_entry.get_reach_trajectories()
    if target is None:
        target = np.array([targ_dist, 0])
    trajectories_dist_to_targ = [[np.linalg.norm(pt) for pt in (traj.T - target)]
                                 for traj in reach_trajectories]
    # Downsample from the screen rate to the decoder rate; slice steps must be ints
    step = int(update_rate / decoder_rate)
    trajectories_dist_to_targ = [traj[::step] for traj in trajectories_dist_to_targ]
    max_len = max(len(traj) for traj in trajectories_dist_to_targ)
    n_trials = len(trajectories_dist_to_targ)
    # TODO use masked arrays
    data = np.ones([n_trials, max_len]) * np.nan
    for k, traj in enumerate(trajectories_dist_to_targ):
        data[k, :len(traj)] = traj
    mean_dist_to_targ = np.nanmean(data, axis=0)
    std_dist_to_targ = np.nanstd(data, axis=0)

    if ax is None:
        plt.figure()
        ax = plt.subplot(111)
    # time vector; 0.1 s per sample matches the 10 Hz default decoder rate --
    # NOTE(review): hard-coded as in the original; confirm for other decoder rates
    time = np.arange(max_len)*0.1
    if plot_all:
        for dist_to_targ in trajectories_dist_to_targ:
            ax.plot(dist_to_targ, **kwargs)
    else:
        ax.plot(time, mean_dist_to_targ, **kwargs)

    import plotutil
    #plotutil.set_ylim(ax, [0, targ_dist])
    plotutil.ylabel(ax, 'Distance to target')
    plotutil.xlabel(ax, 'Time (s)')
    plt.draw()
| [
"noreply@github.com"
] | santacruzlab.noreply@github.com |
d782aef0120ef786fd9c92630888749d2cab695e | 3afc61e1ee2ca7152f0adaf8d3ad8709843b5823 | /kohei4/tutorial00/word-count.py | 07d02a4dde25515de8a08b6d01d338ef6855ed1d | [] | no_license | tmu-nlp/NLPtutorial2017 | 87114aa96c03faa5023b44228aed939a1f017f0b | c31011ab55a85f81a078c7c9ed64807e258d3b94 | refs/heads/master | 2021-01-19T22:39:59.991612 | 2018-07-02T05:52:21 | 2018-07-02T05:52:21 | 88,833,788 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import sys
import sys
from collections import defaultdict

# Count word frequencies in the file named by the first command-line argument
# and print "<word> <count>" lines in sorted word order. Counting is
# case-insensitive (lines are lowercased before splitting).
w_cnt = defaultdict(lambda: 0)
with open(sys.argv[1], 'r') as f:
    for line in f:
        # Iterate the words directly instead of indexing by range(len(...))
        for word in line.lower().split():
            w_cnt[word] += 1
for wd, ct in sorted(w_cnt.items()):
    print("{} {}".format(wd, ct))
| [
"kohei@Kohei-no-MacBook-Air.local"
] | kohei@Kohei-no-MacBook-Air.local |
a5ef4b8ccf8dadcffeb1fc3f537fe542a24382f4 | d20b76335933ab14cb56d969c4638d301a26a923 | /tools/views.py | 5265ae18d92a6845e9c0b9a9a08d99ec3195de52 | [] | no_license | doctorMcbob/wwf | 87c8fc053b8d090aebf9078edfbebce7fd7a021f | 6a7af3f4b46dc6d9c91378db9ed0c26862d20b5f | refs/heads/master | 2021-01-10T13:01:28.044366 | 2015-10-13T00:00:24 | 2015-10-13T00:00:24 | 44,136,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | URLS = []
class ViewConfig(object):
def __init__(self, url, method):
self.method = method
self.url = url
def __call__(self, func):
def view(*args, **kwargs):
return func(*args, **kwargs)
for u in URLS:
if u[0] == self.url:
if self.method == "*":
for m in ["GET", "HEAD", "POST",
"OPTIONS", "PUT", "DELETE",
"TRACE", "CONNECT"]:
u[1][m] = view
else:
u[1][self.method] = view
return view
URLS.append((self.url, {self.method: view}))
return view
| [
"wootenwesley@gmail.com"
] | wootenwesley@gmail.com |
aa36af9177a2fa95baad418cbde3b3279a24ef38 | 0c93bdbac7fb97cf0df07657a8c7e72da6daa618 | /chia/consensus/block_creation.py | 612f8763f3a7f16da9c7752ebdf5a3707282c78e | [
"Apache-2.0"
] | permissive | Kaieida/bluepool | 6fe77d057ac576f669d0cb3498c7bbc5110db1fb | 88feb12da64673815ff20c503e497b28fa9f9b82 | refs/heads/main | 2023-06-04T10:44:47.941235 | 2021-06-24T15:46:03 | 2021-06-24T15:46:03 | 379,903,815 | 0 | 0 | Apache-2.0 | 2021-06-24T11:35:32 | 2021-06-24T11:35:31 | null | UTF-8 | Python | false | false | 21,599 | py | import logging
import random
from dataclasses import replace
from typing import Callable, Dict, List, Optional, Tuple
import blspy
from blspy import G1Element, G2Element
from chiabip158 import PyBIP158
from chia.consensus.block_record import BlockRecord
from chia.consensus.block_rewards import (
calculate_base_farmer_reward,
calculate_pool_reward,
)
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.coinbase import create_farmer_coin, create_pool_coin
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.full_node.signage_point import SignagePoint
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.foliage import (
Foliage,
FoliageBlockData,
FoliageTransactionBlock,
TransactionsInfo,
)
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.reward_chain_block import (
RewardChainBlock,
RewardChainBlockUnfinished,
)
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
from chia.util.prev_transaction_block import get_prev_transaction_block
from chia.util.recursive_replace import recursive_replace
log = logging.getLogger(__name__)
def create_foliage(
    constants: ConsensusConstants,
    reward_block_unfinished: RewardChainBlockUnfinished,
    block_generator: Optional[BlockGenerator],
    aggregate_sig: G2Element,
    additions: List[Coin],
    removals: List[Coin],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    timestamp: uint64,
    farmer_reward_puzzlehash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    seed: bytes32 = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
    """
    Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
    the return values are not None. This is called at the signage point, so some of this information may be
    tweaked at the infusion point.

    Args:
        constants: consensus constants being used for this chain
        reward_block_unfinished: the reward block to look at, potentially at the signage point
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transctions (or infinity element)
        prev_block: the previous block at the signage point
        blocks: dict from header hash to blocks, of all ancestor blocks
        total_iters_sp: total iters at the signage point
        timestamp: timestamp to put into the foliage block
        farmer_reward_puzzlehash: where to pay out farming reward
        pool_target: where to pay out pool reward
        get_plot_signature: retrieve the signature corresponding to the plot public key
        get_pool_signature: retrieve the signature corresponding to the pool public key
        seed: seed to randomize block

    Returns:
        (foliage, foliage_transaction_block, transactions_info); the latter two
        are None when this block is not a transaction block.

    Note: `additions` is extended in place with the incorporated reward-claim
    coins when this is a transaction block.
    """

    if prev_block is not None:
        res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        is_transaction_block: bool = res[0]
        prev_transaction_block: Optional[BlockRecord] = res[1]
    else:
        # Genesis is a transaction block
        prev_transaction_block = None
        is_transaction_block = True

    random.seed(seed)
    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = random.randint(0, 100000000).to_bytes(32, "big")
    if prev_block is None:
        height: uint32 = uint32(0)
    else:
        height = uint32(prev_block.height + 1)

    # Create filter
    byte_array_tx: List[bytes32] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []

    pool_target_signature: Optional[G2Element] = get_pool_signature(
        pool_target, reward_block_unfinished.proof_of_space.pool_public_key
    )

    foliage_data = FoliageBlockData(
        reward_block_unfinished.get_hash(),
        pool_target,
        pool_target_signature,
        farmer_reward_puzzlehash,
        extension_data,
    )

    foliage_block_data_signature: G2Element = get_plot_signature(
        foliage_data.get_hash(),
        reward_block_unfinished.proof_of_space.plot_public_key,
    )

    prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
    if height != 0:
        assert prev_block is not None
        prev_block_hash = prev_block.header_hash

    generator_block_heights_list: List[uint32] = []

    # Transaction-block-only fields: program cost/fees, reward claims, BIP158
    # filter, Merkle roots, and the FoliageTransactionBlock itself
    if is_transaction_block:
        cost = uint64(0)

        # Calculate the cost of transactions
        if block_generator is not None:
            generator_block_heights_list = block_generator.block_height_list()
            result: NPCResult = get_name_puzzle_conditions(block_generator, constants.MAX_BLOCK_COST_CLVM, True)
            cost = calculate_cost_of_program(block_generator.program, result, constants.COST_PER_BYTE)

            # Fees are the amount removed minus the amount re-created by the spends
            removal_amount = 0
            addition_amount = 0
            for coin in removals:
                removal_amount += coin.amount
            for coin in additions:
                addition_amount += coin.amount
            spend_bundle_fees = removal_amount - addition_amount
        else:
            spend_bundle_fees = 0

        reward_claims_incorporated = []
        if height > 0:
            assert prev_transaction_block is not None
            assert prev_block is not None
            # Walk back to the most recent transaction block; its farmer/pool
            # rewards (plus its fees) are claimed here
            curr: BlockRecord = prev_block
            while not curr.is_transaction_block:
                curr = blocks.block_record(curr.prev_hash)

            assert curr.fees is not None
            pool_coin = create_pool_coin(
                curr.height,
                curr.pool_puzzle_hash,
                calculate_pool_reward(curr.height),
                constants.GENESIS_CHALLENGE,
            )

            farmer_coin = create_farmer_coin(
                curr.height,
                curr.farmer_puzzle_hash,
                uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
                constants.GENESIS_CHALLENGE,
            )
            assert curr.header_hash == prev_transaction_block.header_hash
            reward_claims_incorporated += [pool_coin, farmer_coin]

            if curr.height > 0:
                curr = blocks.block_record(curr.prev_hash)
                # Prev block is not genesis
                # Also claim rewards for each non-transaction block in between
                # (these carry no fees)
                while not curr.is_transaction_block:
                    pool_coin = create_pool_coin(
                        curr.height,
                        curr.pool_puzzle_hash,
                        calculate_pool_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    farmer_coin = create_farmer_coin(
                        curr.height,
                        curr.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    reward_claims_incorporated += [pool_coin, farmer_coin]
                    curr = blocks.block_record(curr.prev_hash)
        # NOTE: mutates the caller's `additions` list
        additions.extend(reward_claims_incorporated.copy())
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for coin in removals:
            tx_removals.append(coin.name())
            byte_array_tx.append(bytearray(coin.name()))

        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}

        for coin in tx_additions:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removals_root = removal_merkle_set.get_root()

        # Sentinel hashes ([0]*32 / [1]*32) mark "no generator" / "no ref list"
        generator_hash = bytes32([0] * 32)
        if block_generator is not None:
            generator_hash = std_hash(block_generator.program)

        generator_refs_hash = bytes32([1] * 32)
        if generator_block_heights_list not in (None, []):
            generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
            generator_refs_hash = std_hash(generator_ref_list_bytes)

        filter_hash: bytes32 = std_hash(encoded)

        transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
            generator_hash,
            generator_refs_hash,
            aggregate_sig,
            uint64(spend_bundle_fees),
            cost,
            reward_claims_incorporated,
        )
        if prev_transaction_block is None:
            prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
        else:
            prev_transaction_block_hash = prev_transaction_block.header_hash

        assert transactions_info is not None
        foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
            prev_transaction_block_hash,
            timestamp,
            filter_hash,
            additions_root,
            removals_root,
            transactions_info.get_hash(),
        )
        assert foliage_transaction_block is not None

        foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash()
        foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
            foliage_transaction_block_hash,
            reward_block_unfinished.proof_of_space.plot_public_key,
        )
        assert foliage_transaction_block_signature is not None
    else:
        foliage_transaction_block_hash = None
        foliage_transaction_block_signature = None
        foliage_transaction_block = None
        transactions_info = None
    # Either both hash and signature are present (tx block) or both are None
    assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)

    foliage = Foliage(
        prev_block_hash,
        reward_block_unfinished.get_hash(),
        foliage_data,
        foliage_block_data_signature,
        foliage_transaction_block_hash,
        foliage_transaction_block_signature,
    )

    return foliage, foliage_transaction_block, transactions_info
def create_unfinished_block(
    constants: ConsensusConstants,
    sub_slot_start_total_iters: uint128,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    sp_iters: uint64,
    ip_iters: uint64,
    proof_of_space: ProofOfSpace,
    slot_cc_challenge: bytes32,
    farmer_reward_puzzle_hash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    signage_point: SignagePoint,
    timestamp: uint64,
    blocks: BlockchainInterface,
    seed: bytes = b"",  # annotation corrected: the default is plain bytes, not a 32-byte hash
    block_generator: Optional[BlockGenerator] = None,
    aggregate_sig: G2Element = G2Element(),
    additions: Optional[List[Coin]] = None,
    removals: Optional[List[Coin]] = None,
    prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,  # annotation corrected: None is a valid default
) -> UnfinishedBlock:
    """
    Creates a new unfinished block using all the information available at the signage point. This will have to be
    modified using information from the infusion point.

    Args:
        constants: consensus constants being used for this chain
        sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
        sub_slot_iters: sub-slot-iters at the infusion point epoch
        signage_point_index: signage point index of the block to create
        sp_iters: sp_iters of the block to create
        ip_iters: ip_iters of the block to create
        proof_of_space: proof of space of the block to create
        slot_cc_challenge: challenge hash at the sp sub-slot
        farmer_reward_puzzle_hash: where to pay out farmer rewards
        pool_target: where to pay out pool rewards
        get_plot_signature: function that returns signature corresponding to plot public key
        get_pool_signature: function that returns signature corresponding to pool public key
        signage_point: signage point information (VDFs)
        timestamp: timestamp to add to the foliage block, if created
        seed: seed to randomize chain
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transctions (or infinity element)
        additions: Coins added in spend_bundle
        removals: Coins removed in spend_bundle
        prev_block: previous block (already in chain) from the signage point
        blocks: dictionary from header hash to SBR of all included SBR
        finished_sub_slots_input: finished_sub_slots at the signage point

    Returns:
        an UnfinishedBlock (foliage filled in, infusion-point data still missing)
    """
    # Work on a copy so the caller's list is never mutated.
    if finished_sub_slots_input is None:
        finished_sub_slots: List[EndOfSubSlotBundle] = []
    else:
        finished_sub_slots = finished_sub_slots_input.copy()
    overflow: bool = sp_iters > ip_iters
    total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
    is_genesis: bool = prev_block is None
    new_sub_slot: bool = len(finished_sub_slots) > 0

    cc_sp_hash: Optional[bytes32] = slot_cc_challenge

    # Only enters this if statement if we are in testing mode (making VDF proofs here)
    if signage_point.cc_vdf is not None:
        assert signage_point.rc_vdf is not None
        cc_sp_hash = signage_point.cc_vdf.output.get_hash()
        rc_sp_hash = signage_point.rc_vdf.output.get_hash()
    else:
        # No VDF output available: derive the reward-chain sp hash from the
        # last finished sub-slot, the genesis challenge, or the previous block.
        if new_sub_slot:
            rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if is_genesis:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_block is not None
                assert blocks is not None
                # Walk back to the first block in the current sub-slot to find
                # its finished reward-slot hashes.
                curr = prev_block
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]
        signage_point = SignagePoint(None, None, None, None)

    cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
    rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
    assert cc_sp_signature is not None
    assert rc_sp_signature is not None
    # Sanity-check that the plot key actually signed the cc sp hash.
    assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)

    # Overflow blocks are infused one sub-slot later than their signage point.
    total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))

    rc_block = RewardChainBlockUnfinished(
        total_iters,
        signage_point_index,
        slot_cc_challenge,
        proof_of_space,
        signage_point.cc_vdf,
        cc_sp_signature,
        signage_point.rc_vdf,
        rc_sp_signature,
    )
    if additions is None:
        additions = []
    if removals is None:
        removals = []
    (foliage, foliage_transaction_block, transactions_info,) = create_foliage(
        constants,
        rc_block,
        block_generator,
        aggregate_sig,
        additions,
        removals,
        prev_block,
        blocks,
        total_iters_sp,
        timestamp,
        farmer_reward_puzzle_hash,
        pool_target,
        get_plot_signature,
        get_pool_signature,
        seed,
    )
    return UnfinishedBlock(
        finished_sub_slots,
        rc_block,
        signage_point.cc_proof,
        signage_point.rc_proof,
        foliage,
        foliage_transaction_block,
        transactions_info,
        block_generator.program if block_generator else None,
        block_generator.block_height_list() if block_generator else [],
    )
def unfinished_block_to_full_block(
    unfinished_block: UnfinishedBlock,
    cc_ip_vdf: VDFInfo,
    cc_ip_proof: VDFProof,
    rc_ip_vdf: VDFInfo,
    rc_ip_proof: VDFProof,
    icc_ip_vdf: Optional[VDFInfo],
    icc_ip_proof: Optional[VDFProof],
    finished_sub_slots: List[EndOfSubSlotBundle],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    difficulty: uint64,
) -> FullBlock:
    """
    Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
    other properties (height, weight, sub-slots, etc)

    Args:
        unfinished_block: the unfinished block to finish
        cc_ip_vdf: the challenge chain vdf info at the infusion point
        cc_ip_proof: the challenge chain proof
        rc_ip_vdf: the reward chain vdf info at the infusion point
        rc_ip_proof: the reward chain proof
        icc_ip_vdf: the infused challenge chain vdf info at the infusion point
        icc_ip_proof: the infused challenge chain proof
        finished_sub_slots: finished sub slots from the prev block to the infusion point
        prev_block: prev block from the infusion point
        blocks: dictionary from header hash to SBR of all included SBR
        total_iters_sp: total iters at the signage point
        difficulty: difficulty at the infusion point
    """
    # Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
    if prev_block is None:
        # Genesis: the block is always a transaction block and keeps its foliage as-is.
        is_transaction_block = True
        new_weight = uint128(difficulty)
        new_height = uint32(0)
        new_foliage = unfinished_block.foliage
        new_foliage_transaction_block = unfinished_block.foliage_transaction_block
        new_tx_info = unfinished_block.transactions_info
        new_generator = unfinished_block.transactions_generator
        new_generator_ref_list = unfinished_block.transactions_generator_ref_list
    else:
        is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        new_weight = uint128(prev_block.weight + difficulty)
        new_height = uint32(prev_block.height + 1)
        if is_transaction_block:
            new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
            new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
            new_foliage_transaction_block = unfinished_block.foliage_transaction_block
            new_tx_info = unfinished_block.transactions_info
            new_generator = unfinished_block.transactions_generator
            new_generator_ref_list = unfinished_block.transactions_generator_ref_list
        else:
            # Not a transaction block: strip all transaction-related fields.
            new_fbh = None
            new_fbs = None
            new_foliage_transaction_block = None
            new_tx_info = None
            new_generator = None
            new_generator_ref_list = []
        # Hash and signature must be present together or absent together.
        assert (new_fbh is None) == (new_fbs is None)
        new_foliage = replace(
            unfinished_block.foliage,
            prev_block_hash=prev_block.header_hash,
            foliage_transaction_block_hash=new_fbh,
            foliage_transaction_block_signature=new_fbs,
        )
    ret = FullBlock(
        finished_sub_slots,
        RewardChainBlock(
            new_weight,
            new_height,
            unfinished_block.reward_chain_block.total_iters,
            unfinished_block.reward_chain_block.signage_point_index,
            unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
            unfinished_block.reward_chain_block.proof_of_space,
            unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
            unfinished_block.reward_chain_block.challenge_chain_sp_signature,
            cc_ip_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_signature,
            rc_ip_vdf,
            icc_ip_vdf,
            is_transaction_block,
        ),
        unfinished_block.challenge_chain_sp_proof,
        cc_ip_proof,
        unfinished_block.reward_chain_sp_proof,
        rc_ip_proof,
        icc_ip_proof,
        new_foliage,
        new_foliage_transaction_block,
        new_tx_info,
        new_generator,
        new_generator_ref_list,
    )
    # The reward-chain block was just rebuilt, so the foliage's pointer to it
    # must be refreshed before returning.
    return recursive_replace(
        ret,
        "foliage.reward_block_hash",
        ret.reward_chain_block.get_hash(),
    )
| [
"admin@bluepool.io"
] | admin@bluepool.io |
511334a97959c166add09f07d30f551acd4b2aeb | 268b0441a5fd45da501f5e50e155a86043de2472 | /test/test_puchikarui.py | 6495727bd08686c549c8fb266e4650e6d8b26d27 | [
"MIT"
] | permissive | cliffpham/puchikarui | c837e4b487cd4c3a8fcfe0c46d69ea2804bdfc9a | c6cf9292685f2a5c93f9feb206e2dc1371ce20f2 | refs/heads/master | 2020-07-31T16:28:01.674364 | 2019-07-03T03:36:54 | 2019-07-03T03:36:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,282 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Script for testing puchikarui library
Latest version can be found at https://github.com/letuananh/puchikarui
References:
Python documentation:
https://docs.python.org/
Python unittest
https://docs.python.org/3/library/unittest.html
@author: Le Tuan Anh <tuananh.ke@gmail.com>
@license: MIT
'''
# Copyright (c) 2014-2017, Le Tuan Anh <tuananh.ke@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
########################################################################
import os
import unittest
import logging
from puchikarui import DataSource
from puchikarui import Schema, with_ctx
from puchikarui import escape_like, head_like, tail_like, contain_like
# ----------------------------------------------------------------------
# Configuration
# ----------------------------------------------------------------------
# Paths used by the test suite; all fixtures live next to this file.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
TEST_DATA = os.path.join(TEST_DIR, 'data')
SETUP_FILE = os.path.join(TEST_DATA, 'init_script.sql')  # schema DDL + seed rows
SETUP_SCRIPT = "INSERT INTO person (name, age) VALUES ('Chun', 78)"  # one extra seed row
TEST_DB = os.path.join(TEST_DIR, 'data', 'test.db')  # on-disk DB for persistence tests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# ------------------------------------------------------------------------------
# Test cases
# ------------------------------------------------------------------------------
class SchemaDemo(Schema):
    """Demo schema with three tables (person, hobby, diary) used by the tests."""

    def __init__(self, data_source=':memory:', setup_script=SETUP_SCRIPT, setup_file=SETUP_FILE):
        super().__init__(data_source=data_source, setup_script=setup_script, setup_file=setup_file)
        # person rows are materialised as Person objects, keyed by ID
        self.add_table('person', ['ID', 'name', 'age'], proto=Person, id_cols=('ID',))
        # hobby has no prototype class, so its rows come back as namedtuples
        hobby_table = self.add_table('hobby')
        hobby_table.add_fields('pid', 'hobby')
        # diary columns pid/text are mapped onto Diary.ownerID / Diary.content
        diary_table = self.add_table('diary', ['ID', 'pid', 'text'], proto=Diary)
        diary_table.set_id('ID').field_map(pid='ownerID', text='content')
class Diary(object):
    """A diary entry: free-text content plus an optional owner (a Person)."""

    def __init__(self, content='', owner=None):
        """Create an entry; when an owner is given, ownerID is copied from owner.ID."""
        self.ID = None  # assigned by the DB layer on save
        self.content = content
        self.owner = owner if owner else None
        self.ownerID = owner.ID if owner else None

    def __str__(self):
        writer = self.owner.name if self.owner else '#{}'.format(self.ownerID)
        return "{per} wrote `{txt}`".format(per=writer, txt=self.content)
class Person(object):
    """A person record: DB-assigned ID plus name and age."""

    def __init__(self, name='', age=-1):
        # ID stays None until the record is persisted
        self.ID, self.name, self.age = None, name, age

    def __str__(self):
        return "#{}: {}/{}".format(self.ID, self.name, self.age)
########################################################################
class TestUtilClass(unittest.TestCase):
    """Tests for small utility behaviours of puchikarui."""

    def test_path(self):
        """DataSource must expand '~' in the supplied path."""
        source = DataSource('~/tmp/test.db')
        expected = os.path.join(os.path.expanduser('~'), 'tmp', 'test.db')
        self.assertEqual(expected, source.path)
class TestDemoLib(unittest.TestCase):
    """End-to-end tests for SchemaDemo: raw SQL helpers, ORM-style CRUD,
    execution contexts, and column/field mapping."""

    @classmethod
    def setUpClass(cls):
        """Remove any leftover on-disk test DB so each run starts clean."""
        print("Setting up tests ...")
        if os.path.isfile(TEST_DB):
            logger.info("Test DB exists, removing it now")
            os.unlink(TEST_DB)

    def test_sqlite_methods(self):
        """Raw data-source helpers: scalar, single-row, and multi-row selects."""
        db = SchemaDemo()
        num = db.ds.select_scalar('SELECT 2')
        self.assertEqual(num, 2)
        nums = db.ds.select_single('SELECT 2, 3, 4')
        self.assertEqual(tuple(nums), (2, 3, 4))
        matrix = db.ds.select('SELECT 1, 2, 3 UNION SELECT 4, 5, 6')
        self.assertEqual(tuple(tuple(row) for row in matrix), ((1, 2, 3), (4, 5, 6)))

    def test_basic(self):
        """Basic insert/select/delete against the on-disk test database."""
        print("Testing basic database actions")
        db = SchemaDemo(TEST_DB, setup_file=SETUP_FILE, setup_script=SETUP_SCRIPT)
        # We can execute an SQLite script as usual ...
        db.ds.execute("INSERT INTO person (name, age) VALUES ('Chen', 15);")
        # Or use this ORM-like method
        # Test insert
        db.person.insert('Kent', 42)
        # Test select data
        persons = db.person.select(where='age > ?', values=[25], orderby='age', limit=10)
        expected = [('Ji', 28), ('Ka', 32), ('Vi', 33), ('Kent', 42), ('Chun', 78)]
        actual = [(person.name, person.age) for person in persons]
        self.assertEqual(expected, actual)
        # Test select single
        ji = db.person.select_single('name=?', ('Ji',))
        self.assertIsNotNone(ji)
        self.assertEqual(ji.age, 28)
        # Test delete
        db.person.delete(where='age > ?', values=(70,))
        chun = db.person.select_single('name=?', ('Chun',))
        self.assertIsNone(chun)

    def test_execution_context(self):
        """CRUD through a single shared execution context (one connection)."""
        db = SchemaDemo(":memory:")
        with db.ctx() as ctx:
            # test select
            ppl = ctx.person.select()
            self.assertEqual(len(ppl), 6)
            # test insert
            ctx.person.insert('Totoro', columns=('name',))  # insert partial data
            ctx.person.insert('Shizuka', 10)  # full record
            p = ctx.person.select_single(where='name=?', values=('Dunno',))
            self.assertIsNone(p)
            # Test update data & select single
            ctx.person.update((10,), "name=?", ("Totoro",), columns=('age',))
            totoro = ctx.person.select_single(where='name=?', values=('Totoro',))
            self.assertEqual(totoro.age, 10)
            # test updated
            ppl = ctx.person.select()
            self.assertEqual(len(ppl), 8)
            # test delete
            ctx.person.delete('age > ?', (70,))
            ppl = ctx.person.select()
            # done!
            expected = [(1, 'Ji', 28), (2, 'Zen', 25), (3, 'Ka', 32), (4, 'Anh', 15), (5, 'Vi', 33), (7, 'Totoro', 10), (8, 'Shizuka', 10)]
            actual = [(person.ID, person.name, person.age) for person in ppl]
            self.assertEqual(expected, actual)

    def test_selective_select(self):
        """Selecting a subset of columns still builds proto objects."""
        db = SchemaDemo()  # create a new DB in RAM
        pers = db.person.select(columns=('name',))
        names = [x.name for x in pers]
        self.assertEqual(names, ['Ji', 'Zen', 'Ka', 'Anh', 'Vi', 'Chun'])

    def test_orm_persistent(self):
        """save()/by_id() round-trip plus child-table insert and delete."""
        db = SchemaDemo(TEST_DB)
        bid = db.person.save(Person('Buu', 1000))
        buu = db.person.by_id(bid)
        self.assertIsNotNone(buu)
        self.assertEqual(buu.name, 'Buu')
        # insert more stuff
        db.hobby.insert(buu.ID, 'candies')
        db.hobby.insert(buu.ID, 'chocolate')
        db.hobby.insert(buu.ID, 'santa')
        hobbies = db.hobby.select('pid=?', (buu.ID,))
        self.assertEqual({x.hobby for x in hobbies}, {'candies', 'chocolate', 'santa'})
        db.hobby.delete('hobby=?', ('chocolate',))
        hobbies = db.hobby.select('pid=?', (buu.ID,))
        self.assertEqual({x.hobby for x in hobbies}, {'candies', 'santa'})

    def test_orm_with_context(self):
        """ORM features (proto objects, namedtuples, update_object) in one ctx."""
        db = SchemaDemo()  # create a new DB in RAM
        with db.ctx() as ctx:
            p = ctx.person.select_single('name=?', ('Anh',))
            # There is no prototype class for hobby, so a namedtuple will be generated
            hobbies = ctx.hobby.select('pid=?', (p.ID,))
            self.assertIsInstance(p, Person)
            self.assertIsInstance(hobbies[0], tuple)
            self.assertEqual(hobbies[0].hobby, 'coding')
            # insert hobby
            ctx.hobby.insert(p.ID, 'reading')
            hobbies = [x.hobby for x in ctx.hobby.select('pid=?', (p.ID,), columns=('hobby',))]
            self.assertEqual(hobbies, ['coding', 'reading'])
            # now only select the name and not the age
            p2 = ctx.person.select_single('name=?', ('Vi',), columns=('ID', 'name',))
            self.assertEqual(p2.name, 'Vi')
            self.assertEqual(p2.age, -1)  # age was not selected, so proto default
            # test updating object
            p2.name = 'Vee'
            ctx.update_object(db.person, p2, ('name',))
            p2.age = 29
            ctx.update_object(db.person, p2)
            # ensure that data was updated
            p2n = ctx.person.by_id(p2.ID)
            self.assertEqual(p2n.name, 'Vee')
            self.assertEqual(p2n.age, 29)
            self.assertEqual(p2n.ID, p2.ID)

    def test_field_mapping(self):
        """Diary's pid/text columns map onto ownerID/content attributes."""
        content = 'I am better than Emacs'
        new_content = 'I am NOT better than Emacs'
        db = SchemaDemo()
        with db.ctx() as ctx:
            vi = ctx.person.select_single('name=?', ('Vi',))
            diary = Diary(content, owner=vi)
            ctx.diary.save(diary)
            diaries = ctx.diary.select('pid=?', (vi.ID,))
            for d in diaries:
                d.owner = ctx.person.by_id(d.ownerID)
                print(d)
                # test update
                d.content = new_content
                ctx.diary.save(d)
                diary = ctx.diary.by_id(d.ID)
                self.assertEqual(diary.content, new_content)
                print(diary)
class SchemaA(Schema):
    """Schema fragment A: declares the `person` table and seeds one row."""

    SETUP_FILE = os.path.join(TEST_DATA, 'schemaA.sql')

    def __init__(self, data_source=':memory:', setup_script=None, setup_file=None):
        super().__init__(data_source=data_source, setup_script=setup_script, setup_file=setup_file)
        # DDL comes from the schema file, seed data from an inline script
        seed_sql = "INSERT INTO person (name, age) VALUES ('potter', 10)"
        self.add_file(SchemaA.SETUP_FILE)
        self.add_script(seed_sql)
        # person rows are materialised as Person objects
        self.add_table('person', ['ID', 'name', 'age'], proto=Person, id_cols=('ID',))
class SchemaB(Schema):
    """Schema fragment B: declares hobby and the person_hobby link table."""

    SETUP_FILE = os.path.join(TEST_DATA, 'schemaB.sql')

    def __init__(self, data_source=':memory:', setup_script=None, setup_file=None):
        super().__init__(data_source=data_source, setup_script=setup_script, setup_file=setup_file)
        # DDL comes from the schema file, seed data from an inline script
        seed_sql = "INSERT INTO hobby (name) VALUES ('magic')"
        self.add_file(SchemaB.SETUP_FILE)
        self.add_script(seed_sql)
        self.add_table('hobby', ['ID', 'name'], proto=Hobby, id_cols=('ID',))
        # plain link table: rows come back as namedtuples
        self.add_table("person_hobby", ["hid", "pid"])
class Hobby(object):
    """A hobby record holding just a name."""

    def __init__(self, name=None):
        self.name = name

    def __repr__(self):
        label = self.name
        return "Hobby: {}".format(label)
class SchemaAB(SchemaB, SchemaA):
    ''' Combined schema built from SchemaA and SchemaB.

    Execution order: setup_files > setup_scripts
        Schema's file > SchemaA's file > SchemaB's file >
        Schema's script > SchemaA's script > SchemaB's script
    Note: The first class in inheritance list will be executed last
    '''
    def __init__(self, data_source=":memory:", setup_script=None, setup_file=None):
        super().__init__(data_source=data_source, setup_script=setup_script, setup_file=setup_file)
        # Link the rows seeded by SchemaA ('potter') and SchemaB ('magic').
        # This script is registered last, so both parents' seed rows exist
        # by the time the sub-SELECTs run.
        self.add_script('''INSERT INTO person_hobby VALUES ((SELECT ID FROM hobby WHERE name='magic'), (SELECT ID FROM person WHERE name='potter'));''')

    @with_ctx
    def all_hobby(self, ctx=None):
        # Return every hobby row (ctx is injected by @with_ctx when omitted)
        return ctx.hobby.select()

    @with_ctx
    def find_hobby(self, name, ctx=None):
        # Return hobby rows whose name matches exactly
        return ctx.hobby.select("name = ?", (name,))
class TestMultipleSchema(unittest.TestCase):
    """Verify that SchemaAB combines the tables and seed data of both parents.

    Fix: the original method ended with a bare ``print`` expression (a
    Python-2 leftover that evaluates the builtin and does nothing in
    Python 3) followed by a redundant ``pass``; both no-op statements
    are removed.
    """

    def test_ms(self):
        db = SchemaAB()
        with db.ctx() as ctx:
            potter = ctx.person.select_single()
            magic = ctx.hobby.select_single()
            link = ctx.person_hobby.select_single()
            # rows seeded by SchemaA's and SchemaB's setup scripts ...
            self.assertEqual(potter.name, 'potter')
            self.assertEqual(magic.name, 'magic')
            # ... and linked together by SchemaAB's own script
            self.assertEqual(link.hid, magic.ID)
            self.assertEqual(link.pid, potter.ID)
            # schema-level helper methods are reachable from the context
            self.assertEqual(len(ctx.all_hobby()), 1)
            self.assertEqual(ctx.find_hobby('magic')[0].name, 'magic')
class AdvancedDemo(SchemaDemo):
    """SchemaDemo extended with a schema-level helper wrapped by @with_ctx."""

    @with_ctx
    def demo(self, ctx=None):
        """Insert a sample person and return the freshly persisted record."""
        buu = Person("Buu", 1000)
        new_id = ctx.person.save(buu)
        buu.ID = new_id
        return ctx.person.by_id(new_id)
class TestWithContext(unittest.TestCase):
    # Exercise a @with_ctx method both without an explicit context (one is
    # opened per call) and with a caller-managed one.
    def test_ms(self):
        db = AdvancedDemo()
        print(db.demo().age)
        with db.ctx() as ctx:
            print(db.demo(ctx=ctx))
class TestHelpers(unittest.TestCase):
    """Tests for the SQL LIKE-pattern escaping helpers."""

    def test_escape(self):
        # escape_like prefixes each special character (_ % @) with '@'
        cases = [
            ('_', '@_'),
            ('%', '@%'),
            ('@', '@@'),
            ('', ''),
            ('usual', 'usual'),
        ]
        for raw, escaped in cases:
            self.assertEqual(escape_like(raw), escaped)
        # a non-string input is rejected outright
        self.assertRaises(Exception, lambda: escape_like(None))
        # every special character in a mixed string gets escaped
        self.assertEqual(escape_like('%_%@'), '@%@_@%@@')
        # the prefix/suffix/substring builders add the SQL '%' wildcard
        self.assertEqual(head_like('a@b'), 'a@@b%')
        self.assertEqual(tail_like('a@b'), '%a@@b')
        self.assertEqual(contain_like('a_@_b'), '%a@_@@@_b%')
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    unittest.main()  # run the whole test suite when executed as a script
| [
"tuananh.ke@gmail.com"
] | tuananh.ke@gmail.com |
8a8f6112f53bb309788221a2b6409b7fa37abe2b | ffd7b823905e1db0462e39017414dbef8bdf8c21 | /src/VOMSAdmin/VOMSAttributesService_services.py | 9ad694bb1ff2f3dfc59309b94439cf8ba0d0b07d | [
"Apache-2.0"
] | permissive | AlbertoPeon/voms-admin-client | 3dd7cd2d71b3c5591dbd96ccca950d460071320d | e628a8e09acc19a106d325154706f300d7651a42 | refs/heads/master | 2021-01-15T17:21:24.956019 | 2012-04-14T08:17:42 | 2012-04-14T08:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,392 | py | #
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
#
##################################################
# VOMSAttributesService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from VOMSAttributesService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
from AttributesFix import *
import ZSI
# Locator
class VOMSAttributesServiceLocator:
VOMSAttributes_address = "https://localhost:8443/glite-security-voms-admin-interface/VOMSAttributes"
def getVOMSAttributesAddress(self):
return VOMSAttributesServiceLocator.VOMSAttributes_address
def getVOMSAttributes(self, url=None, **kw):
return VOMSAttributesSoapBindingSOAP(url or VOMSAttributesServiceLocator.VOMSAttributes_address, **kw)
# Methods
class VOMSAttributesSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# op: createAttributeClass
def createAttributeClass(self, request):
if isinstance(request, createAttributeClassRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=createAttributeClassResponse.typecode.ofwhat, pyclass=createAttributeClassResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: getAttributeClass
def getAttributeClass(self, request):
if isinstance(request, getAttributeClassRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=getAttributeClassResponse.typecode.ofwhat, pyclass=getAttributeClassResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: saveAttributeClass
def saveAttributeClass(self, request):
if isinstance(request, saveAttributeClassRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=saveAttributeClassResponse.typecode.ofwhat, pyclass=saveAttributeClassResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: deleteAttributeClass
def deleteAttributeClass(self, request):
if isinstance(request, deleteAttributeClassRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=deleteAttributeClassResponse.typecode.ofwhat, pyclass=deleteAttributeClassResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: listAttributeClasses
def listAttributeClasses(self, request):
if isinstance(request, listAttributeClassesRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=listAttributeClassesResponse.typecode.ofwhat, pyclass=listAttributeClassesResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: listUserAttributes
def listUserAttributes(self, request):
if isinstance(request, listUserAttributesRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=listUserAttributesResponse.typecode.ofwhat, pyclass=listUserAttributesResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: setUserAttribute
def setUserAttribute(self, request):
if isinstance(request, setUserAttributeRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=setUserAttributeResponse.typecode.ofwhat, pyclass=setUserAttributeResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: deleteUserAttribute
def deleteUserAttribute(self, request):
if isinstance(request, deleteUserAttributeRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=deleteUserAttributeResponse.typecode.ofwhat, pyclass=deleteUserAttributeResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: setGroupAttribute
def setGroupAttribute(self, request):
if isinstance(request, setGroupAttributeRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=setGroupAttributeResponse.typecode.ofwhat, pyclass=setGroupAttributeResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: deleteGroupAttribute
def deleteGroupAttribute(self, request):
if isinstance(request, deleteGroupAttributeRequest1) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=deleteGroupAttributeResponse1.typecode.ofwhat, pyclass=deleteGroupAttributeResponse1.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: listGroupAttributes
def listGroupAttributes(self, request):
if isinstance(request, listGroupAttributesRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=listGroupAttributesResponse.typecode.ofwhat, pyclass=listGroupAttributesResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: setRoleAttribute
def setRoleAttribute(self, request):
if isinstance(request, setRoleAttributeRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=setRoleAttributeResponse.typecode.ofwhat, pyclass=setRoleAttributeResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: deleteRoleAttribute
def deleteRoleAttribute(self, request):
if isinstance(request, deleteRoleAttributeRequest1) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=deleteRoleAttributeResponse1.typecode.ofwhat, pyclass=deleteRoleAttributeResponse1.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# op: listRoleAttributes
def listRoleAttributes(self, request):
if isinstance(request, listRoleAttributesRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", encodingStyle="http://schemas.xmlsoap.org/soap/encoding/", **kw)
# no output wsaction
typecode = Struct(pname=None, ofwhat=listRoleAttributesResponse.typecode.ofwhat, pyclass=listRoleAttributesResponse.typecode.pyclass)
response = self.binding.Receive(typecode)
return response
# ---------------------------------------------------------------------------
# ZSI-generated SOAP message stubs for the VOMS "attributes" service.
# Each <op>Request / <op>Response pair mirrors one WSDL operation: the class
# stores the message parts as `_inN` attributes, and the module-level
# `.typecode = Struct(...)` assignment attaches the ZSI typecode that
# (de)serializes the message. Numeric suffixes (Request1, Request2, ...)
# disambiguate operations whose message names repeat across port types.
# This is generated code: regenerate from the WSDL rather than hand-editing.
# ---------------------------------------------------------------------------
# createAttributeClass(in0: string) -> empty response
class createAttributeClassRequest2:
    def __init__(self):
        self._in0 = None
        return
createAttributeClassRequest2.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","createAttributeClass"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=createAttributeClassRequest2, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class createAttributeClassResponse2:
    def __init__(self):
        return
createAttributeClassResponse2.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","createAttributeClassResponse"), ofwhat=[], pyclass=createAttributeClassResponse2, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# getAttributeClass(in0: string) -> AttributeClass
class getAttributeClassRequest:
    def __init__(self):
        self._in0 = None
        return
getAttributeClassRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","getAttributeClass"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=getAttributeClassRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class getAttributeClassResponse:
    def __init__(self):
        self._getAttributeClassReturn = None
        return
getAttributeClassResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","getAttributeClassResponse"), ofwhat=[ns1.AttributeClass_Def(pname="getAttributeClassReturn", aname="_getAttributeClassReturn", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=getAttributeClassResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# saveAttributeClass(in0: AttributeClass) -> empty response
class saveAttributeClassRequest:
    def __init__(self):
        self._in0 = None
        return
saveAttributeClassRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","saveAttributeClass"), ofwhat=[ns1.AttributeClass_Def(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=saveAttributeClassRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class saveAttributeClassResponse:
    def __init__(self):
        return
saveAttributeClassResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","saveAttributeClassResponse"), ofwhat=[], pyclass=saveAttributeClassResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# deleteAttributeClass(in0: AttributeClass) -> empty response
class deleteAttributeClassRequest1:
    def __init__(self):
        self._in0 = None
        return
deleteAttributeClassRequest1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteAttributeClass"), ofwhat=[ns1.AttributeClass_Def(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=deleteAttributeClassRequest1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class deleteAttributeClassResponse1:
    def __init__(self):
        return
deleteAttributeClassResponse1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteAttributeClassResponse"), ofwhat=[], pyclass=deleteAttributeClassResponse1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# listAttributeClasses() -> ArrayOfAttributeClass
class listAttributeClassesRequest:
    def __init__(self):
        return
listAttributeClassesRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listAttributeClasses"), ofwhat=[], pyclass=listAttributeClassesRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class listAttributeClassesResponse:
    def __init__(self):
        self._listAttributeClassesReturn = None
        return
listAttributeClassesResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listAttributeClassesResponse"), ofwhat=[ns1.ArrayOfAttributeClass_Def(pname="listAttributeClassesReturn", aname="_listAttributeClassesReturn", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listAttributeClassesResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# listUserAttributes(in0: User) -> ArrayOfAttributeValue
class listUserAttributesRequest:
    def __init__(self):
        self._in0 = None
        return
listUserAttributesRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listUserAttributes"), ofwhat=[ns0.User_Def(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listUserAttributesRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class listUserAttributesResponse:
    def __init__(self):
        self._listUserAttributesReturn = None
        return
listUserAttributesResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listUserAttributesResponse"), ofwhat=[ns1.ArrayOfAttributeValue_Def(pname="listUserAttributesReturn", aname="_listUserAttributesReturn", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listUserAttributesResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# setUserAttribute(in0: User, in1: AttributeValue) -> empty response
class setUserAttributeRequest:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        return
setUserAttributeRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setUserAttribute"), ofwhat=[ns0.User_Def(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ns1.AttributeValue_Def(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=setUserAttributeRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class setUserAttributeResponse:
    def __init__(self):
        return
setUserAttributeResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setUserAttributeResponse"), ofwhat=[], pyclass=setUserAttributeResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# deleteUserAttribute(in0: User, in1: AttributeValue) -> empty response
class deleteUserAttributeRequest1:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        return
deleteUserAttributeRequest1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteUserAttribute"), ofwhat=[ns0.User_Def(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ns1.AttributeValue_Def(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=deleteUserAttributeRequest1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class deleteUserAttributeResponse1:
    def __init__(self):
        return
deleteUserAttributeResponse1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteUserAttributeResponse"), ofwhat=[], pyclass=deleteUserAttributeResponse1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# setGroupAttribute(in0: string, in1: AttributeValue) -> empty response
class setGroupAttributeRequest:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        return
setGroupAttributeRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setGroupAttribute"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ns1.AttributeValue_Def(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=setGroupAttributeRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class setGroupAttributeResponse:
    def __init__(self):
        return
setGroupAttributeResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setGroupAttributeResponse"), ofwhat=[], pyclass=setGroupAttributeResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# deleteGroupAttribute(in0: string, in1: AttributeValue) -> empty response
class deleteGroupAttributeRequest1:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        return
deleteGroupAttributeRequest1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteGroupAttribute"),
                                ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True),
                                        ns1.AttributeValue_Def(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)],
                                pyclass=deleteGroupAttributeRequest1,
                                encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class deleteGroupAttributeResponse1:
    def __init__(self):
        return
deleteGroupAttributeResponse1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteGroupAttributeResponse"), ofwhat=[], pyclass=deleteGroupAttributeResponse1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# listGroupAttributes(in0: string) -> ArrayOfAttributeValue
class listGroupAttributesRequest:
    def __init__(self):
        self._in0 = None
        return
listGroupAttributesRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listGroupAttributes"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listGroupAttributesRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class listGroupAttributesResponse:
    def __init__(self):
        self._listGroupAttributesReturn = None
        return
listGroupAttributesResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listGroupAttributesResponse"), ofwhat=[ns1.ArrayOfAttributeValue_Def(pname="listGroupAttributesReturn", aname="_listGroupAttributesReturn", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listGroupAttributesResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# setRoleAttribute(in0: string, in1: string, in2: AttributeValue) -> empty response
class setRoleAttributeRequest:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        self._in2 = None
        return
setRoleAttributeRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setRoleAttribute"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ZSI.TC.String(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ns1.AttributeValue_Def(pname="in2", aname="_in2", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=setRoleAttributeRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class setRoleAttributeResponse:
    def __init__(self):
        return
setRoleAttributeResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","setRoleAttributeResponse"), ofwhat=[], pyclass=setRoleAttributeResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# deleteRoleAttribute(in0: string, in1: string, in2: AttributeValue) -> empty response
class deleteRoleAttributeRequest1:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        self._in2 = None
        return
deleteRoleAttributeRequest1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteRoleAttribute"),
                                ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True),
                                        ZSI.TC.String(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True),
                                        ns1.AttributeValue_Def(pname="in2", aname="_in2", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)],
                                pyclass=deleteRoleAttributeRequest1,
                                encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class deleteRoleAttributeResponse1:
    def __init__(self):
        return
deleteRoleAttributeResponse1.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","deleteRoleAttributeResponse"), ofwhat=[], pyclass=deleteRoleAttributeResponse1, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
# listRoleAttributes(in0: string, in1: string) -> ArrayOfAttributeValue
class listRoleAttributesRequest:
    def __init__(self):
        self._in0 = None
        self._in1 = None
        return
listRoleAttributesRequest.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listRoleAttributes"), ofwhat=[ZSI.TC.String(pname="in0", aname="_in0", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True), ZSI.TC.String(pname="in1", aname="_in1", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listRoleAttributesRequest, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
class listRoleAttributesResponse:
    def __init__(self):
        self._listRoleAttributesReturn = None
        return
listRoleAttributesResponse.typecode = Struct(pname=("http://glite.org/wsdl/services/org.glite.security.voms.service.attributes","listRoleAttributesResponse"), ofwhat=[ns1.ArrayOfAttributeValue_Def(pname="listRoleAttributesReturn", aname="_listRoleAttributesReturn", typed=False, encoded=None, minOccurs=1, maxOccurs=1, nillable=True)], pyclass=listRoleAttributesResponse, encoded="http://glite.org/wsdl/services/org.glite.security.voms.service.attributes")
| [
"andrea.ceccanti@cnaf.infn.it"
] | andrea.ceccanti@cnaf.infn.it |
808052a2b4fdea98185de20e30878e2b5e2d9fbb | 81c7cf8d7e80d9d27a19ddb3600916b084755852 | /vincent_CNN/cnn.py | a6eee2a213a62447673439fabfc780e9d9788494 | [] | no_license | daveguy/COMP652Project | c67eb4389be9424ed3d3301f0bbf623d52e39701 | 30809b86b9515856f987a3d9ef76e603b0762579 | refs/heads/master | 2020-12-31T05:24:03.790773 | 2016-04-23T14:57:39 | 2016-04-23T14:57:39 | 56,445,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,990 | py | '''
Convolutional Neural Network implementation (Using Lasagne helper library for Theano)
Author: Vincent Petrella (modified from Lasagne MNIST tutorial: http://lasagne.readthedocs.org/en/latest/user/tutorial.html)
'''
import random, time, csv
import numpy as np
import theano
import theano.tensor as T
import lasagne
# Input geometry and output size of the network. The .npy filenames in
# load_data ("subj1_series1") suggest per-subject recording series —
# presumably EEG with 32 electrode channels; confirm against the dataset.
num_channels = 32
# Number of time samples per sliding window fed to the network.
window_size = 150
# Number of event classes: one softmax output unit per event.
num_events = 6
def load_data(read_numpy_file=False):
    """Return the (X_train, Y_train, X_val, Y_val) data splits.

    :param read_numpy_file: if True, load the pre-extracted .npy arrays for
        subject 1 / series 1 from the current directory.
    :return: tuple of four arrays (train inputs, train labels,
        validation inputs, validation labels).
    """
    if read_numpy_file:  # idiomatic truth test instead of `== True`
        X_t = np.load('X_train_subj1_series1.npy')
        Y_t = np.load('Y_train_subj1_series1.npy')
        X_v = np.load('X_test_subj1_series1.npy')
        Y_v = np.load('Y_test_subj1_series1.npy')
    else:
        # NOTE(review): `l` is never defined or imported in this module, so
        # this branch raises NameError at runtime. It presumably refers to a
        # feature-loading helper module — restore the missing import.
        X_t, Y_t, X_v, Y_v = l.load_training_and_validation("features/*")
    return X_t, Y_t, X_v, Y_v
def build_cnn(input_var=None, dropoutRate=0.1):
    """Assemble the Lasagne CNN and return its output layer.

    Architecture: Input (batch, 1, window_size, num_channels)
    -> Conv 4 filters (1, 4), ReLU -> MaxPool (1, 2)
    -> Conv 2 filters (1, 2), ReLU -> MaxPool (1, 2)
    -> Dropout(p=dropoutRate) -> Dense(num_events, softmax).

    :param input_var: optional Theano variable bound to the input layer.
    :param dropoutRate: dropout probability before the output layer.
    :return: the final lasagne layer (softmax over `num_events` classes).
    """
    net = lasagne.layers.InputLayer(
        shape=(None, 1, window_size, num_channels), input_var=input_var)
    # First convolution acts along the time axis only (1 x 4 filters).
    net = lasagne.layers.Conv2DLayer(
        net, num_filters=4, filter_size=(1, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    # Downsample time by 2 ('average_exc_pad' would give mean pooling but
    # was observed to be extremely slow — possible Theano issue).
    net = lasagne.layers.Pool2DLayer(net, pool_size=(1, 2), mode='max')
    # Second temporal convolution (1 x 2 filters), again ReLU.
    net = lasagne.layers.Conv2DLayer(
        net, num_filters=2, filter_size=(1, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    net = lasagne.layers.Pool2DLayer(net, pool_size=(1, 2), mode='max')
    # Fully connected softmax output with dropout regularization.
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=dropoutRate),
        num_units=num_events,
        nonlinearity=lasagne.nonlinearities.softmax)
    return net
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) mini-batches of size `batchsize`.

    A trailing partial batch is dropped. When `shuffle` is true, a random
    permutation of indices is drawn once (via numpy's global RNG) and used
    for fancy indexing; otherwise contiguous slices are yielded.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize + 1
    for start in range(0, last_start, batchsize):
        if order is not None:
            sel = order[start:start + batchsize]
        else:
            sel = slice(start, start + batchsize)
        yield inputs[sel], targets[sel]
def train_CNN(X_train,Y_train,X_val,Y_val,num_epochs):
    """Compile the Theano training/prediction functions and run the
    training loop for `num_epochs` epochs, printing per-epoch loss and
    validation accuracy.

    :param X_train: training inputs (4-D array: batch, 1, time, channels).
    :param Y_train: integer class labels for X_train.
    :param X_val: validation inputs.
    :param Y_val: integer class labels for X_val.
    :param num_epochs: number of full passes over the training data.
    """
    print "Validation Data size: " + str(Y_val.shape[0]) + " entries."
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    network = build_cnn(input_var)
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.0001, momentum=0.9)
    # Deterministic forward pass (disables dropout) for evaluation.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], [loss,prediction], updates=updates)
    #Prediction Function
    predict_fn = theano.function([input_var],[T.argmax(test_prediction, axis=1)])
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        # Mini-batches of 100 entries, reshuffled every epoch.
        for batch in iterate_minibatches(X_train, Y_train, 100, shuffle=True):
            inputs, targets = batch
            t = train_fn(inputs,targets)
            train_err += t[0]
            train_batches += 1
        # And a full pass over the validation data:
        # Here we compute the number of True Positive and True negative
        # To then calculate sensitivity and specificity below
        # NOTE(review): counters start at 0.1, presumably to avoid division
        # by zero; val_tpos/val_tneg are currently never updated or printed.
        val_acc = 0.1
        val_tpos = 0.1
        val_tneg = 0.1
        val_pred = predict_fn(X_val)[0]
        for i in range(val_pred.shape[0]):
            if val_pred[i] == Y_val[i]:
                val_acc += 1
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation accuracy:\t\t{:.2f} %".format((val_acc / float(Y_val.shape[0])) * 100))
    # Optionally, you could now dump the network weights to a file like this:
    #np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
    # Script entry point: load the cached .npy splits and train for 200 epochs.
    train_x, train_y, val_x, val_y = load_data(read_numpy_file=True)
    train_CNN(train_x, train_y, val_x, val_y, 200)
"davidlbrq@gmail.com"
] | davidlbrq@gmail.com |
f91de9c4fe70f68674d8418ec69a0c0fc7065996 | 57dfc1f76d75d457406a33818d8aacc5335f448f | /cmg/dataVisualize.py | ddd51d857f6cd29cdd86f537b7d1b930916a6084 | [] | no_license | euphoricpoptarts/IPDPS_2021_Graph_Coarsening | 548d8ffea29088b51ae1fb647d8271005b651894 | 981ae35b7433d8cf4c4419dac08b2ebaac85dbf3 | refs/heads/master | 2023-04-29T01:30:13.671311 | 2021-05-17T04:09:40 | 2021-05-17T04:09:40 | 339,294,239 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | import sys
from glob import glob
from parse import parse
from statistics import mean, stdev
from pathlib import Path
import matplotlib.pyplot as plt
# "<key>: <value>" layout of each line inside a sampling-data file.
lineFormat = "{}: {}"
# Input file stems look like "<graph name>_Sampling_Data".
stemFormat = "{}_Sampling_Data"
# Output image path template: <output dir>/<plot name>.png.
# The original used a backslash ("{}\{}.png"), which is both an invalid
# string escape sequence (SyntaxWarning on modern Python) and a
# Windows-only separator; "/" is accepted on Windows and POSIX alike.
outputFormat = "{}/{}.png"
# Figure size in inches used for every generated plot.
size = (10,10)
class PlotGroup:
    """One tracked metric plus the shared figure that compares it across
    all processed graphs.

    Attributes: `primaryFigure`/`primaryPlot` hold the comparison figure
    and axes, `primaryFigFilename` the (space-free) output stem, `index`
    the line number of this metric inside each data file, and
    `name`/`yaxis` the display labels.
    """

    def __init__(self, name, yaxis, index):
        """Create the comparison figure for metric `name`.

        :param name: human-readable metric name (spaces allowed).
        :param yaxis: y-axis label for the plots.
        :param index: line index of this metric in the sampling-data files.
        """
        comparison_fig = plt.figure(figsize=size)
        self.primaryFigure = comparison_fig
        self.primaryPlot = comparison_fig.add_subplot()
        formatPlot(self.primaryPlot, name + " Comparison", yaxis)
        self.primaryFigFilename = name.replace(" ", "_") + "_Comparison"
        self.index = index
        self.name = name
        self.yaxis = yaxis
def formatPlot(plot, name, yaxis):
    """Apply the shared styling to an axes object: log-log scales, title
    `name`, fixed x-label, and y-label `yaxis`."""
    for apply_style, argument in (
        (plot.set_xscale, 'log'),
        (plot.set_yscale, 'log'),
        (plot.title.set_text, name),
        (plot.set_xlabel, 'Refinement Iterations'),
        (plot.set_ylabel, yaxis),
    ):
        apply_style(argument)
def plotQuantity(dataList, name, yaxis, xdata, outpath, comparePlot):
    """Plot one metric series, normalized by its minimum value.

    Saves a standalone PNG named after `name` under `outpath` and also
    draws the same normalized curve onto `comparePlot` (the shared
    comparison axes for this metric).
    """
    baseline = min(dataList)
    normalized = [point / baseline for point in dataList]
    target_file = outputFormat.format(outpath, name.replace(" ", "_"))
    fig = plt.figure(figsize=size)
    axis = fig.add_subplot()
    formatPlot(axis, name, yaxis)
    axis.plot(xdata, normalized)
    comparePlot.plot(xdata, normalized)
    fig.savefig(target_file)
    plt.close(fig)
def processGraph(filepath, outpath, plotGroups):
    """Read one "<graph>_Sampling_Data" file and plot every metric in
    `plotGroups` against the refinement-iteration counts.

    Each file line is "<key>: <value list>"; line 0 holds tolerances,
    line 1 the refinement iteration counts, and each PlotGroup.index
    selects its metric line. Series are sorted by iteration count before
    plotting.
    """
    # NOTE(review): parse() returns None when a line/stem does not match
    # its template, which would raise TypeError here — confirm inputs.
    graph_name = parse(stemFormat, Path(filepath).stem)[0]
    with open(filepath) as fp:
        values = [parse(lineFormat, line)[1] for line in fp]
    stats = {
        "tolerance": [float(tok) for tok in values[0].split()],
        "refineIter": [float(tok) for tok in values[1].split()],
    }
    for group in plotGroups:
        series = [float(tok) for tok in values[group.index].split()]
        pairs = sorted(zip(stats["refineIter"], series), key=lambda pair: pair[0])
        refine = [pair[0] for pair in pairs]
        series = [pair[1] for pair in pairs]
        plotQuantity(series, graph_name + " " + group.name, group.yaxis,
                     refine, outpath, group.primaryPlot)
def main():
    """CLI entry point: argv[1] = directory of *.txt sampling-data files,
    argv[2] = output directory for the generated PNGs.

    Processes every data file, then saves the accumulated per-metric
    comparison figures.
    """
    dirpath = sys.argv[1]
    outpath = sys.argv[2]
    groups = [
        PlotGroup("Edge Cut Mean", "Normalized Edge Cut Mean", 7),
        PlotGroup("Edge Cut Min", "Normalized Edge Cut Min", 8),
        PlotGroup("Swaps Mean", "Normalized Swaps Mean", 10),
        PlotGroup("Swaps Min", "Normalized Swaps Min", 11),
    ]
    for data_file in glob("{}/*.txt".format(dirpath)):
        processGraph(data_file, outpath, groups)
    for group in groups:
        group.primaryFigure.savefig(
            outputFormat.format(outpath, group.primaryFigFilename))
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
"mike@mikessh.fios-router.home"
] | mike@mikessh.fios-router.home |
52c250248f66ae158898ea2129bf9fa588c8d1cd | 82a76580a4894c48bd9f72f8d9b84d1fb6ed3a77 | /main.py | 46fdc3ba90ca3d0161a3154c3de7f81e1cf3f918 | [] | no_license | RazK/Tofu-Reef-Soup-App | e4cf129b3915684a1ca3ee7c86be4bcafaef93ed | f3956c402faa2cb0aaac532c3e63f61c28fae359 | refs/heads/master | 2020-03-07T15:22:49.428065 | 2018-04-08T12:15:37 | 2018-04-08T12:15:37 | 127,553,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | from random import random
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from PieChartApp import PieChart
class MyPaintWidget(Widget):
    """Free-hand drawing surface: every touch-down starts a new stroke in
    a random RGB color; dragging extends the stroke and stamps colored
    dots along the way."""

    def on_touch_down(self, touch):
        """Begin a stroke: draw a dot at the touch point and open a Line."""
        print("Touching")
        rgb = (random(), random(), random())
        diameter = 30.
        with self.canvas:
            Color(*rgb)
            Ellipse(pos=(touch.x - diameter / 2, touch.y - diameter / 2),
                    size=(diameter, diameter))
            # The Line is stashed on the touch so on_touch_move can extend it.
            touch.ud['line'] = Line(points=(touch.x, touch.y))

    def on_touch_move(self, touch):
        """Extend the current stroke, hue derived from the touch position."""
        # NOTE(review): operator precedence divides only touch.y by 1000;
        # (touch.x + touch.y) / 1000. may have been intended — confirm.
        hue = (touch.x + touch.y / 1000.) % 1
        print("Moving : ", hue)
        diameter = 30.
        with self.canvas:
            Color(hue, 1, 1, mode='hsv')
            Ellipse(pos=(touch.x - diameter / 2, touch.y - diameter / 2),
                    size=(diameter, diameter))
            touch.ud['line'].points += [touch.x, touch.y]
class MyPaintApp(App):
    """Kivy application: a paint canvas with a Clear button."""

    def build(self):
        """Assemble and return the root widget tree."""
        root = Widget()
        self.painter = MyPaintWidget()
        clear_button = Button(text='Clear')
        clear_button.bind(on_release=self.clear_canvas)
        root.add_widget(self.painter)
        root.add_widget(clear_button)
        return root

    def clear_canvas(self, obj):
        """Erase everything drawn so far (bound to the Clear button)."""
        self.painter.canvas.clear()
# Launch the Kivy event loop when run as a script.
if __name__ == '__main__':
    MyPaintApp().run()
"Raz@yosigal.com"
] | Raz@yosigal.com |
77e7cc5640e890f2d61a864488a0607592a374d8 | 5ede23dc0292d4de375eed434148a1e0f5e45c4c | /face_eye.py | 5af7ff9fc7b89babab7663b64fd00dc68609e2d5 | [] | no_license | themailman05/FaceDetectionTutorial | 85d4718b838cd157fab865208abd3d0b141083e3 | 80ea4153545893ec890f39645f242f08c8113e30 | refs/heads/master | 2021-01-23T13:49:57.042145 | 2014-12-10T22:23:18 | 2014-12-10T22:23:18 | 27,513,526 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import cv2
import sys

# Command-line arguments: image to scan, face-cascade XML, eye-cascade XML.
imagePath = sys.argv[1]
firstPath = sys.argv[2]
secondPath = sys.argv[3]

# Haar cascade classifiers for faces and eyes (OpenCV 2.x API: cv2.cv.*).
faceCascade = cv2.CascadeClassifier(firstPath)
eyeCascade = cv2.CascadeClassifier(secondPath)

# Load the image and convert it to grayscale for detection.
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detection parameters shared by both cascades; only the scale factor differs.
detect_kwargs = dict(
    minNeighbors=5,
    minSize=(30, 30),
    flags=cv2.cv.CV_HAAR_SCALE_IMAGE,
)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, **detect_kwargs)
eyes = eyeCascade.detectMultiScale(gray, scaleFactor=1.2, **detect_kwargs)

# Outline detected faces in green and eyes in blue (colors are BGR).
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
for (x, y, w, h) in eyes:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

# Show the annotated image until a key is pressed.
cv2.imshow("Faces & Eyes found", image)
cv2.waitKey(0)
| [
"castborg@Daniels-MacBook-Pro.local"
] | castborg@Daniels-MacBook-Pro.local |
ab6b6c52579d74e382ce37f2ebe8f535a24dbc3f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_59/455.py | 3e6cea92b28872ec185a3a22fdbde19106c357b6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,472 | py | #! /usr/bin/env python
import sys

# Read the whole input file up front. `open` inside a `with` block replaces
# the Python-2-only `file()` builtin and guarantees the handle is closed
# (the original never closed it).
with open(sys.argv[1]) as f:
    lines = [ln.strip() for ln in f.readlines()]

# Input layout: line 0 holds the number of test cases T; each case starts
# with an "n m" header followed by n existing directories and m directories
# that must be created.
T = int(lines[0])
print('%s contains %i (T) test cases' % (sys.argv[1], T))
cases = []
ind = 1
for i in range(T):
    n, m = [int(k) for k in lines[ind].split(' ')]
    ind = ind + 1
    dirsExisting = lines[ind:ind + n]
    ind = ind + n
    dirsToBeCreated = lines[ind:ind + m]
    ind = ind + m
    cases.append([dirsExisting, dirsToBeCreated])
print(cases)
class directoryNode:
    """One node of an in-memory directory tree.

    Each node records its `name`, a reference to its `parent` (None for
    the root), its depth `level`, and its direct children in `folders`.
    """

    def __init__(self, name, parent, level):
        """Create a childless node named `name` at depth `level`."""
        self.name = name
        self.parent = parent
        self.level = level
        self.folders = []

    def has_folder(self, fname):
        """Return True if a direct child named `fname` exists."""
        for child in self.folders:
            if child.name == fname:
                return True
        return False

    def create_folder(self, fname):
        """Append a new child directory named `fname` one level deeper."""
        child = directoryNode(fname, self, self.level + 1)
        self.folders.append(child)

    def get_folder(self, fname):
        """Return the first direct child named `fname`.

        Raises ValueError (via list.index) when no such child exists;
        callers in this file always check has_folder first.
        """
        name_matches = [child.name == fname for child in self.folders]
        return self.folders[name_matches.index(True)]

    def __repr__(self):
        """Path from the root; the root's parent is None, so every path is
        prefixed with the literal string 'None'."""
        return '%s/%s' % (repr(self.parent), self.name)
def directoryProblem(iDirs, mDirs):
    """Return how many mkdir calls are needed to create `mDirs` given that
    the directories in `iDirs` already exist.

    Builds the existing tree first (its creation count is discarded), then
    counts only the nodes newly created while inserting `mDirs`.
    """
    directoryRoot = directoryNode('',None,0)
    def mkdirs(dirsClean):
        """Insert every path of `dirsClean` into the shared tree and return
        the number of nodes that had to be created.

        Paths are processed in sorted order so consecutive paths share long
        common prefixes; `currentDir`/`currentFolders` track the position
        reached for the previous path and are only rolled back to the
        common prefix instead of restarting from the root each time.
        """
        creations = 0
        currentDir = directoryRoot
        dirs = sorted(dirsClean)
        currentFolders = []
        for d in dirs:
            # Paths look like "/a/b/c"; drop the empty leading component.
            folders = d.split('/')[1:]
            #print('d,folders',d,folders)
            # j = length of the common prefix with the previous path.
            j = 0
            while j < min(len(folders),len(currentFolders)) and folders[j] == currentFolders[j]:
                j = j + 1
            # rolling back required dirs
            while len(currentFolders) > j:
                currentDir = currentDir.parent
                del currentFolders[-1]
            #print('currentDir, currentFolders',currentDir, currentFolders)
            # Walk (and create where missing) the remaining components.
            for fold in folders[j:]:
                if not currentDir.has_folder(fold):
                    currentDir.create_folder(fold)
                    creations = creations + 1
                currentDir = currentDir.get_folder(fold)
            currentFolders = folders
        return creations
    # The first call only populates the tree with the pre-existing
    # directories; its count (c1) is intentionally unused.
    c1 = mkdirs(iDirs)
    c2 = mkdirs(mDirs)
    return c2
# Solve every parsed case and collect one "Case #k: <mkdir count>" line each.
results = []
for t in range(T):
    print('case %i of %i' % (t + 1, T))
    print(cases[t])
    res = directoryProblem(*cases[t])
    results.append('Case #%i: %i' % (t + 1, res))
    print(results[-1])

# Write the answers next to the input file (foo.in -> foo.out). `open` in a
# `with` block replaces the Python-2-only `file()` builtin and guarantees
# the handle is flushed and closed even if the write raises.
with open(sys.argv[1].replace('.in', '.out'), 'w') as f:
    f.write('\n'.join(results))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e876905aa9164e349da05d0b1b8ed896b2ad4179 | bd9b24955dc0f0de5788043736c096b31ef48aa4 | /listtojson.py | c53fc0d327362bcc05aafada30898d6821a4793a | [
"MIT"
] | permissive | Exal117/pywikibot-core | 27033b116bdc783d056b9ea397522b36167fe5db | 731b0811ad31b227f822c2bc977b098d4ecea3a7 | refs/heads/master | 2020-12-25T23:19:32.393233 | 2014-12-14T09:43:32 | 2014-12-14T09:43:32 | 27,010,993 | 0 | 0 | null | 2018-01-04T14:45:32 | 2014-11-22T20:11:43 | Python | UTF-8 | Python | false | false | 5,385 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import pywikibot
import json
import unicodedata
def getListsFromList(paginaLlistes):
lists = re.findall(ur'\*{1,2}[:space:]{0,}(\[{2}Llista [a-zA-Z0-9 \:]+(.*?)\]{2})',paginaLlistes, re.S)
for l in lists:
if (len(l) > 0):
var = l[0]
var = re.sub(u'\[','',var)
var = re.sub(u'\]','',var)
getJSONfromPageList(var)
#exit(0)
def getJSONfromPageList(pagename):
    """Scrape the "{{filera IPA ...}}" monument templates from a Catalan
    Wikipedia page and serialize them as a JSON string.

    The JSON is built by hand (string concatenation — note: values are NOT
    escaped) as {"llista": [{...}, ...]}, written to "<pagename>.json" via
    writeJSONintoFile(), and returned. If the page contains no templates it
    is assumed to be an index of other lists: getListsFromList() is invoked
    on it and '' is returned.
    """
    site=pywikibot.Site('ca','wikipedia')
    page = pywikibot.Page(site, u"%s" % pagename)
    pagetext = page.get()
    # Keep only the content before the "== Vegeu també ==" (see also) section.
    match = re.search(u'(.*?)\={2} Vegeu també \={2}', pagetext, re.S)
    if (match == None):
        pagetext2 = pagetext
    else:
        pagetext2 = match.group(0)
    if not pagetext2:
        pagetext2 = pagetext
    templates = re.findall(u'\{{2}filera IPA(.*?)\}{2}',pagetext2, re.S)
    if (len(templates) > 0):
        jsonresult = '{"llista":['
        i = 1
        for template in templates:
            # Template parameters: one "|key = value" per line.
            attributes = re.findall(u'\|(.*?)\n', template, re.S)
            more_names = ''
            monument_json_string = '{'
            j = 1
            for a in attributes:
                if "=" in a:
                    key, value = a.split("=",1)
                else:
                    key, value = a, ""
                key = key.strip()
                value = value.strip()
                # Drop inline <ref>...</ref> citations from the value.
                if '<ref' in value:
                    value=re.sub(ur'\<ref(.*?)\<\/ref\>','',value)
                if ('estil' == key):
                    # "estil" (style) may carry "style<br/>architect".
                    if ('<br' in value):
                        estil_arquitecte = re.split(ur'\<br +\/\>',value)
                        monument_json_string+=('"%s":"%s"') % (key, estil_arquitecte[0].strip())
                        if (len(estil_arquitecte) == 2):
                            arq = estil_arquitecte[1]
                            if ('[' in arq):
                                arq = arq.replace('[','')
                                arq = arq.replace(']','')
                            monument_json_string+=(',"arquitecte":"%s"') % (arq.strip())
                    else:
                        monument_json_string+=('"%s":"%s"') % (key,value)
                elif ('nom' == key):
                    # "nom" (name): pick one main name, collect the rest in
                    # more_names (';'-separated) for the "Altresnoms" field.
                    main_name = ''
                    # NOTE(review): `&` is a bitwise AND on the two bools —
                    # works here, but `and` was presumably intended.
                    if (('[' in value) & (',' not in value)):
                        # Handles names like "Església de [[Besalú]]": a plain
                        # text name containing a single wiki link.
                        value = re.sub(u'\[','',value)
                        value = re.sub(u'\]','',value)
                        if ('|' in value):
                            allnames = re.split(ur'\|', value.strip())
                            main_name = allnames[0]
                            for index in range(1,len(allnames)):
                                more_names+=allnames[index]
                                more_names+=';'
                        else:
                            main_name = value
                    else:
                        amb_article = re.findall(r'\[{2}(.*?)\]{2}',value);
                        if (len(amb_article) > 0):
                            # A bracketed [[...]] name was found: it takes
                            # priority as the main name.
                            if '|' in amb_article[0]:
                                allnames = amb_article[0].split('|')#re.split(r'|',)
                                main_name = allnames[0]
                                for index in range(1,len(allnames)):
                                    altnames = re.split(ur',(?! [0-9]{1,3})',allnames[index])
                                    for it in altnames:
                                        more_names+=it.strip()
                                        more_names+=';'
                                        # NOTE(review): `it.strip` is not
                                        # called — this formats the bound
                                        # method's repr into the pattern;
                                        # `it.strip()` was surely intended.
                                        value = re.sub('%s' % it.strip, '',value)
                            else:
                                allnames = re.split(ur',(?! [0-9]{1,3})',value)
                                main_name = amb_article[0]
                                for index in range(1,len(allnames)):
                                    altnames = re.split(ur',(?! [0-9]{1,3})',allnames[index])
                                    for it in altnames:
                                        more_names+=it.strip()
                                        more_names+=';'
                                        # NOTE(review): same missing-call bug
                                        # as above (`it.strip` vs `it.strip()`).
                                        value = re.sub('%s'%it.strip, '',value)
                        # Strip remaining [[...]] links, then treat the rest
                        # as a comma-separated list of alternative names
                        # (commas followed by short numbers are kept, to
                        # avoid splitting street numbers).
                        value = re.sub(ur'\[{2}(.*?)\]{2}','',value)
                        alternative_names = re.split(ur',(?! [0-9]{1,3})',value)
                        for ite in range(0,len(alternative_names)):
                            attr = alternative_names[ite]
                            attr = attr.strip(' ')
                            if attr:
                                if ('|' in attr):
                                    different_names = attr.split('|')
                                    if (main_name == ''):
                                        value = different_names[0]
                                    for d in range(1,len(different_names)):
                                        if (different_names[d] not in more_names):
                                            more_names+=different_names[d]
                                            more_names+=';'
                                else:
                                    if (ite == 0):
                                        if not main_name:
                                            main_name = attr
                                        else:
                                            if (attr not in more_names):
                                                more_names+=attr.strip()
                                                more_names+=';'
                                    else:
                                        if (attr not in more_names):
                                            more_names+=attr.strip()
                                            more_names+=';'
                    monument_json_string+=('"%s":"%s"') % (key, main_name)
                elif ('lat' == key):
                    if 'lon' in value:
                        # The longitude is on the same line ("lat|lon=..."),
                        # so emit both coordinates.
                        coords = re.split(ur'\|',value)
                        monument_json_string+=('"%s":"%s",') % (key, coords[0].strip())
                        lon = coords[1].split('=')
                        monument_json_string+=('"%s":"%s"') % (lon[0].strip(), lon[1].strip())
                    else:
                        monument_json_string+=('"%s":"%s"') % (key, value.strip())
                else:
                    monument_json_string+=('"%s":"%s"') % (key, value.strip())
                if (j < len(attributes)):
                    monument_json_string+=','
                j+=1
            if more_names:
                monument_json_string+=(',"Altresnoms":"%s"') % (more_names)
            monument_json_string+='}'
            if (i < len(templates)):
                monument_json_string+=','
            jsonresult+=monument_json_string
            i+=1
        jsonresult+=']}'
        nom = pagename.replace(' ','')
        writeJSONintoFile(nom,jsonresult)
        return jsonresult
    else:
        getListsFromList(pagetext2)
        return ''
def writeJSONintoFile(name, jsonstring):
    """Write `jsonstring` UTF-8-encoded to '<name>.json' (spaces removed
    from `name`) in the current directory.

    On an I/O failure the error is reported and the process exits with a
    non-zero status (the original leaked the open handle and exited with
    status 0, masking the failure).
    """
    no_espaces_name = '%s.json' % name.replace(' ', '')
    try:
        # Binary mode: we write explicitly encoded bytes, which behaves the
        # same under Python 2 and 3; the `with` block closes the file even
        # when the write raises.
        with open(no_espaces_name, 'wb') as f:
            f.write(jsonstring.encode("utf-8"))
    except (OSError, IOError) as e:
        print('Error writing json into file:')
        print(e)
        sys.exit(1)
| [
"alex.cortijo117@gmail.com"
] | alex.cortijo117@gmail.com |
dc56ce3d672acadfd6e6b640ae3c4b0cf870703f | c28b15dfca3212f0f28727c7ad9d6193936598dc | /tasks/_iblrig_tasks_trainingChoiceWorld/_iblrig_tasks_trainingChoiceWorld.py | 7e7cf1d44f7c01a2c0531d5126b05a0bfb48d72f | [
"MIT"
] | permissive | justinplittle/iblrig | 78d0703cda0b8623015f77939220128f20a997e6 | 0ee17a14633f0dc7b87f268f433a027e73fd6d57 | refs/heads/master | 2022-11-30T07:20:20.637002 | 2020-08-12T17:04:41 | 2020-08-12T17:04:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,435 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Niccolรฒ Bonacchi
# @Date: 2018-02-02 12:31:13
import logging
import matplotlib.pyplot as plt
from pybpodapi.protocol import Bpod, StateMachine
import online_plots as op
import task_settings
import user_settings
from iblrig.bpod_helper import BpodMessageCreator
from session_params import SessionParamHandler
from trial_params import TrialParamHandler
# Module-level logger for the task runtime.
log = logging.getLogger("iblrig")
log.setLevel(logging.INFO)
# NOTE(review): `global` at module level is a no-op — `sph` is already a
# module global; the statement is kept only for symmetry with the handlers.
global sph
# Session-wide parameters (hardware config, sound indices, paths, ...).
sph = SessionParamHandler(task_settings, user_settings)
def bpod_loop_handler():
    """Pump the matplotlib GUI event queue on every Bpod loop iteration so
    the online plots (module-level figure ``f``) stay responsive while the
    task is running."""
    f.canvas.flush_events()  # ~100 microseconds per call
def softcode_handler(data):
    """Dispatch a soft code emitted by the Bpod state machine.

    Codes: 0 stop all sounds, 1 play the go tone, 2 play white noise,
    3 start the camera recording.  Unknown codes are ignored.

    Latency is bounded mainly by the screen refresh (~16.667 ms at 60 Hz),
    so these handlers are fast enough in practice.
    """
    global sph
    if data == 0:
        sph.stop_sound()
        return
    if data == 1:
        sph.play_tone()
        return
    if data == 2:
        sph.play_noise()
        return
    if data == 3:
        sph.start_camera_recording()
# =============================================================================
# CONNECT TO BPOD
# =============================================================================
bpod = Bpod()
# Loop handler function is used to flush events for the online plotting
bpod.loop_handler = bpod_loop_handler
# Soft code handler function can run arbitrary code from within state machine
bpod.softcode_handler_function = softcode_handler
# Pre-build the serial messages sent to the Bpod peripherals so the trial
# loop only has to reference them by name.
msg = BpodMessageCreator(bpod)
re_reset = msg.rotary_encoder_reset()
bonsai_hide_stim = msg.bonsai_hide_stim()
bonsai_show_stim = msg.bonsai_show_stim()
bonsai_close_loop = msg.bonsai_close_loop()
bonsai_freeze_stim = msg.bonsai_freeze_stim()
# NOTE(review): sc_play_tone / sc_play_noise are created but never used
# below — presumably kept for sound-card setups; confirm before removing.
sc_play_tone = msg.sound_card_play_idx(sph.GO_TONE_IDX)
sc_play_noise = msg.sound_card_play_idx(sph.WHITE_NOISE_IDX)
bpod = msg.return_bpod()
# =============================================================================
# TRIAL PARAMETERS AND STATE MACHINE
# =============================================================================
global tph
tph = TrialParamHandler(sph)
# Online-plotting figure and axes, updated once per completed trial.
f, axes = op.make_fig(sph)
plt.pause(1)
# One loop iteration per trial: build a fresh state machine, send it to the
# Bpod, block until the trial ends, then update bookkeeping and the plots.
for i in range(sph.NTRIALS):  # Main loop
    tph.next_trial()
    log.info(f"Starting trial: {i + 1}")
    # =============================================================================
    # Start state machine definition
    # =============================================================================
    sma = StateMachine(bpod)
    if i == 0:  # First trial exception: wait for camera before starting
        log.info("Waiting for camera pulses...")
        sma.add_state(
            state_name="trial_start",
            state_timer=0,
            state_change_conditions={"Port1In": "reset_rotary_encoder"},
            output_actions=[("SoftCode", 3)],
        )  # start camera
    else:
        sma.add_state(
            state_name="trial_start",
            state_timer=0,  # ~100 microseconds hardware irreducible delay
            state_change_conditions={"Tup": "reset_rotary_encoder"},
            output_actions=[tph.out_stop_sound],
        )  # stop all sounds
    sma.add_state(
        state_name="reset_rotary_encoder",
        state_timer=0,
        state_change_conditions={"Tup": "quiescent_period"},
        output_actions=[("Serial1", re_reset)],
    )
    # Any wheel movement during the quiescent period restarts the wait.
    sma.add_state(  # '>back' | '>reset_timer'
        state_name="quiescent_period",
        state_timer=tph.quiescent_period,
        state_change_conditions={
            "Tup": "stim_on",
            tph.movement_left: "reset_rotary_encoder",
            tph.movement_right: "reset_rotary_encoder",
        },
        output_actions=[],
    )
    sma.add_state(
        state_name="stim_on",
        state_timer=0.1,
        state_change_conditions={
            "Tup": "interactive_delay",
            "BNC1High": "interactive_delay",
            "BNC1Low": "interactive_delay",
        },
        output_actions=[("Serial1", bonsai_show_stim)],
    )
    sma.add_state(
        state_name="interactive_delay",
        state_timer=tph.interactive_delay,
        state_change_conditions={"Tup": "play_tone"},
        output_actions=[],
    )
    sma.add_state(
        state_name="play_tone",
        state_timer=0.1,
        state_change_conditions={
            "Tup": "reset2_rotary_encoder",
            "BNC2High": "reset2_rotary_encoder",
        },
        output_actions=[tph.out_tone],
    )
    sma.add_state(
        state_name="reset2_rotary_encoder",
        state_timer=0,
        state_change_conditions={"Tup": "closed_loop"},
        output_actions=[("Serial1", re_reset)],
    )
    # Closed loop: the subject's wheel movement drives the stimulus until a
    # reward/error threshold is crossed or the response window elapses.
    sma.add_state(
        state_name="closed_loop",
        state_timer=tph.response_window,
        state_change_conditions={
            "Tup": "no_go",
            tph.event_error: "freeze_error",
            tph.event_reward: "freeze_reward",
        },
        output_actions=[("Serial1", bonsai_close_loop)],
    )
    sma.add_state(
        state_name="no_go",
        state_timer=tph.iti_error,
        state_change_conditions={"Tup": "exit_state"},
        output_actions=[("Serial1", bonsai_hide_stim), tph.out_noise],
    )
    sma.add_state(
        state_name="freeze_error",
        state_timer=0,
        state_change_conditions={"Tup": "error"},
        output_actions=[("Serial1", bonsai_freeze_stim)],
    )
    sma.add_state(
        state_name="error",
        state_timer=tph.iti_error,
        state_change_conditions={"Tup": "hide_stim"},
        output_actions=[tph.out_noise],
    )
    sma.add_state(
        state_name="freeze_reward",
        state_timer=0,
        state_change_conditions={"Tup": "reward"},
        output_actions=[("Serial1", bonsai_freeze_stim)],
    )
    sma.add_state(
        state_name="reward",
        state_timer=tph.reward_valve_time,
        state_change_conditions={"Tup": "correct"},
        output_actions=[("Valve1", 255)],
    )
    sma.add_state(
        state_name="correct",
        state_timer=tph.iti_correct,
        state_change_conditions={"Tup": "hide_stim"},
        output_actions=[],
    )
    sma.add_state(
        state_name="hide_stim",
        state_timer=0.1,
        state_change_conditions={
            "Tup": "exit_state",
            "BNC1High": "exit_state",
            "BNC1Low": "exit_state",
        },
        output_actions=[("Serial1", bonsai_hide_stim)],
    )
    sma.add_state(
        state_name="exit_state",
        state_timer=0.5,
        state_change_conditions={"Tup": "exit"},
        output_actions=[],
    )
    # Send state machine description to Bpod device
    bpod.send_state_machine(sma)
    # Run state machine
    if not bpod.run_state_machine(sma):  # Locks until state machine 'exit' is reached
        break
    tph = tph.trial_completed(bpod.session.current_trial.export())
    # NOTE(review): as_data is never read afterwards — the call is kept for
    # its side effect of saving the ambient sensor data to disk.
    as_data = tph.save_ambient_sensor_data(bpod, sph.SESSION_RAW_DATA_FOLDER)
    tph.show_trial_log()
    # Update online plots
    op.update_fig(f, axes, tph)
    tph.check_sync_pulses()
    stop_crit = tph.check_stop_criterions()
    if stop_crit and sph.USE_AUTOMATIC_STOPPING_CRITERIONS:
        # NOTE(review): this rebinds `msg`, clobbering the BpodMessageCreator
        # created above; harmless only because that object is no longer used.
        if stop_crit == 1:
            msg = "STOPPING CRITERIA Nยบ1: PLEASE STOP TASK AND REMOVE MOUSE\
            \n< 400 trials in 45min"
            f.patch.set_facecolor("xkcd:mint green")
        elif stop_crit == 2:
            msg = "STOPPING CRITERIA Nยบ2: PLEASE STOP TASK AND REMOVE MOUSE\
            \nMouse seems to be inactive"
            f.patch.set_facecolor("xkcd:yellow")
        elif stop_crit == 3:
            msg = "STOPPING CRITERIA Nยบ3: PLEASE STOP TASK AND REMOVE MOUSE\
            \n> 90 minutes have passed since session start"
            f.patch.set_facecolor("xkcd:red")
        if not sph.SUBJECT_DISENGAGED_TRIGGERED and stop_crit:
            patch = {
                "SUBJECT_DISENGAGED_TRIGGERED": stop_crit,
                "SUBJECT_DISENGAGED_TRIALNUM": i + 1,
            }
            sph.patch_settings_file(patch)
        # NOTE(review): list comprehension used purely for its side effect
        # (repeat the warning 5 times so it stands out in the log).
        [log.warning(msg) for x in range(5)]
# Release the Bpod serial connection once the session is over.
bpod.close()


if __name__ == "__main__":
    # The real work happens at import time above; this is just a marker.
    print("main")
| [
"nbonacchi@gmail.com"
] | nbonacchi@gmail.com |
eaddbd9e7beeadcf5d48bd8734cbeb4e41feef36 | be4150f4381f3f59453b564af7e3d73089190521 | /manage.py | fa7d0faab5b1fc49a492c96eb547714995f495f9 | [] | no_license | wolf811/rsps-new | 75d3cda36e5c599b768a80d70959e13eeb679732 | cda55af71164220dd5f1158adcf824a4697d1863 | refs/heads/master | 2020-04-19T09:45:22.658273 | 2019-05-18T09:29:55 | 2019-05-18T09:29:55 | 168,119,596 | 0 | 1 | null | 2019-05-18T09:29:56 | 2019-01-29T08:36:51 | HTML | UTF-8 | Python | false | false | 536 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Django command-line entry point: point Django at the project settings
    # (unless the environment already overrides it) and hand the argv over
    # to the management command dispatcher.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rsps.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install,
        # wrong PYTHONPATH, virtualenv not activated).
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"wolf811@gmail.com"
] | wolf811@gmail.com |
b373b1aad1f299786843fccd8f279956c726152f | 5025cc322727a0c59f238d4e50ad4405ea529985 | /SEMI_LEARN/GAN/net.py | 4cf98f40529351b7c527c02bb904222b1b8b67ca | [] | no_license | yugitti/learn-ai2 | c19fd5ebdb80bb7643e0649d301abd35c39cb363 | e10e32991c5bbffc104fc2ec43b0e8fce8f41dc3 | refs/heads/master | 2020-04-11T11:35:57.665102 | 2018-12-14T11:01:22 | 2018-12-14T11:01:22 | 161,753,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,741 | py | import torch.nn as nn
import torch
class Generator(nn.Module):
    """DCGAN-style generator.

    Maps a latent vector z of shape (N, nz, 1, 1) to an nc-channel 32x32
    image in [-1, 1] through a stack of transposed convolutions.
    """

    def __init__(self, nz, ngf, nc):
        super(Generator, self).__init__()
        # Each ConvTranspose2d grows the spatial size:
        #   out = (in - 1) * stride - 2 * padding + kernel
        layers = [
            # z -> (ngf*8) x 4 x 4
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # -> (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # -> (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # -> nc x 32 x 32, squashed to [-1, 1]
            nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Run a latent batch through the deconvolution stack."""
        return self.model(input)
class MiniBatchDiscriminator(nn.Module):
    """Minibatch-discrimination layer (Salimans et al. 2016, "Improved
    Techniques for Training GANs").

    Projects each feature vector through a learned tensor T (A -> B*C),
    measures L1 distances between all pairs of samples in the batch, and
    appends per-sample similarity statistics (B extra features) to the
    input so the discriminator can detect mode collapse.
    """

    def __init__(self, A, B, C, device, batch_size):
        # A: input feature size, B: number of kernels, C: kernel dimension.
        super(MiniBatchDiscriminator, self).__init__()
        self.A, self.B, self.C = A, B, C
        self.device = device
        # Identity "eraser" used to mask out each sample's distance to itself.
        self.eraser = torch.eye(batch_size).view(batch_size, 1, batch_size).to(self.device)
        T_init = torch.randn([A, B * C])
        # NOTE(review): calling .to(device) on an nn.Parameter returns a plain
        # tensor; on a non-CPU device self.T would not be registered as a
        # module parameter (and would not appear in .parameters()) — confirm.
        self.T = nn.Parameter(T_init, requires_grad=True).to(device)

    def forward(self, x):
        # x: (batch, A) feature matrix from the discriminator trunk.
        batch_size = x.size()[0]
        # Project into B kernels of dimension C.
        m = x.mm(self.T)
        m = m.view(-1, self.B, self.C)
        m = m.unsqueeze(-1)
        m_T = torch.transpose(m, 0, 3)
        # Broadcast so every sample is compared against every other sample.
        m = m.expand(batch_size, -1, -1, batch_size)
        m_T = m_T.expand(batch_size, -1, -1, batch_size)
        # Pairwise L1 distance over the kernel dimension.
        norm2 = torch.sum(torch.abs(m - m_T), dim=2)
        # Slice the pre-built eraser in case the last batch is smaller.
        eraser = self.eraser[:batch_size, :, :batch_size]
        eraser = eraser.expand_as(norm2)
        # exp(-distance); the 1e6 penalty zeroes out self-similarity terms.
        c_b2 = torch.exp(-(norm2 + 1e6 * eraser))
        o_b2 = torch.sum(c_b2, dim=2)
        # Concatenate the similarity statistics onto the original features.
        output = torch.cat((x, o_b2), 1)
        return output
class Discriminator(nn.Module):
    """DCGAN-style discriminator/classifier with optional minibatch
    discrimination.

    The conv trunk downsamples the input by 2 at each stage; the fully
    connected heads produce 10 class logits.  ``forward`` returns both the
    logits and the intermediate features (useful for feature matching).
    """

    def __init__(self, nc, ndf, device, batch_size, minibatch=True):
        super(Discriminator, self).__init__()
        self.ndf = ndf
        # Minibatch-discriminator dimensions: A input features, B kernels
        # of dimension C.
        self.A = ndf*8
        self.B = 128
        self.C = 16
        self.minibatch_flag = minibatch
        self.minibatch = MiniBatchDiscriminator(self.A, self.B, self.C, device, batch_size)
        # Spatial size after each stride-2 conv:
        #   out = (in + 2*padding - kernel) / stride + 1, i.e. halved each time.
        # fn1 below expects ndf*8 x 2 x 2 features, which corresponds to a
        # 32x32 input (32 -> 16 -> 8 -> 4 -> 2) — presumably CIFAR-sized
        # images, not the 64x64 mentioned in the original comments.
        self.model = nn.Sequential(
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # Feature head, plain classifier head, and the classifier head used
        # when minibatch discrimination appends B extra features.
        self.fn1 = nn.Linear(ndf * 8 * 2 * 2, ndf * 8)
        self.fn2 = nn.Linear(ndf*8, 10)
        self.fn2_mb = nn.Linear(ndf*8 + self.B, 10)

    def forward(self, h, feature_matching = False):
        # NOTE(review): feature_matching is accepted but unused — the
        # features are always returned alongside the logits.
        x = self.model(h)
        x = x.view(-1, self.ndf * 8 * 2 * 2)
        x = self.fn1(x)
        if self.minibatch_flag is True:
            # Append minibatch-similarity statistics before classifying.
            x = self.minibatch(x)
            output = self.fn2_mb(x)
        else:
            output = self.fn2(x)
        # Returns (logits, features); features include the minibatch
        # statistics when minibatch discrimination is enabled.
        return output, x
def weights_init(m):
    """DCGAN weight initialisation, applied via ``net.apply(weights_init)``.

    Conv / transposed-conv weights are drawn from N(0, 0.02); BatchNorm
    weights from N(1, 0.02) with biases zeroed.  Other modules are left
    untouched.
    """
    classname = m.__class__.__name__
    # BUG FIX: the original searched for lowercase 'conv', which never
    # matches 'Conv2d'/'ConvTranspose2d', so conv layers were never
    # re-initialised and silently kept PyTorch's default init.
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
"s18150@s68.lab"
] | s18150@s68.lab |
ba1d09dfa2be98c97876545eb931ac5de3435418 | 360bfa89ca9d65fb2c25aef4442bc9eb4cbe191e | /libs/common/Auth.py | e1dc2f8054ea987e5a9db4c01015ac46c812ee04 | [] | no_license | onceWarmth/WarehousePurchase | 36625a8b0bc368837d0be6cad0b6cf1441d135ee | f34b83aeb6a373571c0a19831d16b1a1a946dabc | refs/heads/master | 2020-12-30T11:52:27.967392 | 2017-06-18T02:30:36 | 2017-06-18T02:30:36 | 91,433,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # coding:utf-8
from libs.rModels.User import *
from libs.common.Password import *
import re
class Auth(object):
    """Session-based authentication/authorisation helper.

    Wraps a (Django-style) request object and provides credential
    verification, login/logout bookkeeping in the session, and URL
    permission checks driven by ``self.permission``.
    """

    def __init__(self, request):
        super(Auth, self).__init__()
        self.request = request
        # Maps a user type (e.g. "adminUser", "visitor") to the list of URL
        # regex patterns that type may access.  Populated by callers or
        # subclasses; an empty/missing entry means "deny everything".
        self.permission = {
        }

    def verify(self, username, password, method="sha256"):
        """Check *username*/*password* against the stored credentials.

        Returns the user record on success, ``False`` otherwise.
        (*method* is unused: the stored record carries its own algorithm.)
        """
        res = User.get(id=username)
        if res:
            salt = res["password"]["salt"]
            algorithm = res["password"]["algorithm"]
            hashPass = res["password"]["hash"]
            result = encryption(password, algorithm, salt)
            if result == hashPass:
                return res
        return False

    def login(self, username, password, method="sha256"):
        """Verify credentials and record the user in the session.

        Returns ``True`` on success, ``False`` on bad credentials.
        """
        user = self.verify(username, password)
        if user:
            self.request.session["user"] = {
                "username": user["id"],
                "type": user["type"],
            }
            # TODO: write an audit-log entry here.
            return True
        else:
            return False

    def logout(self):
        """Remove the user from the session.

        Returns the logged-out username, or ``False`` if nobody was logged in.
        """
        try:
            username = self.request.session["user"]["username"]
            del self.request.session['user']
            return username
        except KeyError:
            return False

    def auth(self):
        """Return ``True`` if the current user may access ``request.path``."""
        path = self.request.path
        userType = self.identity()
        if not userType:
            return False
        # BUG FIX: the original indexed self.permission directly, raising
        # KeyError for any user type without registered patterns (always,
        # since the dict starts empty).  An unconfigured type now simply
        # gets access denied.
        patterns = self.permission.get(userType, [])
        for pattern in patterns:
            if re.match(pattern, path) is not None:
                return True
        return False

    def identity(self):
        """Return the current user's type ("visitor" when not logged in)."""
        try:
            userType = self.request.session["user"]["type"]
        except KeyError:
            userType = "visitor"
        # Stored type "admin" is exposed as the permission key "adminUser".
        if userType == "admin":
            userType = "adminUser"
        return userType
| [
"hackerlinx@outlook.com"
] | hackerlinx@outlook.com |
2f1c51ce639b56e0728b1bdf8fd01f2ac2dbd4a2 | 219390e0e7f07209660c89e209f90739ce4a96c8 | /fixture/contact.py | a378af460171953b233f4de99a7a577e1cca8a91 | [
"Apache-2.0"
] | permissive | bilimus/python_tests | 70d79014e07553a3ce324a30809193127c622028 | bfba209bf6f320ecef92eb6543962846f22fec8e | refs/heads/master | 2020-03-09T08:05:31.392287 | 2018-06-06T11:48:34 | 2018-06-06T11:48:34 | 128,681,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,168 | py | from model.contact import Contact
import re
import time
class ContactHelper:
    """Page-object helper for the addressbook app's contact pages.

    Wraps the Selenium WebDriver held by ``app`` and caches the contact
    list in ``contact_cache`` (invalidated by every mutating operation).
    """

    def __init__(self, app):
        self.app = app

    def add(self, contact):
        """Create *contact* via the "add new" form and return to the list."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # 21st input of the form is the "Enter" submit button.
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.contact_cache = None
        self.open_contacts_page()

    def get_first_contact_id(self):
        """Return the id of the first contact row on the home page."""
        self.open_contacts_page()
        wd = self.app.wd
        return wd.find_element_by_css_selector('input[name="selected[]"]').get_attribute('value')

    def delete_first_contact_from_group(self):
        """Filter by the first real group (3rd option) and remove the first
        contact shown from it.  Sleeps let the page re-render between steps."""
        wd = self.app.wd
        self.open_contacts_page()
        group_list = wd.find_elements_by_css_selector('select[name="group"] option')
        group_list[2].click()
        time.sleep(2)
        self.checked_first_contact(wd)
        time.sleep(2)
        wd.find_element_by_css_selector('input[name="remove"]').click()
        self.open_contacts_page()
        time.sleep(2)

    def add_first_contact_to_first_group(self):
        """Tick the first contact and add it to the first group option."""
        wd = self.app.wd
        self.open_contacts_page()
        self.checked_first_contact(wd)
        self.checked_contact_added_to_first_group(wd)
        self.open_contacts_page()

    def add_contact_to_group(self,contact_id, group_id):
        """Add the contact with *contact_id* to the group with *group_id*."""
        wd = self.app.wd
        self.app.open_home_page()
        self.select_contact_by_id(contact_id)
        box = wd.find_element_by_css_selector("select[name='to_group']")
        box.find_element_by_css_selector("option[value='%s']" % group_id).click()
        wd.find_element_by_css_selector("input[name='add']").click()
        self.app.open_home_page()

    def delete_contact_from_group(self, contact_id, group_id):
        """Remove the contact with *contact_id* from group *group_id*."""
        wd = self.app.wd
        self.app.open_home_page()
        # Filter the list by the group first, then select and remove.
        box = wd.find_element_by_css_selector("select[name='group']")
        box.find_element_by_css_selector("option[value='%s']" % group_id).click()
        self.select_contact_by_id(contact_id)
        wd.find_element_by_css_selector("input[name='remove']").click()
        self.app.open_home_page()

    def checked_contact_added_to_first_group(self, wd):
        # Pick the first "to_group" option and confirm with the add button.
        select_items = wd.find_elements_by_css_selector('select[name="to_group"] option')
        select_items[0].click()
        wd.find_element_by_css_selector('input[name="add"]').click()

    def checked_first_contact(self, wd):
        # Tick the checkbox of the first contact row.
        wd.find_element_by_css_selector('input[name="selected[]"]').click()

    def change_field_value(self, field_name, text):
        """Overwrite form field *field_name* with *text*; no-op when None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_contact_form(self, contact):
        """Populate every supported field of the contact form from *contact*
        (fields whose model attribute is None are left untouched)."""
        wd = self.app.wd
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.homephone)
        self.change_field_value("mobile", contact.mobilephone)
        self.change_field_value("work", contact.workphone)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email", contact.email_1)
        self.change_field_value("email2", contact.email_2)
        self.change_field_value("email3", contact.email_3)
        self.change_field_value("homepage", contact.homepage)
        # Birthday/anniversary day+month <select> handling is intentionally
        # disabled; only the free-text year fields are filled.
        self.change_field_value("byear", contact.byear)
        self.change_field_value("ayear", contact.ayear)
        self.change_field_value("address2", contact.city)
        self.change_field_value("phone2", contact.secondaryphone)
        self.change_field_value("notes", contact.notes_here)

    def modify(self,contact):
        """Modify the first contact in the list."""
        self.modify_contacts_by_index(0, contact)

    def modify_contacts_by_index(self, index, contact):
        """Edit the contact at *index* (0-based) with *contact*'s data."""
        wd = self.app.wd
        self.open_contacts_page()
        self.select_contact_by_index_for_editing(index)
        self.fill_contact_form(contact)
        wd.find_element_by_name("update").click()
        self.contact_cache = None

    def modify_contacts_by_id(self, id, contact):
        """Edit the contact with database id *id* with *contact*'s data."""
        wd = self.app.wd
        self.open_contacts_page()
        self.select_contact_by_id_for_editing(id)
        self.fill_contact_form(contact)
        wd.find_element_by_name("update").click()
        self.contact_cache = None

    def select_contact_by_index_for_editing(self, index):
        # Click the pencil/edit link of the row at *index*.
        wd = self.app.wd
        wd.find_elements_by_css_selector('#maintable a[href^="edit.php"]')[index].click()

    def select_contact_by_id_for_editing(self, id):
        # Click the edit link whose href carries the given contact id.
        wd = self.app.wd
        wd.find_element_by_css_selector('#maintable a[href^="edit.php?id=%s"]' % id).click()

    def delete_first_contact(self):
        """Delete the first contact in the list."""
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the contact at *index*, accepting the confirmation alert."""
        wd = self.app.wd
        self.open_contacts_page()
        self.select_contact_by_index(index)
        wd.find_element_by_css_selector('input[value="Delete"]').click()
        # NOTE(review): switch_to_alert() is deprecated Selenium API;
        # switch_to.alert is the modern spelling.
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def delete_contact_by_id(self, id):
        """Delete the contact with database id *id*."""
        wd = self.app.wd
        self.open_contacts_page()
        self.select_contact_by_id(id)
        wd.find_element_by_css_selector('input[value="Delete"]').click()
        wd.switch_to_alert().accept()
        self.contact_cache = None
        self.open_contacts_page()

    def select_contact_by_id(self, id):
        # Tick the checkbox whose value is the contact id.
        wd = self.app.wd
        wd.find_element_by_css_selector("#maintable input[value='%s']" % id).click()

    def select_contact_by_index(self, index):
        # Tick the checkbox of the row at *index*.
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def open_contacts_page(self):
        """Navigate to the contact list unless it is already displayed."""
        wd = self.app.wd
        # Already on the home page when the URL matches AND the Delete
        # button is present (i.e. the list view, not a form).
        if not(wd.current_url == "http://localhost/addressbook/" and len(wd.find_elements_by_css_selector('input[value="Delete"]'))):
            wd.find_element_by_xpath('//div/div[3]/ul/li[1]/a').click()

    def count(self):
        """Return the number of contacts shown on the list page."""
        wd = self.app.wd
        self.open_contacts_page()
        return len(wd.find_elements_by_name("selected[]"))

    def check_presence(self, contact):
        """Ensure at least one contact exists, creating *contact* if empty."""
        # NOTE(review): wd is assigned but unused here.
        wd = self.app.wd
        if self.count() == 0:
            self.add(contact)

    # Cached result of get_contact_list(); None means "stale, re-scrape".
    contact_cache = None

    def get_contact_list(self):
        """Scrape the contact rows from the home page into Contact objects.

        Results are cached until a mutating operation resets the cache;
        a *copy* of the cached list is returned.
        """
        if self.contact_cache is None:
            wd = self.app.wd
            self.open_contacts_page()
            self.contact_cache = []
            for element in wd.find_elements_by_name('entry'):
                # NOTE(review): `id` shadows the builtin; kept as-is.
                id = element.find_element_by_css_selector('input[name="selected[]"]').get_attribute('value')
                cells = element.find_elements_by_css_selector('td')
                # Column layout: [checkbox, lastname, firstname, address,
                # e-mails, phones, ...]
                firstname = cells[2].text
                lastname = cells[1].text
                address = cells[3].text
                all_e_mails = cells[4].text
                all_phones = cells[5].text
                self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,\
                                                  all_phones_from_home_page = all_phones, address=address,
                                                  all_e_mails_from_home_page = all_e_mails))
        return list(self.contact_cache)

    def open_contact_to_edit_by_index(self, index):
        """Open the edit form of the row at *index* (8th cell = edit icon)."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()

    def open_contact_view_by_index(self, index):
        """Open the details view of the row at *index* (7th cell = view icon)."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def get_contact_info_from_edit_page(self, index):
        """Read the full contact record from the edit form at *index*."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        homephone = wd.find_element_by_name("home").get_attribute("value")
        workphone = wd.find_element_by_name("work").get_attribute("value")
        mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
        secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        email_1 = wd.find_element_by_name("email").get_attribute("value")
        email_2 = wd.find_element_by_name("email2").get_attribute("value")
        email_3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id, homephone=homephone, workphone=workphone,
                       mobilephone=mobilephone, secondaryphone=secondaryphone, address=address,
                       email_1 = email_1, email_2 = email_2, email_3 = email_3 )

    def get_contact_from_view_page(self, index):
        """Parse the phone numbers out of the read-only details view.

        The view renders lines like "H: 123"/"M: ..."/"W: ..."/"P: ...".
        """
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id('content').text
        homephone = re.search("H: (.*)", text).group(1)
        mobilephone = re.search("M: (.*)", text).group(1)
        workphone = re.search("W: (.*)", text).group(1)
        secondaryphone = re.search("P: (.*)", text).group(1)
        return Contact(homephone=homephone, mobilephone=mobilephone,\
                       workphone=workphone, secondaryphone=secondaryphone)

    def compare_lists(self, new_contacts, old_contacts):
        """Align two contact lists that may differ by one element at either
        end (after an add/delete), returning the comparable sub-lists."""
        if old_contacts[0].id == new_contacts[-1].id:
            old_list = old_contacts[1:]
            new_list = new_contacts[:-1]
        elif old_contacts[0].id == new_contacts[0].id:
            old_list = old_contacts[1:]
            new_list = new_contacts[1:]
        else:
            old_list = old_contacts
            new_list = new_contacts
        return (old_list, new_list)
"bilimus@gmail.com"
] | bilimus@gmail.com |
c57c65b8c8dd6a21d3e7f0f2990567ca94e25f38 | c737f03bce0e52f1beb3fe2f416c802f324f1596 | /auto/models.py | ed9e543ef9a9a51fbb2a06e19b7d430f41573894 | [] | no_license | zouguohui/automation | 4e7a567c162aa9a282454aa32c8a56771c5be50b | 7b51c9ba63d5fa7c161da4e76786cf857d519418 | refs/heads/master | 2020-04-16T13:24:07.233054 | 2019-01-27T08:25:12 | 2019-01-27T08:25:12 | 165,625,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | from django.db import models
# Create your models here.
class HostInfo(models.Model):
    """A managed host plus the command to execute on it."""
    ip = models.CharField(max_length=255)
    user = models.CharField(max_length=255)
    #password = models.CharField(max_length=255)
    server_name = models.CharField(max_length=255)
    # TCP port of the monitored service; HTTP by default.
    port = models.IntegerField(default=80)
    cmd = models.CharField(max_length=255)
    host_date = models.DateTimeField()
    # Soft-delete flag: rows are marked deleted instead of being removed.
    isDelete = models.BooleanField(default=False)
    def __str__(self):
        return "%s %s" %(self.ip,self.server_name)
class StatusInfo(models.Model):
    """A status sample recorded for a host:port at a point in time."""
    ip = models.CharField(max_length=255)
    port = models.IntegerField(default=80)
    status = models.CharField(max_length=255)
    status_date = models.DateTimeField()
    # Soft-delete flag, mirroring HostInfo.
    isDelete = models.BooleanField(default=False)
    def __str__(self):
        return "%s %s" %(self.ip, self.status)
"guohui.zou@atkj6666.com"
] | guohui.zou@atkj6666.com |
6482e7b55ecab00bdeb4a9a9f5e47b26c85d2c53 | 2279ff1af474557a961d668fdd255130102c0eec | /house/apps/hplogreg/migrations/0001_initial.py | 849e83ec47148ac910b6c0e1eefd948ae41da025 | [] | no_license | sharpree89/wizards_duel | 6fade5f156ebc3ce0afbf4e39f91cca7a71aefbf | f809b9590d0ec671c053b0435397ce9dffeadac1 | refs/heads/master | 2020-12-03T07:45:26.953943 | 2016-08-26T20:06:32 | 2016-08-26T20:06:32 | 66,574,265 | 1 | 2 | null | 2016-08-25T17:40:52 | 2016-08-25T16:19:00 | Python | UTF-8 | Python | false | false | 814 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-25 02:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the hplogreg app: creates the User table.

    NOTE: auto-generated by Django — do not hand-edit; create a new
    migration for any schema change.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('email', models.EmailField(max_length=254)),
                # NOTE(review): stores a (presumably hashed) password string.
                ('password', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"cindyngien@gmail.com"
] | cindyngien@gmail.com |
043e9fa6f520efc3819ea417696518a68ba03ca1 | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py | 80a5ed6a2b31ff06816ce74d04570d82517229a0 | [
"MIT"
] | permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 8,061 | py | # model settings
# Cascade Mask R-CNN with a ResNeXt-101 64x4d backbone and FPN neck.
model = dict(
    type='CascadeRCNN',
    num_stages=3,
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    # Three cascade stages with progressively tighter box-regression stds.
    # num_classes=81 is 80 COCO classes + 1 background.
    bbox_head=[
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=81,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=81,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.05, 0.05, 0.1, 0.1],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=81,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.033, 0.033, 0.067, 0.067],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
    ],
    mask_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    mask_head=dict(
        type='FCNMaskHead',
        num_convs=4,
        in_channels=256,
        conv_out_channels=256,
        num_classes=81,
        loss_mask=dict(
            type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    # One R-CNN config per cascade stage; the positive IoU threshold is
    # raised stage by stage (0.5 -> 0.6 -> 0.7).
    rcnn=[
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.6,
                neg_iou_thr=0.6,
                min_pos_iou=0.6,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.7,
                min_pos_iou=0.7,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)
    ],
    # Later cascade stages contribute progressively smaller loss weights.
    stage_loss_weights=[1, 0.5, 0.25])
# Inference-time settings: fewer proposals than training, score/NMS
# thresholds for the final detections.
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# ImageNet mean/std in RGB order.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['bbox', 'segm'])
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: step decay with a linear warmup over the first 500 iters.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_mask_rcnn_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
"connor@tju.edu.cn"
] | connor@tju.edu.cn |
209758b8f513f73a0c45dd72c782f42519515026 | 3244516a8b3cc79c92bea3c79465365478d748f1 | /practice/counting_bag/grab_bag.py | 154410fc69c03c502bf63f00e59a9210e7572773 | [] | no_license | robinrob/python | bd0043a1cb97923db6e4414fa0158569ebc27ee8 | ba598e388bfbbbf642fe73b4da79df2dd2c1c2c8 | refs/heads/master | 2021-06-14T16:45:14.879893 | 2021-01-24T06:30:05 | 2021-01-24T06:30:05 | 27,962,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from generator import Generator
from my_exceptions import EmptyBagException
class GrabBag:
def __init__(self, items):
self.items = items
def grab_item(self):
if (len(self.items) > 0):
index = Generator().rand_num(len(self.items))
item = self.items[index]
self.items.remove(item)
return item
else:
raise EmptyBagException | [
"msl@mercury.local"
] | msl@mercury.local |
38bb1a8286957994cdba236a62cb1d914d147f4b | ff8f51541efcc886b6323be17611e52f847ba979 | /Advanced IP.py | f51713484f19fef17f276eef8fa094fba52fe820 | [] | no_license | MichaelYadidya/Python-Begginer-Projects | 58bd59177f78304b247164507fc05232e84b14ae | 24c75109ff5f76022b6b4a78351f96f95e050024 | refs/heads/master | 2021-04-28T11:09:35.857059 | 2019-06-27T21:49:14 | 2019-06-27T21:49:14 | 122,086,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | from threading import *
from ipwhois import IPWhois
from pprint import pprint
import socket
class ip_lookup():
def __init__(self):
print('Welcome to IP Scanner: ')
target = input('Enter the IP which you want to scan: ')
ip = IPWhois(target)
resolve = IPWhois.lookup_whois(ip)
pprint(resolve)
def ip_port():
    """Scan a user-supplied TCP port range on a target host, one thread per port.

    Prompts for the target IP and an inclusive [from_port, end_port] range,
    probes each port concurrently, and prints the list of open ports.
    Results are accumulated in ``open_port`` / ``closed_port``; list.append
    is thread-safe under CPython, so no extra locking is needed here.
    """
    print('Welcome to IP port Scanner: ')
    target = input('Enter the IP which you want to scan: ')
    # Bug fix: the original kept these as strings and never used them —
    # the requested range was never scanned at all.
    from_port = int(input('Enter the port from which you want to start the scan: '))
    end_port = int(input('Enter the port at which you want to stop the scan at: '))
    open_port = []
    closed_port = []
    threads = []

    def scan(port):
        # connect_ex returns 0 on a successful TCP connect (port open).
        s = socket.socket()
        result = s.connect_ex((target, port))
        # Bug fix: str() conversion — the original concatenated str + int.
        print('Working on Port: ' + str(port))
        if result == 0:
            open_port.append(port)
        else:
            closed_port.append(port)
        s.close()

    # Bug fix: spawn one worker per port in the requested (inclusive) range;
    # the original iterated over (open_port, closed_port.append(1)) == ([], None).
    for port in range(from_port, end_port + 1):
        t = Thread(target=scan, args=(port,))
        threads.append(t)
        t.start()
    [x.join() for x in threads]
    print(open_port)
def main():
    """Entry point: show the tool menu and dispatch to the chosen tool.

    Option '1' runs the whois lookup, '2' runs the port scanner; anything
    else prints an error and returns None.
    """
    print('Welcome to All-in-One Ip tool')
    # Bug fix: the original asked for an option without ever listing them.
    print('1 - IP whois lookup')
    print('2 - IP port scan')
    user_input = (input('Enter the Desired Option: '))
    if user_input == '1':
        return ip_lookup()
    elif user_input == '2':
        return ip_port()
    # Bug fix: invalid input used to fall through silently.
    print('Invalid option, please enter 1 or 2.')
    return
main()
| [
"36267282+MichaelYadidya@users.noreply.github.com"
] | 36267282+MichaelYadidya@users.noreply.github.com |
dba1a03904559ee7acc59f6c910257a5156bf9d0 | 52c4444b7a8e1a313d847ba5f0474f5a429be4bd | /celescope/fusion/multi_fusion.py | b82cd4bece7083905eb8b0e0063018ff9117df0c | [
"MIT"
] | permissive | JING-XINXING/CeleScope | 98d0d018f3689dbe355679c1b8c06f8d796c296d | d401e01bdf15c8eeb71bddede484ed8d4f189dcd | refs/heads/master | 2023-05-07T11:47:04.133216 | 2021-05-28T10:14:53 | 2021-05-28T10:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | from celescope.fusion.__init__ import __ASSAY__
from celescope.tools.multi import Multi
class Multi_fusion(Multi):
def star_fusion(self, sample):
step = 'star_fusion'
cmd_line = self.get_cmd_line(step, sample)
fq = f'{self.outdir_dic[sample]["cutadapt"]}/{sample}_clean_2.fq{self.fq_suffix}'
cmd = (
f'{cmd_line} '
f'--fq {fq} '
)
self.process_cmd(cmd, step, sample, m=self.args.starMem, x=self.args.thread)
def count_fusion(self, sample):
step = 'count_fusion'
cmd_line = self.get_cmd_line(step, sample)
bam = f'{self.outdir_dic[sample]["star_fusion"]}/{sample}_Aligned.sortedByCoord.out.bam'
cmd = (
f'{cmd_line} '
f'--bam {bam} '
f'--match_dir {self.col4_dict[sample]} '
)
self.process_cmd(cmd, step, sample, m=15, x=1)
def main():
    """Instantiate the fusion multi-sample runner and execute it."""
    Multi_fusion(__ASSAY__).run()
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| [
"zhouyiqi@singleronbio.com"
] | zhouyiqi@singleronbio.com |
ad0f463e50fc0f5b7824e19aca588087c8539085 | 80b7f2a10506f70477d8720e229d7530da2eff5d | /uhd_restpy/testplatform/sessions/ixnetwork/topology/bfdv6interface_b9a91920db1b70c8c6410d2de0b438d3.py | 949dbb8037a1e6340226858d1613dfa36d82a596 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 52,891 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class Bfdv6Interface(Base):
"""BFDv6 Interface level Configuration
The Bfdv6Interface class encapsulates a list of bfdv6Interface resources that are managed by the user.
A list of resources can be retrieved from the server using the Bfdv6Interface.find() method.
The list can be managed by using the Bfdv6Interface.add() and Bfdv6Interface.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bfdv6Interface'
_SDM_ATT_MAP = {
'Active': 'active',
'AggregateBfdSession': 'aggregateBfdSession',
'ConfigureEchoSourceIp': 'configureEchoSourceIp',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EchoRxInterval': 'echoRxInterval',
'EchoTimeOut': 'echoTimeOut',
'EchoTxInterval': 'echoTxInterval',
'EnableControlPlaneIndependent': 'enableControlPlaneIndependent',
'EnableDemandMode': 'enableDemandMode',
'Errors': 'errors',
'FlapTxIntervals': 'flapTxIntervals',
'IpDiffServ': 'ipDiffServ',
'LocalRouterId': 'localRouterId',
'MinRxInterval': 'minRxInterval',
'Multiplier': 'multiplier',
'Name': 'name',
'NoOfSessions': 'noOfSessions',
'PollInterval': 'pollInterval',
'SessionStatus': 'sessionStatus',
'SourceIp6': 'sourceIp6',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TimeoutMultiplier': 'timeoutMultiplier',
'TxInterval': 'txInterval',
'Vni': 'vni',
}
_SDM_ENUM_MAP = {
'status': ['configured', 'error', 'mixed', 'notStarted', 'started', 'starting', 'stopping'],
}
def __init__(self, parent, list_op=False):
super(Bfdv6Interface, self).__init__(parent, list_op)
@property
def Bfdv6Session(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv6session_0227b1efa1d435dd43ed809b84abf3ba.Bfdv6Session): An instance of the Bfdv6Session class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv6session_0227b1efa1d435dd43ed809b84abf3ba import Bfdv6Session
if len(self._object_properties) > 0:
if self._properties.get('Bfdv6Session', None) is not None:
return self._properties.get('Bfdv6Session')
return Bfdv6Session(self)._select()
@property
def LearnedInfo(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo
if len(self._object_properties) > 0:
if self._properties.get('LearnedInfo', None) is not None:
return self._properties.get('LearnedInfo')
return LearnedInfo(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AggregateBfdSession(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, all interfaces except on VNI 0 will be disabled and grayed-out.
"""
return self._get_attribute(self._SDM_ATT_MAP['AggregateBfdSession'])
@AggregateBfdSession.setter
def AggregateBfdSession(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['AggregateBfdSession'], value)
@property
def ConfigureEchoSourceIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Selecting this check box enables the ability to configure the source address IP of echo message
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureEchoSourceIp']))
@property
def ConnectedVia(self):
# type: () -> List[str]
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EchoRxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The minimum interval, in milliseconds, between received BFD Echo packets that this interface is capable of supporting. If this value is zero, the transmitting system does not support the receipt of BFD Echo packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EchoRxInterval']))
@property
def EchoTimeOut(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The interval, in milliseconds, that the interface waits for a response to the last Echo packet sent out
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EchoTimeOut']))
@property
def EchoTxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The minimum interval, in milliseconds, that the interface would like to use when transmitting BFD Echo packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EchoTxInterval']))
@property
def EnableControlPlaneIndependent(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This check box enables Control Plane Independent Mode. If set, the interface's BFD is implemented in the forwarding plane and can continue to function through disruptions in the control plane
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableControlPlaneIndependent']))
@property
def EnableDemandMode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This check box enables Demand Mode. In this mode, it is assumed the interface has an independent way of verifying it has connectivity to the other system. Once a BFD session is established, the systems stop sending BFD Control packets, except when either system feels the need to verify connectivity explicitly. In this case, a short sequence of BFD Control packets is sent
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableDemandMode']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def FlapTxIntervals(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The number of Tx packets sent from device after which session flaps for BFD. A value of zero means no flapping
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FlapTxIntervals']))
@property
def IpDiffServ(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP DiffServ/TOSByte (Dec)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpDiffServ']))
@property
def LocalRouterId(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The BFD Router ID value, in IPv4 format.
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalRouterId'])
@property
def MinRxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The minimum interval, in milliseconds, between received BFD Control packets that this interface is capable of supporting
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MinRxInterval']))
@property
def Multiplier(self):
# type: () -> int
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NoOfSessions(self):
# type: () -> int
"""
Returns
-------
- number: The number of configured BFD sessions
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfSessions'])
@NoOfSessions.setter
def NoOfSessions(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NoOfSessions'], value)
@property
def PollInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The interval, in milliseconds, between exchanges of Control Messages in Demand Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PollInterval']))
@property
def SessionStatus(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def SourceIp6(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If Configure Echo Source-IP is selected, the IPv6 source address of the Echo Message
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceIp6']))
@property
def StackedLayers(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
# type: () -> str
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The negotiated transmit interval, multiplied by this value, provides the detection time for the interface
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TimeoutMultiplier']))
@property
def TxInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The minimum interval, in milliseconds, that the interface would like to use when transmitting BFD Control packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TxInterval']))
@property
def Vni(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): Corresponding VXLAN Protocol VNI.
"""
return self._get_attribute(self._SDM_ATT_MAP['Vni'])
def update(self, AggregateBfdSession=None, ConnectedVia=None, Multiplier=None, Name=None, NoOfSessions=None, StackedLayers=None):
# type: (bool, List[str], int, str, int, List[str]) -> Bfdv6Interface
"""Updates bfdv6Interface resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- AggregateBfdSession (bool): If enabled, all interfaces except on VNI 0 will be disabled and grayed-out.
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfSessions (number): The number of configured BFD sessions
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, AggregateBfdSession=None, ConnectedVia=None, Multiplier=None, Name=None, NoOfSessions=None, StackedLayers=None):
# type: (bool, List[str], int, str, int, List[str]) -> Bfdv6Interface
"""Adds a new bfdv6Interface resource on the server and adds it to the container.
Args
----
- AggregateBfdSession (bool): If enabled, all interfaces except on VNI 0 will be disabled and grayed-out.
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfSessions (number): The number of configured BFD sessions
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved bfdv6Interface resources using find and the newly added bfdv6Interface resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bfdv6Interface resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, AggregateBfdSession=None, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, LocalRouterId=None, Multiplier=None, Name=None, NoOfSessions=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None, Vni=None):
"""Finds and retrieves bfdv6Interface resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bfdv6Interface resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bfdv6Interface resources from the server.
Args
----
- AggregateBfdSession (bool): If enabled, all interfaces except on VNI 0 will be disabled and grayed-out.
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LocalRouterId (list(str)): The BFD Router ID value, in IPv4 format.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfSessions (number): The number of configured BFD sessions
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
- Vni (list(number)): Corresponding VXLAN Protocol VNI.
Returns
-------
- self: This instance with matching bfdv6Interface resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bfdv6Interface data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bfdv6Interface resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Abort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def ClearLearnedInfo(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the clearLearnedInfo operation on the server.
Clear Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
clearLearnedInfo(async_operation=bool)
--------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
clearLearnedInfo(SessionIndices=list, async_operation=bool)
-----------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
clearLearnedInfo(SessionIndices=string, async_operation=bool)
-------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
clearLearnedInfo(Arg2=list, async_operation=bool)list
-----------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearLearnedInfo', payload=payload, response_object=None)
def DisableDemandMode(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the disableDemandMode operation on the server.
Disable Demand Mode
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
disableDemandMode(Arg2=list, Arg3=enum, async_operation=bool)list
-----------------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Arg3 (str(ospf | ospfv3 | bgp | ldp | rsvp | isis | pim | bfd)): Session used by Protocol
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
disableDemandMode(Arg2=enum, async_operation=bool)list
------------------------------------------------------
- Arg2 (str(ospf | ospfv3 | bgp | ldp | rsvp | isis | pim | bfd)): Session used by Protocol
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableDemandMode', payload=payload, response_object=None)
def EnableDemandMode(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the enableDemandMode operation on the server.
Enable Demand Mode
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
enableDemandMode(Arg2=list, Arg3=enum, async_operation=bool)list
----------------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Arg3 (str(ospf | ospfv3 | bgp | ldp | rsvp | isis | pim | bfd)): Session used by Protocol
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
enableDemandMode(Arg2=enum, async_operation=bool)list
-----------------------------------------------------
- Arg2 (str(ospf | ospfv3 | bgp | ldp | rsvp | isis | pim | bfd)): Session used by Protocol
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableDemandMode', payload=payload, response_object=None)
def GetLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getLearnedInfo operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (SessionIndices as a
    list, a range string such as '1-4;6', or a device-index list); keyword
    arguments (e.g. async_operation=bool) are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('getLearnedInfo', payload=payload, response_object=None)
def InitiatePoll(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the initiatePoll operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list
    and/or protocol enum); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('initiatePoll', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the restartDown operation on the server.

    Stop and start interfaces and sessions that are in Down state.
    Positional arguments are forwarded as Arg2..ArgN (SessionIndices as a
    list or range string); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('restartDown', payload=payload, response_object=None)
def ResumePDU(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the resumePDU operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list
    and/or protocol enum); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('resumePDU', payload=payload, response_object=None)
def SetAdminDown(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the setAdminDown operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list
    and/or protocol enum); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('setAdminDown', payload=payload, response_object=None)
def SetAdminUp(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the setAdminUp operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list
    and/or protocol enum); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('setAdminUp', payload=payload, response_object=None)
def SetDiagnosticState(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the setDiagnosticState operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list,
    protocol enum, and diagnostic-code enum such as
    'controlDetectionTimeExpired' | 'pathDown' | 'administrativelyDown');
    keyword arguments (e.g. async_operation=bool) are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('setDiagnosticState', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the start operation on the server.

    Start CPF control plane (equals to promote to negotiated state).
    Positional arguments are forwarded as Arg2..ArgN (SessionIndices as a
    list or range string); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the stop operation on the server.

    Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
    Positional arguments are forwarded as Arg2..ArgN (SessionIndices as a
    list or range string); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('stop', payload=payload, response_object=None)
def StopPDU(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the stopPDU operation on the server.

    Positional arguments are forwarded as Arg2..ArgN (device-index list
    and/or protocol enum); keyword arguments (e.g. async_operation=bool)
    are forwarded unchanged.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('stopPDU', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Active=None, ConfigureEchoSourceIp=None, EchoRxInterval=None, EchoTimeOut=None, EchoTxInterval=None, EnableControlPlaneIndependent=None, EnableDemandMode=None, FlapTxIntervals=None, IpDiffServ=None, MinRxInterval=None, PollInterval=None, SourceIp6=None, TimeoutMultiplier=None, TxInterval=None):
    """Return the bfdv6Interface device ids encapsulated by this object.

    Each parameter is an optional regex applied to the attribute of the same
    name; parameters left as None do not constrain the result. The full set
    of locals (including self) is handed to the NGPF device-id resolver.

    Returns
    -------
    - list(int): device ids that satisfy every regex supplied

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._get_ngpf_device_ids(locals())
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
af37b6262756949e497f4341d19b94746a7ed2d9 | a99a1bad0dde86da87f121af160f968c48999b0f | /evaluation/cifar10/train_cifar10.py | a4a31984d0433d6e199b4f0e1705b4c76a190bcc | [
"Apache-2.0"
] | permissive | Sandy4321/gdml | 7af582f2c5594db30b8b77a4cb481dadb56e26c8 | 932cab10b7091bbcfa29000e57b2e032ca85e37d | refs/heads/master | 2020-12-21T08:06:26.359057 | 2016-12-13T05:20:51 | 2016-12-13T05:20:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | import find_mxnet
import mxnet as mx
import argparse
import os, sys
import train_model
# Command-line configuration for CIFAR-10 training (network choice, data
# location, device placement, optimizer schedule, checkpointing, logging).
parser = argparse.ArgumentParser(description='train an image classifer on cifar10')
parser.add_argument('--network', type=str, default='inception-bn-28-small',
                    help = 'the cnn to use')
parser.add_argument('--data-dir', type=str, default='cifar10/',
                    help='the input data directory')
parser.add_argument('--gpus', type=str,
                    help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--num-examples', type=int, default=60000,
                    help='the number of training examples')
parser.add_argument('--batch-size', type=int, default=128,
                    help='the batch size')
parser.add_argument('--lr', type=float, default=.05,
                    help='the initial learning rate')
# lr-factor/lr-factor-epoch default to 1, i.e. a constant learning rate
parser.add_argument('--lr-factor', type=float, default=1,
                    help='times the lr with a factor for every lr-factor-epoch epoch')
parser.add_argument('--lr-factor-epoch', type=float, default=1,
                    help='the number of epoch to factor the lr, could be .5')
parser.add_argument('--model-prefix', type=str,
                    help='the prefix of the model to load')
parser.add_argument('--save-model-prefix', type=str,
                    help='the prefix of the model to save')
parser.add_argument('--num-epochs', type=int, default=20,
                    help='the number of training epochs')
parser.add_argument('--load-epoch', type=int,
                    help="load the model on an epoch using the model-prefix")
parser.add_argument('--kv-store', type=str, default='local',
                    help='the kvstore type')
parser.add_argument('--log-file', type=str, default=None,
                    help='file to write the logs in')
# NOTE(review): --log-dir reuses the --log-file help text; presumably it
# should say "directory to write the logs in" — confirm before changing.
parser.add_argument('--log-dir', type=str, default='.',
                    help='file to write the logs in')
args = parser.parse_args()
# download data if necessary
def _download(data_dir):
    """Fetch and unpack the CIFAR-10 record files into data_dir if absent.

    NOTE(review): relies on os.chdir + os.system shell commands (mkdir, wget,
    unzip, mv), so the process working directory changes during the call and
    is only restored on the happy path; shell failures are silently ignored.
    Assumes wget/unzip are on PATH — confirm for the deployment environment.
    """
    if not os.path.isdir(data_dir):
        os.system("mkdir " + data_dir)
    os.chdir(data_dir)
    # only download when either record file is missing
    if (not os.path.exists('train.rec')) or \
       (not os.path.exists('test.rec')) :
        os.system("wget http://data.dmlc.ml/mxnet/data/cifar10.zip")
        os.system("unzip -u cifar10.zip")
        os.system("mv cifar/* .; rm -rf cifar; rm cifar10.zip")
    os.chdir("..")
# network
# Dynamically load symbol_<network>.py and build its symbol for the
# 10 CIFAR-10 classes.
import importlib
net = importlib.import_module('symbol_' + args.network).get_symbol(10)
# data
def get_iterator(args, kv, data_shape=(3, 28, 28)):
    """Build the (train, val) ImageRecordIter pair for CIFAR-10.

    Downloads the record files first when data_dir is a local path. Training
    uses random crop/mirror augmentation; validation does not. Both iterators
    are sharded across kv-store workers via num_parts/part_index.
    """
    if '://' not in args.data_dir:
        _download(args.data_dir)
    shared = dict(
        mean_img=os.path.join(args.data_dir, "mean.bin"),
        data_shape=data_shape,
        batch_size=args.batch_size,
        num_parts=kv.num_workers,
        part_index=kv.rank,
    )
    train = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(args.data_dir, "train.rec"),
        rand_crop=True,
        rand_mirror=True,
        **shared)
    val = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(args.data_dir, "test.rec"),
        rand_crop=False,
        rand_mirror=False,
        **shared)
    return (train, val)
if __name__ == '__main__':
    # train
    # Delegate to the shared driver with this script's args, symbol, and
    # data-iterator factory.
    train_model.fit(args, net, get_iterator)
| [
"mihirsht@gmail.com"
] | mihirsht@gmail.com |
11f386ab1eccc8b471912de2ac1f732237f0c91e | 58515caff2c5aa12560323fe826cfb19b6b93e3e | /pi_monte.py | 319b228fa25645c6c9ca53fdd90f54ee2108e284 | [] | no_license | jyheo/prob_stats | cc5ed6c044619b2c320bf6cfe90ed08a7da748f4 | 2f015f92a51eff5648c6f9836594c70f955bf027 | refs/heads/master | 2021-01-19T09:46:03.630783 | 2013-11-25T15:07:17 | 2013-11-25T15:07:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import random
incircle = 0
total = 10000
for i in range(total):
x = random.random()
y = random.random()
if x * x + y * y < 1:
incircle += 1
print 4 * float(incircle) / total
| [
"jyheo0@gmail.com"
] | jyheo0@gmail.com |
a8125602d2773545ae4d9ba2db99105520f5492e | e184cb0b6298d0848fcbfea7398b37aa3334172e | /chunker.py | 7de8f5986f12ea0cb9ec3be969771310e9487ac0 | [] | no_license | crapzor/EDAN70-project | 301b8b42cb3d2762d873c7a50163e5ff641144a6 | 20a246d3d13bf9b279d2ed05145d3c1a35686a20 | refs/heads/master | 2020-04-16T20:20:09.807468 | 2019-01-15T18:21:35 | 2019-01-15T18:21:35 | 165,895,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,023 | py | """
Trains the model and predicts the chunk-tags. Saves result into files.
"""
from keras import utils
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Bidirectional
from keras.callbacks import ModelCheckpoint
from pathlib import Path
import numpy as np
import tensorflow as tf
# Constants
EMBEDDING_DIM = 100  # dimensionality of the GloVe vectors (glove.6B.100d)
GLOVE_LENGTH = 400000  # number of word vectors in the GloVe 6B file
# Fix the numpy RNG so the randomly initialized embedding rows are reproducible.
np.random.seed(1)
# dictionary of words from the glove.6b, one with
# the words as keys with their vector as values (e_idx)
# and the other with the words as keys their index
# as values (w_idx)
def create_embedding(file, e_idx, w_idx):
    """Populate e_idx (word -> float32 vector) and w_idx (word -> index) in place.

    Parses a GloVe text file (one "word v1 v2 ..." row per line). Indices are
    1-based because index 0 is reserved for sequence padding downstream.
    Uses a context manager so the file handle is closed even if parsing raises
    (the original leaked the handle on error).
    """
    with open(file) as f:
        for idx, line in enumerate(f, start=1):
            values = line.split()
            word = values[0]
            e_idx[word] = np.asarray(values[1:], dtype='float32')
            w_idx[word] = idx
# read sentences from file and return
def read_sentences(file):
    """Return the file's blank-line-separated sentence blocks as strings.

    Uses a context manager so the handle is closed deterministically (the
    original `open(file).read()` left closing to the garbage collector).
    """
    with open(file) as f:
        return f.read().strip().split('\n\n')
# take out word's form and chunk, save in a list
# of dictionaries, also take out the longest sentence
def split_rows(sentences):
    """Parse raw sentence blocks into {'form': [...], 'chunk': [...]} dicts.

    The form is the first column lowercased; the chunk tag is the last
    column of each row. Also returns the token count of the longest sentence.
    """
    parsed = []
    max_len = 0
    for block in sentences:
        forms = []
        chunks = []
        for row in block.split('\n'):
            columns = row.split()
            forms.append(columns[0].lower())
            chunks.append(columns[-1])
        parsed.append({'form': forms, 'chunk': chunks})
        max_len = max(max_len, len(forms))
    return parsed, max_len
# adds the words from the train file, that doesn't
# exist in GloVe, to the dictionary of words-indexes
def add_words_from_train_file(word_index, train_dictionary, LENGTH):
    """Extend word_index in place with training tokens missing from GloVe.

    New tokens receive consecutive indices after LENGTH; returns the last
    index assigned (or LENGTH when nothing new was found).
    """
    next_index = LENGTH
    for sentence in train_dictionary:
        for token in sentence['form']:
            if token not in word_index:
                next_index += 1
                word_index[token] = next_index
    return next_index
# create the embedding matrix: row 0 stays all-zeros (padding), rows 1..LENGTH
# hold the GloVe vectors, words only seen in the training data get random
# vectors, and the final row (LENGTH + 1) is a random vector for unknowns
def create_embedding_matrix(word_idx, embedding_index, LENGTH):
    em_matrix = np.zeros((LENGTH + 2, EMBEDDING_DIM))
    em_matrix[LENGTH + 1] = np.random.rand(1,100) * 2 - 1 # last row is random
    for word, i in word_idx.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            # known GloVe word: copy its pretrained vector
            em_matrix[i] = embedding_vector
        else:
            # training-only word: random vector in [-1, 1)
            # (the original comment claiming "all-zeros" here was wrong)
            em_matrix[i] = np.random.rand(1, 100) * 2 - 1
    return em_matrix
# pads each sentence with zeros so that they all are of
# equal lengths
def sentence_padder(sentences, word_idx, chunk_idx, length, LENGTH):
    """Left-pad each sentence with zeros to `length` and map tokens to indices.

    Tokens absent from word_idx are mapped to the "unknown" index LENGTH + 1;
    chunk tags are mapped through chunk_idx. Returns the (forms, chunks)
    matrices as numpy arrays of shape (num_sentences, length).
    """
    form_rows = []
    chunk_rows = []
    for sentence in sentences:
        pad = [0] * (length - len(sentence['form']))
        forms = list(pad)
        chunks = list(pad)
        for token, tag in zip(sentence['form'], sentence['chunk']):
            index = word_idx.get(token)
            forms.append(LENGTH + 1 if index is None else index)
            chunks.append(chunk_idx[tag])
        form_rows.append(forms)
        chunk_rows.append(chunks)
    return np.array(form_rows), np.array(chunk_rows)
# retrieves the different chunk-tags in the training data
def get_chunks(sentence_dictionary):
    """Map each distinct chunk tag, in first-seen order, to a 1-based index."""
    chunk_dict = {}
    for sentence in sentence_dictionary:
        for tag in sentence['chunk']:
            if tag not in chunk_dict:
                chunk_dict[tag] = len(chunk_dict) + 1
    return chunk_dict
# extracts the necessary data from the output
def extract_useful_data(raw_data, dictionary, longest):
    """Strip the left zero-padding from each predicted row.

    Keeps only the last len(sentence) entries of every row, i.e. the
    positions that correspond to real tokens.
    """
    trimmed = []
    for row, sentence in zip(raw_data, dictionary):
        token_count = len(sentence['chunk'])
        trimmed.append(row[longest - token_count:longest])
    return trimmed
# saves into file the predicted chunks
def save_to_file(file_name, sentences, chunk_list, predicted):
    """Write each input row with its predicted chunk tag appended.

    `predicted` holds 1-based indices into chunk_list, one per row of each
    sentence; sentences are written back verbatim with the tag added, and
    separated by blank lines. Uses a context manager so the output file is
    flushed and closed even when an exception occurs (the original leaked
    the handle on error).
    """
    with open(file_name, 'w') as f_out:
        for i, sentence in enumerate(sentences):
            for j, row in enumerate(sentence.splitlines()):
                f_out.write(row + ' ' + chunk_list[predicted[i][j] - 1] + '\n')
            f_out.write('\n')
if __name__ == '__main__':
    # Pipeline: load GloVe embeddings -> parse train/testa/testb corpora ->
    # build the padded index matrices -> train (or load) a BiLSTM tagger ->
    # predict chunk tags for both test sets and write them to files.
    embeddings_index = dict()
    word_index = dict()
    model_name = "english.model"
    # `train` controls whether the Embedding layer weights are trainable
    # (passed below as trainable=train), not whether training happens.
    train = False
    train_file = 'corpus/conv_eng.train'
    testa_file = 'corpus/conv_eng.testa'
    testb_file = 'corpus/conv_eng.testb'
    output_file_a = 'predicted_eng.testa'
    output_file_b = 'predicted_eng.testb'
    glove_file = 'glove.6B/glove.6B.100d.txt'
    # getting the embedding matrix
    create_embedding(glove_file, embeddings_index, word_index)
    # sentences, dictionary of sentences, and longest sentence of training data
    train_sentences = read_sentences(train_file)
    train_dictionary, longest_sentence_train = split_rows(train_sentences)
    # complement word_index with what's missing from train file
    WORD_INDEX_LENGTH = add_words_from_train_file(word_index, train_dictionary, GLOVE_LENGTH)
    embedding_matrix = create_embedding_matrix(word_index, embeddings_index, WORD_INDEX_LENGTH)
    # sentences, dictionary of sentences, and longest sentence of test A data
    testa_sentences = read_sentences(testa_file)
    testa_dictionary, longest_sentence_testa = split_rows(testa_sentences)
    # sentences, dictionary of sentences, and longest sentence of test B data
    testb_sentences = read_sentences(testb_file)
    testb_dictionary, longest_sentence_testb = split_rows(testb_sentences)
    # longest sentence in order to know how much to pad etc
    longest_sentence = max(longest_sentence_train, longest_sentence_testa, longest_sentence_testb)
    # dictionary of the chunks and their respective indices
    chunk_index = get_chunks(train_dictionary)
    # list of the different types of chunks
    chunk_list = list(chunk_index.keys())
    # padding the train sentences
    form_idx_train, chunk_idx_train = sentence_padder(train_dictionary, word_index, chunk_index, longest_sentence, WORD_INDEX_LENGTH)
    training_samples = form_idx_train.shape[0]
    # NOTE(review): indices_train is np.arange over all rows, so the
    # fancy-indexing below is an identity reshuffle (no permutation is drawn).
    indices_train = np.arange(form_idx_train.shape[0])
    forms_train = form_idx_train[indices_train]
    chunks_train = chunk_idx_train[indices_train]
    # one-hot encode the chunk-tags
    y_train = list()
    for i in chunks_train:
        y_train.append(utils.to_categorical(i, num_classes=10))
    x_train = forms_train[:training_samples]
    y_train = np.array(y_train)
    # if model already exists - get it
    # otherwise train it
    my_model = Path(model_name)
    if my_model.is_file():
        print("Loading model...")
        model = tf.keras.models.load_model(model_name)
    else:
        print("Training model...")
        # Embedding (masked, GloVe-initialized) -> 2x BiLSTM -> per-token
        # softmax over the 10 chunk classes (index 0 is the padding class).
        model = Sequential()
        model.add(Embedding(WORD_INDEX_LENGTH + 2, EMBEDDING_DIM,
                            mask_zero=True, weights=[embedding_matrix],
                            input_length=longest_sentence, trainable=train))
        model.add(Bidirectional(LSTM(units=EMBEDDING_DIM, dropout=0.5, return_sequences=True)))
        model.add(Bidirectional(LSTM(units=EMBEDDING_DIM, return_sequences=True)))
        model.add(Dense(units=10, activation='softmax'))
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['acc'])
        checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=1, save_best_only=True)
        model.fit(x_train, y_train,
                  epochs=20,
                  batch_size=32,
                  validation_split=0.1,
                  callbacks=[checkpointer])
        model.save(model_name)
    # NOTE(review): summary() presumably prints the table itself and returns
    # None, so this line also prints "None" — confirm against the Keras docs.
    print(model.summary())
    # test the test A data
    form_idx_testa, chunk_idx_testa = sentence_padder(testa_dictionary, word_index, chunk_index, longest_sentence, WORD_INDEX_LENGTH)
    indices_testa = np.arange(form_idx_testa.shape[0])
    forms_testa = form_idx_testa[indices_testa]
    chunks_testa = chunk_idx_testa[indices_testa]
    y_testa = list()
    for i in chunks_testa:
        y_testa.append(utils.to_categorical(i, num_classes=10))
    x_testa = form_idx_testa
    y_testa = np.array(y_testa)
    # predict the data
    raw_predicted_testa = model.predict_classes([x_testa])
    # extracts the necessary data from the raw data
    predicted_testa = extract_useful_data(raw_predicted_testa, testa_dictionary, longest_sentence)
    # save to file
    save_to_file(output_file_a, testa_sentences, chunk_list, predicted_testa)
    # test the test B data
    form_idx_testb, chunk_idx_testb = sentence_padder(testb_dictionary, word_index, chunk_index, longest_sentence, WORD_INDEX_LENGTH)
    indices_testb = np.arange(form_idx_testb.shape[0])
    forms_testb = form_idx_testb[indices_testb]
    chunks_testb = chunk_idx_testb[indices_testb]
    y_testb = list()
    for i in chunks_testb:
        y_testb.append(utils.to_categorical(i, num_classes=10))
    x_testb = form_idx_testb
    y_testb = np.array(y_testb)
    #predict the data
    raw_predicted_testb = model.predict_classes([x_testb])
    #extracts the necessary data from the raw data
    predicted_testb = extract_useful_data(raw_predicted_testb, testb_dictionary, longest_sentence)
    # save to file
    save_to_file(output_file_b, testb_sentences, chunk_list, predicted_testb)
| [
"noreply@github.com"
] | crapzor.noreply@github.com |
a0cd5154277dbc6e13d1a2a4d5494841cba8eb5c | fc8dd32012c8e508b7e03bb403116fc9a0847dcc | /tutorial/settings.py | 457b4d64341ad8030558ac79490cea340517b46c | [] | no_license | buicaochinh/dj_rest_framework_tutorial | ca2405ab235db03997053baa64a13204717d470f | f103c08bb225c1f425596718122f8d13a0f67098 | refs/heads/main | 2023-04-15T15:49:17.574693 | 2021-04-11T10:59:48 | 2021-04-11T10:59:48 | 356,842,887 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,404 | py | """
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#d!tgyyy*2d7hdaf&%lnxvt*f$qjq0ci)24%(#=ntbr1bg$5c&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party and project apps:
    'rest_framework',
    'snippets'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Django REST framework defaults: page-number pagination, 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}
| [
"buicaochinh0811@gmail.com"
] | buicaochinh0811@gmail.com |
12d04f147d69fa46fc883c3596071b6fb5d701df | a0a2bdd1853566e088371172ff6df8f0140db7b7 | /Make It Right.py | b0e6be746cd2d50d409b743bcfc3993a05189730 | [] | no_license | AliRn76/Make-It-Right | 486d8be86be1488f13535771707741c21ced162e | 348f34b6a3e52a6b43adee005f455101aca82e29 | refs/heads/master | 2020-08-06T05:35:02.568899 | 2019-10-11T21:36:21 | 2019-10-11T21:36:21 | 212,855,039 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | # Created By AliRn.ir
import os

# Recursively find .srt subtitle files and re-encode any that are not valid
# UTF-8, assuming they are legacy cp1256 (Windows Arabic).
#
# Bug fixes vs. the original:
#  * os.walk yields bare file names; the original renamed/opened them without
#    joining dirpath, which fails for any file outside the current directory.
#  * The original temporarily renamed files to .txt and, when a file was valid
#    UTF-8 but empty, left it stuck with the .txt extension. The rename was
#    never necessary, so it is removed entirely.
#  * A bare ``except`` swallowed every error; only UnicodeDecodeError marks a
#    file as needing conversion.
for dirpath, dirnames, files in os.walk('.'):
    print(f'Found directory: {dirpath}')
    for filename in files:
        if not filename.endswith('.srt'):
            continue
        src = os.path.join(dirpath, filename)
        try:
            with open(src, 'r', encoding='utf-8') as handle:
                handle.read()
            # Already valid UTF-8: nothing to do.
        except UnicodeDecodeError:
            # Legacy encoding: read as cp1256 and rewrite the file as UTF-8.
            with open(src, 'r', encoding='cp1256') as handle:
                text = handle.read()
            with open(src, 'w', encoding='utf-8') as handle:
                handle.write(text)
| [
"alirn76@yahoo.com"
] | alirn76@yahoo.com |
6d29547cb4d09567d559ba7b1c5990eb8fc31a92 | 60ee3b8256e7411f22de4f503fd593288c47fb80 | /game_function.py | 05ce0bced24b99532aa35023cb88dc2baf6fb485 | [] | no_license | loginchaoman/alien | fa7bbbdab201b6dc48f27ff6c67208c7e8dafe1c | 1184ca2c791204e0dc87a93b13f177e7631c9750 | refs/heads/master | 2020-06-28T21:24:02.743914 | 2019-08-03T07:18:12 | 2019-08-03T07:18:12 | 200,345,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,359 | py | import sys
import pygame
from bullet import Bullet
from alien import Alien
from time import sleep
def check_keydown_events(event,a_settings,screen,ship,bullets):
    """Respond to key presses: set movement flags, quit on 'q', fire on space."""
    if event.key==pygame.K_RIGHT:
        ship.moving_right = True  # start moving right
    if event.key==pygame.K_LEFT:
        ship.moving_left = True  # start moving left
    if event.key==pygame.K_UP:
        ship.moving_up = True
    if event.key==pygame.K_DOWN:
        ship.moving_down = True
    if event.key == pygame.K_q:
        sys.exit()
    elif event.key==pygame.K_SPACE:
        fire_bullet(a_settings,screen,ship,bullets)
def fire_bullet(a_settings,screen,ship,bullets):
    """Add one new bullet to the group unless the on-screen limit is reached."""
    if len(bullets) >= a_settings.bullets_allow:
        return
    bullets.add(Bullet(a_settings, screen, ship))
def check_keyup_events(event,ship):
    """Clear the ship movement flag that matches a released arrow key."""
    flag_by_key = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_LEFT: 'moving_left',
        pygame.K_UP: 'moving_up',
        pygame.K_DOWN: 'moving_down',
    }
    flag = flag_by_key.get(event.key)
    if flag is not None:
        setattr(ship, flag, False)
def check_events(a_settings,screen,stats,play_button,ship,aliens,bullets):
    """Drain the pygame event queue and dispatch each event to its handler."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event,a_settings,screen,ship,bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event,ship)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # get_pos returns the mouse cursor's (x, y) screen coordinates.
            mouse_x,mouse_y = pygame.mouse.get_pos()
            check_play_button(a_settings,screen,stats,play_button,ship,aliens,bullets,mouse_x,mouse_y)
def check_play_button(a_settings,screen,stats,play_button,ship,aliens,bullets,mouse_x,mouse_y):
    """Start a new game when the player clicks inside the Play button."""
    # collidepoint tests whether the click landed inside Play's rect.
    if play_button.rect.collidepoint(mouse_x,mouse_y):
        stats.reset_stats()
        stats.game_active=True
        # Empty the alien and bullet groups.
        aliens.empty()
        bullets.empty()
        # Create a new fleet and re-center the ship.
        create_fleet(a_settings,screen,ship,aliens)
        ship.center_ship()
def get_number_aliens_x(a_settings,alien_width):
    """Return how many aliens fit in one row, spaced one alien-width apart."""
    # Leave a margin of one alien width on each side of the screen.
    usable_width = a_settings.screen_width - 2 * alien_width
    # Each alien occupies its own width plus one alien width of spacing.
    return int(usable_width / (2 * alien_width))
def get_number_rows(a_settings,ship_height,alien_height):
    """Return how many alien rows fit between the top margin and the ship."""
    # Reserve three alien heights (top margin + gap above the ship) plus the
    # ship itself; each row then needs two alien heights (alien + spacing).
    reserved = 3 * alien_height + ship_height
    usable_height = a_settings.screen_height - reserved
    return int(usable_height / (2 * alien_height))
def create_alien(a_settings,screen,aliens,alien_number,row_number):
    """Create one alien and place it at grid slot (row_number, alien_number)."""
    new_alien = Alien(a_settings, screen)
    width = new_alien.rect.width
    height = new_alien.rect.height
    # One alien-width left margin, then two widths per column slot.
    new_alien.x = width * (1 + 2 * alien_number)
    new_alien.rect.x = new_alien.x
    # One alien-height top margin, then two heights per row slot.
    new_alien.rect.y = height * (1 + 2 * row_number)
    aliens.add(new_alien)
def create_fleet(a_settings,screen,ship,aliens):
    """Create a full fleet of aliens, spaced one alien-width/height apart."""
    # A throwaway alien supplies the sprite dimensions for the layout math.
    alien=Alien(a_settings,screen)
    number_aliens_x = get_number_aliens_x(a_settings,alien.rect.width)
    number_rows = get_number_rows(a_settings,ship.rect.height,alien.rect.height)
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            # Create an alien and place it in the current row/column slot.
            create_alien(a_settings,screen,aliens,alien_number,row_number)
def update_screen(a_settings,screen,stats,ship,aliens,bullets,play_button):
    """Redraw everything for the current frame and flip the display."""
    screen.fill(a_settings.bg_color)  # repaint the background first
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # Draw the Play button on top of everything while the game is inactive.
    if not stats.game_active:
        play_button.draw_button()
    pygame.display.flip()  # make the freshly drawn frame visible
def update_bullets(a_settings,screen,ship,aliens,bullets):
    """Update bullet positions and delete bullets that left the screen."""
    bullets.update()
    # Iterate over a copy: removing members from a group while iterating the
    # group itself is unsafe.
    for bullet in bullets.copy():
        if bullet.rect.bottom<=0:
            bullets.remove(bullet)
    check_bullet_alien_collisions(a_settings,screen,ship,aliens,bullets)
def check_bullet_alien_collisions(a_settings,screen,ship,aliens,bullets):
    """Handle bullet/alien hits and respawn the fleet once it is wiped out."""
    # groupcollide removes every colliding bullet and alien (True, True) and
    # returns a dict of bullet -> aliens hit (currently unused).
    collisions = pygame.sprite.groupcollide(bullets,aliens,True,True)
    if len(aliens)==0:
        bullets.empty()  # clear any bullets still in flight
        create_fleet(a_settings,screen,ship,aliens)
def check_fleet_edges(a_settings,aliens):
    """Reverse the fleet as soon as any alien touches a screen edge."""
    # any() short-circuits on the first alien at an edge, exactly like the
    # original loop-and-break.
    if any(invader.check_edges() for invader in aliens.sprites()):
        change_fleet_direction(a_settings, aliens)
def change_fleet_direction(a_settings,aliens):
    """Drop the whole fleet one step and reverse its horizontal direction."""
    drop = a_settings.fleet_drop_speed
    for invader in aliens.sprites():
        invader.rect.y += drop
    a_settings.fleet_direction *= -1
def check_aliens_bottom(a_settings,stats,screen,ship,aliens,bullets):
    """Treat an alien reaching the bottom edge the same as a ship hit."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # One alien landing is enough; react and stop scanning.
            ship_hit(a_settings,stats,screen,ship,aliens,bullets)
            break
def ship_hit(a_settings,stats,screen,ship,aliens,bullets):
    """Respond to the ship being hit: consume a life or end the game."""
    if stats.ships_left>0:
        stats.ships_left -= 1
        # Empty the alien and bullet groups.
        aliens.empty()
        bullets.empty()
        # Create a fresh fleet and re-center the ship.
        create_fleet(a_settings,screen,ship,aliens)
        ship.center_ship()
        # Brief pause so the player notices the hit before play resumes.
        sleep(0.5)
    else:
        stats.game_active=False
def update_aliens(a_settings,stats,screen,ship,aliens,bullets):
    """Advance the fleet, then check for ship collisions and landed aliens."""
    check_fleet_edges(a_settings,aliens)
    aliens.update()
    # spritecollideany returns the first alien touching the ship, or None.
    if pygame.sprite.spritecollideany(ship,aliens):
        ship_hit(a_settings,stats,screen,ship,aliens,bullets)
    check_aliens_bottom(a_settings,stats,screen,ship,aliens,bullets)
| [
"mc666@163.com"
] | mc666@163.com |
0d5bead907bc0b3631c7153f73c2c017b248310e | ff5f6f5f36038d94352e53cb12b7ac1a1fec6679 | /src/python/Chunked_Arduino_ADC_2.py | 5a6d1135acfc591f0df8d0c8ce567c6a1977712c | [] | no_license | rgill02/EE-137-Doppler-Radar | 8aea0a55a5407279f10cf5094a701f9a900b17e6 | c3d8531d6e529d86b59cc13173d25d2739d16461 | refs/heads/master | 2020-04-23T19:02:40.321173 | 2019-05-06T15:57:50 | 2019-05-06T15:57:50 | 171,388,815 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,312 | py | #Imports
import serial
import serial.tools.list_ports as list_ports
import threading
import queue
import numpy as np
import struct
################################################################################
class Chunked_Arduino_ADC:
    """
    Reads values from the arduino and writes them to a queue
    """
    def __init__(self, ts_us, chunk_size, record_qs, ser_port=None):
        """
        PURPOSE: creates a new Chunked_Arduino_ADC
        ARGS:
            ts_us (int): sampling period of arduino (microseconds)
            chunk_size (int): number of samples to expect in one chunk
            record_qs (list): queues to push chunks to
            ser_port (str): serial port to listen on, will try to find arduino
                if left as None
        RETURNS: new instance of an Chunked_Arduino_ADC
        NOTES:
        """
        #Save arguments
        self.ts_us = int(ts_us)
        self.chunk_size = int(chunk_size)
        self.record_qs = record_qs
        # Serial read timeout: 2.5x the expected duration of one full chunk.
        self.ser_timeout = self.chunk_size * self.ts_us / 1e6 * 2.5
        self.ser_port = ser_port
        #Setup record thread variables
        self.record_thread = None
        self.record_keep_going = threading.Event()
        self.record_keep_going.clear()
        #Status variables
        self.connected = False
        self.receiving_data = False
    ############################################################################
    def __del__(self):
        """
        PURPOSE: performs any necessary cleanup
        ARGS: none
        RETURNS: none
        NOTES:
        """
        self.stop()
    ############################################################################
    def start(self):
        """
        PURPOSE: starts the recording thread
        ARGS: none
        RETURNS: none
        NOTES: no-op if a recording thread is already running
        """
        if self.record_thread == None or not self.is_running():
            self.record_thread = threading.Thread(target = self.run)
            self.record_thread.start()
    ############################################################################
    def stop(self):
        """
        PURPOSE: stops the recording thread
        ARGS: none
        RETURNS: none
        NOTES: blocks until thread stops
        """
        if self.record_thread:
            self.record_keep_going.clear()
            self.record_thread.join()
            self.record_thread = None
    ############################################################################
    def is_running(self):
        """
        PURPOSE: checks if the recording thread is running
        ARGS: none
        RETURNS: True if running, False if stopped
        NOTES:
        """
        return self.record_keep_going.is_set()
    ############################################################################
    def get_status(self):
        """
        PURPOSE: gets the status of this thread
        ARGS: none
        RETURNS: dictionary of statuses (running / connected / receiving_data)
        NOTES:
        """
        status = {
            "running" : self.is_running(),
            "connected" : self.connected,
            "receiving_data" : self.receiving_data
        }
        return status
    ############################################################################
    def run(self):
        """
        PURPOSE: performs the recording
        ARGS: none
        RETURNS: none
        NOTES: calling 'start' runs this in a separate thread
        """
        #Indicate thread is running
        self.record_keep_going.set()
        sh = None
        try:
            #Run until told to stop
            while self.is_running():
                #Connect to arduino
                while self.is_running() and not self.connected:
                    try:
                        if self.ser_port:
                            ser_port = self.ser_port
                        else:
                            # No port given: scan for an Arduino Mega 2560 by
                            # its port description.
                            ser_port = None
                            ports = list_ports.comports()
                            for port in ports:
                                if port[1].find("Arduino Mega 2560") >= 0:
                                    ser_port = port[0]
                                    break
                        sh = serial.Serial(ser_port, 115200, timeout=self.ser_timeout)
                        if sh and not sh.isOpen():
                            sh.close()
                            sh = None
                            self.connected = False
                            self.receiving_data = False
                        else:
                            self.connected = True
                    except serial.serialutil.SerialException as e:
                        if sh:
                            sh.close()
                            sh = None
                        self.connected = False
                        self.receiving_data = False
                #We are now connected to the arduino so reset cur idx
                cur_idx = 0
                #Record from arduino
                while self.is_running() and self.connected:
                    try:
                        # Resynchronize on the two 0xFF bytes that delimit a
                        # chunk before reading the payload.
                        sync_count = 0
                        while sync_count < 2:
                            data = sh.read(1)
                            if len(data):
                                if data[0] == 255:
                                    sync_count += 1
                                else:
                                    sync_count = 0
                        # Payload: chunk_size little-endian uint16 ADC counts.
                        data = sh.read(self.chunk_size * 2)
                        sample_chunk = np.array(struct.unpack('<%dH' % self.chunk_size, data))
                        # Convert 10-bit ADC counts (0-1023) to volts (0-5 V).
                        to_put = sample_chunk / 1023 * 5
                        for record_q in self.record_qs:
                            record_q.put(to_put)
                        self.receiving_data = True
                    except (serial.serialutil.SerialException, struct.error) as e:
                        self.receiving_data = False
                        # NOTE(review): if the port still reports isOpen() after
                        # an error, we never reconnect and keep retrying reads
                        # on the same handle -- confirm this is intended.
                        if not sh.isOpen():
                            sh.close()
                            sh = None
                            self.connected = False
        except Exception as e:
            print("ERROR: 'recorder thread' got exception %s" % type(e))
            print(e)
            self.record_keep_going.clear()
        #Cleanup
        if sh:
            sh.close()
            sh = None
        self.connected = False
        self.receiving_data = False
    ############################################################################
################################################################################
if __name__ == "__main__":
    # Smoke test: sample at 200 us into 2500-sample chunks pushed to two
    # queues, printing the recorder status once a second until Ctrl-C.
    import time
    import matplotlib.pyplot as plt  # NOTE(review): imported but unused here
    record_q = queue.Queue()
    record_q2 = queue.Queue()
    recorder = Chunked_Arduino_ADC(200, 2500, [record_q, record_q2])
    recorder.start()
    try:
        while recorder.is_running():
            time.sleep(1)
            print(recorder.get_status())
    except KeyboardInterrupt as e:
        pass
    recorder.stop()
"ryan.gill@stresearch.com"
] | ryan.gill@stresearch.com |
4e76534011deaf7d7ef0ad602a1443d13a3e04a6 | ea5ae1de651cb9cd61ac1321b8c1ecd44c757ae1 | /TestDjango/asgi.py | 491bed8ef7cbe679f2361f5132c2aa76bc0cd6cd | [] | no_license | SansReves/Prueba03_PGY3121-007D_Ignacia_Mendez | 4126acb0633d811ade46cb7b8fcc85ab9d68a4b8 | 2a95bc3db8cc763121aec4939ea2e0c2d8b97a91 | refs/heads/main | 2023-05-31T09:20:54.803768 | 2021-07-12T04:25:14 | 2021-07-12T04:25:14 | 385,123,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for TestDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TestDjango.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | SansReves.noreply@github.com |
98fe3ca46edf6dcb82078c09ef1b4d521fdde56e | fe9fe6beee2ef505b76347c98ca5ea7fa0b98620 | /frontend/urls.py | 962624667a7b21a36d7558a4ea31f872ed1c1d36 | [] | no_license | sjadlakha/music_controller | 2860e03c32e2bb1f201e32494dea0c3c92531e7c | 79623313fb8dfa1d83afaef4a60109083ae52b3d | refs/heads/main | 2023-08-11T03:57:51.001424 | 2021-09-19T18:12:42 | 2021-09-19T18:12:42 | 408,114,287 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.urls import path
from .views import index
# Route the app root to the index view.
urlpatterns = [
    path('', index)
]
| [
"sjadlakha@gmail.com"
] | sjadlakha@gmail.com |
85f6ba445f50885725cedabb538b5da28dee1645 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/opcodes/cases/test_pexec_253.py | 18a5dd1216b31de8c9699b42ce6666fe9b9aae87 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 820 | py | from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestpexec_253(TestCase):
    """Interpreter regression test for the pexec.tz Michelson contract."""
    def setUp(self):
        self.maxDiff = None
        self.i = Interpreter(debug=True)
    def test_opcode_pexec_253(self):
        # Load the contract into the REPL interpreter.
        res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/pexec.tz")}"')
        self.assertTrue(res['success'])
        # Run with parameter 38 and initial storage 14.
        res = self.i.execute('RUN 38 14')
        self.assertTrue(res['success'])
        # Expected resulting storage is 52 (parsed into the storage's type).
        exp_val_expr = michelson_to_micheline('52')
        exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
        self.assertEqual(exp_val, res['result']['storage']._val)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
c3e433a08931481532b881b2ba141befb721f045 | 0aa43818da2cc5a13dafcd0038ce54dc4daea113 | /dongtai_agent_python/__init__.py | 0d416b267ec089c1f36b3ce7205ebfdcbd802244 | [] | no_license | pohjack/DongTai-agent-python | a09e0fb2bc5675a54b683e2117089926eff2d2ba | ee523c7bde37b8f99f4e473f69e0ef764cb6df74 | refs/heads/master | 2023-08-28T04:18:01.680348 | 2021-10-28T09:56:03 | 2021-10-28T10:16:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | import dongtai_agent_python.global_var as dt_global_var
# Import-time side effect: initialise the agent's global-variable store and
# load its configuration as soon as the package is imported.
dt_global_var._init()
dt_global_var.get_config_data()
"sjh@jindong-auto.com"
] | sjh@jindong-auto.com |
9ea9b60f4774b5e01662073b55093fbd8e90d7d2 | ac78aff83963680770117adf455c6a1192be92b3 | /aite/wsgi.py | 8193f808c3ac9c19125713857e3f53383573c7e2 | [] | no_license | cash2one/aite | 9bf05642d20900dfa6f9cbbf7ef2af3892c92c84 | 50ccf0a9f87a84337af5c43a7ebf974a45671523 | refs/heads/master | 2021-01-19T12:35:03.586948 | 2016-12-16T03:59:51 | 2016-12-16T03:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | """
WSGI config for aite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aite.settings")
application = get_wsgi_application()
| [
"admin@wuaics.cn"
] | admin@wuaics.cn |
9650275dce5dc387097e4086e5ba7b12491f4b90 | fb16a81aa644ebbec72acf80e1077133a0031312 | /tests/test_01_no_fixtures.py | 3bb6e1ada2f1461c104dc5b92859b6fc929de6b2 | [] | no_license | obestwalter/pytest-fixtures-introduction | 567d58908e2181d23f2a944fab4f80e744f4bd95 | acdda53e36c17b43ee9acadd891c3124d6977f72 | refs/heads/master | 2023-06-30T23:21:59.990792 | 2021-07-27T15:51:17 | 2021-07-27T15:51:17 | 282,906,163 | 18 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | """Classic tests - no fixtures"""
import json
import tempfile
from pathlib import Path
from my_package import my_code
def test_read_my_settings_no_fixtures():
    """Make sure that settings are read correctly."""
    # Swap the module-level settings path for a temp file, restoring it in
    # ``finally`` so a failure cannot leak state into other tests.
    old_path = my_code.MY_SETTINGS_PATH
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            my_code.MY_SETTINGS_PATH = Path(tmpdir) / '.my_fake_settings'
            fake_settings = {'name': 'Paul'}
            my_code.MY_SETTINGS_PATH.write_text(json.dumps(fake_settings))
            assert my_code.read_my_settings() == fake_settings
    finally:
        my_code.MY_SETTINGS_PATH = old_path
def test_write_my_settings_no_fixtures():
    """Make sure that settings are written correctly."""
    # Swap the module-level settings path for a temp file, restoring it in
    # ``finally`` so a failure cannot leak state into other tests.
    old_path = my_code.MY_SETTINGS_PATH
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            my_code.MY_SETTINGS_PATH = Path(tmpdir) / '.my_fake_settings'
            fake_settings = {'name': 'Oliver'}
            my_code.write_my_settings(fake_settings)
            retrieved_settings = my_code.MY_SETTINGS_PATH.read_text()
            # Parse with json.loads instead of eval: eval executes arbitrary
            # code read from disk and only parses JSON by accident (it would
            # already break on true/false/null literals).
            assert json.loads(retrieved_settings) == fake_settings
    finally:
        my_code.MY_SETTINGS_PATH = old_path
| [
"oliver.bestwalter@avira.com"
] | oliver.bestwalter@avira.com |
1c0d8445d02e248cb524001ec7a75923562b2b5f | 64806edf01c1dd6ec6292df4001f564532fcbd66 | /spark_streaming.py | 3a17abb8eaa63a8cb04854e05498bc2b93e4b502 | [] | no_license | JaeMyoungKIM/spark_kafka_es | cd90f9979875d08fb68db37b5d55047de179cc3c | 4c255359ab8cd50467c0623dac68ee8e6a450a6d | refs/heads/master | 2020-04-15T20:50:53.623496 | 2019-01-10T07:47:30 | 2019-01-10T07:47:30 | 165,009,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | # -*- coding: utf-8 -*-
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
import sys
# Annotates each line arriving on the stream with its character count.
def just_print(line):
    """Return *line* annotated with its character count, e.g. 'abc( 3 ) '.

    (The stale trailing comment claiming a "_hehe" suffix described an old
    version of this function and has been removed.)
    """
    return line + "( " + str(len(line)) + " ) "
# Create the SparkContext backing the streaming job.
# "local[2]" runs the job with two local worker threads.
sc = SparkContext("local[2]", "SparkStreamingTest")
# Build a StreamingContext that processes one batch every second.
ssc = StreamingContext(sc, 1)
# Create a discretized stream over a TCP socket; sys.argv[1]/[2] are the
# host and port (the remote side must already be listening).
lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
recv_data = lines.map(just_print)
recv_data.pprint()
# Start the (socket-based) stream ...
ssc.start()
# ... and block until the streaming computation terminates.
ssc.awaitTermination()
| [
"jmketri@gmail.com"
] | jmketri@gmail.com |
9cc60c5abdd36edbd7a873ba397ed2815867ad34 | 66cff6c4ad4c5fd6ecdfb723614f0475e27a5b38 | /akshare/air/air_hebei.py | 7954e6d79e7f2c969e9da72997e8aedbf6ef83fa | [
"MIT"
] | permissive | ifzz/akshare | a862501b314f2b5aeab22af86771dbeee34cfdb8 | 70cf20680b580c8bacab55a0b7d792d06e299628 | refs/heads/master | 2022-12-02T18:36:33.754645 | 2020-08-24T05:16:42 | 2020-08-24T05:16:42 | 289,834,570 | 1 | 0 | MIT | 2020-08-24T05:17:09 | 2020-08-24T05:17:09 | null | UTF-8 | Python | false | false | 3,461 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/4/29 12:33
Desc: ๆฒณๅ็็ฉบๆฐ่ดจ้้ขๆฅไฟกๆฏๅๅธ็ณป็ป
http://110.249.223.67/publish/
ๆฏๆฅ 17 ๆถๅๅธ
็ญ็บงๅๅ
1. ็ฉบๆฐๆฑกๆๆๆฐไธบ0๏ผ50๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบไธ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบไผใๆญคๆถ๏ผ็ฉบๆฐ่ดจ้ไปคไบบๆปกๆ๏ผๅบๆฌๆ ็ฉบๆฐๆฑกๆ๏ผๅ็ฑปไบบ็พคๅฏๆญฃๅธธๆดปๅจใ
2. ็ฉบๆฐๆฑกๆๆๆฐไธบ51๏ผ100๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบไบ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบ่ฏใๆญคๆถ็ฉบๆฐ่ดจ้ๅฏๆฅๅ๏ผไฝๆไบๆฑกๆ็ฉๅฏ่ฝๅฏนๆๅฐๆฐๅผๅธธๆๆไบบ็พคๅฅๅบทๆ่พๅผฑๅฝฑๅ๏ผๅปบ่ฎฎๆๅฐๆฐๅผๅธธๆๆไบบ็พคๅบๅๅฐๆทๅคๆดปๅจใ
3. ็ฉบๆฐๆฑกๆๆๆฐไธบ101๏ผ150๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบไธ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบ่ฝปๅบฆๆฑกๆใๆญคๆถ๏ผๆๆไบบ็พค็็ถๆ่ฝปๅบฆๅ ๅง๏ผๅฅๅบทไบบ็พคๅบ็ฐๅบๆฟ็็ถใๅปบ่ฎฎๅฟ็ซฅใ่ๅนดไบบๅๅฟ่็
ใๅผๅธ็ณป็ป็พ็
ๆฃ่
ๅบๅๅฐ้ฟๆถ้ดใ้ซๅผบๅบฆ็ๆทๅค้ป็ผใ
4. ็ฉบๆฐๆฑกๆๆๆฐไธบ151๏ผ200๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบๅ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบไธญๅบฆๆฑกๆใๆญคๆถ๏ผ่ฟไธๆญฅๅ ๅงๆๆไบบ็พค็็ถ๏ผๅฏ่ฝๅฏนๅฅๅบทไบบ็พคๅฟ่ใๅผๅธ็ณป็ปๆๅฝฑๅ๏ผๅปบ่ฎฎ็พ็
ๆฃ่
้ฟๅ
้ฟๆถ้ดใ้ซๅผบๅบฆ็ๆทๅค้ป็ป๏ผไธ่ฌไบบ็พค้้ๅๅฐๆทๅค่ฟๅจใ
5. ็ฉบๆฐๆฑกๆๆๆฐไธบ201๏ผ300๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบไบ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบ้ๅบฆๆฑกๆใๆญคๆถ๏ผๅฟ่็
ๅ่บ็
ๆฃ่
็็ถๆพ่ๅ ๅง๏ผ่ฟๅจ่ๅๅ้ไฝ๏ผๅฅๅบทไบบ็พคๆฎ้ๅบ็ฐ็็ถ๏ผๅปบ่ฎฎๅฟ็ซฅใ่ๅนดไบบๅๅฟ่็
ใ่บ็
ๆฃ่
ๅบๅ็ๅจๅฎคๅ
๏ผๅๆญขๆทๅค่ฟๅจ๏ผไธ่ฌไบบ็พคๅๅฐๆทๅค่ฟๅจใ
6. ็ฉบๆฐๆฑกๆๆๆฐๅคงไบ300๏ผ็ฉบๆฐ่ดจ้็บงๅซไธบๅ
ญ็บง๏ผ็ฉบๆฐ่ดจ้็ถๅตๅฑไบไธฅ้ๆฑกๆใๆญคๆถ๏ผๅฅๅบทไบบ็พค่ฟๅจ่ๅๅ้ไฝ๏ผๆๆๆพๅผบ็็็ถ๏ผๆๅๅบ็ฐๆไบ็พ็
๏ผๅปบ่ฎฎๅฟ็ซฅใ่ๅนดไบบๅ็
ไบบๅบๅฝ็ๅจๅฎคๅ
๏ผ้ฟๅ
ไฝๅๆถ่๏ผไธ่ฌไบบ็พคๅบ้ฟๅ
ๆทๅคๆดปๅจใ
ๅๅธๅไฝ๏ผๆฒณๅ็็ฏๅขๅบๆฅไธ้ๆฑกๆๅคฉๆฐ้ข่ญฆไธญๅฟ ๆๆฏๆฏๆ๏ผไธญๅฝ็งๅญฆ้ขๅคงๆฐ็ฉ็็ ็ฉถๆ ไธญ็งไธๆธ
็งๆๆ้ๅ
ฌๅธ
"""
from datetime import datetime
import pandas as pd
import requests
def air_quality_hebei(city: str = "ๅๅฑฑๅธ") -> pd.DataFrame:
    """
    Six-day air-quality forecast from the Hebei provincial air-quality
    publishing system (published daily at 17:00).

    http://110.249.223.67/publish/

    :param city: city name to filter on; pass "" to get rows for every city
    :type city: str
    :return: forecast rows (one per city per day), indexed by city name
    :rtype: pandas.DataFrame
    """
    url = "http://110.249.223.67/publishNewServer/api/CityPublishInfo/GetProvinceAndCityPublishData"
    params = {
        "publishDate": f"{datetime.today().strftime('%Y-%m-%d')} 16:00:00"
    }
    r = requests.get(url, params=params)
    json_data = r.json()
    city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")["CityName"].tolist()
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0; build
    # one frame per forecast day and concatenate them once instead.
    day_frames = [
        pd.DataFrame([item[f"Date{i}"] for item in json_data["cityPublishDatas"]], index=city_list)
        for i in range(1, 7)
    ]
    outer_df = pd.concat(day_frames)
    if city == "":
        return outer_df
    return outer_df[outer_df.index == city]
if __name__ == "__main__":
    # Demo: fetch and print the forecast for one city.
    air_quality_hebei_df = air_quality_hebei(city="็ณๅฎถๅบๅธ")
    print(air_quality_hebei_df)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
ed792abb61bcc652956c16fe5d68c92980522a1b | cdf3e4079e0d5cbef05092716dcb72883d3cf374 | /ocrvercode.py | 02f95f5fcc02845b702ee66c463e4d2d0da334e8 | [] | no_license | kerzhao/Keras_OcrVerCode | de654373cdce7e2c24c7a168305d2e1252f81f1a | 2ba1dd33c2c7993e509ada129d7c2f3c50d30aa4 | refs/heads/master | 2021-01-19T05:06:03.472740 | 2017-03-06T08:34:13 | 2017-03-06T08:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | import glob
import os
import numpy as np
from scipy import misc
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Activation, Dense, Dropout
from keras.models import Model
from keras.utils.np_utils import to_categorical
from keras.utils.visualize_util import plot
import sys
from keras.models import model_from_json
img_size = (3L, 160L, 60L)
model = model = model_from_json(open(str(sys.argv[1])).read())
model.load_weights(str(sys.argv[2]))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
#plot(model, to_file='../model.png',show_shapes=True)
def ocr(filename):
img = misc.imresize(misc.imread(filename), img_size[::-1]).T
img = np.array([1 - img.astype(float)/255])
return ''.join(chr(i.argmax()+ord('0')) for i in model.predict(img))
root = './'
truelen = 0.0
files = 0
for i in os.listdir(root):
if os.path.isfile(os.path.join(root,i)):
strcode = i.replace(".jpg","")
if(strcode == ocr(i)):
truelen += 1
print (i + ":" + ocr(i) + " TRUE")
else:
print (i + ":" + ocr(i) + " FALSE")
files += 1
print("Test Sample:" + str(files))
print("Accuracy:" + str(truelen/files*100) + "%")
#in test sample dir
#python ../ocrvercode.py ../test.jpg ../yanzheng_cnn_2d.model | [
"Satan Lucifer"
] | Satan Lucifer |
9560f41fddfa186873de820aa3ad680855f5706e | 663397bc8a4fe6d3b843826606279a2806c896a4 | /5/tmp1.py | 84cb81c475b383ad6f782c115de2cf2722bbc9f9 | [] | no_license | qiongxing/pythonStuByHelloWorld | 8c85bc98653059128c453b94385368601eeeff0f | 258e868ff7ae36c9c67364772e81ed9a3ccbc7f3 | refs/heads/main | 2023-03-02T04:20:11.138247 | 2021-02-08T09:46:40 | 2021-02-08T09:46:40 | 325,244,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | fahrenheit = float(raw_input())
celsius = (fahrenheit - 32) *5.0/9
print "Is",celsius,"des"
| [
"qiongxing1412@qq.com"
] | qiongxing1412@qq.com |
74c8a660c0ba693548e3e5fb479a55b43fe0dcaf | ae8b81b3ac15c1fc0ce8aa4d30eecf5d00457e49 | /install/lib/python2.7/dist-packages/auto_driving/msg/_DetectionResult.py | 971669ac17e96c6de618229ce0f89cc721d277a0 | [] | no_license | AlexandruZaharia/AutonomousDriving | 14674cc8a375d57040a8caf2b213514ea5d0ada8 | 48b44cde7867321074fba3704110eac8d72a03e1 | refs/heads/master | 2020-04-22T21:17:46.393836 | 2019-02-19T12:09:00 | 2019-02-19T12:09:00 | 170,669,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,634 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from auto_driving/DetectionResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class DetectionResult(genpy.Message):
_md5sum = "77a2470f91f15b079bebc4e6c7b62731"
_type = "auto_driving/DetectionResult"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string robot_name
string country
uint8 product_id
"""
__slots__ = ['robot_name','country','product_id']
_slot_types = ['string','string','uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
robot_name,country,product_id
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(DetectionResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.robot_name is None:
self.robot_name = ''
if self.country is None:
self.country = ''
if self.product_id is None:
self.product_id = 0
else:
self.robot_name = ''
self.country = ''
self.product_id = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.robot_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.country
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.product_id))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
        end = 0
        # robot_name: uint32 length prefix, then that many bytes.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.robot_name = str[start:end].decode('utf-8')
        else:
            self.robot_name = str[start:end]
        # country: same length-prefixed string encoding.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.country = str[start:end].decode('utf-8')
        else:
            self.country = str[start:end]
        # product_id: one unsigned byte.
        start = end
        end += 1
        (self.product_id,) = _get_struct_B().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # NOTE: this message has no array fields, so the numpy module is
    # accepted for API symmetry but never used; the body matches serialize().
    try:
        _x = self.robot_name
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _x = self.country
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        buff.write(_get_struct_B().pack(self.product_id))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # NOTE: no array fields in this message, so numpy is unused and the
    # body mirrors deserialize().
    try:
        end = 0
        # robot_name: uint32 length prefix, then the string bytes.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.robot_name = str[start:end].decode('utf-8')
        else:
            self.robot_name = str[start:end]
        # country: same length-prefixed encoding.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.country = str[start:end].decode('utf-8')
        else:
            self.country = str[start:end]
        # product_id: one unsigned byte.
        start = end
        end += 1
        (self.product_id,) = _get_struct_B().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared Struct for little-endian uint32 length prefixes, provided by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Accessor for the module-level uint32 Struct."""
    global _struct_I
    return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
| [
"Alexandru.Zaharia@kpit.com"
] | Alexandru.Zaharia@kpit.com |
0b7c05445657217c75fd251f93946173472b3df4 | 7b8efd8ecde77295c09ff247f884ef16619a4e15 | /seam_carving.py | f665b7b579a9015cf25e44ab3f3a2b6a0ab6fa69 | [] | no_license | SwethaGeo/Seam-Carving | 0b05f5923a9cb384f1f59a99d5f5af6f80e6af22 | 0fa00f26ee0beb0c30cc9000022735e5a981cd31 | refs/heads/master | 2020-04-03T13:11:06.945051 | 2018-10-29T20:54:33 | 2018-10-29T20:54:33 | 154,558,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,943 | py | import cv2
import numpy as np
import scipy as sp
import scipy.signal
def energy_map(img):
    """Return the per-pixel energy of ``img``.

    The energy is the L1 gradient magnitude summed over all channels:
    sum_c |d/dy img_c| + |d/dx img_c|, computed on a float copy of the
    image. Degenerate axes (a single row or column) contribute zero.
    """
    pixels = img.astype(float)
    rows, cols, channels = img.shape
    energy = np.zeros((rows, cols), dtype=float)
    for ch in range(channels):
        plane = pixels[:, :, ch]
        grad_y = np.gradient(plane, axis=0) if rows > 1 else np.zeros((rows, cols), dtype=float)
        grad_x = np.gradient(plane, axis=1) if cols > 1 else np.zeros((rows, cols), dtype=float)
        energy += np.absolute(grad_y) + np.absolute(grad_x)
    return energy
def cumulative_energy_vertical(img):
    """Return the cumulative minimum-energy map M for vertical seams.

    M[i, j] is the total energy of the cheapest 8-connected seam that
    starts in row 0 and ends at pixel (i, j), filled row by row with the
    standard seam-carving dynamic program.
    """
    e = energy_map(img)  # Total energy
    row, col = e.shape
    # BUG FIX: the original allocated M with dtype=type(e) (i.e. the class
    # numpy.ndarray), which numpy maps to a slow object-dtype array; use a
    # plain float array instead, matching the energy map's values.
    M = np.zeros((row, col), dtype=float)
    M[0] = e[0]  # first row has no predecessors
    for i in range(1, row):
        for j in range(0, col):
            if j == 0:
                # Left edge: only the two predecessors at j and j+1 exist.
                M[i, j] = e[i, j] + min(M[i-1, j], M[i-1, j+1])
            elif j == col-1:
                # Right edge: only the predecessors at j-1 and j exist.
                M[i, j] = e[i, j] + min(M[i-1, j-1], M[i-1, j])
            else:
                M[i, j] = e[i, j] + min(M[i-1, j-1], M[i-1, j], M[i-1, j+1])
    return M
def seam_path_vertical(img):
    """Backtrack the minimum-energy vertical seam through ``img``.

    Returns (seam_path, M) where seam_path[i] is the seam's column in
    row i and M is the cumulative energy map from
    cumulative_energy_vertical().
    """
    row, col = img.shape[:2]
    M = cumulative_energy_vertical(img)
    j = np.argmin(M[-1])  # cheapest seam endpoint in the last row
    seam_path = np.zeros((row), dtype=int)
    seam_path[-1] = j
    # BUG FIX: the original loop ran down to i == 0, so its final iteration
    # read M[i-1] == M[-1] (wrap-around to the LAST row) and overwrote
    # seam_path[-1], corrupting the endpoint chosen above. Stopping at
    # i == 1 fills seam_path[row-2] .. seam_path[0] exactly once each.
    for i in range(row-1, 0, -1):
        if j == 0:
            j = j + np.argmin((M[i-1, j], M[i-1, j+1]))  # either j or j+1
        elif j == col-1:
            j = j + np.argmin((M[i-1, j-1], M[i-1, j])) - 1  # either j-1 or j
        else:
            j = j + np.argmin((M[i-1, j-1], M[i-1, j], M[i-1, j+1])) - 1  # j-1, j or j+1
        seam_path[i-1] = j
    return seam_path, M
def remove_col(channel, seam_path):
    """Delete one pixel per row from a single-channel image.

    seam_path[i] is the column removed from row i; the result therefore
    has shape (row, col - 1).
    """
    rows, cols = channel.shape
    keep = np.ones(channel.size, dtype=bool)
    # Flat indices of the seam pixels; mark them for removal.
    seam_flat = np.ravel_multi_index([range(rows), seam_path], (rows, cols))
    keep[seam_flat] = False
    return channel.flatten()[keep].reshape(rows, cols - 1)
def seam_removal_vertical(img, seam):
    """Carve one vertical seam out of ``img``.

    With an empty ``seam`` the optimal path is computed here and its
    minimum cost (needed by the optimal-retargeting transport map) is
    returned; otherwise the caller's precomputed path is removed and the
    returned cost is 0.0. Returns (carved_image, cost).
    """
    row, col, channels = img.shape
    if len(seam) == 0:
        # Seam removal: locate the cheapest seam and remember its cost.
        path, M = seam_path_vertical(img)
        e = min(M[-1])
    else:
        # Seam insertion: the path was already computed by the caller.
        path, e = seam, 0.0
    carved = np.zeros((row, col - 1, channels), dtype=np.uint8)
    for ch in range(channels):
        carved[:, :, ch] = remove_col(img[:, :, ch], path)
    return carved, e
def seam_insertion(img, k):
    """This function returns the new enlarged image with (row,col+k) shape and the image showing the optimal seam paths
    """
    row,col,channels = img.shape
    img_rem = img.copy()
    I = np.zeros(img.shape[:2],dtype = bool)  # marks, in original coordinates, pixels of the k removed seams
    img_new = np.zeros((row,col+k,3),dtype = img.dtype) # To store enlarged image
    kernel = np.array([[0,0,0],[0.5,0,0.5],[0,0,0]]) # Kernel to find average of left and right neighbors
    seams = [] # To store optimal seam paths
    colidx = np.tile(range(col), (row, 1)) # The column index of the original image
    # Find the k cheapest seams by repeatedly removing them from a working
    # copy, while tracking where each removed pixel sat in the ORIGINAL image.
    for i in range(k):
        path,e = seam_path_vertical(img_rem) # Finding seam path
        img_rem,e = seam_removal_vertical(img_rem, path) #Removing vertical seam
        I[range(row),colidx[range(row), path]] = True # Marking the seam path in original image True
        seams.append(colidx[range(row),path]) # appending optimal seam path
        colidx = remove_col(colidx, path) #Removing the column numbers of seam path from original image
    delta = np.cumsum(I,axis = 1) # Number of shifts required for the columns of the original image
    # Scatter the original pixels into their shifted positions; the gaps left
    # behind are the locations of the inserted seams.
    for i in range(row):
        img_new[i,range(col)+delta[i,range(col)]] = img[i,range(col)] #Storing the orginal image pixels to new position
    # Mirror-pad by one pixel so the convolution below has neighbors at the border.
    img_new1 = cv2.copyMakeBorder(img_new,1,1,1,1,cv2.BORDER_REFLECT_101 )
    for i in range(channels):
        img1 = sp.signal.convolve2d(img_new1[:,:,i],kernel,mode='valid') #Convolving using kernel to find average of left and right neighbors
        img_new[:,:,i] = img1
    img_color = img_new.copy()
    img_1 = img.copy()
    for i in seams:
        img_1[range(row),i] = [0,0,255] # Seam path as red (BGR)
    # Re-scatter the original pixels so only the inserted columns keep the
    # averaged values; img_color additionally shows the seams in red.
    for i in range(row):
        img_new[i,range(col)+delta[i,range(col)]] = img[i,range(col)] #Restoring the values of pixel in original image
        img_color[i,range(col)+delta[i,range(col)]] = img_1[i,range(col)]
    return img_new,img_color
def image_transpose(img):
    """Return ``img`` with its two spatial axes swapped (rows <-> cols).

    Each channel plane is transposed independently and the planes are
    restacked, so a (H, W, C) image becomes (W, H, C).

    BUG FIX: the original looped over ``channels`` but then hard-coded
    exactly three planes in the np.dstack call (v[0], v[1], v[2]), which
    broke for any other channel count; stacking the whole list handles an
    arbitrary number of channels and is identical for C == 3.
    """
    channels = img.shape[2]
    planes = [img[:, :, i].T for i in range(channels)]
    return np.dstack(planes)
def seam_removal_horizontal(img):
    """Remove one horizontal seam from ``img``.

    Implemented by transposing, removing a vertical seam, and transposing
    back. Returns (carved_image, seam_cost).
    """
    carved, cost = seam_removal_vertical(image_transpose(img), [])
    return image_transpose(carved), cost
def transport_map(img):
    """Build the transport map for optimal seam-removal ordering.

    Returns (T, C): T[i, j] is the minimum total seam cost of removing i
    horizontal and j vertical seams from ``img``, and C[i, j] is a 1-bit
    record of the choice made at each step (0 = horizontal seam removed,
    1 = vertical seam removed), per Avidan & Shamir's optimal retargeting.
    """
    row, col = img.shape[:2]
    I = [None] * col  # I[j]: intermediate image for the row of T being filled
    T = np.zeros((row, col), dtype=float)  # Transport map
    C = np.zeros((row, col), dtype=int)    # Map with path chosen
    for i in range(row):
        # FIX: progress output rewritten as a print() call; the original
        # Python 2 print statement is a syntax error under Python 3.
        print("row number Transport map: %s" % i)
        for j in range(col):
            if i == 0 and j == 0:
                # Origin: nothing removed yet.
                T[i, j] = 0
                I[j] = img
                continue
            if j == 0 and i > 0:
                # First column: only horizontal removals are possible.
                img, e = seam_removal_horizontal(I[j])
                T[i, j], I[j], C[i, j] = e + T[i-1, j], img, 0
            elif i == 0 and j > 0:
                # First row: only vertical removals are possible.
                img, e = seam_removal_vertical(I[j-1], [])
                T[i, j], I[j], C[i, j] = e + T[i, j-1], img, 1
            else:
                # Interior cell: take the cheaper of one more horizontal
                # or one more vertical seam removal.
                img_h, eh = seam_removal_horizontal(I[j])
                img_v, ev = seam_removal_vertical(I[j-1], [])
                T[i, j] = min(eh + T[i-1, j], ev + T[i, j-1])
                C[i, j] = np.argmin((eh + T[i-1, j], ev + T[i, j-1]))
                if C[i, j] == 0:
                    I[j] = img_h
                else:
                    I[j] = img_v
    return T, C
def optimal_path(T, C, r, c):
    """Recover the removal order from the transport map.

    Backtracks from T[r, c] to T[0, 0] through the choice map C, marking
    every visited cell of T as NaN (consumed later to highlight the path)
    and returning the choices in forward order (0 = horizontal seam,
    1 = vertical seam).
    """
    choices = [0] * (r + c)
    for k in range(r + c - 1, -1, -1):
        choices[k] = C[r, c]
        T[r, c] = np.nan  # same effect as the original's T[r, c] = None
        if C[r, c] == 0:
            r -= 1  # this step removed a horizontal seam
        else:
            c -= 1  # this step removed a vertical seam
    assert r == 0 and c == 0
    return choices
def retarget_image(img, T, C, r, c):
    """Carve ``img`` down by r rows and c columns.

    Replays the optimal removal order recorded in the transport map
    (T, C): choice 0 removes a horizontal seam, anything else a vertical
    one. Returns the retargeted image.
    """
    img_final = img
    for choice in optimal_path(T, C, r, c):
        if choice == 0:
            img_final, _ = seam_removal_horizontal(img_final)
        else:
            img_final, _ = seam_removal_vertical(img_final, [])
    return img_final
def main():
    """Run the three seam-carving demos and save their outputs:

    * seam removal on fig5.png (300 vertical seams),
    * seam insertion on fig8.png (two rounds of +50% width),
    * optimal-order retargeting on fig7.png plus its transport map.

    FIX: every Python 2 print statement is rewritten as a single-argument
    print() call, which behaves identically on Python 2 and Python 3.
    """
    #Reading fig5
    print("Reading fig5")
    img1 = cv2.imread("fig5.png")
    #Reading fig8
    print("Reading fig8")
    img2 = cv2.imread("fig8.png")
    #Reading fig7
    print("Reading fig7")
    img3 = cv2.imread("fig7.png")

    #Seam removal
    print("Removing Vertical Seams")
    img_new = img1.copy()
    n = 300 # number of vertical seams to remove
    for i in range(n):
        img_new, e = seam_removal_vertical(img_new, [])

    #Saving Seam removal result
    print("Saving Seam removal result")
    cv2.imwrite('fig5_seam_removal.png', img_new)

    #Seam insertion
    print("Inserting Vertical seams")
    num_cols_to_insert = int(img2.shape[1] * 0.5)
    I, I_color = seam_insertion(img2, num_cols_to_insert)
    I_2, I_color_2 = seam_insertion(I, num_cols_to_insert)

    #Saving Seam Insertion results
    print("Saving Seam insertion results")
    cv2.imwrite('fig8_c.png', I_color)
    cv2.imwrite('fig8_d.png', I)
    cv2.imwrite('fig8_f.png', I_2)

    #Optimal Order Retargeting
    print("Transport map and Retargeted Image")
    T, C = transport_map(img3)
    T_new = T.copy()
    r = 125 # number of rows to remove
    c = 135 # number of columns to remove
    image = retarget_image(img3, T_new, C, r, c)

    # Applying color map; NaNs in T_new mark the removal path chosen by
    # optimal_path, and are drawn in white over the JET colormap.
    T2 = T_new.copy()
    path_mask = np.isnan(T2)
    T2 = T2 / T2[~path_mask].max() * 255
    T2 = T2.astype(np.uint8)
    T_new_colormap = cv2.applyColorMap(T2, cv2.COLORMAP_JET)
    T_new_colormap[path_mask, :] = 255

    #Saving Transport map and retargeted image
    print("Saving Transport map and retargeted image")
    cv2.imwrite('Transport map.png', T_new_colormap)
    cv2.imwrite('fig7_retargeted.png', image)

if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | SwethaGeo.noreply@github.com |
b91cf9d73caef66d4874e5255e2eceec7ee8d15a | 20148efe0936789a790266ea39a7974184cd52b0 | /satchmo/apps/payment/migrations/0003_auto_20170405_0730.py | 31a9182ddf3b982467fb93043ed4f996735f08fd | [
"BSD-2-Clause"
] | permissive | opencoca/WEB-Satchmo | 2f4661a13a66c16fae98e9db09268f2161e39968 | d2f6c086f4742b10743075701caec3e170f112bf | refs/heads/master | 2023-07-06T05:25:12.762921 | 2017-10-12T11:14:55 | 2017-10-12T11:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters CreditCardDetail.orderpayment to a OneToOneField."""

    dependencies = [
        ('payment', '0002_creditcarddetail_orderpayment'),
    ]

    operations = [
        migrations.AlterField(
            model_name='creditcarddetail',
            name='orderpayment',
            # One credit-card detail per payment; reachable from
            # shop.OrderPayment via the reverse name `creditcard`.
            field=models.OneToOneField(related_name='creditcard', to='shop.OrderPayment'),
        ),
    ]
| [
"predatell@localhost"
] | predatell@localhost |
62c23bc35e09fd885d7dd599ac35f30f777a5148 | 4c19eac6e53b2c1230257508370ad60c8d83d6a7 | /dxm/lib/DxAlgorithm/alg_worker.py | 75fcc4c87123d87ea14ca078b5a002fd729f7811 | [
"Apache-2.0"
] | permissive | rakesh-roshan/dxm-toolkit | 2c7741c8a02952de1c23715eadb515d84fcaf954 | 2c6e6ebf8615526501767844edf06fb74d878f25 | refs/heads/master | 2020-04-27T19:05:11.293818 | 2019-03-01T13:49:34 | 2019-03-01T13:49:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,246 | py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
import logging
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.Output.DataFormatter import DataFormatter
from dxm.lib.DxTools.DxTools import get_list_of_engines
from dxm.lib.DxAlgorithm.DxAlgorithmList import DxAlgorithmList
from dxm.lib.DxAlgorithm.DxAlgorithm import DxAlgorithm
from dxm.lib.DxDomain.DxDomainList import DxDomainList
import sys
def algorithm_list(p_engine, format, algname):
    """
    Print list of algorithms
    param1: p_engine: engine name from configuration
    param2: format: output format
    param2: algname: algname name to list, all if None
    return 0 if algname found
    """
    ret = 0

    data = DataFormatter()
    # Column headers with display widths for the tabular output.
    data_header = [
        ("Engine name", 30),
        ("Algorithm name", 30),
        ("Domain name", 32),
        ("Syncable", 9),
        ("Algorithm type", 30),
    ]
    data.create_header(data_header)
    data.format_type = format

    enginelist = get_list_of_engines(p_engine)

    if enginelist is None:
        return 1

    for engine_tuple in enginelist:
        engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
                                     engine_tuple[2], engine_tuple[3])
        # Skip engines we cannot open a session for.
        # NOTE(review): assumes get_session() is truthy on failure — confirm.
        if engine_obj.get_session():
            continue
        domainlist = DxDomainList()
        domainlist.LoadDomains()
        alglist = DxAlgorithmList()
        alglist.LoadAlgorithms()
        algref_list = []
        if algname:
            # NOTE(review): get_by_ref() is called with the algorithm *name*
            # here, and the object's name is later used as a "ref" — verify
            # that names and references are interchangeable in DxAlgorithmList.
            algobj = alglist.get_by_ref(algname)
            if algobj:
                algref_list.append(algobj.algorithm_name)
        else:
            algref_list = alglist.get_allref()

        for algref in algref_list:
            algobj = alglist.get_by_ref(algref)
            if algobj.sync:
                syncable = 'Y'
            else:
                syncable = 'N'
            data.data_insert(
                engine_tuple[0],
                algobj.algorithm_name,
                algobj.domain_name,
                syncable,
                algobj.algorithm_type
            )
            #algobj.export()

    print("")
    print (data.data_output(False))
    print("")
    return ret
def algorithm_worker(p_engine, algname, **kwargs):
    """
    Select an algorithm and run action on it
    param1: p_engine: engine name from configuration
    param2: algname: algorithm name
    kwargs: parameters to pass including function name to call
    return 0 if algname found
    """
    ret = 0
    # Name of a module-level function (e.g. 'do_export') dispatched below.
    function_to_call = kwargs.get('function_to_call')

    enginelist = get_list_of_engines(p_engine)

    if enginelist is None:
        return 1

    for engine_tuple in enginelist:
        engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
                                     engine_tuple[2], engine_tuple[3])
        if engine_obj.get_session():
            continue
        domainlist = DxDomainList()
        domainlist.LoadDomains()
        alglist = DxAlgorithmList()
        algref_list = []

        algobj = alglist.get_by_ref(algname)
        if algobj is None:
            # Algorithm not found on this engine: count as a failure and
            # continue with the remaining engines.
            ret = ret + 1
            continue

        # Dynamic dispatch by name; the callee gets the matched algorithm,
        # the engine object, and the caller's original kwargs, and returns
        # truthy on failure.
        dynfunc = globals()[function_to_call]
        if dynfunc(algobj=algobj, engine_obj=engine_obj, **kwargs):
            ret = ret + 1
    return ret
def algorithm_export(p_engine, algname, outputfile):
    """
    Save algorithm to file
    param1: p_engine: engine name from configuration
    param2: algname: algname name to export
    param3: outputfile: output file
    return 0 if OK
    """
    # Delegate to the generic worker, naming do_export as the action.
    worker_args = {'outputfile': outputfile, 'function_to_call': 'do_export'}
    return algorithm_worker(p_engine, algname, **worker_args)
def do_export(**kwargs):
    """Worker callback: export the matched algorithm object to file."""
    algorithm = kwargs.get('algobj')
    algorithm.export()
def algorithm_import(p_engine, inputfile):
    """
    Load algorithm from file
    param1: p_engine: engine name from configuration
    param2: inputfile: input file
    return 0 if OK, 1 if no engines are configured
    """
    ret = 0

    enginelist = get_list_of_engines(p_engine)

    if enginelist is None:
        return 1

    for engine_tuple in enginelist:
        engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
                                     engine_tuple[2], engine_tuple[3])
        if engine_obj.get_session():
            continue
        algobj = DxAlgorithm(engine_obj)
        # NOTE(review): `inputfile` is accepted but never used — importalg()
        # receives None, mirroring the original code. Confirm whether the
        # file path/handle should be passed here.
        algobj.importalg(None)
    # BUG FIX: the docstring promises a status return, but the original
    # fell off the end and returned None; return the accumulated status
    # like the sibling algorithm_* functions do.
    return ret
| [
"marcin@delphix.com"
] | marcin@delphix.com |
6df182f0f896addcf9738b9a20ab16600265939b | b9ed14f23d7d48ce88a93a808556cab9a0abc682 | /tensorflow_model_optimization/python/core/quantization/keras/graph_transformations/model_transformer_test.py | 0398bcdfa5e161b6894a4dd2111cac7008fd2e69 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | akarmi/model-optimization | 2a53655e92cabe5b180a0319bc64c339494b97bb | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | refs/heads/master | 2020-08-16T17:20:55.836218 | 2019-10-07T17:49:50 | 2019-10-07T17:50:12 | 215,530,733 | 0 | 0 | Apache-2.0 | 2019-10-16T11:23:40 | 2019-10-16T11:23:40 | null | UTF-8 | Python | false | false | 8,873 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model Transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.platform import test
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import model_transformer
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms
# Short aliases for the classes under test.
ModelTransformer = model_transformer.ModelTransformer
Transform = transforms.Transform
LayerPattern = transforms.LayerPattern
LayerNode = transforms.LayerNode
class ModelTransformerTest(test.TestCase):
  """Tests for ModelTransformer pattern-based rewrites of Keras models."""

  @staticmethod
  def _batch(dims, batch_size):
    """Adds provided batch_size to existing dims.

    If dims is (None, 5, 2), returns (batch_size, 5, 2)

    Args:
      dims: Dimensions
      batch_size: batch_size

    Returns:
      dims with batch_size added as first parameter of list.
    """
    if dims[0] is None:
      dims[0] = batch_size
    return dims

  def _create_model_inputs(self, model):
    # Random input shaped like the model's input with batch size 1.
    return np.random.randn(*self._batch(model.input.get_shape().as_list(), 1))

  def _simple_dense_model(self):
    # Input(3) -> Dense(2) -> ReLU(6): the minimal model used across tests.
    inp = keras.layers.Input((3,))
    x = keras.layers.Dense(2)(inp)
    out = keras.layers.ReLU(6.0)(x)
    return keras.Model(inp, out)

  def _assert_config(self, expected_config, actual_config, exclude_keys=None):
    """Asserts that the two config dictionaries are equal.

    This method is used to compare keras Model and Layer configs. It provides
    the ability to exclude the keys we don't want compared.

    Args:
      expected_config: Config which we expect.
      actual_config: Actual received config.
      exclude_keys: List of keys to not check against.
    """
    expected_config = expected_config.copy()
    actual_config = actual_config.copy()

    def _remove_keys(config):
      """Removes all exclude_keys (including nested) from the dict."""
      for key in exclude_keys:
        if key in config:
          del config[key]

      for _, v in config.items():
        if isinstance(v, dict):
          _remove_keys(v)

        if isinstance(v, list):
          for item in v:
            if isinstance(item, dict):
              _remove_keys(item)

    if exclude_keys:
      _remove_keys(expected_config)
      _remove_keys(actual_config)

    self.assertDictEqual(expected_config, actual_config)

  def _assert_model_results_equal(self, model, transformed_model):
    # The same random input must yield numerically equal predictions.
    inputs = self._create_model_inputs(model)
    self.assertAllClose(
        model.predict(inputs), transformed_model.predict(inputs))

  # Transform classes for testing.

  class ReplaceDenseLayer(transforms.Transform):
    """Replaces `Dense` layers with `MyDense`, a simple inherited layer.

    This `Transform` class replaces `Dense` layers with a class `MyDense`
    which is simply an empty inheritance of `Dense`. This makes it easy to test
    the transformation code.
    """

    class MyDense(keras.layers.Dense):
      pass

    def pattern(self):
      return LayerPattern('Dense')

    def replacement(self, match_layer):
      # Rebuild the matched layer as MyDense with an identical config,
      # keeping its weights and leaving its inputs untouched.
      match_layer_config = match_layer.layer['config']
      my_dense_layer = self.MyDense(**match_layer_config)

      replace_layer = keras.layers.serialize(my_dense_layer)
      replace_layer['name'] = replace_layer['config']['name']

      return LayerNode(replace_layer, match_layer.weights, [])

    def custom_objects(self):
      return {'MyDense': self.MyDense}

  def testReplaceSingleLayerWithSingleLayer_OneOccurrence(self):
    model = self._simple_dense_model()

    transformed_model = ModelTransformer(
        model, [self.ReplaceDenseLayer()]).transform()

    # Config identical except for the swapped class name; numerics preserved.
    self._assert_config(model.get_config(), transformed_model.get_config(),
                        ['class_name'])
    self.assertEqual('MyDense', transformed_model.layers[1].__class__.__name__)
    self._assert_model_results_equal(model, transformed_model)

  def testReplaceSingleLayerWithSingleLayer_MultipleOccurrences(self):
    # Two parallel Dense branches: both occurrences should be replaced.
    inp = keras.layers.Input((3,))
    x1 = keras.layers.Dense(2)(inp)
    x2 = keras.layers.Dense(2)(inp)
    out1 = keras.layers.ReLU(6.0)(x1)
    out2 = keras.layers.ReLU(6.0)(x2)
    model = keras.Model(inp, [out1, out2])

    transformed_model = ModelTransformer(
        model, [self.ReplaceDenseLayer()]).transform()

    self._assert_config(model.get_config(), transformed_model.get_config(),
                        ['class_name'])
    self.assertEqual('MyDense', transformed_model.layers[1].__class__.__name__)
    self.assertEqual('MyDense', transformed_model.layers[2].__class__.__name__)
    self._assert_model_results_equal(model, transformed_model)

  def testReplaceSingleLayerWithSingleLayer_MatchParameters(self):
    class RemoveBiasInDense(transforms.Transform):
      """Replaces Dense layers with matching layers with `use_bias=False`."""

      def pattern(self):
        return LayerPattern('Dense', {'use_bias': True})

      def replacement(self, match_layer):
        match_layer_config = match_layer.layer['config']
        # Remove bias
        match_layer_weights = match_layer.weights
        match_layer_weights.popitem()

        match_layer_config['use_bias'] = False
        new_dense_layer = keras.layers.Dense(**match_layer_config)

        replace_layer = keras.layers.serialize(new_dense_layer)
        replace_layer['name'] = replace_layer['config']['name']

        return LayerNode(replace_layer, match_layer_weights, [])

    model = self._simple_dense_model()

    transformed_model = ModelTransformer(
        model, [RemoveBiasInDense()]).transform()

    self._assert_config(model.get_config(), transformed_model.get_config(),
                        ['use_bias'])
    self.assertFalse(transformed_model.layers[1].use_bias)
    # Should match since bias is initialized with zeros.
    self._assert_model_results_equal(model, transformed_model)

  def testReplaceSingleLayer_WithMultipleLayers(self):
    # TODO(pulkitb): Implement
    pass

  def testReplaceChainOfLayers_WithSingleLayer(self):
    class FuseReLUIntoDense(transforms.Transform):
      """Fuse ReLU into Dense layers."""

      def pattern(self):
        return LayerPattern('ReLU', inputs=[LayerPattern('Dense')])

      def replacement(self, match_layer):
        # Fold the matched ReLU into the upstream Dense by switching the
        # Dense activation to 'relu' and dropping the ReLU layer.
        dense_layer_config = match_layer.input_layers[0].layer['config']
        dense_layer_weights = match_layer.input_layers[0].weights
        dense_layer_config['activation'] = 'relu'

        new_dense_layer = keras.layers.Dense(**dense_layer_config)

        replace_layer = keras.layers.serialize(new_dense_layer)
        replace_layer['name'] = replace_layer['config']['name']

        return LayerNode(replace_layer, dense_layer_weights, [])

    inp = keras.layers.Input((3,))
    out = keras.layers.Dense(2, activation='relu')(inp)
    model_fused = keras.Model(inp, out)

    inp = keras.layers.Input((3,))
    x = keras.layers.Dense(2)(inp)
    out = keras.layers.ReLU()(x)
    model = keras.Model(inp, out)
    model.set_weights(model_fused.get_weights())

    transformed_model = ModelTransformer(
        model, [FuseReLUIntoDense()]).transform()

    self._assert_config(
        model_fused.get_config(), transformed_model.get_config(),
        # Layers have different names in the models, but same config.
        # Consider verifying the names loosely.
        ['input_layers', 'output_layers', 'name', 'inbound_nodes'])

    self._assert_model_results_equal(model, transformed_model)
    self._assert_model_results_equal(model_fused, transformed_model)

  def testReplaceChainOfLayers_WithChainOfLayers(self):
    # TODO(pulkitb): Implement
    pass

  def testReplaceTreeOfLayers_WithSingleLayer(self):
    # TODO(pulkitb): Implement
    pass

  def testReplaceTreeOfLayers_WithTreeOfLayers(self):
    # TODO(pulkitb): Implement
    pass

  # Negative Tests
  # TODO(pulkitb): Add negative tests
  # 1. Does not replace if any layer in the pattern has multiple nodes/consumers
  # 2. Adding a single layer clone will lead to infinite loop. Fix and test.
  # 3. Handles layer being part of multiple models.
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
ca9866c791f325346d4fceb027aceabf444020e5 | 5c9eadf5bb0aa45cdf8d4c060548722880e67e16 | /flask_test(python_function)/decorator.py | 651840a6689db577413fc5250025ee003da76ea3 | [] | no_license | jsistop16/Flask | 2a7172eed950fb30bd8893657227a5ababe6e3fe | 0a2481fb9462c7555bb8b50b630fb9e7ce968a5c | refs/heads/main | 2023-08-16T23:06:18.793527 | 2021-09-22T08:47:49 | 2021-09-22T08:47:49 | 405,354,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | def decorator1(func):#๋ฐ์ฝ๋ ์ดํฐ1
    # Body of decorator1 (its `def` line is above): build and return a
    # wrapper that announces this decorator before delegating to func.
    def inner():
        print('decorator1')
        func()
    return inner
def decorator2(func):  # decorator 2
    """Wrap ``func`` so that 'decorator2' is printed before it runs."""
    def inner():
        print('decorator2')
        func()
    return inner
@decorator1
@decorator2  # from here on, the result is wrapped inside decorator1 in turn
def hello():  # the function being decorated (in a sense, the "main" function)
    print('hello')

hello()
## The output is "decorator1 decorator2 hello",
## not "decorator1 hello decorator2 hello".
| [
"noreply@github.com"
] | jsistop16.noreply@github.com |
01d81c909553e350dc947bcb7a26a612f0240af0 | b86ff529e783f5dd81391fdd6fc9a65536a68e23 | /Titanic Survivor Tutorial/titanic_tutorial.py | 914e095042817ca316050910ebd28bc2ec3f8d3f | [] | no_license | tturin/TensorFlow_Practice | 098a6fd7406ecec42e8ef60b14be1b507f400539 | 2867389d91a8b8387ce9eb61866bcba458168fe2 | refs/heads/master | 2020-08-03T09:51:00.619766 | 2019-09-30T14:26:59 | 2019-09-30T14:26:59 | 211,708,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,600 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import pandas as pd
import numpy as np
import tensorflow as tf
# TF 1.x: enable eager mode so datasets can be iterated directly.
tf.enable_eager_execution()

LABEL_COLUMN = 'survived'  # target column in the Titanic CSVs
LABELS = [0, 1]            # values the label column can take
def main():
    """Walk through the tf.data CSV tutorial on the Titanic data:
    download the CSVs, build datasets, and demo explicit columns,
    column selection with defaults, and numeric-feature packing."""
    TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
    TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"

    # Download (and cache) the CSV files locally.
    train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
    test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)

    np.set_printoptions(precision=3, suppress=True)

    raw_train_data = get_dataset(train_file_path)
    raw_test_data = get_dataset(test_file_path)

    show_batch(raw_train_data)

    #If columns not provided, specify columns
    print("\nExplicit columns")
    CSV_COLUMNS = ['survived', 'sex', 'age', 'n_siblings_spouses', 'parch', 'fare', 'class', 'deck', 'embark_town', 'alone']
    temp_dataset = get_dataset(train_file_path, column_names = CSV_COLUMNS)
    show_batch(temp_dataset)

    #Place data into vector
    print("\nVectorize data")
    SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'parch', 'fare']
    DEFAULTS = [0, 0.0, 0.0, 0.0, 0.0]
    temp_dataset = get_dataset(train_file_path, select_columns = SELECT_COLUMNS, column_defaults = DEFAULTS)
    show_batch(temp_dataset)

    #Pack columns together
    print("\nPack columns with PackNumericFeatures class")
    NUMERIC_FEATURES = ['age', 'n_siblings_spouses', 'parch', 'fare']
    packed_train_data = raw_train_data.map(PackNumericFeatures(NUMERIC_FEATURES))
    packed_test_data = raw_test_data.map(PackNumericFeatures(NUMERIC_FEATURES))
    show_batch(packed_train_data)
#and then packs features into a single column
class PackNumericFeatures(object):
    """Dataset map function: pops the named numeric features from each
    example and packs them, cast to float32, into one 'numeric' column."""

    def __init__(self, names):
        self.names = names

    def __call__(self, features, labels):
        packed = [tf.cast(features.pop(name), tf.float32) for name in self.names]
        features['numeric'] = tf.stack(packed, axis=-1)
        return features, labels
def get_dataset(file_path, **kwargs):
    """Build a batched tf.data dataset from a Titanic CSV file.

    Uses batches of 5, a single epoch, '?' as the NA marker, and labels
    taken from the 'survived' column; extra keyword arguments are passed
    straight through to tf.data.experimental.make_csv_dataset.
    """
    return tf.data.experimental.make_csv_dataset(
        file_path,
        batch_size=5,
        label_name=LABEL_COLUMN,
        na_value="?",
        num_epochs=1,
        ignore_errors=True,
        **kwargs)
def show_batch(dataset):
    """Print the first batch of ``dataset``, one 'name: values' line per feature."""
    for features, _ in dataset.take(1):
        for name, column in features.items():
            print("{:20s}: {}".format(name, column.numpy()))
# Run the tutorial pipeline only when executed as a script.
if __name__ == "__main__":
    main()
"timturin@gmail.com"
] | timturin@gmail.com |
95e619405d693094e8b77ae8fb4146209735400f | cf862996fb55c25dd668f50d14cd662b0a28a853 | /djangostagram/users/urls.py | 6868bf2e1ac5b3efd57c91e5ebe38f5b0fa7d014 | [
"MIT"
] | permissive | hongsemy/InstagramWithDjango | fc117daae4f249e0c13a754682186a9e9dfac332 | 18cb273668809fb48d829e1ac11438c51505623a | refs/heads/main | 2023-07-14T15:58:25.482737 | 2021-08-19T09:18:16 | 2021-08-19T09:18:16 | 397,247,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.urls import path
from . import views
app_name = "users"  # URL namespace: reverse as "users:main" / "users:signup"

urlpatterns = [
    path('', views.main, name = 'main'),
    path('signup/', views.signup, name='signup')
    # Each route packs the incoming HTTP data into a request object and
    # hands it to the view given as the second argument (e.g. views.main);
    # data submitted from the HTML templates arrives inside that request.
]
| [
"ca711207@gmail.com"
] | ca711207@gmail.com |
44074f6a7dc371ac0f50ed51f5d05b5c89a93a7e | 981fbc25f4a8ef0695830d54c36e0e7c2087575c | /input_template.py | 3ebeae5ee3a6f7dbad4f1574bf6d0f216b007231 | [] | no_license | Sandy4321/CS_algorithm_scripts | 1b0984c25aab362c18767094f6c6252afd8b9f6b | 6eef6ac07ff07362ddaec850a47d7ad7053993b2 | refs/heads/master | 2021-01-15T10:07:18.940108 | 2015-06-08T23:27:25 | 2015-06-08T23:27:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | def solveMeSecond(a,b):
    return a+b  # tail of solveMeSecond(a, b): returns the sum of its inputs

# Python 2 script: raw_input() reads one line as a string.
n = int(raw_input()) #faster than n = input() , since input() executes the line as python command
for i in range(0,n):
    a, b = raw_input().split()
    a,b = int(a),int(b)
    res = solveMeSecond(a,b)
    print res
'''
Alternate code
n = int(raw_input())
for _ in range(n):
    a,b = map(int,raw_input().split())
    res = solveMeSecond(a,b)
    print res
'''
| [
"ymeng.ucla@gmail.com"
] | ymeng.ucla@gmail.com |
72c8cb17f13848ed423e520cd1fc6c649395fe4b | 148c979be2604c37e37ebdfcdbd0b228246220c8 | /hw2/hw2_float.py | 2e427e79b9180be8ba1a177b4f3b4e5b7fb97ec2 | [] | no_license | Bratuha12/Python_homeworks | 2a01b0dac4c24f34463b2e30898210a939671ddc | 82f301dcade1e230db4aa00d7eeab34f6db6e39e | refs/heads/main | 2023-08-19T07:26:28.781251 | 2021-10-01T08:18:43 | 2021-10-01T08:18:43 | 388,483,113 | 0 | 0 | null | 2021-08-01T19:46:15 | 2021-07-22T14:03:25 | Python | UTF-8 | Python | false | false | 522 | py | # ะะฒะพะด ะดัะพะฑะฝะพะณะพ ะทะฝะฐัะตะฝะธั ั ะบะปะฐะฒะธะฐัััั c ะฟะพัะปะตะดัััะตะน ะบะพะฝะฒะตััะฐัะธะตะน ะฒ int
# Read a floating-point value from the keyboard, then convert it to int.
# (Prompt/output strings are runtime text in Russian and kept verbatim.)
x_str = input("ะะฒะตะดะธัะต ัะธัะปะพ ั ะฟะปะฐะฒะฐััะตะน ะทะฐะฟััะพะน: ")
print("ะั ะฒะฒะตะปะธ: ", x_str)
print("ะขะธะฟ ะฟะพะปััะตะฝะฝะพะณะพ ะทะฝะฐัะตะฝะธั - ", type(x_str))

x_float = float(x_str)  # raises ValueError for non-numeric input
x_int = int(x_float)    # truncates toward zero

print("ะะพัะปะต ะบะพะฝะฒะตััะฐัะธะธ ะฒ int ะผั ะฟะพะปััะธะปะธ: ", x_int)
print("ะขะธะฟ ะทะฝะฐัะตะฝะธั ะฟะพัะปะต ะบะพะฝะฒะตััะฐัะธะธ - ", type(x_int))
| [
"dimjohn.rabota@gmail.com"
] | dimjohn.rabota@gmail.com |
1e3dfd6094aeb30d5c36a02ec70a509b20eb308d | 5399ab9aa0812076f76e6c8f3b5ec38369cdfb3b | /test02.py | 9ff793682dfbcfc4f3307fa530487fc2843ac4ef | [] | no_license | ochestra365/IOT-embeded-RasberryPi | 6615c1a918c79e3e5b497c2a81a5e3c9af06d7c6 | bd52c1e6c0e4fd48d1d121f461c63eef792a28e6 | refs/heads/main | 2023-06-15T19:57:08.434655 | 2021-07-15T05:39:44 | 2021-07-15T05:39:44 | 385,144,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #๊ตฌ๋ฌธ ํ
# ์คํธ -- severed tail of the header comment "#๊ตฌ๋ฌธ ํ
์คํธ" ("syntax test") split across lines by extraction
#initailize
n=0
#Loop
while True:
n=n+1
if(n==100):
break
#n์ด ์ง์๋ผ๋ฉด ์ถ๋ ฅํ ๊ฒ.
if((n%2)==0):
print(n)
a = 100
b = 80
if a>b:
print('max is {0}'.format(a))
else:
print('max is {0}'.format(b))
i=-45
if i>0:
print("{0} is positive".format(i))
elif i==0:
print("{0} is zero".format(i))
else:
print("{0} is negative".format(i))
for i in [0,1,2,3,4]:
print('{0}*3={1}'.format(i,i*3))
for i in range(5):
print(i*2)
m=0
while m<26:
m=m+2
if (m==20):
continue
print(m)
for i in range(5):
pass # ์๋ฌด ์ผ๋ ํ์ง ์์ ๋.
| [
"ochestra365@naver.com"
] | ochestra365@naver.com |
38d04b2074d7872432ea1dc304a7d29e68cff1fd | 622cb54f246f5eee143d375db66bc1d69691a84f | /project_netra_website/wsgi.py | b5b8a0954d14bc9080e684fc406ee9fb39fdaf4b | [] | no_license | ProjectNetra/projectNetraWebsite | 8018a850166f4c37e20e6276b0a1bd4080ed406b | 97479a049edd19f31a73f7779261fda224f9cc95 | refs/heads/main | 2023-06-19T08:54:13.320590 | 2021-07-16T19:52:14 | 2021-07-16T19:52:14 | 386,737,476 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | """
WSGI config for project_netra_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"project_netra_website.settings.dev")
application = get_wsgi_application()
| [
"uchicha.pk.27@gmail.com"
] | uchicha.pk.27@gmail.com |
3f6ae3557fb840b712ba31d66869c0d17cc0a93b | d5214b1331c9dae59d95ba5b3aa3e9f449ad6695 | /qPloneSkinDump/tags/0.7.3/skin_template/Extensions/utils.py | 725f5e878f168f0a5e16a43a46eab2ca68f14068 | [] | no_license | kroman0/products | 1661ee25a224c4b5f172f98110944f56136c77cf | f359bb64db22f468db5d1e411638790e94d535a2 | refs/heads/master | 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,739 | py | import os, sys, re, string
from StringIO import StringIO
from time import gmtime, strftime
from zLOG import LOG, INFO
from zExceptions import BadRequest
from App.config import getConfiguration
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.%(SKIN_PRODUCT_NAME)s.config import *
######################################################################
## IMPORTING UTILS ##
######################################################################
osp = os.path
ALLOWED_IMPORT_POLICY = ["only_new", "backup", "overwrite"]
INTRO_TO_INSTANCE = "< Started copying object files from Product import directory to Instance one."
SUMMARY_TO_INSTANCE = "> Finished copying."
INTRO_TO_ROOT = "< Started import %%s file[s] with '%%s' policy."
SUMMARY_TO_ROOT = "> Finished importing."
INTRO_CLEAN = "< Started cleaning Instance import directory."
SUMMARY_CLEAN = "> Finished cleaning."
CREXP_INVALID_ID = re.compile('^The id \"(.*?)\" is invalid - it is already in use.$', re.DOTALL|re.IGNORECASE|re.MULTILINE)
CSS_BASE_IDS_QPSD053 = ['id','expression','enabled','cookable','media','rel','title','rendering'] # supporting qPSD-0.5.3 version
################ CHECK IMPORTING ################
def checkIfImport():
""" Return if perform importing, based on checking
*zexp files in <SkinProduct>/import directory.
"""
instance_ipath, product_ipath = getImportedPathes()
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
if product_ilist:
return 1
return 0
################ IMPORTING TO PLONE'S IMPORT DIR ################
def getImportedPathes():
""" Return Plone instance and Skin product import pathes."""
# Based on instance path, construct import pathes
cfg = getConfiguration()
instance_ipath = osp.join(cfg.instancehome, "import")
product_ipath = osp.join(cfg.instancehome, 'Products', PRODUCT_NAME, "import")
# Check presence of Product import directory
if not osp.isdir(product_ipath):
raise BadRequest, "Skin Product's import directory '%%s' - does not exist or is'nt direcory" %% product_ipath
# Check presence of Instance import directory
if not osp.isdir(instance_ipath):
raise BadRequest, "Instance import directory '%%s' - does not exist or isn't direcory" %% instance_ipath
return [instance_ipath, product_ipath]
def copyFile(src_dir, dst_dir, f_name):
""" Copy file from src_dir to dst_dir under original name."""
try:
src_file = open(osp.join(src_dir, f_name),"rb")
dst_file = open(osp.join(dst_dir, f_name),"wb")
dst_file.write(src_file.read())
dst_file.close()
src_file.close()
except Exception, e:
msg = "!!! In copying files from <%%s> dir to <%%s> dir exception occur. Details: %%s." %% (src_dir,dst_dir, str(e))
print >> import_out, msg
LOG('performImportToPortal',INFO,'copyFile', msg)
def moveToTemp(same_instance_files, instance_ipath, temp_dir_path):
""" Move samenamed files from Instanse's dir to temp dir."""
os.mkdir(temp_dir_path) # Create temp back_[date] dir
try:
[copyFile(instance_ipath, temp_dir_path, f_name) for f_name in same_instance_files]
[os.remove(osp.join(instance_ipath, f_name)) for f_name in same_instance_files]
except Exception, e:
msg = "!!! Exception occur during moving files from Instance's dir to temp dir. Detaile:%%s." %% str(e)
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveToTemp', msg)
def copyToInstanceImport():
""" Perform copying imported files from <SkinProduct>/import dir
to Plone's instance import dir.
"""
print >> import_out, INTRO_TO_INSTANCE
instance_ipath, product_ipath = getImportedPathes()
# Compose temp dir back_[date] dir path in Instance import directory
temp_dir_id = "back_%%s" %% strftime("%%Y%%m%%d%%H%%M%%S", gmtime())
temp_dir_path = osp.join(instance_ipath, temp_dir_id)
# Get *.zexp files from Skin Product's import dir and Plone's instance import dir files
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
instance_ilist = [i for i in os.listdir(instance_ipath) \
if osp.isfile(osp.join(instance_ipath,i)) and i.endswith('.zexp')]
# Check for presence samenamed files in Instance and Product import directories.
same_instance_files = [f_name for f_name in instance_ilist if f_name in product_ilist]
if same_instance_files:
moveToTemp(same_instance_files, instance_ipath, temp_dir_path)
# Copy all *zexp files from Product's import dir to Instance's import dir
[copyFile(product_ipath, instance_ipath, f_name) for f_name in product_ilist]
print >> import_out, SUMMARY_TO_INSTANCE
return [instance_ipath, product_ipath, temp_dir_path, product_ilist]
################ IMPORTING TO PORTAL ################
def importObject(portal, file_name):
""" Work around old Zope bug in importing."""
try:
portal.manage_importObject(file_name)
except:
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def makeBackUp(portal, portal_objects, temp_dir_path, obj_id):
""" Perfom backup same named portal objects in temp folder."""
# Get id of temp folder-object
durty_path,temp_id = osp.split(temp_dir_path)
if not temp_id:
durty_path,temp_id = osp.split(durty_path)
# Get temp folder-object
if temp_id not in portal_objects:
portal.invokeFactory('Folder', id=temp_id)
print >> import_out, "! Created '%%s' backup directory with same-ids " \
"objects from portal root." %% temp_id
temp_dir = getattr(portal, temp_id)
# Move object with same id to temp folder-object
get_transaction().commit(1)
obj = portal.manage_cutObjects(ids=[obj_id])
temp_dir.manage_pasteObjects(obj)
print >> import_out, "! '%%s' Object moved from portal root to '%%s' backup directory." %% (obj_id, temp_id)
def performImport(portal, temp_dir_path, file_name):
""" Importing an object to portal."""
portal_objects = portal.objectIds()
try:
portal.manage_importObject(file_name)
except Exception, e:
msg = str(e)
is_invalid_id = CREXP_INVALID_ID.match(msg)
if is_invalid_id:
obj_id = is_invalid_id.group(1)
if IMPORT_POLICY == "only_new":
msg = "! Object with '%%s' id was not importing because it's already exist " \
"in portal root." %% obj_id
print >> import_out, msg
elif IMPORT_POLICY == "backup":
makeBackUp(portal, portal_objects, temp_dir_path, obj_id)
importObject(portal, file_name)
elif IMPORT_POLICY == "overwrite":
portal.manage_delObjects(ids=[obj_id])
importObject(portal, file_name)
else:
# work around old Zope bug in importing
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def importToPortalRoot(portal, product_file_names, temp_dir_path):
""" Import all objects from *zexp files to portal root (based on IMPORT_POLICY)."""
if not IMPORT_POLICY in ALLOWED_IMPORT_POLICY:
raise Exception("%%s - wrong import policy in '%%s/config.py' file. Must be one of the %%s" \
%% (IMPORT_POLICY, PRODUCT_NAME, ALLOWED_IMPORT_POLICY) )
print >> import_out, INTRO_TO_ROOT %% (product_file_names, IMPORT_POLICY)
for file_name in product_file_names:
try:
performImport(portal, temp_dir_path, file_name)
except Exception, error:
msg = '!!! Under "%%s" policy importing exception occur: %%s.' %% (IMPORT_POLICY, str(error))
print >> import_out, msg
LOG('performImportToPortal',INFO,'importToPortalRoot', msg)
print >> import_out, SUMMARY_TO_ROOT
################ CLEANING PLONE'S IMPORT DIR ################
def cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path):
""" Cleaning Plone's import dir."""
print >> import_out, INTRO_CLEAN
# Erase all copied *zexp files from Instance's import dir
for f_name in product_file_names:
f_path = osp.join(instance_ipath, f_name)
if osp.exists(f_path) and osp.isfile(f_path):
os.remove(f_path)
else:
msg = '! "%%s" file was not deleted from "%%s" import directory.' %%\
(f_name, osp.join(instance_ipath))
print >> import_out, msg
LOG('performImportToPortal',INFO,'cleanInstanceImport', msg)
# Move all files from temp back_[date] dir to Instance's import dir
if osp.exists(temp_dir_path) and osp.isdir(temp_dir_path):
f_names = os.listdir(temp_dir_path)
try:
[copyFile(temp_dir_path, instance_ipath, f_name) for f_name in f_names]
[os.remove(osp.join(temp_dir_path, f_name)) for f_name in f_names]
# Erase temp back_[date] dir
os.rmdir(temp_dir_path)
except Exception, e:
msg = "!!! In moving files from temp dir to Instance's import dir exception occur."
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveFromTempToImport', msg)
print >> import_out, SUMMARY_CLEAN
################ MAIN ################
def performImportToPortal(portal):
""" Import objects from Skin Product to Portal root."""
globals()['import_out'] = StringIO()
instance_ipath, product_ipath, temp_dir_path, product_file_names = copyToInstanceImport()
if product_file_names:
importToPortalRoot(portal, product_file_names, temp_dir_path)
cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path)
else:
print >> import_out, "!!! Failure importing: there is no file for importing to be found."
result = import_out
del globals()['import_out']
return result.getvalue()
######################################################################
## INSTALLATION/UNINSTALLATION UTILS ##
######################################################################
CSS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'media', 'rel', 'title', 'rendering', 'compression']
JS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'inline', 'compression']
def installSkin(portal, pp_up, out):
# Checking for presense SKIN_NAME in portal_skins directory view or among Skin Names
skinsTool = getToolByName(portal, 'portal_skins')
# Get unique product_skin_name and remember it in case of differ from SKIN_NAME.
product_skin_name = SKIN_NAME
skin_names = skinsTool.getSkinSelections()
if product_skin_name in skin_names:
idx = 0
while product_skin_name in skin_names:
product_skin_name = SKIN_NAME + str(idx)
idx += 1
addProperty(pp_up, 'q_actual_skin_name', product_skin_name, 'string', out)
# Add directory views
layer_skin_name = string.lower(SKIN_NAME)
addDirectoryViews(skinsTool, 'skins', GLOBALS)
print >> out, "- added '%%s' directory views to portal_skins." %% layer_skin_name
# Get Default skin and remember it for backup on uninstallig
default_skin = skinsTool.getDefaultSkin()
addProperty(pp_up, 'q_default_skin', default_skin, 'string', out)
# Building list of layers for NEW SKIN
base_path = skinsTool.getSkinPath(BASE_SKIN_NAME)
new_path = map( string.strip, string.split(base_path,',') )
if layer_skin_name in new_path :
print >> out, "- %%s layer already present in '%%s' skin." %% (layer_skin_name, BASE_SKIN_NAME)
# Remove layer_skin_name from current position.
del new_path[new_path.index(layer_skin_name)]
# Add layer_skin_name just after 'custom' position
try:
new_path.insert(new_path.index('custom')+1, layer_skin_name)
except ValueError:
new_path.append(layer_skin_name)
new_path = string.join(new_path, ', ')
# Add NEW Skin and set it as dafault
skinsTool.addSkinSelection(product_skin_name, new_path, make_default=1)
print >> out, "Added %%s skin, bassed on %%s and set as default." %% (product_skin_name, BASE_SKIN_NAME)
def uninstallSkin(skinsTool, actual_skin_name, initial_skin):
# Get 'portal_skins' object and list available skin names
# And remove SKIN_NAME from available skins, if it present
skin_names = skinsTool.getSkinSelections()
if actual_skin_name in skin_names :
skinsTool.manage_skinLayers(chosen=(actual_skin_name,), del_skin=1, REQUEST=None)
skin_names.remove(actual_skin_name)
# Remove product skin directory from skins tool
# AND Remove skin-product layer from available skins
skin_layer = SKIN_NAME.lower()
if skin_layer in skinsTool.objectIds():
skinsTool.manage_delObjects(skin_layer)
for skin_name in skin_names:
path = skinsTool.getSkinPath(skin_name)
path = [i.strip() for i in path.split(',')]
if skin_layer in path:
path.remove(skin_layer)
path = ','.join(path)
skinsTool.addSkinSelection(skin_name, path)
# If current default skin == actual_skin_name
# Set default skin in initial one (if initial skin still exist)
# or in 1st from available skin names list.
current_default_skin = skinsTool.getDefaultSkin()
if current_default_skin == actual_skin_name:
if initial_skin in skin_names :
skinsTool.manage_properties(default_skin=initial_skin, REQUEST=None)
elif len(skin_names)>0 :
skinsTool.manage_properties(default_skin=skin_names[0], REQUEST=None)
def addProperty(p_sheet, p_id, p_value, p_type, out):
if p_sheet.hasProperty(p_id):
p_sheet._delProperty(p_id)
p_sheet._setProperty(p_id, p_value, p_type)
print >> out, "... added %%s PropertySheet to %%s." %% (p_id, p_sheet.getId())
def getResourceProperties(obj, prop_list, dflt=''):
""" Return list of 2 items list-[property name, property value]."""
properties=[]
for prop in prop_list:
accessor = getattr(obj, 'get%%s' %% prop.capitalize(), None)
if accessor:
properties.append([prop, accessor() or dflt])
return properties
def registerResource(pp_up, portal_res, resRegisterFunction, out \
,RESOURCE_SKIN_LIST, SKIN_RES_REGDATA, UP_PROPERTY, RES_REG_PROPS):
""" Register resources in portal's registry, remember existant settings."""
# Get original registered resources
portal_res_srings = []
for r in portal_res.getResources():
portal_res_srings.append(";".join(['%%s::%%s'%%(r[0],str(r[1])) \
for r in getResourceProperties(r, RES_REG_PROPS)]))
addProperty(pp_up, UP_PROPERTY, portal_res_srings, 'lines', out)
# Tune Resource registry according to new skin needs
unexistent = [] # list of default resources,
# which present in Skin-product, BUT absent in portal
portal_res_ids = portal_res.getResourceIds()
for res_dict in SKIN_RES_REGDATA:
if res_dict['id'] not in portal_res_ids:
# It's interesting - Resource Registry allow adding unexistent resource - use this
resRegisterFunction(**res_dict)
if res_dict['id'] not in RESOURCE_SKIN_LIST:
unexistent.append(res_dict['id'])
else:
pos = portal_res.getResourcePosition(res_dict['id'])
portal_res.unregisterResource(res_dict['id'])
resRegisterFunction(**res_dict)
portal_res.moveResource(res_dict['id'], pos)
if unexistent:
print >> out, "!!! - BAD: your Resource Regestry have'nt %%s resource(s), which may lead to some problems." %% unexistent
def getVersion(res_list):
"""Check version of skin product generator."""
return (res_list and not '::' in res_list[0] and '0.5') or '0.7'
def uninstallResource(portal_res, original_res_list, RESOURCE_SKIN_LIST, resRegisterFunction):
# Prepare Resource Registry data for backup to original state
original_res_regestry = {}
genVersion = getVersion(original_res_list)
for rec in original_res_list:
resource = {}
if genVersion == '0.7':
[resource.update({prop.split('::')[0]:prop.split('::')[1]}) for prop in rec.split(";")]
elif genVersion == '0.5':
props = rec.split(";")
[resource.update({CSS_BASE_IDS_QPSD053[i]:props[i]}) for i in range(len(CSS_BASE_IDS_QPSD053))]
original_res_regestry[resource.pop('id')] = resource
# Work up actual Resource Registry
res_dict = portal_res.getResourcesDict()
for res_id in res_dict.keys():
# Remove from Resource Registry Skin product's resources
if res_id in RESOURCE_SKIN_LIST \
and res_id not in original_res_regestry.keys():
portal_res.unregisterResource(res_id)
continue
# Backup 'enabled' property Registry's resourses to it's original state
if original_res_regestry.has_key(res_id):
act_Enabled_state = res_dict[res_id].getEnabled()
orig_Enabled_state = original_res_regestry[res_id]['enabled']
if act_Enabled_state != orig_Enabled_state:
pos = portal_res.getResourcePosition(res_id)
resource = res_dict[res_id]
res = original_res_regestry[res_id]
portal_res.unregisterResource(res_id)
resRegisterFunction(res_id, **res)
portal_res.moveResource(res_id, pos)
def customizeSlots(portal, pp_up, out):
# Get original Site's column lists
orig_left_slots = left_column = list(portal.left_slots)
orig_right_slots = right_column = list(portal.right_slots)
# Save original Site's LEFT and RIGHT slots
addProperty(pp_up, 'q_left_slots', orig_left_slots, 'lines', out)
addProperty(pp_up, 'q_right_slots', orig_right_slots, 'lines', out)
# blend-with-site - to portal's slots adding only new one from skin-porduct
# blend-with-skin - portal slots forming in the following manner:
# first adding skin-porduct's slots, than new one from portal
# replace - to portal's slots forming only from the skin-porduct's slot list
if SLOT_FORMING == "blend_with_skin":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS,
orig_left_slots, orig_right_slots, MAIN_COLUMN)
elif SLOT_FORMING == "blend_with_site":
left_column, right_column = formSlotsColumn(orig_left_slots, orig_right_slots,
LEFT_SLOTS, RIGHT_SLOTS, MAIN_COLUMN )
elif SLOT_FORMING == "replace":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS, [], [], MAIN_COLUMN)
# REPLACE SITE's column slots
portal.left_slots = tuple(left_column)
portal.right_slots = tuple(right_column)
print >> out, "Complited portal slots customization ..."
# main_column ("left" / "right" / "both") mean which of the MAIN column is favour
def formSlotsColumn(main_left, main_right, slave_left=[], slave_right=[], main_column="both"):
result_left = main_left
result_right = main_right
if main_column == "left":
# 1) APPEND to MAIN_LEFT list *new for main_left column* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
# 3) REMOVE slots from MAIN_RIGHT list, which are *doubled* in MAIN_LEFT
[result_left.append(slot) for slot in slave_left if slot not in result_left]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
[result_right.remove(slot) for slot in result_left if slot in result_right]
elif main_column == "right":
# 1) APPEND to MAIN_LEFT list *new for main_right column* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
# 3) REMOVE slots from MAIN_LEFT list, which are *doubled* in MAIN_RIGHT
[result_right.append(slot) for slot in slave_right if slot not in result_right]
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_left.remove(slot) for slot in result_right if slot in result_left]
elif main_column == "both":
# 1) APPEND to MAIN_LEFT list *new for both main columns* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
return [result_left, result_right]
def getProperty(pp, ps, id, default=[]):
""" Get property from portal_properties/[property_sheet]"""
res = default
if ps in pp.objectIds() and pp[ps].hasProperty(id):
res = pp[ps].getProperty(id, default)
return res
| [
"mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946"
] | mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946 |
e8beca5ce87695f9d15372be7f28e9d6e5d42874 | d529052185eb31608b506a136bbda5df5642a7b3 | /CSE310/Lab 4/PingServer.py | 8a45c6228d6e443bc6bb71b9a16cf6d445d1c66c | [] | no_license | SirKitboard/Notes | c015a1269b79753f8ed574c8278b047e76c72928 | 27475e22145842c883e5d98737b5b9b833f3bdc5 | refs/heads/master | 2021-08-19T17:03:58.465814 | 2017-11-27T01:49:46 | 2017-11-27T01:49:46 | 51,348,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # pylint: disable=W,C
# UDP Server
import random
from socket import *
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('',8920))
while True:
rand = random.randint(0,10)
message, address = serverSocket.recvfrom(1024)
message.upper()
if rand < 4:
continue
serverSocket.sendto(message, address)
| [
"adibalwani@gmail.com"
] | adibalwani@gmail.com |
8b892512045bcf74b56df0a6ff066dd0d4a27861 | ecc426c035a22e4bbbb44572d24ad566dd1bb945 | /_archived_versions/20180107_invalidated_archives/20171117_scikit_boosted_trees/src/data_scripts/elb_repo.py | 8173dc8e2dc74ff17441316e63d85981a65b561c | [] | no_license | dfaivre/python-ml-poc-2018 | 14a5acb46888d3bf11373cfcb7e0ee570ce42346 | 932be1a3007473e6748771fa1629b677e252627d | refs/heads/master | 2023-01-21T18:30:17.734781 | 2020-11-23T01:26:19 | 2020-11-23T01:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | from sqlalchemy import create_engine, text as sql_text
import util.config
settings = util.config.load_settings()
field_info_db = create_engine(settings['db']['fieldinfo'])
def get_enroll_year_id(enroll_id: int):
sql = """
SELECT ym.ID AS YearId
FROM FieldInfo.trials.TrialEnrollment te
JOIN PlatformManager..YearIDMapping ym ON ym.YearUID = te.YearUID
WHERE te.id = :enroll_id"""
return field_info_db.execute(sql_text(sql), enroll_id=enroll_id).scalar()
def get_elb_harvest_year_ids(year=2016):
sql = """
SELECT DISTINCT (l.yearid) AS yearid
FROM Wolverine.layers.SourceLayerDataType dt
JOIN Wolverine.layers.SourceLayer AS l ON l.SourceLayerDataTypeID = dt.ID
JOIN PlatformManager..YearIDMapping ym ON ym.id = l.YearID
WHERE dt.name LIKE '%elb harvest%' AND ym.Year = :year
ORDER BY yearid"""
return [r for (r,) in list(
field_info_db.execute(sql_text(sql), year=year)
)]
| [
"dfaivre@premiercrop.com"
] | dfaivre@premiercrop.com |
d42a9e9ffd48430e27071e5d3fb645a67a7da413 | 99ab72000a8e74528a7950e4b4d8bea15e12b5b5 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/bigquery/v2/bigquery_v2_messages.py | acd332407333ff5c233f603e1779a12babd4e201 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | harshit496/smart_assistant | 306014c6d4c4e4fe9da3c513a55383162aedfa3a | 22bffbaea7fac6a8c2fb5e32a1d6c9ceed2129ed | refs/heads/master | 2021-06-28T16:03:41.299321 | 2020-11-25T14:27:44 | 2020-11-25T14:27:44 | 171,503,815 | 0 | 3 | null | 2020-07-25T07:08:25 | 2019-02-19T15:57:51 | Python | UTF-8 | Python | false | false | 129,112 | py | """Generated message classes for bigquery version v2.
A data platform for customers to create, manage, share and query data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigquery'
class BigQueryModelTraining(_messages.Message):
r"""A BigQueryModelTraining object.
Fields:
currentIteration: [Output-only, Beta] Index of current ML training
iteration. Updated during create model query job to show job progress.
expectedTotalIterations: [Output-only, Beta] Expected number of iterations
for the create model query job specified as num_iterations in the input
query. The actual total number of iterations may be less than this
number due to early stop.
"""
currentIteration = _messages.IntegerField(1, variant=_messages.Variant.INT32)
expectedTotalIterations = _messages.IntegerField(2)
class BigqueryDatasetsDeleteRequest(_messages.Message):
r"""A BigqueryDatasetsDeleteRequest object.
Fields:
datasetId: Dataset ID of dataset being deleted
deleteContents: If True, delete all the tables in the dataset. If False
and the dataset contains tables, the request will fail. Default is False
projectId: Project ID of the dataset being deleted
"""
datasetId = _messages.StringField(1, required=True)
deleteContents = _messages.BooleanField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsDeleteResponse(_messages.Message):
r"""An empty BigqueryDatasetsDelete response."""
class BigqueryDatasetsGetRequest(_messages.Message):
r"""A BigqueryDatasetsGetRequest object.
Fields:
datasetId: Dataset ID of the requested dataset
projectId: Project ID of the requested dataset
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsInsertRequest(_messages.Message):
r"""A BigqueryDatasetsInsertRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
projectId: Project ID of the new dataset
"""
dataset = _messages.MessageField('Dataset', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsListRequest(_messages.Message):
r"""A BigqueryDatasetsListRequest object.
Fields:
all: Whether to list all datasets, including hidden ones
filter: An expression for filtering the results of the request by label.
The syntax is "labels.<name>[:<value>]". Multiple filters can be ANDed
together by connecting with a space. Example:
"labels.department:receiving labels.active". See Filtering datasets
using labels for details.
maxResults: The maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the datasets to be listed
"""
all = _messages.BooleanField(1)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
class BigqueryDatasetsPatchRequest(_messages.Message):
r"""A BigqueryDatasetsPatchRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsUpdateRequest(_messages.Message):
r"""A BigqueryDatasetsUpdateRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsCancelRequest(_messages.Message):
r"""A BigqueryJobsCancelRequest object.
Fields:
jobId: [Required] Job ID of the job to cancel
location: The geographic location of the job. Required except for US and
EU. See details at https://cloud.google.com/bigquery/docs/locations#spec
ifying_your_location.
projectId: [Required] Project ID of the job to cancel
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsGetQueryResultsRequest(_messages.Message):
r"""A BigqueryJobsGetQueryResultsRequest object.
Fields:
jobId: [Required] Job ID of the query job
location: The geographic location where the job should run. Required
except for US and EU. See details at https://cloud.google.com/bigquery/d
ocs/locations#specifying_your_location.
maxResults: Maximum number of results to read
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: [Required] Project ID of the query job
startIndex: Zero-based index of the starting row
timeoutMs: How long to wait for the query to complete, in milliseconds,
before returning. Default is 10 seconds. If the timeout passes before
the job completes, the 'jobComplete' field in the response will be false
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
startIndex = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
timeoutMs = _messages.IntegerField(7, variant=_messages.Variant.UINT32)
class BigqueryJobsGetRequest(_messages.Message):
r"""A BigqueryJobsGetRequest object.
Fields:
jobId: [Required] Job ID of the requested job
location: The geographic location of the job. Required except for US and
EU. See details at https://cloud.google.com/bigquery/docs/locations#spec
ifying_your_location.
projectId: [Required] Project ID of the requested job
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsInsertRequest(_messages.Message):
r"""A BigqueryJobsInsertRequest object.
Fields:
job: A Job resource to be passed as the request body.
projectId: Project ID of the project that will be billed for the job
"""
job = _messages.MessageField('Job', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsListRequest(_messages.Message):
r"""A BigqueryJobsListRequest object.
Enums:
ProjectionValueValuesEnum: Restrict information returned to a set of
selected fields
StateFilterValueValuesEnum: Filter for job state
Fields:
allUsers: Whether to display jobs owned by all users in the project.
Default false
maxCreationTime: Max value for job creation time, in milliseconds since
the POSIX epoch. If set, only jobs created before or at this timestamp
are returned
maxResults: Maximum number of results to return
minCreationTime: Min value for job creation time, in milliseconds since
the POSIX epoch. If set, only jobs created after or at this timestamp
are returned
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the jobs to list
projection: Restrict information returned to a set of selected fields
stateFilter: Filter for job state
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Restrict information returned to a set of selected fields
Values:
full: Includes all job data
minimal: Does not include the job configuration
"""
full = 0
minimal = 1
class StateFilterValueValuesEnum(_messages.Enum):
r"""Filter for job state
Values:
done: Finished jobs
pending: Pending jobs
running: Running jobs
"""
done = 0
pending = 1
running = 2
allUsers = _messages.BooleanField(1)
maxCreationTime = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
minCreationTime = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
pageToken = _messages.StringField(5)
projectId = _messages.StringField(6, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
stateFilter = _messages.EnumField('StateFilterValueValuesEnum', 8, repeated=True)
class BigqueryJobsQueryRequest(_messages.Message):
  r"""A BigqueryJobsQueryRequest object.

  Fields:
    projectId: Project ID of the project billed for the query
    queryRequest: A QueryRequest resource to be passed as the request body.
  """

  projectId = _messages.StringField(1, required=True)  # required path parameter
  queryRequest = _messages.MessageField('QueryRequest', 2)  # HTTP request body
class BigqueryProjectsGetServiceAccountRequest(_messages.Message):
  r"""A BigqueryProjectsGetServiceAccountRequest object.

  Fields:
    projectId: Project ID for which the service account is requested.
  """

  projectId = _messages.StringField(1, required=True)  # required path parameter
class BigqueryProjectsListRequest(_messages.Message):
  r"""A BigqueryProjectsListRequest object.

  Fields:
    maxResults: Maximum number of results to return
    pageToken: Page token, returned by a previous call, to request the next
      page of results
  """

  maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(2)
class BigqueryTabledataInsertAllRequest(_messages.Message):
  r"""A BigqueryTabledataInsertAllRequest object.

  Fields:
    datasetId: Dataset ID of the destination table.
    projectId: Project ID of the destination table.
    tableDataInsertAllRequest: A TableDataInsertAllRequest resource to be
      passed as the request body.
    tableId: Table ID of the destination table.
  """

  # The three required fields identify the destination table in the URL path.
  datasetId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  tableDataInsertAllRequest = _messages.MessageField('TableDataInsertAllRequest', 3)  # HTTP request body
  tableId = _messages.StringField(4, required=True)
class BigqueryTabledataListRequest(_messages.Message):
  r"""A BigqueryTabledataListRequest object.

  Fields:
    datasetId: Dataset ID of the table to read
    maxResults: Maximum number of results to return
    pageToken: Page token, returned by a previous call, identifying the result
      set
    projectId: Project ID of the table to read
    selectedFields: List of fields to return (comma-separated). If
      unspecified, all fields are returned
    startIndex: Zero-based index of the starting row to read
    tableId: Table ID of the table to read
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)  # required path parameter
  selectedFields = _messages.StringField(5)
  # Row offset is an unsigned 64-bit value (UINT64 variant).
  startIndex = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
  tableId = _messages.StringField(7, required=True)  # required path parameter
class BigqueryTablesDeleteRequest(_messages.Message):
  r"""A BigqueryTablesDeleteRequest object.

  Fields:
    datasetId: Dataset ID of the table to delete
    projectId: Project ID of the table to delete
    tableId: Table ID of the table to delete
  """

  # All three fields are required path parameters identifying the table.
  datasetId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  tableId = _messages.StringField(3, required=True)
class BigqueryTablesDeleteResponse(_messages.Message):
  r"""An empty BigqueryTablesDelete response.

  The tables.delete method returns no body, so this message has no fields.
  """
class BigqueryTablesGetRequest(_messages.Message):
  r"""A BigqueryTablesGetRequest object.

  Fields:
    datasetId: Dataset ID of the requested table
    projectId: Project ID of the requested table
    selectedFields: List of fields to return (comma-separated). If
      unspecified, all fields are returned
    tableId: Table ID of the requested table
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  projectId = _messages.StringField(2, required=True)  # required path parameter
  selectedFields = _messages.StringField(3)  # optional comma-separated field mask
  tableId = _messages.StringField(4, required=True)  # required path parameter
class BigqueryTablesInsertRequest(_messages.Message):
  r"""A BigqueryTablesInsertRequest object.

  Fields:
    datasetId: Dataset ID of the new table
    projectId: Project ID of the new table
    table: A Table resource to be passed as the request body.
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  projectId = _messages.StringField(2, required=True)  # required path parameter
  table = _messages.MessageField('Table', 3)  # HTTP request body
class BigqueryTablesListRequest(_messages.Message):
  r"""A BigqueryTablesListRequest object.

  Fields:
    datasetId: Dataset ID of the tables to list
    maxResults: Maximum number of results to return
    pageToken: Page token, returned by a previous call, to request the next
      page of results
    projectId: Project ID of the tables to list
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)  # required path parameter
class BigqueryTablesPatchRequest(_messages.Message):
  r"""A BigqueryTablesPatchRequest object.

  Fields:
    datasetId: Dataset ID of the table to update
    projectId: Project ID of the table to update
    table: A Table resource to be passed as the request body.
    tableId: Table ID of the table to update
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  projectId = _messages.StringField(2, required=True)  # required path parameter
  table = _messages.MessageField('Table', 3)  # HTTP request body (partial update)
  tableId = _messages.StringField(4, required=True)  # required path parameter
class BigqueryTablesUpdateRequest(_messages.Message):
  r"""A BigqueryTablesUpdateRequest object.

  Fields:
    datasetId: Dataset ID of the table to update
    projectId: Project ID of the table to update
    table: A Table resource to be passed as the request body.
    tableId: Table ID of the table to update
  """

  datasetId = _messages.StringField(1, required=True)  # required path parameter
  projectId = _messages.StringField(2, required=True)  # required path parameter
  table = _messages.MessageField('Table', 3)  # HTTP request body (full replacement)
  tableId = _messages.StringField(4, required=True)  # required path parameter
class BigtableColumn(_messages.Message):
  r"""A BigtableColumn object.

  Fields:
    encoding: [Optional] The encoding of the values when the type is not
      STRING. Acceptable encoding values are: TEXT - indicates values are
      alphanumeric text strings. BINARY - indicates values are encoded using
      HBase Bytes.toBytes family of functions. 'encoding' can also be set at
      the column family level. However, the setting at this level takes
      precedence if 'encoding' is set at both levels.
    fieldName: [Optional] If the qualifier is not a valid BigQuery field
      identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier
      must be provided as the column field name and is used as field name in
      queries.
    onlyReadLatest: [Optional] If this is set, only the latest version of
      value in this column are exposed. 'onlyReadLatest' can also be set at
      the column family level. However, the setting at this level takes
      precedence if 'onlyReadLatest' is set at both levels.
    qualifierEncoded: [Required] Qualifier of the column. Columns in the
      parent column family that has this exact qualifier are exposed as .
      field. If the qualifier is valid UTF-8 string, it can be specified in
      the qualifier_string field. Otherwise, a base-64 encoded value must be
      set to qualifier_encoded. The column field name is the same as the
      column qualifier. However, if the qualifier is not a valid BigQuery
      field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid
      identifier must be provided as field_name.
    qualifierString: A string attribute.
    type: [Optional] The type to convert the value in cells of this column.
      The values are expected to be encoded using HBase Bytes.toBytes function
      when using the BINARY encoding value. Following BigQuery types are
      allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
      type is BYTES. 'type' can also be set at the column family level.
      However, the setting at this level takes precedence if 'type' is set at
      both levels.
  """

  encoding = _messages.StringField(1)
  fieldName = _messages.StringField(2)
  onlyReadLatest = _messages.BooleanField(3)
  # Raw bytes field: carries the base-64-decoded column qualifier.
  qualifierEncoded = _messages.BytesField(4)
  qualifierString = _messages.StringField(5)
  type = _messages.StringField(6)
class BigtableColumnFamily(_messages.Message):
  r"""A BigtableColumnFamily object.

  Fields:
    columns: [Optional] Lists of columns that should be exposed as individual
      fields as opposed to a list of (column name, value) pairs. All columns
      whose qualifier matches a qualifier in this list can be accessed as ..
      Other columns can be accessed as a list through .Column field.
    encoding: [Optional] The encoding of the values when the type is not
      STRING. Acceptable encoding values are: TEXT - indicates values are
      alphanumeric text strings. BINARY - indicates values are encoded using
      HBase Bytes.toBytes family of functions. This can be overridden for a
      specific column by listing that column in 'columns' and specifying an
      encoding for it.
    familyId: Identifier of the column family.
    onlyReadLatest: [Optional] If this is set only the latest version of value
      are exposed for all columns in this column family. This can be
      overridden for a specific column by listing that column in 'columns' and
      specifying a different setting for that column.
    type: [Optional] The type to convert the value in cells of this column
      family. The values are expected to be encoded using HBase Bytes.toBytes
      function when using the BINARY encoding value. Following BigQuery types
      are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN
      Default type is BYTES. This can be overridden for a specific column by
      listing that column in 'columns' and specifying a type for it.
  """

  # Family-level settings act as defaults; per-column settings in `columns`
  # take precedence (see BigtableColumn).
  columns = _messages.MessageField('BigtableColumn', 1, repeated=True)
  encoding = _messages.StringField(2)
  familyId = _messages.StringField(3)
  onlyReadLatest = _messages.BooleanField(4)
  type = _messages.StringField(5)
class BigtableOptions(_messages.Message):
  r"""A BigtableOptions object.

  Fields:
    columnFamilies: [Optional] List of column families to expose in the table
      schema along with their types. This list restricts the column families
      that can be referenced in queries and specifies their value types. You
      can use this list to do type conversions - see the 'type' field for more
      details. If you leave this list empty, all column families are present
      in the table schema and their values are read as BYTES. During a query
      only the column families referenced in that query are read from
      Bigtable.
    ignoreUnspecifiedColumnFamilies: [Optional] If field is true, then the
      column families that are not specified in columnFamilies list are not
      exposed in the table schema. Otherwise, they are read with BYTES type
      values. The default value is false.
    readRowkeyAsString: [Optional] If field is true, then the rowkey column
      families will be read and converted to string. Otherwise they are read
      with BYTES type values and users need to manually cast them with CAST if
      necessary. The default value is false.
  """

  columnFamilies = _messages.MessageField('BigtableColumnFamily', 1, repeated=True)
  ignoreUnspecifiedColumnFamilies = _messages.BooleanField(2)
  readRowkeyAsString = _messages.BooleanField(3)
class Clustering(_messages.Message):
  r"""A Clustering object.

  Fields:
    fields: [Repeated] One or more fields on which data should be clustered.
      Only top-level, non-repeated, simple-type fields are supported. When you
      cluster a table using multiple columns, the order of columns you specify
      is important. The order of the specified columns determines the sort
      order of the data.
  """

  # Order of the repeated field is significant (determines sort order).
  fields = _messages.StringField(1, repeated=True)
class CsvOptions(_messages.Message):
  r"""A CsvOptions object.

  Fields:
    allowJaggedRows: [Optional] Indicates if BigQuery should accept rows that
      are missing trailing optional columns. If true, BigQuery treats missing
      trailing columns as null values. If false, records with missing trailing
      columns are treated as bad records, and if there are too many bad
      records, an invalid error is returned in the job result. The default
      value is false.
    allowQuotedNewlines: [Optional] Indicates if BigQuery should allow quoted
      data sections that contain newline characters in a CSV file. The default
      value is false.
    encoding: [Optional] The character encoding of the data. The supported
      values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
      decodes the data after the raw, binary data has been split using the
      values of the quote and fieldDelimiter properties.
    fieldDelimiter: [Optional] The separator for fields in a CSV file.
      BigQuery converts the string to ISO-8859-1 encoding, and then uses the
      first byte of the encoded string to split the data in its raw, binary
      state. BigQuery also supports the escape sequence "\t" to specify a tab
      separator. The default value is a comma (',').
    quote: [Optional] The value that is used to quote data sections in a CSV
      file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
      the first byte of the encoded string to split the data in its raw,
      binary state. The default value is a double-quote ('"'). If your data
      does not contain quoted sections, set the property value to an empty
      string. If your data contains quoted newline characters, you must also
      set the allowQuotedNewlines property to true.
    skipLeadingRows: [Optional] The number of rows at the top of a CSV file
      that BigQuery will skip when reading the data. The default value is 0.
      This property is useful if you have header rows in the file that should
      be skipped.
  """

  allowJaggedRows = _messages.BooleanField(1)
  allowQuotedNewlines = _messages.BooleanField(2)
  encoding = _messages.StringField(3)
  fieldDelimiter = _messages.StringField(4)
  # Client-side default matches the API default quote character ('"').
  quote = _messages.StringField(5, default=u'"')
  skipLeadingRows = _messages.IntegerField(6)
class Dataset(_messages.Message):
  r"""A Dataset object.

  Messages:
    AccessValueListEntry: A AccessValueListEntry object.
    LabelsValue: The labels associated with this dataset. You can use these to
      organize and group your datasets. You can set this property when
      inserting or updating a dataset. See Creating and Updating Dataset
      Labels for more information.

  Fields:
    access: [Optional] An array of objects that define dataset access for one
      or more entities. You can set this property when inserting or updating a
      dataset in order to control who is allowed to access the data. If
      unspecified at dataset creation time, BigQuery adds default dataset
      access for the following entities: access.specialGroup: projectReaders;
      access.role: READER; access.specialGroup: projectWriters; access.role:
      WRITER; access.specialGroup: projectOwners; access.role: OWNER;
      access.userByEmail: [dataset creator email]; access.role: OWNER;
    creationTime: [Output-only] The time when this dataset was created, in
      milliseconds since the epoch.
    datasetReference: [Required] A reference that identifies the dataset.
    defaultPartitionExpirationMs: [Optional] The default partition expiration
      for all partitioned tables in the dataset, in milliseconds. Once this
      property is set, all newly-created partitioned tables in the dataset
      will have an expirationMs property in the timePartitioning settings set
      to this value, and changing the value will only affect new tables, not
      existing ones. The storage in a partition will have an expiration time
      of its partition time plus this value. Setting this property overrides
      the use of defaultTableExpirationMs for partitioned tables: only one of
      defaultTableExpirationMs and defaultPartitionExpirationMs will be used
      for any new partitioned table. If you provide an explicit
      timePartitioning.expirationMs when creating or updating a partitioned
      table, that value takes precedence over the default partition expiration
      time indicated by this property.
    defaultTableExpirationMs: [Optional] The default lifetime of all tables in
      the dataset, in milliseconds. The minimum value is 3600000 milliseconds
      (one hour). Once this property is set, all newly-created tables in the
      dataset will have an expirationTime property set to the creation time
      plus the value in this property, and changing the value will only affect
      new tables, not existing ones. When the expirationTime for a given table
      is reached, that table will be deleted automatically. If a table's
      expirationTime is modified or removed before the table expires, or if
      you provide an explicit expirationTime when creating a table, that value
      takes precedence over the default expiration time indicated by this
      property.
    description: [Optional] A user-friendly description of the dataset.
    etag: [Output-only] A hash of the resource.
    friendlyName: [Optional] A descriptive name for the dataset.
    id: [Output-only] The fully-qualified unique name of the dataset in the
      format projectId:datasetId. The dataset name without the project name is
      given in the datasetId field. When creating a new dataset, leave this
      field blank, and instead specify the datasetId field.
    kind: [Output-only] The resource type.
    labels: The labels associated with this dataset. You can use these to
      organize and group your datasets. You can set this property when
      inserting or updating a dataset. See Creating and Updating Dataset
      Labels for more information.
    lastModifiedTime: [Output-only] The date when this dataset or any of its
      tables was last modified, in milliseconds since the epoch.
    location: The geographic location where the dataset should reside. The
      default value is US. See details at
      https://cloud.google.com/bigquery/docs/locations.
    selfLink: [Output-only] A URL that can be used to access the resource
      again. You can use this URL in Get or Update requests to the resource.
  """

  class AccessValueListEntry(_messages.Message):
    r"""A AccessValueListEntry object.

    Fields:
      domain: [Pick one] A domain to grant access to. Any users signed in with
        the domain specified will be granted the specified access. Example:
        "example.com". Maps to IAM policy member "domain:DOMAIN".
      groupByEmail: [Pick one] An email address of a Google Group to grant
        access to. Maps to IAM policy member "group:GROUP".
      iamMember: [Pick one] Some other type of member that appears in the IAM
        Policy but isn't a user, group, domain, or special group.
      role: [Required] Describes the rights granted to the user specified by
        the other member of the access object. The following string values are
        supported: READER, WRITER, OWNER.
      specialGroup: [Pick one] A special group to grant access to. Possible
        values include: projectOwners: Owners of the enclosing project.
        projectReaders: Readers of the enclosing project. projectWriters:
        Writers of the enclosing project. allAuthenticatedUsers: All
        authenticated BigQuery users. Maps to similarly-named IAM members.
      userByEmail: [Pick one] An email address of a user to grant access to.
        For example: fred@example.com. Maps to IAM policy member "user:EMAIL"
        or "serviceAccount:EMAIL".
      view: [Pick one] A view from a different dataset to grant access to.
        Queries executed against that view will have read access to tables in
        this dataset. The role field is not required when this field is set.
        If that view is updated by any user, access to the view needs to be
        granted again via an update operation.
    """

    domain = _messages.StringField(1)
    groupByEmail = _messages.StringField(2)
    iamMember = _messages.StringField(3)
    role = _messages.StringField(4)
    specialGroup = _messages.StringField(5)
    userByEmail = _messages.StringField(6)
    view = _messages.MessageField('TableReference', 7)

  # Map type: unrecognized JSON keys are captured as additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""The labels associated with this dataset. You can use these to organize
    and group your datasets. You can set this property when inserting or
    updating a dataset. See Creating and Updating Dataset Labels for more
    information.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  access = _messages.MessageField('AccessValueListEntry', 1, repeated=True)
  creationTime = _messages.IntegerField(2)
  datasetReference = _messages.MessageField('DatasetReference', 3)
  defaultPartitionExpirationMs = _messages.IntegerField(4)
  defaultTableExpirationMs = _messages.IntegerField(5)
  description = _messages.StringField(6)
  etag = _messages.StringField(7)
  friendlyName = _messages.StringField(8)
  id = _messages.StringField(9)
  # Default mirrors the API's fixed resource-type string for this message.
  kind = _messages.StringField(10, default=u'bigquery#dataset')
  labels = _messages.MessageField('LabelsValue', 11)
  lastModifiedTime = _messages.IntegerField(12)
  location = _messages.StringField(13)
  selfLink = _messages.StringField(14)
class DatasetList(_messages.Message):
  r"""A DatasetList object.

  Messages:
    DatasetsValueListEntry: A DatasetsValueListEntry object.

  Fields:
    datasets: An array of the dataset resources in the project. Each resource
      contains basic information. For full information about a particular
      dataset resource, use the Datasets: get method. This property is omitted
      when there are no datasets in the project.
    etag: A hash value of the results page. You can use this property to
      determine if the page has changed since the last request.
    kind: The list type. This property always returns the value
      "bigquery#datasetList".
    nextPageToken: A token that can be used to request the next results page.
      This property is omitted on the final results page.
  """

  class DatasetsValueListEntry(_messages.Message):
    r"""A DatasetsValueListEntry object.

    Messages:
      LabelsValue: The labels associated with this dataset. You can use these
        to organize and group your datasets.

    Fields:
      datasetReference: The dataset reference. Use this property to access
        specific parts of the dataset's ID, such as project ID or dataset ID.
      friendlyName: A descriptive name for the dataset, if one exists.
      id: The fully-qualified, unique, opaque ID of the dataset.
      kind: The resource type. This property always returns the value
        "bigquery#dataset".
      labels: The labels associated with this dataset. You can use these to
        organize and group your datasets.
      location: The geographic location where the data resides.
    """

    # Map type: unrecognized JSON keys are captured as additionalProperties.
    @encoding.MapUnrecognizedFields('additionalProperties')
    class LabelsValue(_messages.Message):
      r"""The labels associated with this dataset. You can use these to
      organize and group your datasets.

      Messages:
        AdditionalProperty: An additional property for a LabelsValue object.

      Fields:
        additionalProperties: Additional properties of type LabelsValue
      """

      class AdditionalProperty(_messages.Message):
        r"""An additional property for a LabelsValue object.

        Fields:
          key: Name of the additional property.
          value: A string attribute.
        """

        key = _messages.StringField(1)
        value = _messages.StringField(2)

      additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

    datasetReference = _messages.MessageField('DatasetReference', 1)
    friendlyName = _messages.StringField(2)
    id = _messages.StringField(3)
    kind = _messages.StringField(4, default=u'bigquery#dataset')
    labels = _messages.MessageField('LabelsValue', 5)
    location = _messages.StringField(6)

  datasets = _messages.MessageField('DatasetsValueListEntry', 1, repeated=True)
  etag = _messages.StringField(2)
  # Default mirrors the API's fixed resource-type string for this message.
  kind = _messages.StringField(3, default=u'bigquery#datasetList')
  nextPageToken = _messages.StringField(4)
class DatasetReference(_messages.Message):
  r"""A DatasetReference object.

  Fields:
    datasetId: [Required] A unique ID for this dataset, without the project
      name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or
      underscores (_). The maximum length is 1,024 characters.
    projectId: [Optional] The ID of the project containing this dataset.
  """

  # NOTE: datasetId is [Required] at the API level but not enforced here
  # (no required=True); the server validates it.
  datasetId = _messages.StringField(1)
  projectId = _messages.StringField(2)
class DestinationTableProperties(_messages.Message):
  r"""A DestinationTableProperties object.

  Fields:
    description: [Optional] The description for the destination table. This
      will only be used if the destination table is newly created. If the
      table already exists and a value different than the current description
      is provided, the job will fail.
    friendlyName: [Optional] The friendly name for the destination table. This
      will only be used if the destination table is newly created. If the
      table already exists and a value different than the current friendly
      name is provided, the job will fail.
  """

  description = _messages.StringField(1)
  friendlyName = _messages.StringField(2)
class EncryptionConfiguration(_messages.Message):
  r"""A EncryptionConfiguration object.

  Fields:
    kmsKeyName: [Optional] Describes the Cloud KMS encryption key that will be
      used to protect destination BigQuery table. The BigQuery Service Account
      associated with your project requires access to this encryption key.
  """

  kmsKeyName = _messages.StringField(1)
class ErrorProto(_messages.Message):
  r"""A ErrorProto object.

  Fields:
    debugInfo: Debugging information. This property is internal to Google and
      should not be used.
    location: Specifies where the error occurred, if present.
    message: A human-readable description of the error.
    reason: A short error code that summarizes the error.
  """

  debugInfo = _messages.StringField(1)
  location = _messages.StringField(2)
  message = _messages.StringField(3)
  reason = _messages.StringField(4)
class ExplainQueryStage(_messages.Message):
  r"""A ExplainQueryStage object.

  Fields:
    completedParallelInputs: Number of parallel input segments completed.
    computeMsAvg: Milliseconds the average shard spent on CPU-bound tasks.
    computeMsMax: Milliseconds the slowest shard spent on CPU-bound tasks.
    computeRatioAvg: Relative amount of time the average shard spent on CPU-
      bound tasks.
    computeRatioMax: Relative amount of time the slowest shard spent on CPU-
      bound tasks.
    endMs: Stage end time represented as milliseconds since epoch.
    id: Unique ID for stage within plan.
    inputStages: IDs for stages that are inputs to this stage.
    name: Human-readable name for stage.
    parallelInputs: Number of parallel input segments to be processed.
    readMsAvg: Milliseconds the average shard spent reading input.
    readMsMax: Milliseconds the slowest shard spent reading input.
    readRatioAvg: Relative amount of time the average shard spent reading
      input.
    readRatioMax: Relative amount of time the slowest shard spent reading
      input.
    recordsRead: Number of records read into the stage.
    recordsWritten: Number of records written by the stage.
    shuffleOutputBytes: Total number of bytes written to shuffle.
    shuffleOutputBytesSpilled: Total number of bytes written to shuffle and
      spilled to disk.
    startMs: Stage start time represented as milliseconds since epoch.
    status: Current status for the stage.
    steps: List of operations within the stage in dependency order
      (approximately chronological).
    waitMsAvg: Milliseconds the average shard spent waiting to be scheduled.
    waitMsMax: Milliseconds the slowest shard spent waiting to be scheduled.
    waitRatioAvg: Relative amount of time the average shard spent waiting to
      be scheduled.
    waitRatioMax: Relative amount of time the slowest shard spent waiting to
      be scheduled.
    writeMsAvg: Milliseconds the average shard spent on writing output.
    writeMsMax: Milliseconds the slowest shard spent on writing output.
    writeRatioAvg: Relative amount of time the average shard spent on writing
      output.
    writeRatioMax: Relative amount of time the slowest shard spent on writing
      output.
  """

  # Absolute times/counts are integers; *Ratio* fields are floats.
  completedParallelInputs = _messages.IntegerField(1)
  computeMsAvg = _messages.IntegerField(2)
  computeMsMax = _messages.IntegerField(3)
  computeRatioAvg = _messages.FloatField(4)
  computeRatioMax = _messages.FloatField(5)
  endMs = _messages.IntegerField(6)
  id = _messages.IntegerField(7)
  inputStages = _messages.IntegerField(8, repeated=True)
  name = _messages.StringField(9)
  parallelInputs = _messages.IntegerField(10)
  readMsAvg = _messages.IntegerField(11)
  readMsMax = _messages.IntegerField(12)
  readRatioAvg = _messages.FloatField(13)
  readRatioMax = _messages.FloatField(14)
  recordsRead = _messages.IntegerField(15)
  recordsWritten = _messages.IntegerField(16)
  shuffleOutputBytes = _messages.IntegerField(17)
  shuffleOutputBytesSpilled = _messages.IntegerField(18)
  startMs = _messages.IntegerField(19)
  status = _messages.StringField(20)
  steps = _messages.MessageField('ExplainQueryStep', 21, repeated=True)
  waitMsAvg = _messages.IntegerField(22)
  waitMsMax = _messages.IntegerField(23)
  waitRatioAvg = _messages.FloatField(24)
  waitRatioMax = _messages.FloatField(25)
  writeMsAvg = _messages.IntegerField(26)
  writeMsMax = _messages.IntegerField(27)
  writeRatioAvg = _messages.FloatField(28)
  writeRatioMax = _messages.FloatField(29)
class ExplainQueryStep(_messages.Message):
  r"""A ExplainQueryStep object.

  Fields:
    kind: Machine-readable operation type.
    substeps: Human-readable stage descriptions.
  """

  kind = _messages.StringField(1)
  substeps = _messages.StringField(2, repeated=True)
class ExternalDataConfiguration(_messages.Message):
  r"""A ExternalDataConfiguration object.

  Fields:
    autodetect: Try to detect schema and format options automatically. Any
      option specified explicitly will be honored.
    bigtableOptions: [Optional] Additional options if sourceFormat is set to
      BIGTABLE.
    compression: [Optional] The compression type of the data source. Possible
      values include GZIP and NONE. The default value is NONE. This setting is
      ignored for Google Cloud Bigtable, Google Cloud Datastore backups and
      Avro formats.
    csvOptions: Additional properties to set if sourceFormat is set to CSV.
    googleSheetsOptions: [Optional] Additional options if sourceFormat is set
      to GOOGLE_SHEETS.
    hivePartitioningMode: [Optional, Experimental] If hive partitioning is
      enabled, which mode to use. Two modes are supported: - AUTO:
      automatically infer partition key name(s) and type(s). - STRINGS:
      automatic infer partition key name(s). All types are strings. Not all
      storage formats support hive partitioning -- requesting hive
      partitioning on an unsupported format will lead to an error.
    ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
      values that are not represented in the table schema. If true, the extra
      values are ignored. If false, records with extra columns are treated as
      bad records, and if there are too many bad records, an invalid error is
      returned in the job result. The default value is false. The sourceFormat
      property determines what BigQuery treats as an extra value: CSV:
      Trailing columns JSON: Named values that don't match any column names
      Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore
      backups: This setting is ignored. Avro: This setting is ignored.
    maxBadRecords: [Optional] The maximum number of bad records that BigQuery
      can ignore when reading data. If the number of bad records exceeds this
      value, an invalid error is returned in the job result. This is only
      valid for CSV, JSON, and Google Sheets. The default value is 0, which
      requires that all records are valid. This setting is ignored for Google
      Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
    schema: [Optional] The schema for the data. Schema is required for CSV and
      JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud
      Datastore backups, and Avro formats.
    sourceFormat: [Required] The data format. For CSV files, specify "CSV".
      For Google sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON,
      specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For
      Google Cloud Datastore backups, specify "DATASTORE_BACKUP". [Beta] For
      Google Cloud Bigtable, specify "BIGTABLE".
    sourceUris: [Required] The fully-qualified URIs that point to your data in
      Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
      '*' wildcard character and it must come after the 'bucket' name. Size
      limits related to load jobs apply to external data sources. For Google
      Cloud Bigtable URIs: Exactly one URI can be specified and it has be a
      fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
      For Google Cloud Datastore backups, exactly one URI can be specified.
      Also, the '*' wildcard character is not allowed.
  """

  autodetect = _messages.BooleanField(1)
  # Only one of the format-specific options messages applies, selected by
  # sourceFormat (BIGTABLE / CSV / GOOGLE_SHEETS).
  bigtableOptions = _messages.MessageField('BigtableOptions', 2)
  compression = _messages.StringField(3)
  csvOptions = _messages.MessageField('CsvOptions', 4)
  googleSheetsOptions = _messages.MessageField('GoogleSheetsOptions', 5)
  hivePartitioningMode = _messages.StringField(6)
  ignoreUnknownValues = _messages.BooleanField(7)
  # Signed 32-bit on the wire (INT32 variant).
  maxBadRecords = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  schema = _messages.MessageField('TableSchema', 9)
  sourceFormat = _messages.StringField(10)
  sourceUris = _messages.StringField(11, repeated=True)
class GetQueryResultsResponse(_messages.Message):
  r"""A GetQueryResultsResponse object.

  Job status plus, once the query has completed, one page of result rows
  (subsequent pages are fetched via GetQueryResults with the pageToken).

  Fields:
    cacheHit: Whether the query result was fetched from the query cache.
    errors: [Output-only] The first errors or warnings encountered during the
      running of the job. The final message includes the number of errors that
      caused the process to stop. Errors here do not necessarily mean that the
      job has completed or was unsuccessful.
    etag: A hash of this response.
    jobComplete: Whether the query has completed or not. If rows or totalRows
      are present, this will always be true. If this is false, totalRows will
      not be available.
    jobReference: Reference to the BigQuery Job that was created to run the
      query. This field will be present even if the original request timed
      out, in which case GetQueryResults can be used to read the results once
      the query has completed. Since this API only returns the first page of
      results, subsequent pages can be fetched via the same mechanism
      (GetQueryResults).
    kind: The resource type of the response.
    numDmlAffectedRows: [Output-only] The number of rows affected by a DML
      statement. Present only for DML statements INSERT, UPDATE or DELETE.
    pageToken: A token used for paging results.
    rows: An object with as many results as can be contained within the
      maximum permitted reply size. To get any additional rows, you can call
      GetQueryResults and specify the jobReference returned above. Present
      only when the query completes successfully.
    schema: The schema of the results. Present only when the query completes
      successfully.
    totalBytesProcessed: The total number of bytes processed for this query.
    totalRows: The total number of rows in the complete query result set,
      which can be more than the number of rows in this single page of
      results. Present only when the query completes successfully.
  """

  # NOTE(review): generated apitools message -- the integer passed to each
  # field constructor is its positional field number; keep these stable.
  cacheHit = _messages.BooleanField(1)
  errors = _messages.MessageField('ErrorProto', 2, repeated=True)
  etag = _messages.StringField(3)
  jobComplete = _messages.BooleanField(4)
  jobReference = _messages.MessageField('JobReference', 5)
  kind = _messages.StringField(6, default=u'bigquery#getQueryResultsResponse')
  numDmlAffectedRows = _messages.IntegerField(7)
  pageToken = _messages.StringField(8)
  rows = _messages.MessageField('TableRow', 9, repeated=True)
  schema = _messages.MessageField('TableSchema', 10)
  totalBytesProcessed = _messages.IntegerField(11)
  # UINT64 variant: row counts can exceed the default signed-int range.
  totalRows = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
class GetServiceAccountResponse(_messages.Message):
  r"""A GetServiceAccountResponse object.

  Response carrying a BigQuery service account email address.

  Fields:
    email: The service account email address.
    kind: The resource type of the response.
  """

  email = _messages.StringField(1)
  kind = _messages.StringField(2, default=u'bigquery#getServiceAccountResponse')
class GoogleSheetsOptions(_messages.Message):
  r"""A GoogleSheetsOptions object.

  Options controlling how BigQuery reads a Google Sheets data source.

  Fields:
    range: [Beta] [Optional] Range of a sheet to query from. Only used when
      non-empty. Typical format:
      sheet_name!top_left_cell_id:bottom_right_cell_id For example:
      sheet1!A1:B20
    skipLeadingRows: [Optional] The number of rows at the top of a sheet that
      BigQuery will skip when reading the data. The default value is 0. This
      property is useful if you have header rows that should be skipped. When
      autodetect is on, behavior is the following: * skipLeadingRows
      unspecified - Autodetect tries to detect headers in the first row. If
      they are not detected, the row is read as data. Otherwise data is read
      starting from the second row. * skipLeadingRows is 0 - Instructs
      autodetect that there are no headers and data should be read starting
      from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1
      rows and tries to detect headers in row N. If headers are not detected,
      row N is just skipped. Otherwise row N is used to extract column names
      for the detected schema.
  """

  # Shadows the builtin 'range' within this class body; the attribute name is
  # fixed by the API's wire field name, so it cannot be renamed.
  range = _messages.StringField(1)
  skipLeadingRows = _messages.IntegerField(2)
class IterationResult(_messages.Message):
  r"""An IterationResult object.

  Per-iteration statistics reported for a BigQuery ML training run.

  Fields:
    durationMs: [Output-only, Beta] Time taken to run the training iteration
      in milliseconds.
    evalLoss: [Output-only, Beta] Eval loss computed on the eval data at the
      end of the iteration. The eval loss is used for early stopping to avoid
      overfitting. No eval loss if eval_split_method option is specified as
      no_split or auto_split with input data size less than 500 rows.
    index: [Output-only, Beta] Index of the ML training iteration, starting
      from zero for each training run.
    learnRate: [Output-only, Beta] Learning rate used for this iteration, it
      varies for different training iterations if learn_rate_strategy option
      is not constant.
    trainingLoss: [Output-only, Beta] Training loss computed on the training
      data at the end of the iteration. The training loss function is defined
      by model type.
  """

  durationMs = _messages.IntegerField(1)
  evalLoss = _messages.FloatField(2)
  index = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  learnRate = _messages.FloatField(4)
  trainingLoss = _messages.FloatField(5)
class Job(_messages.Message):
  r"""A Job object.

  A BigQuery job resource: the caller-supplied configuration together with
  server-populated identity, status and statistics.

  Fields:
    configuration: [Required] Describes the job configuration.
    etag: [Output-only] A hash of this resource.
    id: [Output-only] Opaque ID field of the job
    jobReference: [Optional] Reference describing the unique-per-user name of
      the job.
    kind: [Output-only] The type of the resource.
    selfLink: [Output-only] A URL that can be used to access this resource
      again.
    statistics: [Output-only] Information about the job, including starting
      time and ending time of the job.
    status: [Output-only] The status of this job. Examine this value when
      polling an asynchronous job to see if the job is complete.
    user_email: [Output-only] Email address of the user who ran the job.
  """

  configuration = _messages.MessageField('JobConfiguration', 1)
  etag = _messages.StringField(2)
  id = _messages.StringField(3)
  jobReference = _messages.MessageField('JobReference', 4)
  kind = _messages.StringField(5, default=u'bigquery#job')
  selfLink = _messages.StringField(6)
  statistics = _messages.MessageField('JobStatistics', 7)
  status = _messages.MessageField('JobStatus', 8)
  # snake_case (unlike the other attributes) -- presumably mirrors the API's
  # literal 'user_email' wire name; do not rename.
  user_email = _messages.StringField(9)
class JobCancelResponse(_messages.Message):
  r"""A JobCancelResponse object.

  Response returned when a job cancellation is requested.

  Fields:
    job: The final state of the job.
    kind: The resource type of the response.
  """

  job = _messages.MessageField('Job', 1)
  kind = _messages.StringField(2, default=u'bigquery#jobCancelResponse')
class JobConfiguration(_messages.Message):
  r"""A JobConfiguration object.

  Top-level job configuration: exactly one of copy/extract/load/query is
  populated ("[Pick one]"), plus options common to all job types.

  Messages:
    LabelsValue: The labels associated with this job. You can use these to
      organize and group your jobs. Label keys and values can be no longer
      than 63 characters, can only contain lowercase letters, numeric
      characters, underscores and dashes. International characters are
      allowed. Label values are optional. Label keys must start with a letter
      and each label in the list must have a different key.

  Fields:
    copy: [Pick one] Copies a table.
    dryRun: [Optional] If set, don't actually run this job. A valid query will
      return a mostly empty response with some processing statistics, while an
      invalid query will return the same error it would if it wasn't a dry
      run. Behavior of non-query jobs is undefined.
    extract: [Pick one] Configures an extract job.
    jobTimeoutMs: [Optional] Job timeout in milliseconds. If this time limit
      is exceeded, BigQuery may attempt to terminate the job.
    jobType: [Output-only] The type of the job. Can be QUERY, LOAD, EXTRACT,
      COPY or UNKNOWN.
    labels: The labels associated with this job. You can use these to organize
      and group your jobs. Label keys and values can be no longer than 63
      characters, can only contain lowercase letters, numeric characters,
      underscores and dashes. International characters are allowed. Label
      values are optional. Label keys must start with a letter and each label
      in the list must have a different key.
    load: [Pick one] Configures a load job.
    query: [Pick one] Configures a query job.
  """

  # Maps arbitrary (undeclared) label keys into additionalProperties so
  # free-form label dictionaries round-trip through this message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""The labels associated with this job. You can use these to organize and
    group your jobs. Label keys and values can be no longer than 63
    characters, can only contain lowercase letters, numeric characters,
    underscores and dashes. International characters are allowed. Label values
    are optional. Label keys must start with a letter and each label in the
    list must have a different key.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  copy = _messages.MessageField('JobConfigurationTableCopy', 1)
  dryRun = _messages.BooleanField(2)
  extract = _messages.MessageField('JobConfigurationExtract', 3)
  jobTimeoutMs = _messages.IntegerField(4)
  jobType = _messages.StringField(5)
  labels = _messages.MessageField('LabelsValue', 6)
  load = _messages.MessageField('JobConfigurationLoad', 7)
  query = _messages.MessageField('JobConfigurationQuery', 8)
class JobConfigurationExtract(_messages.Message):
  r"""A JobConfigurationExtract object.

  Configuration for an extract (table export) job.

  Fields:
    compression: [Optional] The compression type to use for exported files.
      Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default
      value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    destinationFormat: [Optional] The exported file format. Possible values
      include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV.
      Tables with nested or repeated fields cannot be exported as CSV.
    destinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
      passing only one URI as necessary. The fully-qualified Google Cloud
      Storage URI where the extracted table should be written.
    destinationUris: [Pick one] A list of fully-qualified Google Cloud Storage
      URIs where the extracted table should be written.
    fieldDelimiter: [Optional] Delimiter to use between fields in the exported
      data. Default is ','
    printHeader: [Optional] Whether to print out a header row in the results.
      Default is true.
    sourceTable: [Required] A reference to the table being exported.
  """

  compression = _messages.StringField(1)
  destinationFormat = _messages.StringField(2)
  # Deprecated in favor of destinationUris (see docstring); kept for
  # wire-format compatibility.
  destinationUri = _messages.StringField(3)
  destinationUris = _messages.StringField(4, repeated=True)
  fieldDelimiter = _messages.StringField(5)
  printHeader = _messages.BooleanField(6, default=True)
  sourceTable = _messages.MessageField('TableReference', 7)
class JobConfigurationLoad(_messages.Message):
  r"""A JobConfigurationLoad object.

  Configuration for a load job: source URIs and format options, the
  destination table, and dispositions controlling table creation/overwrite.

  Fields:
    allowJaggedRows: [Optional] Accept rows that are missing trailing optional
      columns. The missing values are treated as nulls. If false, records with
      missing trailing columns are treated as bad records, and if there are
      too many bad records, an invalid error is returned in the job result.
      The default value is false. Only applicable to CSV, ignored for other
      formats.
    allowQuotedNewlines: Indicates if BigQuery should allow quoted data
      sections that contain newline characters in a CSV file. The default
      value is false.
    autodetect: [Optional] Indicates if we should automatically infer the
      options and schema for CSV and JSON sources.
    clustering: [Beta] Clustering specification for the destination table.
      Must be specified with time-based partitioning, data in the table will
      be first partitioned and subsequently clustered.
    createDisposition: [Optional] Specifies whether the job is allowed to
      create new tables. The following values are supported: CREATE_IF_NEEDED:
      If the table does not exist, BigQuery creates the table. CREATE_NEVER:
      The table must already exist. If it does not, a 'notFound' error is
      returned in the job result. The default value is CREATE_IF_NEEDED.
      Creation, truncation and append actions occur as one atomic update upon
      job completion.
    destinationEncryptionConfiguration: Custom encryption configuration (e.g.,
      Cloud KMS keys).
    destinationTable: [Required] The destination table to load the data into.
    destinationTableProperties: [Beta] [Optional] Properties with which to
      create the destination table if it is new.
    encoding: [Optional] The character encoding of the data. The supported
      values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
      decodes the data after the raw, binary data has been split using the
      values of the quote and fieldDelimiter properties.
    fieldDelimiter: [Optional] The separator for fields in a CSV file. The
      separator can be any ISO-8859-1 single-byte character. To use a
      character in the range 128-255, you must encode the character as UTF8.
      BigQuery converts the string to ISO-8859-1 encoding, and then uses the
      first byte of the encoded string to split the data in its raw, binary
      state. BigQuery also supports the escape sequence "\t" to specify a tab
      separator. The default value is a comma (',').
    hivePartitioningMode: [Optional, Experimental] If hive partitioning is
      enabled, which mode to use. Two modes are supported: - AUTO:
      automatically infer partition key name(s) and type(s). - STRINGS:
      automatic infer partition key name(s). All types are strings. Not all
      storage formats support hive partitioning -- requesting hive
      partitioning on an unsupported format will lead to an error.
    ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
      values that are not represented in the table schema. If true, the extra
      values are ignored. If false, records with extra columns are treated as
      bad records, and if there are too many bad records, an invalid error is
      returned in the job result. The default value is false. The sourceFormat
      property determines what BigQuery treats as an extra value: CSV:
      Trailing columns JSON: Named values that don't match any column names
    maxBadRecords: [Optional] The maximum number of bad records that BigQuery
      can ignore when running the job. If the number of bad records exceeds
      this value, an invalid error is returned in the job result. This is only
      valid for CSV and JSON. The default value is 0, which requires that all
      records are valid.
    nullMarker: [Optional] Specifies a string that represents a null value in
      a CSV file. For example, if you specify "\N", BigQuery interprets "\N"
      as a null value when loading a CSV file. The default value is the empty
      string. If you set this property to a custom value, BigQuery throws an
      error if an empty string is present for all data types except for STRING
      and BYTE. For STRING and BYTE columns, BigQuery interprets the empty
      string as an empty value.
    projectionFields: If sourceFormat is set to "DATASTORE_BACKUP", indicates
      which entity properties to load into BigQuery from a Cloud Datastore
      backup. Property names are case sensitive and must be top-level
      properties. If no properties are specified, BigQuery loads all
      properties. If any named property isn't found in the Cloud Datastore
      backup, an invalid error is returned in the job result.
    quote: [Optional] The value that is used to quote data sections in a CSV
      file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
      the first byte of the encoded string to split the data in its raw,
      binary state. The default value is a double-quote ('"'). If your data
      does not contain quoted sections, set the property value to an empty
      string. If your data contains quoted newline characters, you must also
      set the allowQuotedNewlines property to true.
    rangePartitioning: [TrustedTester] Range partitioning specification for
      this table. Only one of timePartitioning and rangePartitioning should be
      specified.
    schema: [Optional] The schema for the destination table. The schema can be
      omitted if the destination table already exists, or if you're loading
      data from Google Cloud Datastore.
    schemaInline: [Deprecated] The inline schema. For CSV schemas, specify as
      "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
      baz:FLOAT".
    schemaInlineFormat: [Deprecated] The format of the schemaInline property.
    schemaUpdateOptions: Allows the schema of the destination table to be
      updated as a side effect of the load job if a schema is autodetected or
      supplied in the job configuration. Schema update options are supported
      in two cases: when writeDisposition is WRITE_APPEND; when
      writeDisposition is WRITE_TRUNCATE and the destination table is a
      partition of a table, specified by partition decorators. For normal
      tables, WRITE_TRUNCATE will always overwrite the schema. One or more of
      the following values are specified: ALLOW_FIELD_ADDITION: allow adding a
      nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a
      required field in the original schema to nullable.
    skipLeadingRows: [Optional] The number of rows at the top of a CSV file
      that BigQuery will skip when loading the data. The default value is 0.
      This property is useful if you have header rows in the file that should
      be skipped.
    sourceFormat: [Optional] The format of the data files. For CSV files,
      specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
      newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
      specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC".
      The default value is CSV.
    sourceUris: [Required] The fully-qualified URIs that point to your data in
      Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
      '*' wildcard character and it must come after the 'bucket' name. Size
      limits related to load jobs apply to external data sources. For Google
      Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a
      fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
      For Google Cloud Datastore backups: Exactly one URI can be specified.
      Also, the '*' wildcard character is not allowed.
    timePartitioning: Time-based partitioning specification for the
      destination table. Only one of timePartitioning and rangePartitioning
      should be specified.
    useAvroLogicalTypes: [Optional] If sourceFormat is set to "AVRO",
      indicates whether to enable interpreting logical types into their
      corresponding types (ie. TIMESTAMP), instead of only using their raw
      types (ie. INTEGER).
    writeDisposition: [Optional] Specifies the action that occurs if the
      destination table already exists. The following values are supported:
      WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
      table data. WRITE_APPEND: If the table already exists, BigQuery appends
      the data to the table. WRITE_EMPTY: If the table already exists and
      contains data, a 'duplicate' error is returned in the job result. The
      default value is WRITE_APPEND. Each action is atomic and only occurs if
      BigQuery is able to complete the job successfully. Creation, truncation
      and append actions occur as one atomic update upon job completion.
  """

  allowJaggedRows = _messages.BooleanField(1)
  allowQuotedNewlines = _messages.BooleanField(2)
  autodetect = _messages.BooleanField(3)
  clustering = _messages.MessageField('Clustering', 4)
  createDisposition = _messages.StringField(5)
  destinationEncryptionConfiguration = _messages.MessageField('EncryptionConfiguration', 6)
  destinationTable = _messages.MessageField('TableReference', 7)
  destinationTableProperties = _messages.MessageField('DestinationTableProperties', 8)
  encoding = _messages.StringField(9)
  fieldDelimiter = _messages.StringField(10)
  hivePartitioningMode = _messages.StringField(11)
  ignoreUnknownValues = _messages.BooleanField(12)
  maxBadRecords = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  nullMarker = _messages.StringField(14)
  projectionFields = _messages.StringField(15, repeated=True)
  quote = _messages.StringField(16, default=u'"')
  rangePartitioning = _messages.MessageField('RangePartitioning', 17)
  schema = _messages.MessageField('TableSchema', 18)
  schemaInline = _messages.StringField(19)
  schemaInlineFormat = _messages.StringField(20)
  schemaUpdateOptions = _messages.StringField(21, repeated=True)
  skipLeadingRows = _messages.IntegerField(22, variant=_messages.Variant.INT32)
  sourceFormat = _messages.StringField(23)
  sourceUris = _messages.StringField(24, repeated=True)
  timePartitioning = _messages.MessageField('TimePartitioning', 25)
  useAvroLogicalTypes = _messages.BooleanField(26)
  writeDisposition = _messages.StringField(27)
class JobConfigurationQuery(_messages.Message):
  r"""A JobConfigurationQuery object.

  Configuration for a query job: the SQL text, dialect and cache settings,
  optional destination table, and external table definitions.

  Messages:
    TableDefinitionsValue: [Optional] If querying an external data source
      outside of BigQuery, describes the data format, location and other
      properties of the data source. By defining these properties, the data
      source can then be queried as if it were a standard BigQuery table.

  Fields:
    allowLargeResults: [Optional] If true and query uses legacy SQL dialect,
      allows the query to produce arbitrarily large result tables at a slight
      cost in performance. Requires destinationTable to be set. For standard
      SQL queries, this flag is ignored and large results are always allowed.
      However, you must still set destinationTable when result size exceeds
      the allowed maximum response size.
    clustering: [Beta] Clustering specification for the destination table.
      Must be specified with time-based partitioning, data in the table will
      be first partitioned and subsequently clustered.
    createDisposition: [Optional] Specifies whether the job is allowed to
      create new tables. The following values are supported: CREATE_IF_NEEDED:
      If the table does not exist, BigQuery creates the table. CREATE_NEVER:
      The table must already exist. If it does not, a 'notFound' error is
      returned in the job result. The default value is CREATE_IF_NEEDED.
      Creation, truncation and append actions occur as one atomic update upon
      job completion.
    defaultDataset: [Optional] Specifies the default dataset to use for
      unqualified table names in the query. Note that this does not alter
      behavior of unqualified dataset names.
    destinationEncryptionConfiguration: Custom encryption configuration (e.g.,
      Cloud KMS keys).
    destinationTable: [Optional] Describes the table where the query results
      should be stored. If not present, a new table will be created to store
      the results. This property must be set for large results that exceed the
      maximum response size.
    flattenResults: [Optional] If true and query uses legacy SQL dialect,
      flattens all nested and repeated fields in the query results.
      allowLargeResults must be true if this is set to false. For standard SQL
      queries, this flag is ignored and results are never flattened.
    maximumBillingTier: [Optional] Limits the billing tier for this job.
      Queries that have resource usage beyond this tier will fail (without
      incurring a charge). If unspecified, this will be set to your project
      default.
    maximumBytesBilled: [Optional] Limits the bytes billed for this job.
      Queries that will have bytes billed beyond this limit will fail (without
      incurring a charge). If unspecified, this will be set to your project
      default.
    parameterMode: Standard SQL only. Set to POSITIONAL to use positional (?)
      query parameters or to NAMED to use named (@myparam) query parameters in
      this query.
    preserveNulls: [Deprecated] This property is deprecated.
    priority: [Optional] Specifies a priority for the query. Possible values
      include INTERACTIVE and BATCH. The default value is INTERACTIVE.
    query: [Required] SQL query text to execute. The useLegacySql field can be
      used to indicate whether the query uses legacy SQL or standard SQL.
    queryParameters: Query parameters for standard SQL queries.
    rangePartitioning: [TrustedTester] Range partitioning specification for
      this table. Only one of timePartitioning and rangePartitioning should be
      specified.
    schemaUpdateOptions: Allows the schema of the destination table to be
      updated as a side effect of the query job. Schema update options are
      supported in two cases: when writeDisposition is WRITE_APPEND; when
      writeDisposition is WRITE_TRUNCATE and the destination table is a
      partition of a table, specified by partition decorators. For normal
      tables, WRITE_TRUNCATE will always overwrite the schema. One or more of
      the following values are specified: ALLOW_FIELD_ADDITION: allow adding a
      nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a
      required field in the original schema to nullable.
    tableDefinitions: [Optional] If querying an external data source outside
      of BigQuery, describes the data format, location and other properties of
      the data source. By defining these properties, the data source can then
      be queried as if it were a standard BigQuery table.
    timePartitioning: Time-based partitioning specification for the
      destination table. Only one of timePartitioning and rangePartitioning
      should be specified.
    useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
      this query. The default value is true. If set to false, the query will
      use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
      reference/ When useLegacySql is set to false, the value of
      flattenResults is ignored; query will be run as if flattenResults is
      false.
    useQueryCache: [Optional] Whether to look for the result in the query
      cache. The query cache is a best-effort cache that will be flushed
      whenever tables in the query are modified. Moreover, the query cache is
      only available when a query does not have a destination table specified.
      The default value is true.
    userDefinedFunctionResources: Describes user-defined function resources
      used in the query.
    writeDisposition: [Optional] Specifies the action that occurs if the
      destination table already exists. The following values are supported:
      WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
      table data and uses the schema from the query result. WRITE_APPEND: If
      the table already exists, BigQuery appends the data to the table.
      WRITE_EMPTY: If the table already exists and contains data, a
      'duplicate' error is returned in the job result. The default value is
      WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able
      to complete the job successfully. Creation, truncation and append
      actions occur as one atomic update upon job completion.
  """

  # Maps arbitrary table names (undeclared keys) into additionalProperties so
  # a dict of external table definitions round-trips through this message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class TableDefinitionsValue(_messages.Message):
    r"""[Optional] If querying an external data source outside of BigQuery,
    describes the data format, location and other properties of the data
    source. By defining these properties, the data source can then be queried
    as if it were a standard BigQuery table.

    Messages:
      AdditionalProperty: An additional property for a TableDefinitionsValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        TableDefinitionsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TableDefinitionsValue object.

      Fields:
        key: Name of the additional property.
        value: A ExternalDataConfiguration attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ExternalDataConfiguration', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  allowLargeResults = _messages.BooleanField(1, default=False)
  clustering = _messages.MessageField('Clustering', 2)
  createDisposition = _messages.StringField(3)
  defaultDataset = _messages.MessageField('DatasetReference', 4)
  destinationEncryptionConfiguration = _messages.MessageField('EncryptionConfiguration', 5)
  destinationTable = _messages.MessageField('TableReference', 6)
  flattenResults = _messages.BooleanField(7, default=True)
  maximumBillingTier = _messages.IntegerField(8, variant=_messages.Variant.INT32, default=1)
  maximumBytesBilled = _messages.IntegerField(9)
  parameterMode = _messages.StringField(10)
  preserveNulls = _messages.BooleanField(11)
  priority = _messages.StringField(12)
  query = _messages.StringField(13)
  queryParameters = _messages.MessageField('QueryParameter', 14, repeated=True)
  rangePartitioning = _messages.MessageField('RangePartitioning', 15)
  schemaUpdateOptions = _messages.StringField(16, repeated=True)
  tableDefinitions = _messages.MessageField('TableDefinitionsValue', 17)
  timePartitioning = _messages.MessageField('TimePartitioning', 18)
  useLegacySql = _messages.BooleanField(19, default=True)
  useQueryCache = _messages.BooleanField(20, default=True)
  userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 21, repeated=True)
  writeDisposition = _messages.StringField(22)
class JobConfigurationTableCopy(_messages.Message):
  r"""A JobConfigurationTableCopy object.

  Configuration for a table-copy job: one or more source tables and a single
  destination table, with create/write dispositions.

  Fields:
    createDisposition: [Optional] Specifies whether the job is allowed to
      create new tables. The following values are supported: CREATE_IF_NEEDED:
      If the table does not exist, BigQuery creates the table. CREATE_NEVER:
      The table must already exist. If it does not, a 'notFound' error is
      returned in the job result. The default value is CREATE_IF_NEEDED.
      Creation, truncation and append actions occur as one atomic update upon
      job completion.
    destinationEncryptionConfiguration: Custom encryption configuration (e.g.,
      Cloud KMS keys).
    destinationTable: [Required] The destination table
    sourceTable: [Pick one] Source table to copy.
    sourceTables: [Pick one] Source tables to copy.
    writeDisposition: [Optional] Specifies the action that occurs if the
      destination table already exists. The following values are supported:
      WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
      table data. WRITE_APPEND: If the table already exists, BigQuery appends
      the data to the table. WRITE_EMPTY: If the table already exists and
      contains data, a 'duplicate' error is returned in the job result. The
      default value is WRITE_EMPTY. Each action is atomic and only occurs if
      BigQuery is able to complete the job successfully. Creation, truncation
      and append actions occur as one atomic update upon job completion.
  """

  createDisposition = _messages.StringField(1)
  destinationEncryptionConfiguration = _messages.MessageField('EncryptionConfiguration', 2)
  destinationTable = _messages.MessageField('TableReference', 3)
  sourceTable = _messages.MessageField('TableReference', 4)
  sourceTables = _messages.MessageField('TableReference', 5, repeated=True)
  writeDisposition = _messages.StringField(6)
class JobList(_messages.Message):
  r"""A JobList object.

  One page of a jobs listing, with a token for fetching the next page.

  Messages:
    JobsValueListEntry: A JobsValueListEntry object.

  Fields:
    etag: A hash of this page of results.
    jobs: List of jobs that were requested.
    kind: The resource type of the response.
    nextPageToken: A token to request the next page of results.
  """

  class JobsValueListEntry(_messages.Message):
    r"""A JobsValueListEntry object.

    A single job summary within the listing; some fields are populated only
    under the "full" projection (marked [Full-projection-only]).

    Fields:
      configuration: [Full-projection-only] Specifies the job configuration.
      errorResult: A result object that will be present only if the job has
        failed.
      id: Unique opaque ID of the job.
      jobReference: Job reference uniquely identifying the job.
      kind: The resource type.
      state: Running state of the job. When the state is DONE, errorResult can
        be checked to determine whether the job succeeded or failed.
      statistics: [Output-only] Information about the job, including starting
        time and ending time of the job.
      status: [Full-projection-only] Describes the state of the job.
      user_email: [Full-projection-only] Email address of the user who ran the
        job.
    """

    configuration = _messages.MessageField('JobConfiguration', 1)
    errorResult = _messages.MessageField('ErrorProto', 2)
    id = _messages.StringField(3)
    jobReference = _messages.MessageField('JobReference', 4)
    kind = _messages.StringField(5, default=u'bigquery#job')
    state = _messages.StringField(6)
    statistics = _messages.MessageField('JobStatistics', 7)
    status = _messages.MessageField('JobStatus', 8)
    # snake_case -- presumably mirrors the API's literal 'user_email' wire
    # name; do not rename.
    user_email = _messages.StringField(9)

  etag = _messages.StringField(1)
  jobs = _messages.MessageField('JobsValueListEntry', 2, repeated=True)
  kind = _messages.StringField(3, default=u'bigquery#jobList')
  nextPageToken = _messages.StringField(4)
class JobReference(_messages.Message):
  r"""A JobReference object.

  Fields:
    jobId: [Required] The ID of the job. The ID must contain only letters
      (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
      length is 1,024 characters.
    location: The geographic location of the job. See details at
      https://cloud.google.com/bigquery/docs/locations#specifying_your_locatio
      n.
    projectId: [Required] The ID of the project containing this job.
  """

  jobId = _messages.StringField(1)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3)
class JobStatistics(_messages.Message):
  r"""A JobStatistics object.

  Messages:
    ReservationUsageValueListEntry: A ReservationUsageValueListEntry object.

  Fields:
    completionRatio: [TrustedTester] [Output-only] Job progress (0.0 -> 1.0)
      for LOAD and EXTRACT jobs.
    creationTime: [Output-only] Creation time of this job, in milliseconds
      since the epoch. This field will be present on all jobs.
    endTime: [Output-only] End time of this job, in milliseconds since the
      epoch. This field will be present whenever a job is in the DONE state.
    extract: [Output-only] Statistics for an extract job.
    load: [Output-only] Statistics for a load job.
    query: [Output-only] Statistics for a query job.
    quotaDeferments: [Output-only] Quotas which delayed this job's start time.
    reservationUsage: [Output-only] Job resource usage breakdown by
      reservation.
    startTime: [Output-only] Start time of this job, in milliseconds since the
      epoch. This field will be present when the job transitions from the
      PENDING state to either RUNNING or DONE.
    totalBytesProcessed: [Output-only] [Deprecated] Use the bytes processed in
      the query statistics instead.
    totalSlotMs: [Output-only] Slot-milliseconds for the job.
  """

  class ReservationUsageValueListEntry(_messages.Message):
    r"""A ReservationUsageValueListEntry object.

    Fields:
      name: [Output-only] Reservation name or "unreserved" for on-demand
        resources usage.
      slotMs: [Output-only] Slot-milliseconds the job spent in the given
        reservation.
    """

    name = _messages.StringField(1)
    slotMs = _messages.IntegerField(2)

  completionRatio = _messages.FloatField(1)
  creationTime = _messages.IntegerField(2)
  endTime = _messages.IntegerField(3)
  extract = _messages.MessageField('JobStatistics4', 4)
  load = _messages.MessageField('JobStatistics3', 5)
  query = _messages.MessageField('JobStatistics2', 6)
  quotaDeferments = _messages.StringField(7, repeated=True)
  reservationUsage = _messages.MessageField('ReservationUsageValueListEntry', 8, repeated=True)
  startTime = _messages.IntegerField(9)
  totalBytesProcessed = _messages.IntegerField(10)
  totalSlotMs = _messages.IntegerField(11)
class JobStatistics2(_messages.Message):
  r"""A JobStatistics2 object.

  Messages:
    ReservationUsageValueListEntry: A ReservationUsageValueListEntry object.

  Fields:
    billingTier: [Output-only] Billing tier for the job.
    cacheHit: [Output-only] Whether the query result was fetched from the
      query cache.
    ddlOperationPerformed: The DDL operation performed, possibly dependent on
      the pre-existence of the DDL target. Possible values (new values might
      be added in the future): "CREATE": The query created the DDL target.
      "SKIP": No-op. Example cases: the query is CREATE TABLE IF NOT EXISTS
      while the table already exists, or the query is DROP TABLE IF EXISTS
      while the table does not exist. "REPLACE": The query replaced the DDL
      target. Example case: the query is CREATE OR REPLACE TABLE, and the
      table already exists. "DROP": The query deleted the DDL target.
    ddlTargetTable: The DDL target table. Present only for CREATE/DROP
      TABLE/VIEW queries.
    estimatedBytesProcessed: [Output-only] The original estimate of bytes
      processed for the job.
    modelTraining: [Output-only, Beta] Information about create model query
      job progress.
    modelTrainingCurrentIteration: [Output-only, Beta] Deprecated; do not use.
    modelTrainingExpectedTotalIteration: [Output-only, Beta] Deprecated; do
      not use.
    numDmlAffectedRows: [Output-only] The number of rows affected by a DML
      statement. Present only for DML statements INSERT, UPDATE or DELETE.
    queryPlan: [Output-only] Describes execution plan for the query.
    referencedTables: [Output-only] Referenced tables for the job. Queries
      that reference more than 50 tables will not have a complete list.
    reservationUsage: [Output-only] Job resource usage breakdown by
      reservation.
    schema: [Output-only] The schema of the results. Present only for
      successful dry run of non-legacy SQL queries.
    statementType: The type of query statement, if valid. Possible values (new
      values might be added in the future): "SELECT": SELECT query. "INSERT":
      INSERT query; see https://cloud.google.com/bigquery/docs/reference
      /standard-sql/data-manipulation-language. "UPDATE": UPDATE query; see
      https://cloud.google.com/bigquery/docs/reference/standard-sql/data-
      manipulation-language. "DELETE": DELETE query; see
      https://cloud.google.com/bigquery/docs/reference/standard-sql/data-
      manipulation-language. "MERGE": MERGE query; see
      https://cloud.google.com/bigquery/docs/reference/standard-sql/data-
      manipulation-language. "CREATE_TABLE": CREATE [OR REPLACE] TABLE without
      AS SELECT. "CREATE_TABLE_AS_SELECT": CREATE [OR REPLACE] TABLE ... AS
      SELECT ... . "DROP_TABLE": DROP TABLE query. "CREATE_VIEW": CREATE [OR
      REPLACE] VIEW ... AS SELECT ... . "DROP_VIEW": DROP VIEW query.
      "ALTER_TABLE": ALTER TABLE query. "ALTER_VIEW": ALTER VIEW query.
    timeline: [Output-only] [Beta] Describes a timeline of job execution.
    totalBytesBilled: [Output-only] Total bytes billed for the job.
    totalBytesProcessed: [Output-only] Total bytes processed for the job.
    totalBytesProcessedAccuracy: [Output-only] For dry-run jobs,
      totalBytesProcessed is an estimate and this field specifies the accuracy
      of the estimate. Possible values can be: UNKNOWN: accuracy of the
      estimate is unknown. PRECISE: estimate is precise. LOWER_BOUND: estimate
      is lower bound of what the query would cost. UPPER_BOUND: estiamte is
      upper bound of what the query would cost.
    totalPartitionsProcessed: [Output-only] Total number of partitions
      processed from all partitioned tables referenced in the job.
    totalSlotMs: [Output-only] Slot-milliseconds for the job.
    undeclaredQueryParameters: Standard SQL only: list of undeclared query
      parameters detected during a dry run validation.
  """

  class ReservationUsageValueListEntry(_messages.Message):
    r"""A ReservationUsageValueListEntry object.

    Fields:
      name: [Output-only] Reservation name or "unreserved" for on-demand
        resources usage.
      slotMs: [Output-only] Slot-milliseconds the job spent in the given
        reservation.
    """

    name = _messages.StringField(1)
    slotMs = _messages.IntegerField(2)

  billingTier = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  cacheHit = _messages.BooleanField(2)
  ddlOperationPerformed = _messages.StringField(3)
  ddlTargetTable = _messages.MessageField('TableReference', 4)
  estimatedBytesProcessed = _messages.IntegerField(5)
  modelTraining = _messages.MessageField('BigQueryModelTraining', 6)
  modelTrainingCurrentIteration = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  modelTrainingExpectedTotalIteration = _messages.IntegerField(8)
  numDmlAffectedRows = _messages.IntegerField(9)
  queryPlan = _messages.MessageField('ExplainQueryStage', 10, repeated=True)
  referencedTables = _messages.MessageField('TableReference', 11, repeated=True)
  reservationUsage = _messages.MessageField('ReservationUsageValueListEntry', 12, repeated=True)
  schema = _messages.MessageField('TableSchema', 13)
  statementType = _messages.StringField(14)
  timeline = _messages.MessageField('QueryTimelineSample', 15, repeated=True)
  totalBytesBilled = _messages.IntegerField(16)
  totalBytesProcessed = _messages.IntegerField(17)
  totalBytesProcessedAccuracy = _messages.StringField(18)
  totalPartitionsProcessed = _messages.IntegerField(19)
  totalSlotMs = _messages.IntegerField(20)
  undeclaredQueryParameters = _messages.MessageField('QueryParameter', 21, repeated=True)
class JobStatistics3(_messages.Message):
  r"""A JobStatistics3 object.

  Fields:
    badRecords: [Output-only] The number of bad records encountered. Note that
      if the job has failed because of more bad records encountered than the
      maximum allowed in the load job configuration, then this number can be
      less than the total number of bad records present in the input data.
    inputFileBytes: [Output-only] Number of bytes of source data in a load
      job.
    inputFiles: [Output-only] Number of source files in a load job.
    outputBytes: [Output-only] Size of the loaded data in bytes. Note that
      while a load job is in the running state, this value may change.
    outputRows: [Output-only] Number of rows imported in a load job. Note that
      while an import job is in the running state, this value may change.
  """

  badRecords = _messages.IntegerField(1)
  inputFileBytes = _messages.IntegerField(2)
  inputFiles = _messages.IntegerField(3)
  outputBytes = _messages.IntegerField(4)
  outputRows = _messages.IntegerField(5)
class JobStatistics4(_messages.Message):
  r"""A JobStatistics4 object.

  Fields:
    destinationUriFileCounts: [Output-only] Number of files per destination
      URI or URI pattern specified in the extract configuration. These values
      will be in the same order as the URIs specified in the 'destinationUris'
      field.
  """

  destinationUriFileCounts = _messages.IntegerField(1, repeated=True)
class JobStatus(_messages.Message):
  r"""A JobStatus object.

  Fields:
    errorResult: [Output-only] Final error result of the job. If present,
      indicates that the job has completed and was unsuccessful.
    errors: [Output-only] The first errors encountered during the running of
      the job. The final message includes the number of errors that caused the
      process to stop. Errors here do not necessarily mean that the job has
      completed or was unsuccessful.
    state: [Output-only] Running state of the job.
  """

  errorResult = _messages.MessageField('ErrorProto', 1)
  errors = _messages.MessageField('ErrorProto', 2, repeated=True)
  state = _messages.StringField(3)
@encoding.MapUnrecognizedFields('additionalProperties')
class JsonObject(_messages.Message):
  r"""Represents a single JSON object.

  Messages:
    AdditionalProperty: An additional property for a JsonObject object.

  Fields:
    additionalProperties: Additional properties of type JsonObject
  """

  class AdditionalProperty(_messages.Message):
    r"""An additional property for a JsonObject object.

    Fields:
      key: Name of the additional property.
      value: A JsonValue attribute.
    """

    key = _messages.StringField(1)
    value = _messages.MessageField('JsonValue', 2)

  additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
# Re-export: JsonValue is the well-known JSON value message provided by the
# extra_types helper module (referenced by JsonObject.AdditionalProperty above).
JsonValue = extra_types.JsonValue
class MaterializedViewDefinition(_messages.Message):
  r"""A MaterializedViewDefinition object.

  Fields:
    lastRefreshTime: [Output-only] [TrustedTester] The time when this
      materialized view was last modified, in milliseconds since the epoch.
    query: [Required] A query whose result is persisted.
  """

  lastRefreshTime = _messages.IntegerField(1)
  query = _messages.StringField(2)
class ModelDefinition(_messages.Message):
  r"""A ModelDefinition object.

  Messages:
    ModelOptionsValue: [Output-only, Beta] Model options used for the first
      training run. These options are immutable for subsequent training runs.
      Default values are used for any options not specified in the input
      query.

  Fields:
    modelOptions: [Output-only, Beta] Model options used for the first
      training run. These options are immutable for subsequent training runs.
      Default values are used for any options not specified in the input
      query.
    trainingRuns: [Output-only, Beta] Information about ml training runs, each
      training run comprises of multiple iterations and there may be multiple
      training runs for the model if warm start is used or if a user decides
      to continue a previously cancelled query.
  """

  class ModelOptionsValue(_messages.Message):
    r"""[Output-only, Beta] Model options used for the first training run.
    These options are immutable for subsequent training runs. Default values
    are used for any options not specified in the input query.

    Fields:
      labels: A string attribute.
      lossType: A string attribute.
      modelType: A string attribute.
    """

    labels = _messages.StringField(1, repeated=True)
    lossType = _messages.StringField(2)
    modelType = _messages.StringField(3)

  modelOptions = _messages.MessageField('ModelOptionsValue', 1)
  trainingRuns = _messages.MessageField('TrainingRun', 2, repeated=True)
class ProjectList(_messages.Message):
  r"""A ProjectList object.

  Messages:
    ProjectsValueListEntry: A ProjectsValueListEntry object.

  Fields:
    etag: A hash of the page of results
    kind: The type of list.
    nextPageToken: A token to request the next page of results.
    projects: Projects to which you have at least READ access.
    totalItems: The total number of projects in the list.
  """

  class ProjectsValueListEntry(_messages.Message):
    r"""A ProjectsValueListEntry object.

    Fields:
      friendlyName: A descriptive name for this project.
      id: An opaque ID of this project.
      kind: The resource type.
      numericId: The numeric ID of this project.
      projectReference: A unique reference to this project.
    """

    friendlyName = _messages.StringField(1)
    id = _messages.StringField(2)
    kind = _messages.StringField(3, default=u'bigquery#project')
    numericId = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
    projectReference = _messages.MessageField('ProjectReference', 5)

  etag = _messages.StringField(1)
  kind = _messages.StringField(2, default=u'bigquery#projectList')
  nextPageToken = _messages.StringField(3)
  projects = _messages.MessageField('ProjectsValueListEntry', 4, repeated=True)
  totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ProjectReference(_messages.Message):
  r"""A ProjectReference object.

  Fields:
    projectId: [Required] ID of the project. Can be either the numeric ID or
      the assigned ID of the project.
  """

  projectId = _messages.StringField(1)
class QueryParameter(_messages.Message):
  r"""A QueryParameter object.

  Fields:
    name: [Optional] If unset, this is a positional parameter. Otherwise,
      should be unique within a query.
    parameterType: [Required] The type of this parameter.
    parameterValue: [Required] The value of this parameter.
  """

  name = _messages.StringField(1)
  parameterType = _messages.MessageField('QueryParameterType', 2)
  parameterValue = _messages.MessageField('QueryParameterValue', 3)
class QueryParameterType(_messages.Message):
  r"""A QueryParameterType object.

  Messages:
    StructTypesValueListEntry: A StructTypesValueListEntry object.

  Fields:
    arrayType: [Optional] The type of the array's elements, if this is an
      array.
    structTypes: [Optional] The types of the fields of this struct, in order,
      if this is a struct.
    type: [Required] The top level type of this field.
  """

  class StructTypesValueListEntry(_messages.Message):
    r"""A StructTypesValueListEntry object.

    Fields:
      description: [Optional] Human-oriented description of the field.
      name: [Optional] The name of this field.
      type: [Required] The type of this field.
    """

    description = _messages.StringField(1)
    name = _messages.StringField(2)
    # Recursive reference: struct members are themselves QueryParameterType.
    type = _messages.MessageField('QueryParameterType', 3)

  arrayType = _messages.MessageField('QueryParameterType', 1)
  structTypes = _messages.MessageField('StructTypesValueListEntry', 2, repeated=True)
  type = _messages.StringField(3)
class QueryParameterValue(_messages.Message):
  r"""A QueryParameterValue object.

  Messages:
    StructValuesValue: [Optional] The struct field values, in order of the
      struct type's declaration.

  Fields:
    arrayValues: [Optional] The array values, if this is an array type.
    structValues: [Optional] The struct field values, in order of the struct
      type's declaration.
    value: [Optional] The value of this value, if a simple scalar type.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class StructValuesValue(_messages.Message):
    r"""[Optional] The struct field values, in order of the struct type's
    declaration.

    Messages:
      AdditionalProperty: An additional property for a StructValuesValue
        object.

    Fields:
      additionalProperties: Additional properties of type StructValuesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a StructValuesValue object.

      Fields:
        key: Name of the additional property.
        value: A QueryParameterValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('QueryParameterValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  arrayValues = _messages.MessageField('QueryParameterValue', 1, repeated=True)
  structValues = _messages.MessageField('StructValuesValue', 2)
  value = _messages.StringField(3)
class QueryRequest(_messages.Message):
  r"""A QueryRequest object.

  Fields:
    defaultDataset: [Optional] Specifies the default datasetId and projectId
      to assume for any unqualified table names in the query. If not set, all
      table names in the query string must be qualified in the format
      'datasetId.tableId'.
    dryRun: [Optional] If set to true, BigQuery doesn't run the job. Instead,
      if the query is valid, BigQuery returns statistics about the job such as
      how many bytes would be processed. If the query is invalid, an error
      returns. The default value is false.
    kind: The resource type of the request.
    location: The geographic location where the job should run. See details at
      https://cloud.google.com/bigquery/docs/locations#specifying_your_locatio
      n.
    maxResults: [Optional] The maximum number of rows of data to return per
      page of results. Setting this flag to a small value such as 1000 and
      then paging through results might improve reliability when the query
      result set is large. In addition to this limit, responses are also
      limited to 10 MB. By default, there is no maximum row count, and only
      the byte limit applies.
    parameterMode: Standard SQL only. Set to POSITIONAL to use positional (?)
      query parameters or to NAMED to use named (@myparam) query parameters in
      this query.
    preserveNulls: [Deprecated] This property is deprecated.
    query: [Required] A query string, following the BigQuery query syntax, of
      the query to execute. Example: "SELECT count(f1) FROM
      [myProjectId:myDatasetId.myTableId]".
    queryParameters: Query parameters for Standard SQL queries.
    timeoutMs: [Optional] How long to wait for the query to complete, in
      milliseconds, before the request times out and returns. Note that this
      is only a timeout for the request, not the query. If the query takes
      longer to run than the timeout value, the call returns without any
      results and with the 'jobComplete' flag set to false. You can call
      GetQueryResults() to wait for the query to complete and read the
      results. The default value is 10000 milliseconds (10 seconds).
    useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
      this query. The default value is true. If set to false, the query will
      use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
      reference/ When useLegacySql is set to false, the value of
      flattenResults is ignored; query will be run as if flattenResults is
      false.
    useQueryCache: [Optional] Whether to look for the result in the query
      cache. The query cache is a best-effort cache that will be flushed
      whenever tables in the query are modified. The default value is true.
  """

  defaultDataset = _messages.MessageField('DatasetReference', 1)
  dryRun = _messages.BooleanField(2)
  kind = _messages.StringField(3, default=u'bigquery#queryRequest')
  location = _messages.StringField(4)
  maxResults = _messages.IntegerField(5, variant=_messages.Variant.UINT32)
  parameterMode = _messages.StringField(6)
  preserveNulls = _messages.BooleanField(7)
  query = _messages.StringField(8)
  queryParameters = _messages.MessageField('QueryParameter', 9, repeated=True)
  timeoutMs = _messages.IntegerField(10, variant=_messages.Variant.UINT32)
  useLegacySql = _messages.BooleanField(11, default=True)
  useQueryCache = _messages.BooleanField(12, default=True)
class QueryResponse(_messages.Message):
  r"""A QueryResponse object.

  Fields:
    cacheHit: Whether the query result was fetched from the query cache.
    errors: [Output-only] The first errors or warnings encountered during the
      running of the job. The final message includes the number of errors that
      caused the process to stop. Errors here do not necessarily mean that the
      job has completed or was unsuccessful.
    jobComplete: Whether the query has completed or not. If rows or totalRows
      are present, this will always be true. If this is false, totalRows will
      not be available.
    jobReference: Reference to the Job that was created to run the query. This
      field will be present even if the original request timed out, in which
      case GetQueryResults can be used to read the results once the query has
      completed. Since this API only returns the first page of results,
      subsequent pages can be fetched via the same mechanism
      (GetQueryResults).
    kind: The resource type.
    numDmlAffectedRows: [Output-only] The number of rows affected by a DML
      statement. Present only for DML statements INSERT, UPDATE or DELETE.
    pageToken: A token used for paging results.
    rows: An object with as many results as can be contained within the
      maximum permitted reply size. To get any additional rows, you can call
      GetQueryResults and specify the jobReference returned above.
    schema: The schema of the results. Present only when the query completes
      successfully.
    totalBytesProcessed: The total number of bytes processed for this query.
      If this query was a dry run, this is the number of bytes that would be
      processed if the query were run.
    totalRows: The total number of rows in the complete query result set,
      which can be more than the number of rows in this single page of
      results.
  """

  cacheHit = _messages.BooleanField(1)
  errors = _messages.MessageField('ErrorProto', 2, repeated=True)
  jobComplete = _messages.BooleanField(3)
  jobReference = _messages.MessageField('JobReference', 4)
  kind = _messages.StringField(5, default=u'bigquery#queryResponse')
  numDmlAffectedRows = _messages.IntegerField(6)
  pageToken = _messages.StringField(7)
  rows = _messages.MessageField('TableRow', 8, repeated=True)
  schema = _messages.MessageField('TableSchema', 9)
  totalBytesProcessed = _messages.IntegerField(10)
  totalRows = _messages.IntegerField(11, variant=_messages.Variant.UINT64)
class QueryTimelineSample(_messages.Message):
  r"""A QueryTimelineSample object.

  Fields:
    activeUnits: Total number of units currently being processed by workers.
      This does not correspond directly to slot usage. This is the largest
      value observed since the last sample.
    completedUnits: Total parallel units of work completed by this query.
    elapsedMs: Milliseconds elapsed since the start of query execution.
    pendingUnits: Total parallel units of work remaining for the active
      stages.
    totalSlotMs: Cumulative slot-ms consumed by the query.
  """

  activeUnits = _messages.IntegerField(1)
  completedUnits = _messages.IntegerField(2)
  elapsedMs = _messages.IntegerField(3)
  pendingUnits = _messages.IntegerField(4)
  totalSlotMs = _messages.IntegerField(5)
class RangePartitioning(_messages.Message):
  r"""A RangePartitioning object.

  Messages:
    RangeValue: [TrustedTester] [Required] Defines the ranges for range
      partitioning.

  Fields:
    field: [TrustedTester] [Required] The table is partitioned by this field.
      The field must be a top-level NULLABLE/REQUIRED field. The only
      supported type is INTEGER/INT64.
    range: [TrustedTester] [Required] Defines the ranges for range
      partitioning.
  """

  class RangeValue(_messages.Message):
    r"""[TrustedTester] [Required] Defines the ranges for range partitioning.

    Fields:
      end: [TrustedTester] [Required] The end of range partitioning,
        exclusive.
      interval: [TrustedTester] [Required] The width of each interval.
      start: [TrustedTester] [Required] The start of range partitioning,
        inclusive.
    """

    end = _messages.IntegerField(1)
    interval = _messages.IntegerField(2)
    start = _messages.IntegerField(3)

  field = _messages.StringField(1)
  # NOTE: 'range' mirrors the API field name and intentionally shadows the builtin.
  range = _messages.MessageField('RangeValue', 2)
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    AltValueValuesEnum: Data format for the response.

  Fields:
    alt: Data format for the response.
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: An opaque string that represents a user for quota purposes.
      Must not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    userIp: Deprecated. Please use quotaUser instead.
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for the response.

    Values:
      json: Responses with Content-Type of application/json
    """
    json = 0

  alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
  fields = _messages.StringField(2)
  key = _messages.StringField(3)
  oauth_token = _messages.StringField(4)
  prettyPrint = _messages.BooleanField(5, default=True)
  quotaUser = _messages.StringField(6)
  trace = _messages.StringField(7)
  userIp = _messages.StringField(8)
class Streamingbuffer(_messages.Message):
  r"""A Streamingbuffer object.

  Fields:
    estimatedBytes: [Output-only] A lower-bound estimate of the number of
      bytes currently in the streaming buffer.
    estimatedRows: [Output-only] A lower-bound estimate of the number of rows
      currently in the streaming buffer.
    oldestEntryTime: [Output-only] Contains the timestamp of the oldest entry
      in the streaming buffer, in milliseconds since the epoch, if the
      streaming buffer is available.
  """

  estimatedBytes = _messages.IntegerField(1, variant=_messages.Variant.UINT64)
  estimatedRows = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
  oldestEntryTime = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class Table(_messages.Message):
  r"""A Table object.

  Messages:
    LabelsValue: The labels associated with this table. You can use these to
      organize and group your tables. Label keys and values can be no longer
      than 63 characters, can only contain lowercase letters, numeric
      characters, underscores and dashes. International characters are
      allowed. Label values are optional. Label keys must start with a letter
      and each label in the list must have a different key.

  Fields:
    clustering: [Beta] Clustering specification for the table. Must be
      specified with partitioning, data in the table will be first partitioned
      and subsequently clustered.
    creationTime: [Output-only] The time when this table was created, in
      milliseconds since the epoch.
    description: [Optional] A user-friendly description of this table.
    encryptionConfiguration: Custom encryption configuration (e.g., Cloud KMS
      keys).
    etag: [Output-only] A hash of the table metadata. Used to ensure there
      were no concurrent modifications to the resource when attempting an
      update. Not guaranteed to change when the table contents or the fields
      numRows, numBytes, numLongTermBytes or lastModifiedTime change.
    expirationTime: [Optional] The time when this table expires, in
      milliseconds since the epoch. If not present, the table will persist
      indefinitely. Expired tables will be deleted and their storage
      reclaimed. The defaultTableExpirationMs property of the encapsulating
      dataset can be used to set a default expirationTime on newly created
      tables.
    externalDataConfiguration: [Optional] Describes the data format, location,
      and other properties of a table stored outside of BigQuery. By defining
      these properties, the data source can then be queried as if it were a
      standard BigQuery table.
    friendlyName: [Optional] A descriptive name for this table.
    id: [Output-only] An opaque ID uniquely identifying the table.
    kind: [Output-only] The type of the resource.
    labels: The labels associated with this table. You can use these to
      organize and group your tables. Label keys and values can be no longer
      than 63 characters, can only contain lowercase letters, numeric
      characters, underscores and dashes. International characters are
      allowed. Label values are optional. Label keys must start with a letter
      and each label in the list must have a different key.
    lastModifiedTime: [Output-only] The time when this table was last
      modified, in milliseconds since the epoch.
    location: [Output-only] The geographic location where the table resides.
      This value is inherited from the dataset.
    materializedView: [Optional] Materialized view definition.
    model: [Output-only, Beta] Present iff this table represents a ML model.
      Describes the training information for the model, and it is required to
      run 'PREDICT' queries.
    numBytes: [Output-only] The size of this table in bytes, excluding any
      data in the streaming buffer.
    numLongTermBytes: [Output-only] The number of bytes in the table that are
      considered "long-term storage".
    numPhysicalBytes: [Output-only] [TrustedTester] The physical size of this
      table in bytes, excluding any data in the streaming buffer. This
      includes compression and storage used for time travel.
    numRows: [Output-only] The number of rows of data in this table, excluding
      any data in the streaming buffer.
    rangePartitioning: [TrustedTester] Range partitioning specification for
      this table. Only one of timePartitioning and rangePartitioning should be
      specified.
    requirePartitionFilter: [Beta] [Optional] If set to true, queries over
      this table require a partition filter that can be used for partition
      elimination to be specified.
    schema: [Optional] Describes the schema of this table.
    selfLink: [Output-only] A URL that can be used to access this resource
      again.
    streamingBuffer: [Output-only] Contains information regarding this table's
      streaming buffer, if one is present. This field will be absent if the
      table is not being streamed to or if there is no data in the streaming
      buffer.
    tableReference: [Required] Reference describing the ID of this table.
    timePartitioning: Time-based partitioning specification for this table.
      Only one of timePartitioning and rangePartitioning should be specified.
    type: [Output-only] Describes the table type. The following values are
      supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
      by a SQL query. [TrustedTester] MATERIALIZED_VIEW: SQL query whose
      result is persisted. EXTERNAL: A table that references data stored in an
      external storage system, such as Google Cloud Storage. The default value
      is TABLE.
    view: [Optional] The view definition.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""The labels associated with this table. You can use these to organize
    and group your tables. Label keys and values can be no longer than 63
    characters, can only contain lowercase letters, numeric characters,
    underscores and dashes. International characters are allowed. Label values
    are optional. Label keys must start with a letter and each label in the
    list must have a different key.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clustering = _messages.MessageField('Clustering', 1)
  creationTime = _messages.IntegerField(2)
  description = _messages.StringField(3)
  encryptionConfiguration = _messages.MessageField('EncryptionConfiguration', 4)
  etag = _messages.StringField(5)
  expirationTime = _messages.IntegerField(6)
  externalDataConfiguration = _messages.MessageField('ExternalDataConfiguration', 7)
  friendlyName = _messages.StringField(8)
  id = _messages.StringField(9)
  kind = _messages.StringField(10, default=u'bigquery#table')
  labels = _messages.MessageField('LabelsValue', 11)
  lastModifiedTime = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
  location = _messages.StringField(13)
  materializedView = _messages.MessageField('MaterializedViewDefinition', 14)
  model = _messages.MessageField('ModelDefinition', 15)
  numBytes = _messages.IntegerField(16)
  numLongTermBytes = _messages.IntegerField(17)
  numPhysicalBytes = _messages.IntegerField(18)
  numRows = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
  rangePartitioning = _messages.MessageField('RangePartitioning', 20)
  requirePartitionFilter = _messages.BooleanField(21, default=False)
  schema = _messages.MessageField('TableSchema', 22)
  selfLink = _messages.StringField(23)
  streamingBuffer = _messages.MessageField('Streamingbuffer', 24)
  tableReference = _messages.MessageField('TableReference', 25)
  timePartitioning = _messages.MessageField('TimePartitioning', 26)
  type = _messages.StringField(27)
  view = _messages.MessageField('ViewDefinition', 28)
class TableCell(_messages.Message):
r"""A TableCell object.
Fields:
v: A extra_types.JsonValue attribute.
"""
v = _messages.MessageField('extra_types.JsonValue', 1)
class TableDataInsertAllRequest(_messages.Message):
r"""A TableDataInsertAllRequest object.
Messages:
RowsValueListEntry: A RowsValueListEntry object.
Fields:
ignoreUnknownValues: [Optional] Accept rows that contain values that do
not match the schema. The unknown values are ignored. Default is false,
which treats unknown values as errors.
kind: The resource type of the response.
rows: The rows to insert.
skipInvalidRows: [Optional] Insert all valid rows of a request, even if
invalid rows exist. The default value is false, which causes the entire
request to fail if any invalid rows exist.
templateSuffix: If specified, treats the destination table as a base
template, and inserts the rows into an instance table named
"{destination}{templateSuffix}". BigQuery will manage creation of the
instance table, using the schema of the base template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-
tables for considerations when working with templates tables.
"""
class RowsValueListEntry(_messages.Message):
r"""A RowsValueListEntry object.
Fields:
insertId: [Optional] A unique ID for each row. BigQuery uses this
property to detect duplicate insertion requests on a best-effort
basis.
json: [Required] A JSON object that contains a row of data. The object's
properties and values must match the destination table's schema.
"""
insertId = _messages.StringField(1)
json = _messages.MessageField('JsonObject', 2)
ignoreUnknownValues = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllRequest')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
skipInvalidRows = _messages.BooleanField(4)
templateSuffix = _messages.StringField(5)
class TableDataInsertAllResponse(_messages.Message):
r"""A TableDataInsertAllResponse object.
Messages:
InsertErrorsValueListEntry: A InsertErrorsValueListEntry object.
Fields:
insertErrors: An array of errors for rows that were not inserted.
kind: The resource type of the response.
"""
class InsertErrorsValueListEntry(_messages.Message):
r"""A InsertErrorsValueListEntry object.
Fields:
errors: Error information for the row indicated by the index property.
index: The index of the row that error applies to.
"""
errors = _messages.MessageField('ErrorProto', 1, repeated=True)
index = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
insertErrors = _messages.MessageField('InsertErrorsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllResponse')
class TableDataList(_messages.Message):
r"""A TableDataList object.
Fields:
etag: A hash of this page of results.
kind: The resource type of the response.
pageToken: A token used for paging results. Providing this token instead
of the startIndex parameter can help you retrieve stable results when an
underlying table is changing.
rows: Rows of results.
totalRows: The total number of rows in the complete table.
"""
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataList')
pageToken = _messages.StringField(3)
rows = _messages.MessageField('TableRow', 4, repeated=True)
totalRows = _messages.IntegerField(5)
class TableFieldSchema(_messages.Message):
r"""A TableFieldSchema object.
Messages:
CategoriesValue: [Optional] The categories attached to this field, used
for field-level access control.
Fields:
categories: [Optional] The categories attached to this field, used for
field-level access control.
description: [Optional] The field description. The maximum length is 1,024
characters.
fields: [Optional] Describes the nested schema fields if the type property
is set to RECORD.
mode: [Optional] The field mode. Possible values include NULLABLE,
REQUIRED and REPEATED. The default value is NULLABLE.
name: [Required] The field name. The name must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_), and must start with a letter or
underscore. The maximum length is 128 characters.
type: [Required] The field data type. Possible values include STRING,
BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT),
BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD
(where RECORD indicates that the field contains a nested schema) or
STRUCT (same as RECORD).
"""
class CategoriesValue(_messages.Message):
r"""[Optional] The categories attached to this field, used for field-level
access control.
Fields:
names: A list of category resource names. For example,
"projects/1/taxonomies/2/categories/3". At most 5 categories are
allowed.
"""
names = _messages.StringField(1, repeated=True)
categories = _messages.MessageField('CategoriesValue', 1)
description = _messages.StringField(2)
fields = _messages.MessageField('TableFieldSchema', 3, repeated=True)
mode = _messages.StringField(4)
name = _messages.StringField(5)
type = _messages.StringField(6)
class TableList(_messages.Message):
r"""A TableList object.
Messages:
TablesValueListEntry: A TablesValueListEntry object.
Fields:
etag: A hash of this page of results.
kind: The type of list.
nextPageToken: A token to request the next page of results.
tables: Tables in the requested dataset.
totalItems: The total number of tables in the dataset.
"""
class TablesValueListEntry(_messages.Message):
r"""A TablesValueListEntry object.
Messages:
LabelsValue: The labels associated with this table. You can use these to
organize and group your tables.
ViewValue: Additional details for a view.
Fields:
clustering: [Beta] Clustering specification for this table, if
configured.
creationTime: The time when this table was created, in milliseconds
since the epoch.
expirationTime: [Optional] The time when this table expires, in
milliseconds since the epoch. If not present, the table will persist
indefinitely. Expired tables will be deleted and their storage
reclaimed.
friendlyName: The user-friendly name for this table.
id: An opaque ID of the table
kind: The resource type.
labels: The labels associated with this table. You can use these to
organize and group your tables.
tableReference: A reference uniquely identifying the table.
timePartitioning: The time-based partitioning specification for this
table, if configured.
type: The type of table. Possible values are: TABLE, VIEW.
view: Additional details for a view.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""The labels associated with this table. You can use these to organize
and group your tables.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class ViewValue(_messages.Message):
r"""Additional details for a view.
Fields:
useLegacySql: True if view is defined in legacy SQL dialect, false if
in standard SQL.
"""
useLegacySql = _messages.BooleanField(1)
clustering = _messages.MessageField('Clustering', 1)
creationTime = _messages.IntegerField(2)
expirationTime = _messages.IntegerField(3)
friendlyName = _messages.StringField(4)
id = _messages.StringField(5)
kind = _messages.StringField(6, default=u'bigquery#table')
labels = _messages.MessageField('LabelsValue', 7)
tableReference = _messages.MessageField('TableReference', 8)
timePartitioning = _messages.MessageField('TimePartitioning', 9)
type = _messages.StringField(10)
view = _messages.MessageField('ViewValue', 11)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableList')
nextPageToken = _messages.StringField(3)
tables = _messages.MessageField('TablesValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class TableReference(_messages.Message):
r"""A TableReference object.
Fields:
datasetId: [Required] The ID of the dataset containing this table.
projectId: [Required] The ID of the project containing this table.
tableId: [Required] The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is
1,024 characters.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
tableId = _messages.StringField(3)
class TableRow(_messages.Message):
r"""A TableRow object.
Fields:
f: Represents a single row in the result set, consisting of one or more
fields.
"""
f = _messages.MessageField('TableCell', 1, repeated=True)
class TableSchema(_messages.Message):
r"""A TableSchema object.
Fields:
fields: Describes the fields in a table.
"""
fields = _messages.MessageField('TableFieldSchema', 1, repeated=True)
class TimePartitioning(_messages.Message):
r"""A TimePartitioning object.
Fields:
expirationMs: [Optional] Number of milliseconds for which to keep the
storage for partitions in the table. The storage in a partition will
have an expiration time of its partition time plus this value.
field: [Beta] [Optional] If not set, the table is partitioned by pseudo
column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or
'_PARTITIONDATE' as DATE type. If field is specified, the table is
instead partitioned by this field. The field must be a top-level
TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.
requirePartitionFilter: A boolean attribute.
type: [Required] The only type supported is DAY, which will generate one
partition per day.
"""
expirationMs = _messages.IntegerField(1)
field = _messages.StringField(2)
requirePartitionFilter = _messages.BooleanField(3)
type = _messages.StringField(4)
class TrainingRun(_messages.Message):
r"""A TrainingRun object.
Messages:
TrainingOptionsValue: [Output-only, Beta] Training options used by this
training run. These options are mutable for subsequent training runs.
Default values are explicitly stored for options not specified in the
input query of the first training run. For subsequent training runs, any
option not explicitly specified in the input query will be copied from
the previous training run.
Fields:
iterationResults: [Output-only, Beta] List of each iteration results.
startTime: [Output-only, Beta] Training run start time in milliseconds
since the epoch.
state: [Output-only, Beta] Different state applicable for a training run.
IN PROGRESS: Training run is in progress. FAILED: Training run ended due
to a non-retryable failure. SUCCEEDED: Training run successfully
completed. CANCELLED: Training run cancelled by the user.
trainingOptions: [Output-only, Beta] Training options used by this
training run. These options are mutable for subsequent training runs.
Default values are explicitly stored for options not specified in the
input query of the first training run. For subsequent training runs, any
option not explicitly specified in the input query will be copied from
the previous training run.
"""
class TrainingOptionsValue(_messages.Message):
r"""[Output-only, Beta] Training options used by this training run. These
options are mutable for subsequent training runs. Default values are
explicitly stored for options not specified in the input query of the
first training run. For subsequent training runs, any option not
explicitly specified in the input query will be copied from the previous
training run.
Fields:
earlyStop: A boolean attribute.
l1Reg: A number attribute.
l2Reg: A number attribute.
learnRate: A number attribute.
learnRateStrategy: A string attribute.
lineSearchInitLearnRate: A number attribute.
maxIteration: A string attribute.
minRelProgress: A number attribute.
warmStart: A boolean attribute.
"""
earlyStop = _messages.BooleanField(1)
l1Reg = _messages.FloatField(2)
l2Reg = _messages.FloatField(3)
learnRate = _messages.FloatField(4)
learnRateStrategy = _messages.StringField(5)
lineSearchInitLearnRate = _messages.FloatField(6)
maxIteration = _messages.IntegerField(7)
minRelProgress = _messages.FloatField(8)
warmStart = _messages.BooleanField(9)
iterationResults = _messages.MessageField('IterationResult', 1, repeated=True)
startTime = _message_types.DateTimeField(2)
state = _messages.StringField(3)
trainingOptions = _messages.MessageField('TrainingOptionsValue', 4)
class UserDefinedFunctionResource(_messages.Message):
r"""A UserDefinedFunctionResource object.
Fields:
inlineCode: [Pick one] An inline resource that contains code for a user-
defined function (UDF). Providing a inline code resource is equivalent
to providing a URI for a file containing the same code.
resourceUri: [Pick one] A code resource to load from a Google Cloud
Storage URI (gs://bucket/path).
"""
inlineCode = _messages.StringField(1)
resourceUri = _messages.StringField(2)
class ViewDefinition(_messages.Message):
r"""A ViewDefinition object.
Fields:
query: [Required] A query that BigQuery executes when the view is
referenced.
useLegacySql: Specifies whether to use BigQuery's legacy SQL for this
view. The default value is true. If set to false, the view will use
BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ Queries and views that reference this view must use the same
flag value.
userDefinedFunctionResources: Describes user-defined function resources
used in the query.
"""
query = _messages.StringField(1)
useLegacySql = _messages.BooleanField(2)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 3, repeated=True)
| [
"hshah496@gmail.com"
] | hshah496@gmail.com |
bccf3a2046edc6f810228a7de9fc70ecac8ad0e9 | 23c0c86a3e6419e1f50a7e70a7a9b5d31775c9af | /ZleceniaBadan/migrations2/0004_auto_20200315_1712.py | 8c1931ccaec64d23705daeb0804738624862db84 | [] | no_license | DanielTrochonowicz/zlecenie-badan | 7e71e06abc386fa92f4841eb3f5a107f9ed115d1 | 724f2d917715b6b69a41d1dd16116abd76123d01 | refs/heads/master | 2022-12-17T11:22:05.586535 | 2020-03-20T23:12:16 | 2020-03-20T23:12:16 | 247,378,494 | 0 | 0 | null | 2022-11-22T05:23:55 | 2020-03-15T01:00:18 | Python | UTF-8 | Python | false | false | 1,182 | py | # Generated by Django 3.0.4 on 2020-03-15 16:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ZleceniaBadan', '0003_auto_20200315_1703'),
]
operations = [
migrations.AddField(
model_name='zleceniabadan',
name='id',
field=models.AutoField(auto_created=True, default=None, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='extrainfo',
name='rodzaj',
field=models.IntegerField(choices=[(3, 'BARDZO_ZLY'), (1, 'DOBRY'), (3, 'NIE_WYLECZALNY'), (2, 'SLABY'), (0, 'Nieznany')], default=0),
),
migrations.AlterField(
model_name='zleceniabadan',
name='badanie',
field=models.CharField(default='', max_length=128),
),
migrations.AlterField(
model_name='zleceniabadan',
name='info',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='ZleceniaBadan.ExtraInfo'),
),
]
| [
"trochonowiczdaniel@wp.pl"
] | trochonowiczdaniel@wp.pl |
e1cdce30a4b71c76503c91779866ff39841ff975 | c3fd2cffb0c082ddea93361306095f1af74fa3bc | /fizzbuzz.py | 83ca8fe4ed000978433c45158a58f37c3ab34599 | [] | no_license | pmalexander/HTML-CSS-Breakout | 0853c9dcfce9aa05a955f861d2729b061fbe3798 | 9ffb7c2493e589eb5092a801f8941a4ef8c3d936 | refs/heads/master | 2021-09-06T07:44:52.917616 | 2018-02-03T23:42:43 | 2018-02-03T23:42:43 | 120,119,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | print ("FizzBuzz counting up to 100!")
b = 1
n = 150
fz = n % 3 == 0
bz = n % 5 == 0
#can't figure out if should use 'while' statement, unsure on how to set specific numbers to reflect divisible numbers
numbers = 1
increasing = True
while increasing:
if (numbers <= n):
print("{}".format(numbers))
numbers #use?
else:
increasing = False
#maybe 'for'?
for n in range(b, n):
if (n % 3 == 0):
print("Fizz")
for n in range(b, n):
if (n % 5 == 0):
print("Buzz")
for n in range(b, n):
if (n % 3 == 0) and (n % 5 == 0):
print("FizzBuzz")
| [
"pmalexander5@gmail.com"
] | pmalexander5@gmail.com |
32cb8b67bf03817c2fb04cc1652db2eaddc857a4 | eeb47c97585543575e01265599e1a4350f8a0bc9 | /trading_ES_breakout_june2021.py | 3e53c8a2fd71b45e348740fd15d8f98055a489ba | [
"MIT"
] | permissive | spawnaga/ES_futures_options | c4d047fe129c9a66e9b7d1164a42f18157fa79db | 2bab7480ba1d3806ffe9e13edcb7173c86d2cad6 | refs/heads/master | 2023-06-24T00:55:14.446632 | 2021-06-21T08:42:07 | 2021-06-21T08:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,058 | py | from datetime import datetime, timedelta, time
import nest_asyncio
import numpy as np
import pandas as pd
import statsmodels.api as sm
import talib as ta
from ib_insync import *
from stocktrends import Renko
import sys
import math
nest_asyncio.apply() # enable nest asyncio
sys.setrecursionlimit(10 ** 9) # set recursion limit to 1000000000
pd.options.mode.chained_assignment = None # remove a warning
def x_round(x):
return round(x*4)/4
class get_data:
""" A class to get ES Technical analysis and next 2 days expiration date and delta 60 option strikes for
whatever ES price at """
def __init__(self):
pass
def next_exp_weekday(self):
""" Set next expiration date for contract 0 = Monday, 1 = Tuesday, etc..."""
weekdays = {2: [5, 6, 0], 4: [0, 1, 2], 0: [3, 4]}
today = datetime.today().weekday()
for exp, day in weekdays.items():
if today in day:
return exp # return the 2nd next weekday number
def next_weekday(self, d, weekday):
""" Translate weekdays number to a date for example next Mon = October 19th 2020"""
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
date_to_return = d + timedelta(days_ahead) # 0 = Monday, 1=Tus self.ES day, 2=Wed self.ES day...
return date_to_return.strftime('%Y%m%d') # return the date in the form of (yearmonthday) ex:(20201019)
def get_strikes_and_expiration(self):
""" When used, returns strikes and expiration for the ES futures options"""
ES = Future(symbol='ES', lastTradeDateOrContractMonth='20210917', exchange='GLOBEX',
currency='USD')
ib.qualifyContracts(ES)
expiration = self.next_weekday(datetime.today(), self.next_exp_weekday())
chains = ib.reqSecDefOptParams(underlyingSymbol='ES', futFopExchange='GLOBEX', underlyingSecType='FUT',
underlyingConId=ES.conId)
chain = util.df(chains)
strikes = chain[chain['expirations'].astype(str).str.contains(expiration)].loc[:, 'strikes'].values[0]
[ESValue] = ib.reqTickers(ES)
ES_price = ESValue.marketPrice()
strikes = [strike for strike in strikes
if strike % 5 == 0
and ES_price - 10 < strike < ES_price + 10]
return strikes, expiration
def get_contract(self, right, net_liquidation):
""" Get contracts for ES futures options by using get_strikes_and_expiration function"""
strikes, expiration = self.get_strikes_and_expiration()
for strike in strikes:
contract = FuturesOption(symbol='ES', lastTradeDateOrContractMonth=expiration,
strike=strike, right=right, exchange='GLOBEX')
ib.qualifyContracts(contract)
price = ib.reqMktData(contract, "", False, False)
if float(price.last) * 50 >= net_liquidation:
continue
else:
return contract
def slope(self, ser, n):
"""function to calculate the slope of n consecutive points on a plot"""
slopes = [i * 0 for i in range(n - 1)]
for i in range(n, len(ser) + 1):
y = ser[i - n:i]
x = np.array(range(n))
y_scaled = (y - y.min()) / (y.max() - y.min())
x_scaled = (x - x.min()) / (x.max() - x.min())
x_scaled = sm.add_constant(x_scaled)
model = sm.OLS(y_scaled, x_scaled)
results = model.fit()
slopes.append(results.params[-1])
slope_angle = (np.rad2deg(np.arctan(np.array(slopes))))
return np.array(slope_angle)
def renko_df(self, df_raw, ATR=120):
# df_raw = df_raw[-500:]
# df_raw.reset_index(inplace=True)
df_raw = df_raw.reset_index()
renko = Renko(df_raw[['date', 'open', 'high', 'low', 'close', 'volume']])
renko.brick_size = ATR
df = renko.get_ohlc_data()
df['bar_num'] = np.where(df['uptrend'] == True, 1, np.where(df['uptrend'] == False, -1, 0))
for i in range(1, len(df["bar_num"])):
if df["bar_num"].iloc[i] > 0 and df["bar_num"].iloc[i - 1] > 0:
df["bar_num"].iloc[i] += df["bar_num"].iloc[i - 1]
elif df["bar_num"].iloc[i] < 0 and df["bar_num"].iloc[i - 1] < 0:
df["bar_num"].iloc[i] += df["bar_num"].iloc[i - 1]
df.drop_duplicates(subset="date", keep="last", inplace=True)
df_raw = df_raw.merge(df.loc[:, ["date", "bar_num"]], how="outer", on="date")
df_raw["bar_num"].fillna(method='ffill', inplace=True)
# df_raw["adx_slope"] = slope(df_raw['adx'], 5)
# print(df_raw.iloc[:2,:])
# print(f'**************{len(df_raw)}**********************')
return df_raw
def tech_analysis(self, df, period):
df = df[['open', 'high', 'low', 'close', 'volume']]
df['atr'] = ta.ATR(df['high'], df['low'], df['close'], 10)
df = df.reset_index().fillna(method='ffill')
df = self.renko_df(df, df['atr'].mean())
df['OBV'] = ta.OBV(df['close'], df['volume'])
df["obv_slope"] = self.slope(df['OBV'], 5)
df["roll_max_cp"] = df["high"].rolling(10).max()
df["roll_min_cp"] = df["low"].rolling(10).min()
df["roll_max_vol"] = df["volume"].rolling(10).max()
# df.columns = [str(col) + (f'_{period}' if 'date' not in col else '') for col in df.columns]
return df
class Trade:
""" This class will trade the data from get_data class in interactive brokers. It includes strategy,
buying/selling criteria, and controls all connections to interactive brokers orders.
"""
def __init__(self):
self.call_cost = -1
self.put_cost = -1
self.portfolio = []
self.connect()
self.ohlc_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'
}
contract = Future(symbol='ES', lastTradeDateOrContractMonth='20210917', exchange='GLOBEX', currency='USD') # define
ib.qualifyContracts(contract)
self.ES = ib.reqHistoricalData(contract=contract, endDateTime='', durationStr='2 D',
barSizeSetting='3 mins', whatToShow='TRADES', useRTH=False, keepUpToDate=True,
timeout=10) # start data collection for ES-Mini
df_raw = util.df(self.ES)
df_1= df_raw.set_index('date')
df_5 = df_1.resample('5T').agg(self.ohlc_dict)
df_5.columns = ['open','high','low','close','volume']
df_1 = res.tech_analysis(df_1,1)
df_5 = res.tech_analysis(df_5, 5)
self.data = pd.merge(df_1,df_5,on='date', how='outer').fillna(method='ffill')
self.data_raw = self.data
self.stock_owned = np.zeros(2) # get data from get data class
self.option_position() # check holding positions and initiate contracts for calls and puts
ib.sleep(1)
self.call_option_volume = np.ones(20) # start call options volume array to get the max volume in the last 20
self.put_option_volume = np.ones(20) # start put options volume array to get the max volume in the last 20 ticks
self.submitted = 0 # order submission flag
self.portfolio = ib.portfolio()
self.put_contract_price = 0.25 * round(
((self.put_option_price.ask + self.put_option_price.bid) / 2) / 0.25) # calculate average put price
self.call_contract_price = 0.25 * round(
((self.call_option_price.ask + self.call_option_price.bid) / 2) / 0.25) # calculate average call price
self.options_price = np.array(
[self.call_contract_price, self.put_contract_price]) # set an array for options prices
self.max_call_price = self.call_option_price.bid # define max call price (use to compare to current price)
self.max_put_price = self.put_option_price.bid # define max put price (use to compare to current price)
self.prev_cash = 0
self.cash_in_hand = 0
self.total_liquidity = 0
self.portfolio_value = 0
self.unrealizedPNL = 0
self.realizedPNL = 0
self.cash_in_hand = 0
self.realizedPNL = 0
self.unrealizedPNL = 0
self.portfolio_value = 0
self.barnumb_lock = False
self.barnumb_value = 0
for self.account in ib.accountValues(): # get initial account value
self.cash_in_hand = float(
self.account.value) if (
self.account.tag == 'TotalCashValue' and self.account.account == 'DU1347520') else self.cash_in_hand
self.portfolio_value = float(
self.account.value) if (
self.account.tag == 'GrossPositionValue' and self.account.account == 'DU1347520') else self.portfolio_value
self.unrealizedPNL = float(
self.account.value) if (
self.account.tag == 'UnrealizedPnL' and self.account.account == 'DU1347520') else self.unrealizedPNL
self.realizedPNL = float(
self.account.value) if (
self.account.tag == 'RealizedPnL' and self.account.account == 'DU1347520') else self.realizedPNL
self.reqId = []
self.second_buy = False
ib.reqGlobalCancel() # Making sure all orders for buying selling are canceled before starting trading
def trade(self, ES, hasNewBar=None):
# if not hasNewBar:
# return
if self.submitted == 1:
print('working on an order, wait please')
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
return
df_raw = util.df(self.ES)
df_raw.set_index('date', inplace=True)
df_raw = res.tech_analysis(df_raw, 1)
self.data_raw = df_raw
if self.data_raw.iloc[-1, 1] == 0:
return
df = self.data_raw[
['high', 'low', 'close', 'volume',
'roll_max_cp',
'roll_min_cp', 'roll_max_vol', 'atr', 'obv_slope', 'bar_num']].tail(
20) # filter data
if self.stock_owned.any() > 0 and not np.isnan(self.max_call_price) and not np.isnan(
self.max_put_price):
self.max_call_price = self.call_option_price.bid if self.call_option_price.bid > self.max_call_price else \
self.max_call_price
self.max_put_price = self.put_option_price.bid if self.put_option_price.bid > self.max_put_price else \
self.max_put_price # check if holding positions and how much the max price for current position
else:
self.max_call_price = self.call_option_price.bid
self.max_put_price = self.put_option_price.bid
if self.stock_owned[0] > 0:
print(f'Call cost was = {self.call_cost}')
print((self.call_option_price.bid - self.call_cost))
elif self.stock_owned[1] > 0:
print(f'Put cost was = {self.put_cost}')
print((self.put_option_price.bid - self.put_cost))
buy_index, sell_index, take_profit = self.strategy(df) # set initial buy index to None
print(f'stocks owning = {self.stock_owned}')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
if not len(sell_index) == 0: # start selling to stop loss
if len(buy_index) == 0:
for i in sell_index:
# self.stock_owned[i] = 0
if len(self.portfolio) > 0:
contract = self.call_contract if i == 0 else self.put_contract
ib.qualifyContracts(contract)
price = ib.reqMktData(contract, '', False, False, None)
self.flatten_position(contract, price)
self.submitted = 0
else:
for i in sell_index:
# self.stock_owned[i] = 0
if len(self.portfolio) > 0:
contract = self.call_contract if i == 0 else self.put_contract
ib.qualifyContracts(contract)
price = ib.reqMktData(contract, '', False, False, None)
self.flatten_position(contract, price)
ib.sleep(0)
for i in buy_index:
contract = res.get_contract('C', 2000) if i == 0 else res.get_contract('P', 2000)
ib.qualifyContracts(contract)
if self.cash_in_hand > (self.options_price[i] * 50) and self.cash_in_hand > self.portfolio_value \
and (self.stock_owned[0] < 1 or self.stock_owned[1] < 1) and len(
self.portfolio) == 0:
price = ib.reqMktData(contract, '', False, False)
ib.sleep(1)
quantity = int((self.cash_in_hand / (self.options_price[i] * 50))) - 1 if \
int((self.cash_in_hand / (self.options_price[i] * 50))) > 1 else 1
self.block_buying = 1
self.open_position(contract=contract, quantity=quantity, price=price)
self.submitted = 0
self.second_buy = False
elif not len(take_profit) == 0: # start selling to take profit
for i in take_profit:
print(self.stock_owned[i])
print(len(self.portfolio))
if len(self.portfolio) > 0:
contract = self.call_contract if i == 0 else self.put_contract
ib.qualifyContracts(contract)
price = ib.reqMktData(contract, '', False, False, None)
self.take_profit(contract, price)
self.submitted = 0
elif not len(buy_index) == 0: # start buying to start trade
if self.stock_owned.any() > 4:
print('cancel buying too many contracts')
return
print(f'buying index = {buy_index}')
for i in buy_index:
if not self.stock_owned.any() > 0:
contract = res.get_contract('C', 2000) if i == 0 else res.get_contract('P', 2000)
ib.qualifyContracts(contract)
else:
contract = self.call_contract if i == 0 else self.put_contract
if self.cash_in_hand > (self.options_price[i] * 50) \
and (self.stock_owned[0] < 2 or self.stock_owned[1] < 2):
price = ib.reqMktData(contract, '', False, False)
ib.sleep(1)
quantity = int((self.cash_in_hand / (self.options_price[i] * 50))) - 1 if \
int((self.cash_in_hand / (self.options_price[i] * 50))) > 1 else ib.positions()[
0].position if self.second_buy is True else 1
self.block_buying = 1
self.open_position(contract=contract, quantity=quantity, price=price)
self.submitted = 0
def strategy(self, df):
"""
Strategy to trade is:
Opening positions if:
- Buying ES Calls options when ES breaks the resistance from the last 30 minutes and the volume is
higher than the last 30 minutes
- Buying ES Puts options when ES breaks the support from the last 30 ninutes and the volume is
higher than the last 30 minutes
Closing positions if:
- For calls:
* Candle's previous close price - candle's previous atr was higher than current candles' low price
* The current call option's price is less than 0.5 from the highest price and current candle's
OBV slope angle is less than 0
- For puts:
* Candle's previous close price + candle's previous atr was lower than current candles' high price
* The current put option's price is less than 0.5 from the highest price and current candle's
OBV slope angle is more than 0
"""
buy_index = [] # set initial buy index to None
sell_index = [] # set initial sell index to None
take_profit = [] # set initial take profit index to None
i = -1 # use to get the last data in dataframe+
print( f'volume this minute so far = {df["volume"].iloc[i]}, max volume last 10 minutes = {df["roll_max_vol"].iloc[i-1]}')
print(f'price = {df["low"].iloc[i - 1]}, max_high ={df["roll_max_cp"].iloc[i-1]}, max_low = {df["roll_min_cp"].iloc[i - 1]}')
print('buying put', df["low"].iloc[i] <= df["roll_min_cp"].iloc[i - 1] ,
df["volume"].iloc[i] >df["roll_max_vol"].iloc[i - 1])
print('buying call',(df['high'].iloc[i] >= df["roll_max_cp"].iloc[i-1]), \
df["volume"].iloc[i]>df["roll_max_vol"].iloc[i-1])
print(
f'bar numb = {self.barnumb_lock} and self.barnumb_value= {self.barnumb_value} df["bar_num"] = {df["bar_num"].iloc[-1]}')
print(f'max call price = {self.max_call_price} and max put price= {self.max_put_price} and obv slope = {df["obv_slope"].iloc[i]}')
print(f'current call bid price = {self.call_option_price.bid} and current put bid price = {self.put_option_price.bid}')
if (self.portfolio_value != 0 and self.stock_owned[0] == 0 and self.stock_owned[1] == 0) or (
self.stock_owned[0] != 0 or self.stock_owned[1] != 0 and self.portfolio_value == 0):
self.option_position()
self.submitted = 0
if self.call_option_price.bid < 1.25 or np.isnan(self.call_option_price.bid) or self.put_option_price.bid < 1.25 \
or np.isnan(self.put_option_price.bid) or (self.data_raw.iloc[-1, 2] < 100):
print('glitch or slippage in option prices, cancel check')
return buy_index, sell_index, take_profit
elif (self.stock_owned[0] == 0 and self.stock_owned[1] == 0) and (
df['high'].iloc[i-1] >= df["roll_max_cp"].iloc[i-1] and
df["volume"].iloc[i]>0.6*df["roll_max_vol"].iloc[i-1]) and buy_index == [] and self.submitted == 0:
print("Buy call")
buy_index.append(0)
self.submitted = 1
return buy_index, sell_index, take_profit
elif (self.stock_owned[0] == 0 and self.stock_owned[1] == 0) and (
df["low"].iloc[i-1]<= df["roll_min_cp"].iloc[i-1] and
df["volume"].iloc[i]>0.6*df["roll_max_vol"].iloc[i-1]) and buy_index == [] and self.submitted == 0:
print("Buy put")
buy_index.append(1)
self.submitted = 1
return buy_index, sell_index, take_profit
elif (self.stock_owned[0] >= 1) and not np.isnan(self.call_option_price.bid) and \
((df['low'].iloc[i]<df['close'].iloc[i-1] - df['atr'].iloc[i-1]) or (self.call_option_price.bid < self.max_call_price and df['obv_slope'].iloc[i] <= 0))\
and \
self.call_option_price.bid > self.call_option_price.modelGreeks.optPrice and self.submitted == 0:
# conditions to sell calls to stop loss
self.submitted = 1
print("sell call")
sell_index.append(0)
return buy_index, sell_index, take_profit
elif (self.stock_owned[1] >= 1) and not np.isnan(self.put_option_price.bid) and \
((df["high"].iloc[i]>df['close'].iloc[i-1] + df['atr'].iloc[i-1]) or (self.put_option_price.bid < self.max_put_price and df['obv_slope'].iloc[i] >= 0))\
and \
self.put_option_price.bid > self.put_option_price.modelGreeks.optPrice and self.submitted == 0:
# conditions to sell puts to stop loss
print("sell put")
sell_index.append(1)
self.submitted = 1
return buy_index, sell_index, take_profit
# elif (self.stock_owned[0] >= 1) and not np.isnan(self.call_option_price.bid) and \
# df['low'].iloc[i] <= df["roll_min_cp"].iloc[i - 1] and \
# df['volume'].iloc[i] >0.6*df["roll_max_vol"].iloc[i - 1] and \
# self.call_option_price.bid > self.call_option_price.modelGreeks.optPrice and self.submitted == 0:
#
# self.submitted = 1
# print("sell call buy put")
# sell_index.append(0)
# buy_index.append(1)
#
# return buy_index, sell_index, take_profit
# #
#
# elif (self.stock_owned[1] >= 1) and not np.isnan(self.put_option_price.bid) and \
# df["high"].iloc[i] >= df["roll_max_cp"].iloc[i] and \
# df['volume'].iloc[i] >0.6*df["roll_max_vol"].iloc[i - 1] and \
# self.put_option_price.bid > self.put_option_price.modelGreeks.optPrice and self.submitted == 0:
# # conditions to sell puts to stop loss
#
# print("sell put buy call")
# sell_index.append(1)
# buy_index.append(0)
# self.submitted = 1
# return buy_index, sell_index, take_profit
elif self.barnumb_lock is True and self.barnumb_value != self.data_raw["bar_num"].iloc[i]:
self.submitted = 0
self.barnumb_lock = False
self.barnumb_value = 0
return buy_index, sell_index, take_profit
else:
print("Hold")
return buy_index, sell_index, take_profit
    def error(self, reqId=None, errorCode=None, errorString=None, contract=None):
        """IB error-event handler.

        For benign data-farm/connectivity notices (codes 2104/2108/2158/
        10182/1102/2106/2107) it tears down and re-requests the streaming
        ES bar subscription; for code 201 it re-syncs option position state.
        """
        print(errorCode, errorString)
        # len(self.reqId) < 1 acts as a latch so only one restart runs at a time.
        if errorCode in [2104, 2108, 2158, 10182, 1102, 2106, 2107] and len(self.reqId) < 1:
            self.reqId.append(reqId)
            ib.cancelHistoricalData(self.ES)
            del self.ES
            ib.sleep(30)
            # Rebuild the ES-Mini futures contract and restart the
            # keep-up-to-date historical bar stream (3-min bars, 2 days).
            ES = Future(symbol='ES', lastTradeDateOrContractMonth='20210917', exchange='GLOBEX',
                        currency='USD')  # define
            # ES-Mini futures contract
            ib.qualifyContracts(ES)
            self.ES = ib.reqHistoricalData(contract=ES, endDateTime='', durationStr='2 D',
                                           barSizeSetting='3 mins', whatToShow='TRADES', useRTH=False, keepUpToDate=True,
                                           timeout=10)  # start data collection for ES-Mini
            print('attempt to restart data check')
            if len(self.ES) == 0:
                # Nothing came back: retry recursively.
                # NOTE(review): unbounded recursion if the feed stays down -- confirm intended.
                print(self.ES)
                self.error()
                self.reqId = []
            else:
                ib.sleep(1)
                self.reqId = []
                # Re-attach the bar-update callback and process the bars already buffered.
                self.ES.updateEvent += self.trade
                self.trade(self.ES)
        elif errorCode == 201:
            # 201: order rejected -- refresh portfolio/option state.
            self.option_position()
    def flatten_position(self, contract, price):
        """Close the holding that matches *contract* to stop a loss.

        Long positions are offered at the bid/ask midpoint via a limit
        order; otherwise a market order is used.  Waits 10 s and cancels
        if the order is not fully filled.
        """
        print('flatttttttttttttttttttttttttttttttttttttttttttttttttttttt')
        portfolio = self.portfolio
        for each in portfolio:  # check current position and select contract
            print(price.bid)
            if each.contract != contract:
                # Cache the other leg's contract for later use.
                # NOTE(review): this returns on the FIRST non-matching holding,
                # so a matching item later in the portfolio is never reached --
                # confirm the portfolio only ever holds one option.
                if contract.right == 'C':
                    self.call_contract = each.contract
                elif contract.right == 'P':
                    self.put_contract = each.contract
                return
            ib.qualifyContracts(each.contract)
            action = 'SELL'  # to offset the long portfolio
            totalQuantity = abs(each.position)  # check holding quantity
            print(f'price = {price.bid + 0.25}')
            print(f'Flatten Position: {action} {totalQuantity} {contract.localSymbol}')
            # Long: midpoint limit order; otherwise close with a market order
            # as fast as possible.
            order = LimitOrder(action=action, totalQuantity=totalQuantity, lmtPrice=x_round((price.ask + price.bid)/2),
                               account='U2809143') if each.position > 0 \
                else MarketOrder(action=action, totalQuantity=totalQuantity,
                                 account='U2809143')
            trade = ib.placeOrder(each.contract, order)
            ib.sleep(10)  # give the order 10 s to fill
            if not trade.orderStatus.remaining == 0:
                ib.cancelOrder(order)  # cancel if not (fully) filled
                self.submitted = 0
            else:
                if trade.orderStatus.status == 'Filled':
                    # Lock trading until a new bar number arrives.
                    self.barnumb_lock = True
                    self.barnumb_value = self.data_raw['bar_num'].iloc[-1]
                self.submitted = 0
                print(trade.orderStatus.status)
            ib.sleep(0)
        return
    def take_profit(self, contract, price):
        """Sell the holding matching *contract* at the bid/ask midpoint to
        realise a profit; cancels after 15 s if unfilled."""
        # Bail out when there is no quote or when the ownership guard fires.
        # NOTE(review): stock_owned.any() == 1 is True whenever ANY position is
        # held, which makes this guard skip profit-taking while holding --
        # confirm the intended condition.
        if np.isnan(price.bid) or self.stock_owned.any()==1:
            self.submitted = 0
            return
        print('take_________________profit')
        portfolio = self.portfolio
        for each in portfolio:
            if each.contract != contract:
                # Cache the other leg's contract; note this aborts the whole
                # scan on the first mismatch (same pattern as flatten_position).
                if contract.right == 'C':
                    self.call_contract = each.contract
                elif contract.right == 'P':
                    self.put_contract = each.contract
                return
            # if (price.bid - 0.5) <= 0.25 + (each.averageCost / 50):  # check if profit did happen
            #     print(price.bid, each.averageCost / 50)
            #     print('cancel sell no profit yet')
            #     self.submitted = 0
            #     return
            ib.qualifyContracts(each.contract)
            action = 'SELL'  # to offset the long portfolio
            totalQuantity = abs(each.position)
            print(f'price = {price.bid}')
            print(f'Take profit Position: {action} {totalQuantity} {contract.localSymbol}')
            order = LimitOrder(action=action, totalQuantity=totalQuantity, lmtPrice=x_round((price.ask + price.bid)/2), account='U2809143')
            trade = ib.placeOrder(each.contract, order)
            ib.sleep(15)
            if not trade.orderStatus.remaining == 0:
                ib.cancelOrder(order)
                self.submitted = 0
            else:
                # Fully filled: lock trading until the next bar number.
                self.barnumb_value = self.data_raw['bar_num'].iloc[-1]
                self.barnumb_lock = True
                self.submitted = 0
                print(trade.orderStatus.status)
        return
def open_position(self, contract, quantity, price): # start position
import math
if len(ib.positions()) > 0 or len(ib.reqAllOpenOrders()) > 0 :
# if (len(ib.positions()) > 0 or len(ib.reqAllOpenOrders()) > 0) and (self.second_buy is False):
print('Rejected to buy, either because the time of trade or there is another order or current loss >= 200')
self.submitted = 0
return
quantity = 4 if int(math.floor(price.bid*50 / (float(self.cash_in_hand)))) > 4 else 1
order = LimitOrder(action='BUY', totalQuantity=quantity,
lmtPrice=price.ask, account='U2809143') # round(25 * round(price[i]/25, 2), 2))
trade = ib.placeOrder(contract, order)
print(f'buying {"CALL" if contract.right == "C" else "PUT"}')
ib.sleep(15)
if not trade.orderStatus.status == "Filled":
ib.cancelOrder(order)
self.submitted = 0
else:
self.stock_owned = np.array([quantity, 0]) if contract.right == "C" else np.array([0, quantity])
self.second_buy = False
self.submitted = 0
self.submitted = 0
print(trade.orderStatus.status)
return
    def option_position(self, event=None):
        """Position-event handler: sync ``stock_owned``, per-leg costs and the
        active call/put contracts (and their market-data streams) with the
        broker's portfolio."""
        position = ib.portfolio()
        call_position = None
        put_position = None
        if len(position) == 0:
            # Flat: reset state and subscribe fresh contracts from
            # res.get_contract (parameter semantics defined in get_data --
            # not visible here).
            self.stock_owned = np.zeros(2)
            self.portfolio = position
            self.call_cost = -1
            self.put_cost = -1
            self.call_contract = res.get_contract('C', 2000)
            ib.qualifyContracts(self.call_contract)
            self.put_contract = res.get_contract('P', 2000)
            ib.qualifyContracts(self.put_contract)
            self.call_option_price = ib.reqMktData(self.call_contract, '', False,
                                                   False)  # start data collection for calls
            self.put_option_price = ib.reqMktData(self.put_contract, '', False, False)  # start data collection for puts
            ib.sleep(1)
            return
        else:
            if self.call_cost or self.put_cost:
                # No-op guard: any truthy cost falls through unchanged.
                pass
            if self.portfolio != position:
                self.portfolio = position
                for each in position:
                    # NOTE(review): each leg resets the other to None, so only
                    # the LAST portfolio item's right effectively survives --
                    # confirm the portfolio never holds both legs at once.
                    if each.contract.right == 'C':
                        call_position = each.contract
                        put_position = None
                        ib.qualifyContracts(call_position)
                        self.stock_owned[0] = each.position
                        # Average cost per contract, rounded to a 0.25 tick.
                        self.call_cost = 0.25 * round(each.averageCost / 50 / 0.25)
                    elif each.contract.right == 'P':
                        put_position = each.contract
                        call_position = None
                        ib.qualifyContracts(put_position)
                        self.stock_owned[1] = each.position
                        self.put_cost = 0.25 * round(each.averageCost / 50 / 0.25)
                # -1 marks "leg not held"; otherwise keep the computed cost.
                self.call_cost = self.call_cost if not isinstance(call_position, type(None)) else -1
                self.put_cost = self.put_cost if not isinstance(put_position, type(None)) else -1
                self.call_contract = call_position if not isinstance(call_position, type(None)) else res.get_contract(
                    'C', 2000)
                ib.qualifyContracts(self.call_contract)
                self.put_contract = put_position if not isinstance(put_position, type(None)) else res.get_contract('P',
                                                                                                                   2000)
                ib.qualifyContracts(self.put_contract)
                self.call_option_price = ib.reqMktData(self.call_contract, '', False,
                                                       False)  # start data collection for calls
                self.put_option_price = ib.reqMktData(self.put_contract, '', False,
                                                      False)  # start data collection for puts
                ib.sleep(0)
                return
            else:
                self.portfolio = position
                return
    @staticmethod
    def connect():
        """Drop the current IB session and reconnect to the local TWS gateway
        with a fresh random clientId (avoids clientId clashes on rapid
        reconnects)."""
        ib.disconnect()
        ib.connect('127.0.0.1', 7496, clientId=np.random.randint(10, 1000))
        # Throttle the number of outstanding API requests.
        ib.client.MaxRequests = 55
        print('reconnected')
@staticmethod
def roll_contract(option_vol, value):
option_vol = np.roll(option_vol, -1)
option_vol[-1] = value
return option_vol
def account_update(self, value=None):
self.cash_in_hand = float(
value.value) if value.tag == 'TotalCashValue' and value.account == 'DU1347520' else self.cash_in_hand
self.portfolio_value = float(
value.value) if value.tag == 'GrossPositionValue' and value.account == 'DU1347520' else self.portfolio_value
self.unrealizedPNL = float(
value.value) if value.tag == 'UnrealizedPnL' and value.account == 'DU1347520' else self.unrealizedPNL
self.realizedPNL = float(
value.value) if value.tag == 'RealizedPnL' and value.account == 'DU1347520' else self.realizedPNL
if self.prev_cash != self.cash_in_hand:
self.prev_cash = self.cash_in_hand
if self.submitted == 1:
self.submitted = 0
def is_time_between(begin_time, end_time, check_time=None):
    """Return True if *check_time* (default: current local time) lies inside
    the window [begin_time, end_time], handling windows that wrap midnight."""
    # A falsy check_time (None, or exactly midnight) falls back to "now",
    # mirroring the original truthiness-based default.
    moment = check_time or datetime.now().time()
    if begin_time < end_time:
        # Ordinary same-day window.
        return begin_time <= moment <= end_time
    # Window wraps past midnight: inside if after the start OR before the end.
    return moment >= begin_time or moment <= end_time
def main():
    """Wire the Trade instance's handlers onto the global IB event bus and
    block inside the ib event loop."""
    ib.positionEvent += trading.option_position
    # ib.updatePortfolioEvent += trading.option_position
    ib.accountValueEvent += trading.account_update
    ib.errorEvent += trading.error
    # Each new/updated bar of the streaming ES subscription drives the strategy.
    trading.ES.updateEvent += trading.trade
    ib.run()
def maybe_make_dir(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so the check-then-create race of the original
    (``os.path.exists`` followed by ``os.makedirs``) cannot raise
    FileExistsError when two processes start at once.
    """
    os.makedirs(directory, exist_ok=True)
if __name__ == '__main__':
    ib = IB()
    import os
    path = os.getcwd()
    TRADES_FOLDER = f'{path}/trades_logs'
    maybe_make_dir(TRADES_FOLDER)
    # BUG FIX: os.path.join discards every component before an absolute one,
    # so joining with '/log_...' pointed at the filesystem root instead of
    # TRADES_FOLDER.  Open in append mode unconditionally ('a+' creates the
    # file when missing and never truncates), which also guarantees `file`
    # is bound for the close() calls in the handlers below.
    my_file = os.path.join(TRADES_FOLDER, f'log_{datetime.strftime(datetime.now(), "%m_%d_%H_%M")}.txt')
    file = open(my_file, 'a+', encoding='utf-8')
    # Wait out the 14:00-15:00 window before starting to trade.
    while is_time_between(time(14, 00),
                          time(15, 00)):
        wait_time = 60 - datetime.now().minute
        print(f"wait until market opens in {wait_time} minutes")
        ib.sleep(60)
    res = get_data()
    trading = Trade()
    try:
        main()
    except ValueError:
        # Transient data error: back off briefly and restart the loop.
        ib.sleep(5)
        main()
    except Exception as e:
        # Generic failure: log it, disconnect, release the log file.
        print(e)
        ib.disconnect()
        file.close()
    # NOTE: the original also had `except "peer closed connection":` and
    # `except "asyncio.exceptions.TimeoutError":` clauses; catching strings
    # is invalid in Python 3 and both were unreachable behind the
    # `except Exception` handler above, so they are removed.
    except KeyboardInterrupt:
        print('User stopped running')
        ib.disconnect()
        file.close()
| [
"spawnaga@gmail.com"
] | spawnaga@gmail.com |
906e71b3da95d219c2e5402da517c6b949186b6a | b6582cb10fad0d6e5007865c5085ca30edc96251 | /KivyLightningATM_LCD/master_layout.py | c4853716142e9a2c10465ad0b01c54d39eb916b3 | [] | no_license | d3m0-sm/KivyLightningATM_Repo | 4d476caa4f0924fe57a4dde2611b471df9f447ac | daa2c097f12725606c315d41cbd3eb5933681f5f | refs/heads/master | 2022-04-21T07:33:21.670085 | 2020-04-18T11:05:24 | 2020-04-18T11:05:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,133 | py | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.graphics import Rectangle, Color
class MasterLayout(FloatLayout):
    """Root layout that stacks the colored background pages so they look like
    borders.

    Kivy widgets need explicit handling to keep a colored background sized
    and positioned when the screen size changes, so the layered
    StandardLayout pages each manage their own canvas rectangle; this class
    assembles them and adds the two 'ATM' header buttons.
    """

    def __init__(self, **kwargs):
        """Build the layered pages and the two header buttons."""
        super().__init__(**kwargs)
        # Colored pages with different sizes/positions; the slight offsets
        # let the underlying layers show through as border lines.
        self.s0 = StandardLayout0()
        self.s1 = StandardLayout1(size_hint=(0.9, 0.8), pos_hint={'x': 0.05, 'y': 0.05})
        self.s2 = StandardLayout2(size_hint=(0.883, 0.778), pos_hint={'x': 0.058, 'y': 0.062})
        self.s3 = StandardLayout3(size_hint=(0.565, 0.1), pos_hint={'x': 0.22, 'y': 0.88})
        self.s4 = StandardLayout4(size_hint=(0.55, 0.08), pos_hint={'x': 0.228, 'y': 0.89})
        self.s5 = StandardLayout5()
        for page in (self.s0, self.s1, self.s2, self.s3, self.s4, self.s5):
            self.add_widget(page)
        # The buttons carry no behaviour (no touchscreen is used); they are an
        # easy way to draw a labelled rectangle.  The duplicated construction
        # of the original is factored into _make_atm_button.
        self.button_home = self._make_atm_button(right=0.215)
        self.add_widget(self.button_home)
        self.button_back = self._make_atm_button(right=0.99)
        self.add_widget(self.button_back)

    @staticmethod
    def _make_atm_button(right):
        """Return a black 'ATM' label-button anchored at pos_hint right=*right*."""
        return Button(text="ATM",
                      size_hint=(0.2, 0.1),
                      pos_hint={'top': 0.98, 'right': right},
                      background_color=(0, 0, 0, 1),
                      color=(0.8, 1, 1, 1),
                      font_size=35)
class StandardLayout0(FloatLayout):
    """Main background: a full-size pale-cyan canvas."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Paint a rectangle behind the widget's children and keep a handle to
        # it so it can follow the widget on resize.
        with self.canvas.before:
            Color(0.8, 1, 1, 1)  # r, g, b, a each in 0..1
            self.rect = Rectangle(size=self.size, pos=self.pos)
        # Re-sync the rectangle whenever the widget moves or resizes.
        self.bind(size=self._update_rect, pos=self._update_rect)

    def _update_rect(self, widget, _value):
        """Keep the background rectangle glued to the widget's geometry."""
        self.rect.pos, self.rect.size = widget.pos, widget.size
# same as above
class StandardLayout1(FloatLayout):
    """Outside border: a black layer drawn behind the content."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Black rectangle; re-synced on every move/resize via the binding below.
        with self.canvas.before:
            Color(0, 0, 0, 1)
            self.rect = Rectangle(size=self.size, pos=self.pos)
        self.bind(size=self._update_rect, pos=self._update_rect)

    def _update_rect(self, widget, _value):
        """Follow the widget's geometry with the canvas rectangle."""
        self.rect.pos, self.rect.size = widget.pos, widget.size
# same as above
class StandardLayout2(FloatLayout):
    """Inside edge of the outside border (pale-cyan fill)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pale-cyan rectangle sitting just inside the black border layer.
        with self.canvas.before:
            Color(0.8, 1, 1, 1)
            self.rect = Rectangle(size=self.size, pos=self.pos)
        self.bind(size=self._update_rect, pos=self._update_rect)

    def _update_rect(self, widget, _value):
        """Follow the widget's geometry with the canvas rectangle."""
        self.rect.pos, self.rect.size = widget.pos, widget.size
# same as above
class StandardLayout3(FloatLayout):
    """Border around the Lightning header (black layer)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Black rectangle; the binding keeps it aligned with the widget.
        with self.canvas.before:
            Color(0, 0, 0, 1)
            self.rect = Rectangle(size=self.size, pos=self.pos)
        self.bind(size=self._update_rect, pos=self._update_rect)

    def _update_rect(self, widget, _value):
        """Follow the widget's geometry with the canvas rectangle."""
        self.rect.pos, self.rect.size = widget.pos, widget.size
# same as above
class StandardLayout4(FloatLayout):
    """Inside of the Lightning header border (pale-cyan fill)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pale-cyan rectangle just inside the header's black border.
        with self.canvas.before:
            Color(0.8, 1, 1, 1)
            self.rect = Rectangle(size=self.size, pos=self.pos)
        self.bind(size=self._update_rect, pos=self._update_rect)

    def _update_rect(self, widget, _value):
        """Follow the widget's geometry with the canvas rectangle."""
        self.rect.pos, self.rect.size = widget.pos, widget.size
class StandardLayout5(FloatLayout):
    """Creates the 'LIGHTNING' header label."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # pos_hint is relative (0..1, origin at bottom-left); color is rgba 0..1.
        # BUG FIX: the label read "LIGHTING"; the project (LightningATM) and
        # this class's docstring both say "Lightning".
        self.label_top = Label(text="LIGHTNING",
                               pos_hint={'center_x': 0.5, 'center_y': 0.93},
                               font_size=45,
                               color=(0, 0, 0, 1))
        self.add_widget(self.label_top)
| [
"talentpierre@gmail.com"
] | talentpierre@gmail.com |
b36481f344b66fb5631d75801b0018c7a515d504 | c1b293511b5a000059dd05a6cf243931558ecf5b | /project4/k-means_true/Kmeans_true.py | f43ca9f833e8c1a452c83fe4b0bc562082cabd09 | [] | no_license | zzlegion/speech | 6d102edf1e91175c3aa8971c8cffce651adea3fd | 261612e3d2c639c2a97071b7bce25dfe2ce1a876 | refs/heads/master | 2021-01-12T11:43:54.307428 | 2016-11-09T08:39:01 | 2016-11-09T08:39:01 | 72,282,793 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,890 | py | #coding: utf-8
import sys
import numpy as np
import profile
from time import time
import os
import mfcc
import record
def distance(vector,j):
    """Negative log-likelihood of *vector* under state *j*'s diagonal
    Gaussian (globals ``mean[j]``/``var[j]``, 39-dim MFCC features).
    Smaller means a better match."""
    res = 0.5 * np.sum(np.log(2 * np.pi * var[j]))+ 0.5* np.sum((vector-mean[j])**2*1.0/var[j])
    return res
def initialize():
    """Uniformly split every training sequence into 5 equal HMM states.

    Fills the global ``segment_info`` matrix: row k holds the 5
    start-frame indices of sequence k's segments.
    """
    # average the sequence into 5 states
    global segment_info
    segment_info = np.arange(5 * training_sequence_num).reshape(training_sequence_num, 5)
    segment_info = segment_info % (5)
    # NOTE(review): assigning .dtype reinterprets the buffer in place rather
    # than converting values (astype would be the usual spelling) -- confirm.
    segment_info.dtype = int
    for i in xrange(training_sequence_num):
        # One fifth of the sequence length per state.
        val = num_of_frames[i]*1.0/5
        segment_info[i] =segment_info[i]*val
def update_parameters():
    """Re-estimate the per-state Gaussians and transition scores from the
    current segmentation.

    Reads globals ``segment_info``, ``num_of_frames``,
    ``all_training_sequences``; rewrites globals ``mean``, ``var``,
    ``trans_p``, ``self_trans`` and ``Nj``.
    """
    global mean
    mean = np.zeros(5 * 39).reshape(5, 39)
    global var
    var = np.zeros(5 * 39).reshape(5, 39)
    global trans_p
    trans_p = [0] * 4
    global self_trans
    self_trans = [0] * 5
    # number of vectors belonging to the jth segment.
    global Nj
    Nj = [0] * 5
    for i in xrange(5):
        for k in xrange(training_sequence_num):
            if i < 4:
                Nj[i] += segment_info[k][i + 1] - segment_info[k][i]
            else:
                # Last state runs to the end of the sequence.
                Nj[i] += num_of_frames[k] - segment_info[k][4]
    # ####################### initialize means ###################################
    i=0
    for k in xrange(training_sequence_num):
        for i in xrange(num_of_frames[k]):
            # Accumulate each frame into the mean of the state it falls in.
            if i >=segment_info[k][0] and i<segment_info[k][1]:
                mean[0] += all_training_sequences[k][i]
            elif i >=segment_info[k][1] and i<segment_info[k][2]:
                mean[1] +=all_training_sequences[k][i]
            elif i >=segment_info[k][2] and i<segment_info[k][3]:
                mean[2] +=all_training_sequences[k][i]
            elif i >=segment_info[k][3] and i<segment_info[k][4]:
                mean[3] +=all_training_sequences[k][i]
            elif i >=segment_info[k][4]:
                mean[4] +=all_training_sequences[k][i]
    for i in xrange(5):
        mean[i] = mean[i]*1.0/Nj[i]
    ####################### initialize variances ###################################
    for k in xrange(training_sequence_num):
        for i in xrange(num_of_frames[k]):
            if i >=segment_info[k][0] and i<segment_info[k][1]:
                var[0] += (all_training_sequences[k][i]- mean[0]) ** 2
            elif i >=segment_info[k][1] and i<segment_info[k][2]:
                var[1] += (all_training_sequences[k][i] - mean[1]) ** 2
            elif i >=segment_info[k][2] and i<segment_info[k][3]:
                var[2] += (all_training_sequences[k][i] - mean[2]) ** 2
            elif i >=segment_info[k][3] and i<segment_info[k][4]:
                var[3] += (all_training_sequences[k][i] - mean[3]) ** 2
            elif i >=segment_info[k][4]:
                var[4] += (all_training_sequences[k][i] - mean[4]) ** 2
    for i in xrange(5):
        var[i] = var[i]*1.0/Nj[i]
    ############################## compte transition scores ##########################
    # Negative-log scores: each sequence leaves state i exactly once, so the
    # exit probability is (num sequences)/Nj[i]; staying is the complement.
    for i in xrange(4):
        trans_p[i] = training_sequence_num*1.0/Nj[i]
        trans_p[i] = - np.log(trans_p[i])
    for i in xrange(4):
        self_trans[i] = 1- training_sequence_num*1.0/Nj[i]
        self_trans[i] = -np.log(self_trans[i])
    self_trans[4] = 0
def segment():
    """Viterbi-align every training sequence to the current 5-state model and
    rewrite ``segment_info`` with the new state-boundary frame indices.

    Uses a two-row trellis (previous/current column) plus explicit
    back-pointer paths, with costs from distance() and the global
    transition scores.
    """
    trellis = np.zeros(5*2).reshape(2,5)
    cur_paths = []  # one back-pointer path (list of state ids) per trellis node
    pre_paths = []
    for sequence_index in xrange(training_sequence_num):
        # ---- process one training recording ----
        length = num_of_frames[sequence_index]
        # ---- initialise the candidate paths, one per state ----
        pre_paths[:] = []
        pre_paths.append([0])
        pre_paths.append([1])
        pre_paths.append([2])
        pre_paths.append([3])
        pre_paths.append([4])
        cur_paths[:] = []
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        # ---- initialise the trellis: only state 0 is reachable at frame 0 ----
        trellis.fill(sys.maxint)
        vector = all_training_sequences[sequence_index][0]  # first MFCC frame
        trellis[0][0] = distance(vector,0)  # cost of frame 0 under state 0
        # ---- fill the trellis column by column ----
        for index in xrange(1,length):
            vector = all_training_sequences[sequence_index][index]
            node_cost0 = distance(vector,0)
            # State 0 can only be reached by staying in state 0.
            trellis[1][0] = trellis[0][0] + node_cost0 + self_trans[0]
            cur_paths[0] = pre_paths[0][:]  # slice-copy, then extend the path
            cur_paths[0].append(0)
            for node_index in xrange(1,5):  # states 1..4: stay or move from the left
                node_cost = distance(vector,node_index)
                stay_edge_cost = self_trans[node_index]
                move_edge_cost = trans_p[node_index-1]
                stay_cost = trellis[0][node_index] + node_cost + stay_edge_cost
                move_cost = trellis[0][node_index-1] + node_cost + move_edge_cost
                if stay_cost < move_cost:
                    trellis[1][node_index] = stay_cost
                    cur_paths[node_index] = pre_paths[node_index][:]
                    cur_paths[node_index].append(node_index)
                else:
                    trellis[1][node_index] = move_cost
                    cur_paths[node_index] = pre_paths[node_index-1][:]  # copy predecessor's path
                    cur_paths[node_index].append(node_index)
            # Shift the current column into the "previous" row.
            trellis[0][:] = trellis[1][:]
            #print trellis[0]
            pre_paths[:] = cur_paths[:]
            #print pre_paths
        #print cur_paths[4]
        # ---- cur_paths[4] is the best full path; record its state changes ----
        state = 1
        for j in xrange(1,length-1):
            if cur_paths[4][j]!=cur_paths[4][j-1]:
                segment_info[sequence_index][state] = j  # frame where a new state starts
                state += 1
        print segment_info[sequence_index]
def kmeans(integer):
    """Segmental k-means training for digit *integer*.

    Alternates Viterbi re-segmentation (segment) and parameter
    re-estimation (update_parameters) until the segmentation stops
    changing, then persists the model to ``<integer>hmm_*.txt`` files.
    """
    # ---- uniform initial segmentation ----
    initialize()
    update_parameters()
    for i in xrange(4):  # flatten the transition scores for the first pass
        trans_p[i] = 0.5
        self_trans[i] = 0.5
    # ---- previous-iteration segmentation, for the convergence test ----
    pre_seg_info = np.arange(5 * training_sequence_num).reshape(training_sequence_num, 5)
    pre_seg_info = pre_seg_info % (5)
    pre_seg_info.dtype = int
    for i in xrange(training_sequence_num):
        for j in xrange(5):
            pre_seg_info[i][j] = segment_info[i][j]
    changed = True
    ite=0
    while(changed):
        print("iteration ",ite)
        changed = False
        segment()
        print(segment_info)
        for i in xrange(training_sequence_num):
            for j in xrange(5):
                if pre_seg_info[i][j] != segment_info[i][j]:
                    changed = True
                    break
        update_parameters()
        for i in xrange(training_sequence_num):
            for j in xrange(5):
                pre_seg_info[i][j] = segment_info[i][j]
        ite = ite+1
    ############################## compte transition scores ##########################
    # NOTE(review): the final-state entries below are stored as raw
    # probabilities while indices 0-3 hold -log scores -- confirm intended.
    trans_p.append(training_sequence_num * 1.0 / Nj[4])
    self_trans[4] = 1 - training_sequence_num * 1.0 / Nj[4]
    np.savetxt(str(integer)+"hmm_mean.txt",mean)
    np.savetxt(str(integer) + "hmm_var.txt", var)
    np.savetxt(str(integer) + "hmm_segment_info.txt", segment_info)
    np.savetxt(str(integer) + "hmm_trans_p.txt", trans_p)
    np.savetxt(str(integer) + "hmm_self_trans.txt", self_trans)
def load_hmm_model(integer):
    """Load the persisted model for digit *integer* into the globals
    ``mean``, ``var``, ``trans_p`` and ``self_trans``."""
    # global segment_info
    # segment_info = np.arange(5 * training_sequence_num).reshape(training_sequence_num, 5)
    # segment_info.dtype = int
    global mean
    mean = np.zeros(5 * 39).reshape(5, 39)
    global var
    var = np.zeros(5 * 39).reshape(5, 39)
    global trans_p
    trans_p = [0] * 4
    global self_trans
    self_trans = [0] * 5
    # The zero-initialisations above are immediately overwritten by loadtxt.
    mean=np.loadtxt(str(integer)+"hmm_mean.txt")
    var=np.loadtxt(str(integer) + "hmm_var.txt")
    trans_p = np.loadtxt(str(integer) + "hmm_trans_p.txt")
    self_trans = np.loadtxt(str(integer) + "hmm_self_trans.txt")
def hmm(test_sequence,speak_number,name,isOnline):
    """Score *test_sequence* against all 10 digit models with Viterbi
    decoding and report the best-matching digit.

    speak_number -- ground-truth digit (ignored when *isOnline*)
    name         -- label printed when an offline test is wrong
    isOnline     -- True for live microphone input, False for file tests
    """
    cost=[0 for col in xrange(10)]
    length = len(test_sequence)
    for i in xrange(10):
        # ---- load the i-th digit's HMM model into the globals ----
        load_hmm_model(i)
        # ---- Viterbi decode test_sequence against this model ----
        trellis = np.zeros(5 * 2).reshape(2, 5)
        cur_paths = []  # one back-pointer path per trellis node
        pre_paths = []
        # ---- initialise the candidate paths ----
        pre_paths[:] = []
        pre_paths.append([0])
        pre_paths.append([1])
        pre_paths.append([2])
        pre_paths.append([3])
        pre_paths.append([4])
        cur_paths[:] = []
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        cur_paths.append([])
        # ---- initialise the trellis: only state 0 reachable at frame 0 ----
        trellis.fill(sys.maxint)
        vector = test_sequence[0]  # first MFCC frame
        trellis[0][0] = distance(vector, 0)
        # ---- fill the trellis column by column ----
        for index in xrange(1, length):
            vector = test_sequence[index]
            # State 0 can only be reached by staying in state 0.
            trellis[1][0] = trellis[0][0] + distance(vector,0) + self_trans[0]
            cur_paths[0] = pre_paths[0][:]
            cur_paths[0].append(0)
            for node_index in xrange(1, 5):  # states 1..4: stay or move
                node_cost = distance(vector, node_index)
                stay_cost = trellis[0][node_index] + node_cost + self_trans[node_index]
                move_cost = trellis[0][node_index - 1] + node_cost + trans_p[node_index-1]
                if stay_cost < move_cost:
                    trellis[1][node_index] = stay_cost
                    cur_paths[node_index] = pre_paths[node_index][:]
                    cur_paths[node_index].append(node_index)
                else:
                    trellis[1][node_index] = move_cost
                    cur_paths[node_index] = pre_paths[node_index - 1][:]
                    cur_paths[node_index].append(node_index)
            # Shift the current column into the "previous" row.
            trellis[0][:] = trellis[1][:]
            pre_paths[:] = cur_paths[:]
        # ---- best terminal cost of test_sequence under model i ----
        cost[i] = trellis[1][4]
    #print(cost)
    # ---- pick the model with the minimum cost ----
    mincost=cost[0]
    minindex=0
    for index,ele in enumerate(cost):
        if ele < mincost:
            minindex = index
            mincost = ele
    if isOnline:
        print "You are speaking ",minindex
        print "Cost is ",mincost
    else:
        if minindex == speak_number:
            print("Right!! Min cost is ",mincost)
        else:
            print("Wrong.. Should be ",speak_number," But is ",minindex,mincost)
            print(name)
def align(train_data):
    """Train one 5-state HMM per digit (0-9) from two speakers' MFCC files.

    train_data[0]/train_data[1] hold the recording indices used for
    speaker 1 (junyo) and speaker 2 (jianwei) respectively.  Populates the
    globals consumed by kmeans() for each digit, then releases them.
    """
    for i in xrange(10):
        print "hmm model: ",i
        global num_of_frames
        num_of_frames = []
        global all_training_sequences
        all_training_sequences = []
        # Speaker 1's training sequences.
        for index in train_data[0]:
            sequence = np.loadtxt(".\\junyo_iso_11_1\\"+str(i)+"_"+str(index)+".txt")
            #sequence = sequence[1:]
            num_of_frames.append(len(sequence))
            all_training_sequences.append(sequence)
        # Speaker 2's training sequences.
        for index in train_data[1]:
            sequence = np.loadtxt(".\\jianwei_iso_11_1\\" + str(i) + "_" + str(index) + ".txt")
            #sequence = sequence[1:]
            num_of_frames.append(len(sequence))
            all_training_sequences.append(sequence)
        kmeans(i)
        del num_of_frames
        del all_training_sequences
def test(isOnline,test_data):
    """Evaluate the trained models.

    When *isOnline*, record from the microphone, extract MFCCs and
    recognise once; otherwise run every held-out recording listed in
    *test_data* (per speaker, per digit) through hmm().
    """
    if isOnline:
        test_sequence = mfcc.mfcc(record.record(),"fast_mode")
        hmm(test_sequence,3,"test0",True)
    else:
        for i in xrange(10):
            # Speaker 1's test sequences.
            for index in test_data[0]:
                name = ".\\txtDictionary\\junyo_" + str(i) + "_" + str(index) + ".txt"
                sequence = np.loadtxt(name)
                sequence = sequence[1:]
                hmm(sequence, i, name,False)
            # Speaker 2's test sequences.
            for index in test_data[1]:
                name = ".\\txtDictionary\\" + str(i) + "_" + str(index) + ".txt"
                sequence = np.loadtxt(name)
                sequence = sequence[1:]
                hmm(sequence, i, name,False)
if __name__ == '__main__':
    # Build the per-speaker lists of training recording indices and train.
    train_data = []
    train_data.append([0,1,2,3,4])  # speaker 1's training recording indices
    train_data.append([0,1,2,3,4])  # speaker 2's training recording indices
    training_sequence_num = len(train_data[0])+len(train_data[1])
    #test_data = []
    #test_data.append([])  # speaker 1's test recording indices
    #test_data.append([3,5])  # speaker 2's test recording indices
    align(train_data)
    #test(True,test_data)  # True selects live microphone input
"noreply@github.com"
] | zzlegion.noreply@github.com |
2e6dc41d85cb634f7b13531e16179d653b06e00b | 349428feb6cec2444a4420322d200b08fdcb6978 | /ForexRL/test/design_pattern/test_decorator.py | 310574430c67b34acb6f90701069458c460d7dc8 | [] | no_license | paohuz/TempProj | ac7d8195cf8b1000df8eb4c9fd2ab3807c9b688c | 2b44fd8a7148d2acfdfd8040e611dfde4631bedc | refs/heads/master | 2020-06-21T17:15:12.172148 | 2020-04-29T05:57:36 | 2020-04-29T05:57:36 | 197,512,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | class AbstractComponent:
    def Operation(self):
        """Abstract hook: concrete components and decorators must override."""
        raise NotImplementedError('Operation() must be defined in subclass')
class ConcreteComponent(AbstractComponent):
    """The plain object being decorated; supplies the base behaviour."""
    def Operation(self):
        print('ConcreteComponent: Operation()')
class Decorator(AbstractComponent):
    """Base decorator: wraps another component and forwards Operation()."""
    def __init__(self, obj):
        # obj: the wrapped component (any AbstractComponent).
        self.comp = obj

    def Operation(self):
        print('Decorator:: Operation()')
        # Delegate to the wrapped component after the decorator's own work.
        self.comp.Operation()
class ConcreteDecoratorA(Decorator):
    """Decorator that adds state (addedState) on top of the wrapped call."""
    def __init__(self, obj):
        Decorator.__init__(self, obj)
        self.addedState = None  # set on the first Operation() call

    def Operation(self):
        Decorator.Operation(self)
        self.addedState = 1
        print('ConcreteDecoratorA: Operation()')
        print(f'ConcreteDecoratorA: addedState = {self.addedState}')
class ConcreteDecoratorB(Decorator):
    """Decorator that adds behaviour (AddedBehavior) after the wrapped call."""
    def __init__(self, obj):
        Decorator.__init__(self, obj)

    def Operation(self):
        Decorator.Operation(self)
        print('ConcreteDecoratorB: Operation()')
        self.AddedBehavior()

    def AddedBehavior(self):
        print('ConcreteDecoratorB: AddedBehavior()')
# Demo: stack two decorators around the concrete component and run it.
myComponent = ConcreteDecoratorA(ConcreteDecoratorB(ConcreteComponent()))
myComponent.Operation()
| [
"paohuz@gmail.com"
] | paohuz@gmail.com |
184c32820c936dfc530004beb6b0a0c8e580eefb | a55ddcf148a1e0d6ad8fece464da3deae6b18702 | /ICPC Practicec/chefexam.py | 849139f2b05ad3af5aa0d5341dbeba83529cdb6f | [] | no_license | vishaldeyiiest/Codechef | 929289d70a197bdb43f69e5aed29a6e91c5bb421 | e7534a3bba72317ca702cc140d2eb9e94c9e1407 | refs/heads/master | 2021-05-03T12:32:54.102656 | 2016-10-27T13:14:10 | 2016-10-27T13:14:10 | 72,109,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | def solve(l, n, m):
maxsum = sum(x[1] for x in l)
minsum = sum(x[0] for x in l)
if maxsum < m*n:
return -1
if minsum >= m*n:
return 0
l.sort(key = lambda x: x[2], reverse = True)
hr = 0
c = [0]*n
i = 0
while minsum < m*n:
if l[i][0] + l[i][2]*(c[i]+1) <= l[i][1]:
c[i] = c[i]+1
minsum += l[i][2]
hr += 1
else:
i = i+1
return hr
# Python 2 entry point: read n (subjects) and m (required average), then one
# (current_score, max_score, points_per_hour) triple per subject, and print
# the minimum number of hours (or -1).
n, m = map(int, raw_input().split())
l = []
for i in range(n):
    l.append(tuple(map(int, raw_input().split())))
print solve(l, n, m)
| [
"vishal.iiestcst@gmail.com"
] | vishal.iiestcst@gmail.com |
d83a9791c87c2ce546badc89430581f08fd60a08 | 903bf38b8e4ae7e38a73372df09b357cad4b9079 | /df_user/models.py | 39b86f4c6e360eb30629ae618cd4d3b95efb9efe | [] | no_license | FixYouZi/dajangoDF | 562ad0ca68920e5c0775b6b73214c220679b0211 | 2465456ef5bb917bc3c2d339da631e4dbcc71b89 | refs/heads/master | 2020-03-19T06:14:35.181258 | 2018-06-04T09:36:06 | 2018-06-04T09:36:06 | 136,003,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.db import models
# Create your models here.
class UserInfo(models.Model):
    """Registered shop user with login credentials and default shipping info."""
    user_name = models.CharField(max_length=50)  # login name
    pwd = models.CharField(max_length=40)  # stored password (40 chars suggests a SHA-1 hex digest -- confirm)
    uemail = models.EmailField()  # contact e-mail
    ushou = models.CharField(max_length=20, default='')  # recipient name
    uaddress = models.CharField(max_length=100, default='')  # shipping address
    uyoubian = models.CharField(max_length=6, default='')  # postal code
    uphone = models.CharField(max_length=11, default='')  # mobile number
"35181810+FixYouZi@users.noreply.github.com"
] | 35181810+FixYouZi@users.noreply.github.com |
f0809203f523fb2e51459094f4ff4c4dd2440f9b | 091cc684740bc76932352d230db4a08bf011b7ec | /interview_cake_daily_practice/array_and_string_manipulation/reverse_string_test.py | b13f3cab260ef99a96d37ae18f44c6123593b882 | [] | no_license | dm36/interview-practice | 5232601d8de23e80557b3e2a96ff9d3589017052 | 612966ea0a813faaabd5dca98ce6dd524b8b4cef | refs/heads/master | 2022-02-14T00:46:20.395743 | 2019-07-19T19:47:34 | 2019-07-19T19:47:34 | 197,820,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | import unittest
def reverse(list_of_chars):
    """Reverse *list_of_chars* in place (returns None)."""
    # Slice-assign the reversed copy back into the same list object so every
    # external reference observes the change.
    list_of_chars[:] = list_of_chars[::-1]
# Tests
class Test(unittest.TestCase):
    """Unit tests for the in-place reverse() helper."""

    def test_empty_string(self):
        chars = []
        reverse(chars)
        self.assertEqual(chars, [])

    def test_single_character_string(self):
        chars = ['A']
        reverse(chars)
        self.assertEqual(chars, ['A'])

    def test_longer_string(self):
        chars = ['A', 'B', 'C', 'D', 'E']
        reverse(chars)
        self.assertEqual(chars, ['E', 'D', 'C', 'B', 'A'])
if __name__ == '__main__':
    # Run the suite only when executed as a script; a bare module-level
    # unittest.main() would also fire (and call sys.exit) on import.
    unittest.main(verbosity=2)
| [
"dhruv.madhawk@gmail.com"
] | dhruv.madhawk@gmail.com |
3e1ff0153900bee2026a9b016f9da094c9b89214 | e063e9799d4c0473abebff4fb9d0a06c7e6660d8 | /myconfig.py | 75dd3933dab5af6f7ddc35728bbb9262fc6949cb | [] | no_license | afcarl/yelp-analysis | cf33c95f94c0e73e5a37b18a65de46c65563d360 | 82091f8a31e58a46099caeb02c93e2b7319ce4d9 | refs/heads/master | 2020-03-21T05:56:31.918268 | 2016-09-20T23:27:19 | 2016-09-20T23:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | data_root = '/Users/Tony/Dropbox/CMU_MLT/DialPort/YelpDomain/yelp_dataset_challenge_academic_dataset/'
# Output directories for analysis results and intermediate caches.
result_root = '/Users/Tony/Dropbox/CMU_MLT/DialPort/YelpDomain/yelp_dataset_challenge_academic_dataset/result/'
cache_root = '/Users/Tony/Dropbox/CMU_MLT/DialPort/YelpDomain/yelp_dataset_challenge_academic_dataset/cache/'
# Raw Yelp academic dataset JSON files (relative to data_root).
review_data = 'yelp_academic_dataset_review.json'
business_data = 'yelp_academic_dataset_business.json'
tip_data = 'yelp_academic_dataset_tip.json'
# Yelp category ontology file.
ontology_data = 'categories.json'
# CSV filenames for derived attribute/category summaries.
attribute_info_csv = 'attribute_info.csv'
category_info_csv = 'categories_info.csv'
top_category_info_csv = 'top_categories_info.csv'
inside_category_info_csv = 'inside_categories_info.csv'
| [
"zhaotiancheng.hz@gmail.com"
] | zhaotiancheng.hz@gmail.com |
6853cc8da0e9ee00d664aad325a46c64f57190dc | d64f0d530f2863cc08636b2640abeb7a442794ab | /back-end/SOA/db/scripts/keywords.py | 35dc060716aa43700c8afca3bf371c7f020caed4 | [] | no_license | John-Atha/askMeAnything | aa99818de887deabdd2aa49cbc5fee4b5c257fbd | 53f7310f19688b74d0e207800154466fd680400d | refs/heads/main | 2023-06-18T03:58:17.243570 | 2021-07-14T11:07:28 | 2021-07-14T11:07:28 | 358,830,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import os
prefix = "INSERT INTO public.\"keyword\"(name) VALUES("

# Seed keywords for the `keyword` table.
# BUGFIX: the original list was missing a comma after "webstorm", so
# implicit string concatenation produced the single broken entry
# "webstormweb developing" and silently dropped two keywords.
keys = ["Django", "Spring boot", "Express js", "Node js", "Laravel",
        "Python", "c++", "algorithms", "data structures", "sql",
        "databases", "postgres", "heroku", "vscode", "webstorm",
        "web developing", "html", "css", "vanilla js", "react",
        "angular", "react-native", "vue", "flutter", "selenium",
        "ubuntu", "linux", "windows"]

# Resolve ../sql/keywords.sql relative to the current working directory.
fileDir = os.path.dirname(os.path.realpath('__file__'))
filename = os.path.join(fileDir, '../sql/keywords.sql')
filename = os.path.abspath(os.path.realpath(filename))

# Use a context manager so the file is flushed and closed even on error
# (the original never called close()). Append mode preserves prior runs.
with open(filename, "a", encoding='utf-8') as f:
    for key in keys:
        f.write(prefix + "'" + key + "');\n")
"giannisj3@gmail.com"
] | giannisj3@gmail.com |
f3895d457cf6e83ec4bce43312e970ebe6c689bd | d01532c1237825dc5505a247c2d289fd6ff7602e | /PMU-Homology/TAE.py | 0e606de307da97e8ed773eddb2cd114a029e9092 | [] | no_license | BEbillionaireUSD/Generator-Coherency-Identification | 95779feac0654bd06e7e9c3e389e43504af90238 | 04510fbc9ffd55f4c2e2ccece97f7fe9dd79d6a4 | refs/heads/main | 2023-08-22T19:54:56.410367 | 2021-10-29T06:24:18 | 2021-10-29T06:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,576 | py |
from keras.models import Model
from keras.layers import Input, LeakyReLU, MaxPool1D, LSTM, TimeDistributed, Dense, Reshape, Flatten
from keras.layers import UpSampling2D, Conv2DTranspose, Lambda
from keras.layers import Conv1D, Bidirectional
import keras.backend as K
import tensorflow as tf
def temporal_autoencoder(input_dim, timesteps, n_filters=50, kernel_size=10, strides=1, pool_size=10, n_units=[50, 1]):
    """Build a Conv1D + bidirectional-LSTM temporal autoencoder.

    Returns a ``(autoencoder, encoder, decoder)`` triple of Keras models
    that share layers: ``encoder`` maps a ``(timesteps, input_dim)``
    sequence to a ``(timesteps // pool_size, n_units[1])`` latent
    sequence, ``decoder`` reconstructs the input from that latent, and
    ``autoencoder`` is the end-to-end composition.

    NOTE(review): the mutable default ``n_units=[50, 1]`` is shared
    across calls; it is only read here, so this is safe unless a caller
    mutates it.
    """
    # Pooling must divide the sequence length evenly so the decoder's
    # upsampling restores exactly `timesteps` steps.
    assert(timesteps % pool_size == 0)
    # Input
    x = Input(shape=(timesteps, input_dim), name='input_seq')
    # Encoder: Conv1D feature extraction, temporal max-pooling, then two
    # BiLSTMs whose forward/backward outputs are summed (merge_mode='sum').
    encoded = Conv1D(n_filters, kernel_size, strides=strides, padding='same', activation='linear', name='Conv_encode')(x)
    encoded = LeakyReLU()(encoded)
    encoded = MaxPool1D(pool_size)(encoded)
    encoded = Bidirectional(LSTM(n_units[0], return_sequences=True), merge_mode='sum', name='LSTM1')(encoded)
    encoded = LeakyReLU()(encoded)
    encoded = Bidirectional(LSTM(n_units[1], return_sequences=True), merge_mode='sum', name='LSTM2')(encoded)
    encoded = LeakyReLU(name='latent')(encoded)
    # Decoder: upsample the pooled time axis back, then a transposed
    # convolution projects back to `input_dim` channels.
    decoded = Reshape((-1, 1, n_units[1]), name='reshape')(encoded)
    decoded = UpSampling2D((pool_size, 1), name='upsampling')(decoded)
    decoded = Conv2DTranspose(input_dim, (kernel_size, 1), padding='same', name='conv2dtranspose')(decoded)
    output = Reshape((-1, input_dim), name='output_seq')(decoded)
    # AE model
    autoencoder = Model(inputs=x, outputs=output, name='AE')
    # Encoder model
    encoder = Model(inputs=x, outputs=encoded, name='encoder')
    # Create input for decoder model
    encoded_input = Input(shape=(timesteps // pool_size, n_units[1]), name='decoder_input')
    # Internal layers in decoder: reuse the autoencoder's layers so the
    # standalone decoder shares the trained weights.
    decoded = autoencoder.get_layer('reshape')(encoded_input)
    decoded = autoencoder.get_layer('upsampling')(decoded)
    decoded = autoencoder.get_layer('conv2dtranspose')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    # Decoder model
    decoder = Model(inputs=encoded_input, outputs=decoder_output, name='decoder')
    return autoencoder, encoder, decoder
def temporal_autoencoder_lstm_ae(input_dim, timesteps, n_units=[50, 1]):
    """Plain LSTM autoencoder variant.

    Returns ``(autoencoder, encoder, decoder)``; the latent sequence has
    shape ``(timesteps, n_units[0])``. Only ``n_units[0]`` is used —
    ``n_units[1]`` is accepted for signature parity with the other
    builders in this module.
    """
    x = Input(shape=(timesteps, input_dim), name='input_seq')
    encoded = LSTM(n_units[0], return_sequences=True)(x)
    encoded = LeakyReLU(name='latent')(encoded)
    decoded = LSTM(n_units[0], return_sequences=True, name='LSTM')(encoded)
    decoded = LeakyReLU(name='act')(decoded)
    decoded = TimeDistributed(Dense(units=input_dim), name='dense')(decoded) # sequence labeling
    output = Reshape((-1, input_dim), name='output_seq')(decoded)
    autoencoder = Model(inputs=x, outputs=output, name='AE')
    encoder = Model(inputs=x, outputs=encoded, name='encoder')
    # Standalone decoder reusing the trained layers by name.
    encoded_input = Input(shape=(timesteps,n_units[0]), name='decoder_input')
    decoded = autoencoder.get_layer('LSTM')(encoded_input)
    decoded = autoencoder.get_layer('act')(decoded)
    decoded = autoencoder.get_layer('dense')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    decoder = Model(inputs=encoded_input, outputs=decoder_output, name='decoder')
    return autoencoder, encoder, decoder
def sampling(args):
    """VAE reparameterization trick.

    Given ``args = (z_mean, z_log_var)``, draws
    ``z = z_mean + exp(0.5 * z_log_var) * epsilon`` with
    ``epsilon ~ N(0, I)``, so sampling stays differentiable w.r.t. the
    mean and log-variance tensors.
    """
    z_mean, z_log_var = args
    batch, dim = tf.shape(z_mean)[0], tf.shape(z_mean)[1]
    epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon
def temporal_autoencoder_vae(input_dim, timesteps, n_units=[1024, 256]):
    """Dense variational autoencoder over flattened sequences.

    Returns ``(autoencoder, encoder, decoder)``. The input sequence is
    flattened, compressed to an ``n_units[1]``-dim latent sampled via the
    ``sampling`` reparameterization helper, and decoded back to
    ``(timesteps, input_dim)``.

    NOTE(review): no KL-divergence loss term is added here — the caller
    must add it for this to train as a proper VAE; as written it
    optimizes reconstruction only.
    """
    x = Input(shape=(timesteps, input_dim), name='input_seq')
    encoded = Flatten()(x)
    encoded = Dense(n_units[0], activation='relu')(encoded)
    # Latent Gaussian parameters and a differentiable sample from them.
    z_mean = Dense(n_units[1], name='z_mean')(encoded)
    z_log_var = Dense(n_units[1], name='z_log_var')(encoded)
    z = Lambda(sampling, output_shape=(n_units[1],))([z_mean, z_log_var])
    # Encoder output is the sample reshaped to (n_units[1], 1).
    encoded_out = Reshape((n_units[1], -1))(z)
    decoded = Dense(n_units[0],activation='relu', name='dense1')(z)
    decoded = Dense(input_dim, activation='sigmoid', name='dense2')(decoded)
    decoded = Dense(input_dim*timesteps,activation='sigmoid', name='dense3')(decoded)
    output = Reshape((timesteps, input_dim), name='output_seq')(decoded)
    autoencoder = Model(inputs=x, outputs=output, name='AE')
    encoder = Model(inputs=x, outputs=encoded_out, name='encoder')
    # Standalone decoder reusing the trained dense layers by name.
    encoded_input = Input(shape=(n_units[1],))
    decoded = autoencoder.get_layer('dense1')(encoded_input)
    decoded = autoencoder.get_layer('dense2')(decoded)
    decoded = autoencoder.get_layer('dense3')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    decoder = Model(inputs=encoded_input,
                    outputs=decoder_output, name='decoder')
    return autoencoder, encoder, decoder
def temporal_autoencoder_cnn_ae(input_dim, timesteps, n_filters=50, kernel_size=10, strides=1, pool_size=10, n_units=[50, 1]):
    """Conv1D + dense temporal autoencoder (no recurrent layers).

    Same contract as ``temporal_autoencoder`` — returns
    ``(autoencoder, encoder, decoder)`` sharing layers — but the encoder
    compresses with two Dense layers instead of BiLSTMs.
    """
    # Pooling must divide the sequence length so upsampling restores it.
    assert(timesteps % pool_size == 0)
    x = Input(shape=(timesteps, input_dim), name='input_seq')
    # Encoder: convolution, temporal max-pooling, then per-step dense
    # compression down to n_units[1] features.
    encoded = Conv1D(n_filters, kernel_size, strides=strides, padding='same', activation='linear')(x)
    encoded = LeakyReLU()(encoded)
    encoded = MaxPool1D(pool_size)(encoded)
    encoded = Dense(n_units[0], activation='relu')(encoded)
    encoded = Dense(n_units[1], activation='relu')(encoded)
    # Decoder
    decoded = Reshape((-1, 1, n_units[1]), name='reshape')(encoded)
    decoded = UpSampling2D((pool_size, 1), name='upsampling')(decoded)
    decoded = Conv2DTranspose(
        input_dim, (kernel_size, 1), padding='same', name='conv2dtranspose')(decoded)
    output = Reshape((-1, input_dim), name='output_seq')(decoded)
    # AE model
    autoencoder = Model(inputs=x, outputs=output, name='AE')
    # Encoder model
    encoder = Model(inputs=x, outputs=encoded, name='encoder')
    # Create input for decoder model
    # NOTE(review): this declares the latent feature dim as 1 rather
    # than n_units[1]; consistent only when n_units[1] == 1 — confirm.
    encoded_input = Input(
        shape=(timesteps // pool_size, 1), name='decoder_input')
    # Internal layers in decoder
    decoded = autoencoder.get_layer('reshape')(encoded_input)
    decoded = autoencoder.get_layer('upsampling')(decoded)
    decoded = autoencoder.get_layer('conv2dtranspose')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    # Decoder model
    decoder = Model(inputs=encoded_input,
                    outputs=decoder_output, name='decoder')
    return autoencoder, encoder, decoder
def temporal_autoencoder_sae(input_dim, timesteps, n_units=[256, 2]):
    """Stacked dense autoencoder over flattened sequences.

    Returns ``(autoencoder, encoder, decoder)``; the latent is a fixed
    32-dim code reshaped to ``(32, 1)``.

    NOTE(review): the ``n_units`` parameter is never used — the layer
    sizes are hard-coded to 256/128/32. Kept for signature parity with
    the other builders; confirm before relying on it.
    """
    x = Input(shape=(timesteps, input_dim), name='input_seq')
    # Encoder: flatten and compress 256 -> 128 -> 32.
    encoded = Flatten()(x)
    encoded = Dense(256, activation='relu')(encoded)
    encoded = Dense(128, activation='relu')(encoded)
    encoded = Dense(32, activation='relu')(encoded)
    encoded = Reshape((32,-1))(encoded)
    # Decoder: mirror expansion back to the full flattened sequence.
    decoded = Flatten()(encoded)
    decoded = Dense(128, activation='relu', name='dense1')(decoded)
    decoded = Dense(256, activation='relu', name='dense2')(decoded)
    decoded = Dense(units=input_dim*timesteps, name='dense')(decoded)
    output = Reshape((timesteps, input_dim), name='output_seq')(decoded)
    autoencoder = Model(inputs=x, outputs=output, name='AE')
    encoder = Model(inputs=x, outputs=encoded, name='encoder')
    # Standalone decoder reusing the trained layers by name.
    encoded_input = Input(shape=(32,))
    decoded = autoencoder.get_layer('dense1')(encoded_input)
    decoded = autoencoder.get_layer('dense2')(decoded)
    decoded = autoencoder.get_layer('dense')(decoded)
    decoder_output = autoencoder.get_layer('output_seq')(decoded)
    decoder = Model(inputs=encoded_input,
                    outputs=decoder_output, name='decoder')
    return autoencoder, encoder, decoder
| [
"noreply@github.com"
] | BEbillionaireUSD.noreply@github.com |
f31f1b0eb25153d9b59aa5e1ef6d2ebc1d12ff5b | 48e5fa56d4878382a6ab7b094f0f6d06fb2c8673 | /Solver.py | 0a5226115a9000242f466cf118a2503468732fc4 | [
"MIT"
] | permissive | nurmukhametov/exrop | f62f55381afa44bd213760c2e2256f488d3fd612 | 8092a2854c74e1036e9d2bcd11ce2c8157ea14c3 | refs/heads/master | 2021-01-01T00:53:18.269531 | 2020-02-07T15:31:23 | 2020-02-07T15:31:23 | 239,106,045 | 0 | 0 | MIT | 2020-02-08T10:09:12 | 2020-02-08T10:09:11 | null | UTF-8 | Python | false | false | 18,533 | py | import code
import pickle
from itertools import combinations, chain
from triton import *
from Gadget import *
from RopChain import *
def initialize():
    """Create a fresh Triton context configured for x86-64.

    ALIGNED_MEMORY keeps symbolic memory accesses aligned, and AST nodes
    are rendered in Python syntax. A new context per call keeps solver
    state isolated between queries.
    """
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    ctx.setMode(MODE.ALIGNED_MEMORY, True)
    ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
    return ctx
def isintersect(a,b):
    """Return True if iterables *a* and *b* share at least one element.

    Replaces the original O(len(a) * len(b)) nested scan with an
    O(len(a) + len(b)) set-disjointness check; elements must be hashable
    (register names are strings throughout this module). Returns an
    explicit bool — the original returned True or fell through to None;
    both are falsy, so callers' truthiness tests are unaffected.
    """
    return not set(a).isdisjoint(b)
def findCandidatesWriteGadgets(gadgets, avoid_char=None):
    """Group memory-writing gadgets by their write descriptor.

    Gadgets whose 8-byte little-endian address contains any byte from
    *avoid_char* are skipped. The survivors with a truthy
    ``is_memory_write`` attribute are bucketed into a dict keyed by that
    attribute, preserving input order within each bucket.
    """
    grouped = {}
    for candidate in list(gadgets):
        if avoid_char:
            addr_bytes = candidate.addr.to_bytes(8, 'little')
            if any(ch in addr_bytes for ch in avoid_char):
                continue
        write_kind = candidate.is_memory_write
        if write_kind:
            grouped.setdefault(write_kind, []).append(candidate)
    return grouped
def findForRet(gadgets, min_diff_sp=0, not_write_regs=set(), avoid_char=None):
    """Return the first clean return-ending gadget with the requested
    stack-pointer delta, or None if none qualifies.

    A candidate must not read or write memory, must end in a plain
    return with ``diff_sp == min_diff_sp``, must not clobber any
    register in *not_write_regs*, and its address must not contain a
    byte from *avoid_char*.
    """
    for candidate in list(gadgets):
        if avoid_char:
            addr_bytes = candidate.addr.to_bytes(8, 'little')
            if any(ch in addr_bytes for ch in avoid_char):
                continue
        if isintersect(not_write_regs, candidate.written_regs):
            continue
        if candidate.is_memory_read or candidate.is_memory_write:
            continue
        if candidate.end_type == TYPE_RETURN and candidate.diff_sp == min_diff_sp:
            return candidate
def findPivot(gadgets, not_write_regs=set(), avoid_char=None):
    """Collect every stack-pivoting gadget that passes the filters.

    Skips gadgets whose address contains a byte from *avoid_char* or
    that clobber a register in *not_write_regs*; returns the remaining
    gadgets with a truthy ``pivot`` attribute, in input order.
    """
    pivots = []
    for candidate in list(gadgets):
        if avoid_char:
            addr_bytes = candidate.addr.to_bytes(8, 'little')
            if any(ch in addr_bytes for ch in avoid_char):
                continue
        if isintersect(not_write_regs, candidate.written_regs):
            continue
        if candidate.pivot:
            pivots.append(candidate)
    return pivots
def findSyscall(gadgets, not_write_regs=set(), avoid_char=None):
    """Find a syscall gadget, preferring one that ends in a return.

    Applies the usual address/avoid-char and clobbered-register filters.
    Returns the first memory-clean syscall gadget ending in a return; if
    none returns cleanly, falls back to the last non-returning syscall
    gadget seen (or None).
    """
    fallback = None
    for candidate in list(gadgets):
        if avoid_char:
            addr_bytes = candidate.addr.to_bytes(8, 'little')
            if any(ch in addr_bytes for ch in avoid_char):
                continue
        if isintersect(not_write_regs, candidate.written_regs):
            continue
        if candidate.is_memory_read or candidate.is_memory_write:
            continue
        if candidate.is_syscall:
            if candidate.end_type == TYPE_RETURN:
                return candidate
            fallback = candidate
    return fallback
def findCandidatesGadgets(gadgets, regs_write, regs_items, not_write_regs=set(), avoid_char=None, cand_write_first=False):
    """Rank gadgets by usefulness for setting the registers in *regs_write*.

    Buckets the usable gadgets (no memory access, known end type, no
    forbidden clobbers, no avoid_char byte in the address) into:
    exact-constant matches (defined2), pop-based setters, other defined
    setters, generic writers, and gadgets that can set registers those
    depend on (found recursively). Returns one list concatenated in
    preference order; *cand_write_first* puts generic writers first.

    WARNING: mutates *gadgets* in place (removes consumed entries).
    """
    candidates_pop = []
    candidates_write = []
    candidates_depends = []
    candidates_defined = []
    candidates_defined2 = []
    # NOTE(review): candidates_no_return is never appended to — it is
    # always empty when concatenated below.
    candidates_no_return = []
    candidates_for_ret = []
    depends_regs = set()
    for gadget in list(gadgets):
        # Drop gadgets that clobber protected regs, touch memory, or end
        # in a way the solver cannot chain.
        if isintersect(not_write_regs, gadget.written_regs) or gadget.is_memory_read or gadget.is_memory_write or gadget.end_type in [TYPE_UNKNOWN, TYPE_JMP_MEM, TYPE_CALL_MEM]:
            gadgets.remove(gadget)
            continue
        # Skip (but keep in `gadgets`) addresses containing a bad byte.
        badchar = False
        if avoid_char:
            for char in avoid_char:
                addrb = gadget.addr.to_bytes(8, 'little')
                if char in addrb:
                    badchar = True
                    break
        if badchar:
            continue
        # Gadget deterministically defines one of the wanted registers.
        if isintersect(regs_write,set(gadget.defined_regs.keys())):
            if regs_items and isintersect(regs_items, set(gadget.defined_regs.items())):
                candidates_defined2.append(gadget)
            else:
                candidates_defined.append(gadget)
            gadgets.remove(gadget)
            depends_regs.update(gadget.depends_regs)
            continue
        # Gadget pops a wanted register straight off the stack.
        if isintersect(regs_write,gadget.popped_regs):
            candidates_pop.append(gadget)
            gadgets.remove(gadget)
            depends_regs.update(gadget.depends_regs)
            continue
        # Gadget writes a wanted register some other way.
        if isintersect(regs_write,gadget.written_regs):
            candidates_write.append(gadget)
            gadgets.remove(gadget)
            depends_regs.update(gadget.depends_regs)
            continue
    # Recurse once for gadgets that can set the dependency registers.
    if depends_regs:
        candidates_depends = findCandidatesGadgets(gadgets, depends_regs, set(), not_write_regs)
    if cand_write_first:
        candidates = candidates_write + candidates_defined2 + candidates_pop + candidates_defined + candidates_depends # ordered by useful gadgets
    else:
        candidates = candidates_defined2 + candidates_pop + candidates_defined + candidates_write + candidates_no_return + candidates_depends # ordered by useful gadgets
    # Append small-stack-delta leftovers as ret helpers.
    # NOTE(review): this removes items from `gadgets` while iterating
    # it, which can skip the element following each removal — confirm
    # whether that is intended before relying on completeness here.
    for gadget in gadgets:
        if gadget.diff_sp in [8,0]:
            candidates_for_ret.append(gadget)
            gadgets.remove(gadget)
    candidates += candidates_for_ret
    return candidates
def extract_byte(bv, pos):
    """Return byte number *pos* (little-endian position) of integer *bv*."""
    shift = pos * 8
    return (bv >> shift) % 256
def filter_byte(astctxt, bv, bc, bsize):
    """Build AST constraints forbidding byte value *bc* in *bv*.

    For each of the low *bsize* bytes of the AST node *bv*, produce a
    node asserting that byte is NOT equal to *bc*; returns the list of
    constraint nodes (empty when bsize == 0).
    """
    constraints = []
    for byte_index in range(bsize):
        low_bit = byte_index * 8
        high_bit = low_bit + 7
        byte_node = astctxt.extract(high_bit, low_bit, bv)
        forbidden = astctxt.bv(bc, 8)
        constraints.append(astctxt.lnot(astctxt.equal(byte_node, forbidden)))
    return constraints
def check_contain_avoid_char(regvals, avoid_char):
    """True iff any integer in *regvals* contains a forbidden byte.

    Each non-string value is encoded as 8 little-endian bytes (so high
    zero padding counts) and checked against every byte in *avoid_char*.
    String values (register-to-register assignments) are ignored.
    """
    encoded = [val.to_bytes(8, 'little')
               for val in regvals if not isinstance(val, str)]
    return any(ch in blob for ch in avoid_char for blob in encoded)
def get_all_written(tmp_solved):
    """Union of the registers written by every solved chain in *tmp_solved*."""
    written = set()
    for chain in tmp_solved:
        written |= set(chain.get_written_regs())
    return written
def get_all_solved(tmp_solved):
    """Union of the registers solved by every chain in *tmp_solved*."""
    solved = set()
    for chain in tmp_solved:
        solved |= set(chain.get_solved_regs())
    return solved
def insert_tmp_solved(tmp_solved, solved):
    """Insert chain *solved* into the ordered list *tmp_solved*.

    Ordering invariant: a chain must come after any chain whose solved
    registers it writes (it would clobber them otherwise). If no
    conflict exists the chain is appended; otherwise the list is scanned
    from the back for the latest safe slot. Returns True on success,
    False if no position preserves the invariant (mutates *tmp_solved*
    in place on success).
    """
    intersect = False
    # Does the new chain clobber any register already solved?
    if isintersect(solved.get_written_regs(), get_all_solved(tmp_solved)):
        intersect = True
    if intersect and len(tmp_solved) > 0:
        # Walk backwards looking for the latest position where inserting
        # after index i neither clobbers earlier results nor gets its own
        # solved registers clobbered later.
        for i in range(len(tmp_solved)-1, -1, -1):
            solved_before = get_all_solved(tmp_solved[:i+1])
            if isintersect(solved.get_solved_regs(), tmp_solved[i].get_written_regs()) and not isintersect(solved_before, solved.get_written_regs()):
                tmp_solved.insert(i+1, solved)
                break
            regs_used_after = get_all_written(tmp_solved)
            if i == 0:
                # Reached the front: prepend only if nothing in the list
                # writes over what this chain solves.
                if not isintersect(solved.get_solved_regs(), regs_used_after):
                    tmp_solved.insert(0, solved)
                else:
                    return False
    else:
        # No conflict: order does not matter, append at the end.
        tmp_solved.append(solved)
    return True
def solveGadgets(gadgets, solves, avoid_char=None, keep_regs=set(), add_type=dict(), for_refind=set(), rec_limit=0):
    """Build a ROP chain setting each register in *solves* to its value.

    *solves* maps register name -> desired value (int, or a register
    name string for reg-to-reg moves). For each candidate gadget the
    register's symbolic AST is solved with Triton; values that land in
    other registers instead of the stack trigger a recursive search
    (bounded by *rec_limit*). *avoid_char* filters forbidden bytes from
    addresses and stack values; *keep_regs* must not be clobbered;
    *add_type* forces a ChainItem type (e.g. address) per register.
    Returns a RopChain on success or [] on failure.

    WARNING: mutates *solves* (solved registers are deleted).
    """
    regs = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
    find_write_first = False
    if avoid_char:
        # If any target value contains a forbidden byte, prefer generic
        # write gadgets over direct pops (which would place it on stack).
        find_write_first = check_contain_avoid_char(solves.values(), avoid_char)
    candidates = findCandidatesGadgets(gadgets[:], set(solves.keys()), set(solves.items()), avoid_char=avoid_char, cand_write_first=find_write_first)
    ctx = initialize()
    astCtxt = ctx.getAstContext()
    chains = RopChain()
    reg_refind = set()
    if rec_limit >= 30: # maximum recursion
        return []
    for gadget in candidates:
        tmp_solved_ordered = []
        tmp_solved_regs = set()
        tmp_solved_ordered2 = []
        # Lazily build the gadget's symbolic ASTs on first use.
        if not gadget.is_asted:
            gadget.buildAst()
        reg_to_reg_solve = set()
        if isintersect(keep_regs, gadget.written_regs):
            continue
        for reg,val in solves.items():
            if reg not in gadget.written_regs or reg in gadget.end_reg_used:
                continue
            regAst = gadget.regAst[reg]
            # Gadget already sets reg to exactly the wanted constant.
            if reg in gadget.defined_regs and gadget.defined_regs[reg] == val:
                tmp_solved_regs.add(reg)
                tmp_solved_ordered.append([])
                if isinstance(val, str):
                    reg_to_reg_solve.add(val)
                continue
            refind_dict = {}
            if isinstance(val, str): # probably registers
                # reg-to-reg move: chase the source register instead.
                if reg in gadget.defined_regs and isinstance(gadget.defined_regs[reg], str) and gadget.defined_regs[reg] != reg:
                    refind_dict[gadget.defined_regs[reg]] = val
                    hasil = []
                else:
                    continue
            else:
                if avoid_char:
                    if reg in gadget.defined_regs and isinstance(gadget.defined_regs[reg],int):
                        continue
                    # Constrain every symbolic input byte away from the
                    # forbidden characters before solving for the value.
                    childs = astCtxt.search(regAst, AST_NODE.VARIABLE)
                    filterbyte = []
                    hasil = False
                    valb = val.to_bytes(8, 'little')
                    lval = len(valb.strip(b"\x00"))
                    for char in avoid_char:
                        if char in valb:
                            for child in childs:
                                for char in avoid_char:
                                    fb = filter_byte(astCtxt, child, char, lval)
                                    filterbyte.extend(fb)
                            if filterbyte:
                                filterbyte.append(regAst == astCtxt.bv(val,64))
                    if filterbyte:
                        filterbyte = astCtxt.land(filterbyte)
                        hasil = list(ctx.getModel(filterbyte).values())
                    if not hasil: # try to find again
                        hasil = list(ctx.getModel(regAst == astCtxt.bv(val,64)).values())
                else:
                    hasil = list(ctx.getModel(regAst == astCtxt.bv(val,64)).values())
            # Inspect the model: stack variables are fine (they become
            # chain items); register variables require a recursive solve.
            for v in hasil:
                alias = v.getVariable().getAlias()
                if 'STACK' not in alias: # check if value is found not in stack
                    if alias in regs and alias not in refind_dict: # check if value is found in reg
                        # check if reg for next search contain avoid char, if
                        # true break
                        if alias == reg and avoid_char:
                            valb = v.getValue().to_bytes(8, 'little')
                            for char in avoid_char:
                                if char in valb:
                                    hasil = False
                                    refind_dict = False
                            if not hasil:
                                break
                        if ((alias != reg and (alias,val) not in for_refind) or v.getValue() != val):
                            refind_dict[alias] = v.getValue() # re-search value with new reg
                        else:
                            hasil = False
                            refind_dict = False
                            break
                    else:
                        hasil = False
                        break
                elif avoid_char: # check if stack is popped contain avoid char
                    for char in avoid_char:
                        if char in val.to_bytes(8, 'little'):
                            hasil = False
                            refind_dict = False
                            break
            if refind_dict:
                # print((gadget,refind_dict,rec_limit))
                tmp_for_refind = for_refind.copy() # don't overwrite old value
                tmp_for_refind.add((reg,val))
                reg_refind.update(set(list(refind_dict.keys())))
                # Recurse to set the intermediate register(s) first.
                hasil = solveGadgets(candidates[:], refind_dict, avoid_char, for_refind=tmp_for_refind, rec_limit=rec_limit+1)
            if hasil:
                if isinstance(val, str):
                    reg_to_reg_solve.add(gadget.defined_regs[reg])
                if not isinstance(hasil, RopChain):
                    # Plain model: turn solved stack slots into chain items.
                    type_chain = CHAINITEM_TYPE_VALUE
                    if add_type and reg in add_type and add_type[reg] == CHAINITEM_TYPE_ADDR:
                        type_chain = CHAINITEM_TYPE_ADDR
                    hasil = ChainItem.parseFromModel(hasil, type_val=type_chain)
                    tmp_solved_ordered.append(hasil)
                    tmp_solved_regs.add(reg)
                else:
                    # Sub-chain from recursion: splice it in dependency order.
                    if insert_tmp_solved(tmp_solved_ordered2, hasil):
                        tmp_solved_regs.add(reg)
        if not tmp_solved_regs:
            continue
        # Non-returning gadget: append a follow-up ret gadget and solve
        # the jump/call target register to point at it.
        if gadget.end_type != TYPE_RETURN:
            if isintersect(set(list(solves.keys())), gadget.end_reg_used) or not gadget.end_ast:
                continue
            next_gadget = None
            # print("handling no return gadget")
            diff = 0
            if gadget.end_type == TYPE_JMP_REG:
                next_gadget = findForRet(candidates[:], 0, tmp_solved_regs, avoid_char=avoid_char)
            elif gadget.end_type == TYPE_CALL_REG:
                # call pushes a return address: need diff_sp == 8 to skip it.
                next_gadget = findForRet(candidates[:], 8, tmp_solved_regs, avoid_char=avoid_char)
                diff = 8
            if not next_gadget:
                continue
            gadget.end_gadget = next_gadget
            gadget.diff_sp += next_gadget.diff_sp - diff
            regAst = gadget.end_ast
            val = gadget.end_gadget.addr
            hasil = list(ctx.getModel(regAst == val).values())
            refind_dict = {}
            type_chains = {}
            for v in hasil:
                alias = v.getVariable().getAlias()
                if 'STACK' not in alias:
                    if alias in regs and alias not in refind_dict:
                        refind_dict[alias] = v.getValue()
                        type_chains[alias] = CHAINITEM_TYPE_ADDR
                    else:
                        hasil = False
                        break
            if refind_dict:
                # Protect everything solved so far while setting the
                # target register.
                reg_to_reg_solve.update(tmp_solved_regs)
                reg_to_reg_solve.update(reg_refind)
                hasil = solveGadgets(gadgets, refind_dict, avoid_char, add_type=type_chains, keep_regs=reg_to_reg_solve, rec_limit=rec_limit+1)
            if not hasil:
                continue
            if not isinstance(hasil, RopChain):
                type_chain = CHAINITEM_TYPE_ADDR
                hasil = ChainItem.parseFromModel(hasil, type_val=type_chain)
                tmp_solved_ordered.append(hasil)
            else:
                insert_tmp_solved(tmp_solved_ordered2, hasil)
        tmp_solved_ordered.extend(tmp_solved_ordered2)
        dep_regs = set()
        if reg_to_reg_solve:
            dep_regs = reg_to_reg_solve - tmp_solved_regs
        tmp_chain = Chain()
        tmp_chain.set_solved(gadget, tmp_solved_ordered, tmp_solved_regs, depends_regs=dep_regs)
        if not chains.insert_chain(tmp_chain):
            # print("failed insert")
            continue # can't insert chain
        # Mark these registers done; finish once nothing is left.
        for reg in tmp_solved_regs:
            if reg in solves:
                del solves[reg]
        if not solves:
            return chains
    return []
def solveWriteGadgets(gadgets, solves, avoid_char=None):
    """Build a chain performing arbitrary memory writes.

    *solves* maps target address -> 64-bit value to store there. For
    each memory-write gadget, both the write-address AST and the
    write-value AST are solved; the registers they depend on are then
    set via solveGadgets. Returns a RopChain covering every requested
    write, or None if some write could not be satisfied (mutates
    *solves* as writes are solved).
    """
    regs = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
    # NOTE(review): final_solved is assigned but never used.
    final_solved = []
    candidates = findCandidatesWriteGadgets(gadgets[:], avoid_char=avoid_char)
    ctx = initialize()
    gwr = list(candidates.keys())
    chains = RopChain()
    gwr.sort()
    for w in gwr:
        for gadget in candidates[w]:
            if not gadget.is_asted:
                gadget.buildAst()
            for addr,val in list(solves.items())[:]:
                # Only full 64-bit stores are usable here.
                mem_ast = gadget.memory_write_ast[0]
                if mem_ast[1].getBitvectorSize() != 64:
                    break
                # Solve the store address and the stored value separately.
                addrhasil = ctx.getModel(mem_ast[0] == addr).values()
                valhasil = ctx.getModel(mem_ast[1] == val).values()
                if not addrhasil or not valhasil:
                    break
                hasil = list(addrhasil) + list(valhasil)
                refind_dict = {}
                # code.interact(local=locals())
                # Both address and value must come from registers we can
                # control; anything else disqualifies the gadget.
                for v in hasil:
                    alias = v.getVariable().getAlias()
                    if 'STACK' not in alias:
                        if alias in regs and alias not in refind_dict:
                            refind_dict[alias] = v.getValue()
                        else:
                            hasil = False
                            break
                if hasil and refind_dict:
                    # Set the source registers, then the write gadget fires.
                    hasil = solveGadgets(gadgets[:], refind_dict, avoid_char=avoid_char)
                if hasil:
                    del solves[addr]
                    chain = Chain()
                    chain.set_solved(gadget, [hasil])
                    chains.insert_chain(chain)
            if not solves:
                return chains
def solvePivot(gadgets, addr_pivot, avoid_char=None):
    """Build chains that pivot the stack pointer to *addr_pivot*.

    For every pivot gadget, solves its pivot AST for the target address;
    if the model only uses controllable registers they are set via
    solveGadgets, and the resulting chain is collected. Returns a
    RopChain of all workable pivots (possibly empty).
    """
    regs = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
    candidates = findPivot(gadgets, avoid_char=avoid_char)
    ctx = initialize()
    chains = RopChain()
    for gadget in candidates:
        if not gadget.is_asted:
            gadget.buildAst()
        hasil = ctx.getModel(gadget.pivot_ast == addr_pivot).values()
        # Classify each model variable: register sources go into
        # refind_dict; a STACK slot fixes the gadget's stack delta.
        # NOTE(review): refind_dict is re-initialized on every loop
        # iteration, discarding earlier register assignments, and
        # refind_dict/new_diff_sp are referenced after the loop even when
        # `hasil` is empty — both look like latent bugs (NameError /
        # lost state); confirm against upstream before changing.
        for v in hasil:
            alias = v.getVariable().getAlias()
            refind_dict = dict()
            if 'STACK' not in alias:
                if alias in regs and alias not in refind_dict:
                    refind_dict[alias] = v.getValue()
                else:
                    hasil = False
                    break
            else:
                # Pivot target popped from stack slot N: gadget consumes
                # (N+1)*8 bytes of chain.
                idxchain = int(alias.replace("STACK", ""))
                new_diff_sp = (idxchain+1)*8
        if hasil and refind_dict:
            hasil = solveGadgets(gadgets[:], refind_dict, avoid_char=avoid_char)
            new_diff_sp = 0
            if not hasil:
                continue
        gadget.diff_sp = new_diff_sp
        chain = Chain()
        chain.set_solved(gadget, [hasil])
        chains.insert_chain(chain)
    return chains
| [
"n0psledbyte@gmail.com"
] | n0psledbyte@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.