blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09e93c96494e49a2d86292c5fba4b0ca5dd942cb | bda539dedce550a3242cd9a2a4bb61782a924c12 | /git/training/runTraining.py | 26cb6e270bf3928f7bc110871628979131099a59 | [] | no_license | joohwankim/deepgazekickoff | 37222c937faca070a848bf97b1d85df518659d48 | b30cc51de247be75cee510240868e6a31e08a815 | refs/heads/master | 2020-03-18T10:17:21.847000 | 2018-05-29T14:31:21 | 2018-05-29T14:31:21 | 134,605,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | """
Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-ND 4.0 license (https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode).
"""
import argparse, logging, os, dlcore.train, sys
if __name__ == "__main__":
    # Command-line entry point for training: parse the job folder, optional
    # config-variable overrides, and an optional checkpoint, then hand off
    # to dlcore.train.main().
    parser = argparse.ArgumentParser()
    parser.add_argument('-j', '--job', required=True, help='Which network to train. Specify a folder containing configuration file')
    parser.add_argument('-v', '--var', nargs='*', action='append', help='A varaible and value pair')
    parser.add_argument('-r', '--resume', default=None, help='Address to a checkpoint file. If given, resume training from the checkpoint file.')
    args = parser.parse_args()
    #config = dlcore.train.loadModule(os.path.join(args.job,'config.py'))
    config = dlcore.train.loadModule(args.job)
    if args.var:
        # Each -v NAME VALUE pair overrides a config attribute; the string
        # value is cast to the type of the attribute's current value.
        # NOTE(review): getattr runs before the len(var) == 2 check, so a
        # bad name raises AttributeError — confirm this is intended.
        for var in args.var:
            dtype = type(getattr(config, var[0]))
            if len(var) == 2:
                setattr(config, var[0], dtype(var[1]))
    # When result_dir was left at the default './', store results inside
    # the job folder instead.
    if os.path.abspath(config.result_dir) == os.path.abspath('./'):
        config.result_dir = os.path.normpath(args.job)
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    #logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    #logging.basicConfig(filename=os.path.join(config.result_dir,config.log), level=config.log_level)
    dlcore.train.main(config, args.resume)
| [
"sckim@nvidia.com"
] | sckim@nvidia.com |
4ec4d34dab5f7644e361280ca777fc5fb41fdb92 | 782efe22f3251a701796e68e82fbce27c2ce2d8f | /Discussion/migrations/0042_auto_20200515_1217.py | 0b9097e87756d35dd0f7ac9dcbb9599753393a94 | [] | no_license | Escalation99/Workev | ffc10e64776bf90d206a4a7a8ef3655c22f0223b | c2312c54c152b823e991ef5955b5d2df7ff58222 | refs/heads/main | 2023-03-13T05:36:54.386719 | 2021-03-06T10:27:06 | 2021-03-06T10:27:06 | 310,613,595 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-15 05:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the ``category`` field of ``Discussion.Post``.

    Auto-generated by Django 1.11: refreshes the field's ``choices`` list
    (Announcement / Other / Meeting / Jobdesc) and keeps ``'Jobdesc'`` as
    the default.
    """

    dependencies = [
        ('Discussion', '0041_auto_20200513_1402'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.CharField(choices=[('Announcement', 'Announcement'), ('Other', 'Other'), ('Meeting', 'Meeting'), ('Jobdesc', 'Jobdesc')], default='Jobdesc', max_length=50),
        ),
    ]
| [
"raytommy1234@gmail.com"
] | raytommy1234@gmail.com |
5827494e28c8324f3fe91b182ec76744a95c029b | aef02ad0a2b36e763af4b6de84399fcbfb788faf | /LPHW/ex6.py | 4d116c7b7f3dba9e3e1cb77c6d4b06c35e1b0fbb | [] | no_license | kanishkd4/Python_Learning_code | 98cf74cbbeef34f594804b515438f24775feddbf | 62a6b1745f4c8624ed4207ab38c83f0a7ead99c9 | refs/heads/master | 2020-04-15T12:44:52.828258 | 2018-04-05T09:56:35 | 2018-04-05T09:56:35 | 61,795,436 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 410 | py |
# LPTHW exercise 6 (Python 2): string formatting with %d, %s and %r.
x = "there are %d types of people." %10
binary = "binary"
do_not = "don't"
y = "those who know %s and those who %s." % (binary, do_not)
print x
print y
# %r formats with repr(): the string is shown with its quotes.
print "I said %r" % x
print "I also said: '%s'" % y
# %r on a boolean prints it verbatim (False), not a string conversion.
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
# Plain concatenation of two string variables.
w = "this is the left side of.."
e = "a string with a right side."
print w + e
| [
"noreply@github.com"
] | kanishkd4.noreply@github.com |
1d539066706ca4f69d3130d49688deb922c477b3 | 98311c7b2b2257f14f0f4a0657363e893872798e | /project/src/python/practicum.py | e3f1dfcf9ef76f4b71a4dd1106d26832dc48802f | [
"MIT"
] | permissive | aslupin/Yak-Ngaen-Project | fed9a264a863e1174c00ec8ad360f1c03422f393 | c91b3cc83d2eda22b62fe877276bdd1a8a1b24fd | refs/heads/master | 2022-01-28T02:44:39.385903 | 2019-05-09T13:36:04 | 2019-05-09T13:36:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | import usb
# RQ_SET_LED = 0
# RQ_SET_LED_VALUE = 1
# RQ_GET_SWITCH = 2
# RQ_GET_LIGHT = 3
RQ_GET_SOUND_PLAYER_I = 1
RQ_GET_SOUND_PLAYER_II = 2
####################################
def find_mcu_boards():
    """Scan every USB bus and return all attached Practicum MCU boards.

    A board is recognized by its (vendor, product) ID pair
    (0x16c0, 0x05dc).  The returned list contains raw USB device
    objects suitable for constructing an McuBoard.

    >>> devices = find_mcu_boards()
    >>> first_board = McuBoard(devices[0])
    """
    found = []
    for bus in usb.busses():
        for dev in bus.devices:
            if (dev.idVendor, dev.idProduct) == (0x16c0, 0x05dc):
                found.append(dev)
    return found
####################################
class McuBoard:
    """Generic class for accessing Practicum MCU board via USB connection."""

    ################################
    def __init__(self, dev):
        """Open a USB handle to the given device."""
        self.device = dev
        self.handle = dev.open()

    ################################
    def usb_write(self, request, data=None, index=0, value=0):
        """Send data output to the USB device (i.e., MCU board).

        request: request number to appear as bRequest field on the USB device
        data: payload bytes to send (defaults to an empty payload)
        index: 16-bit value to appear as wIndex field on the USB device
        value: 16-bit value to appear as wValue field on the USB device
        """
        # Use a None sentinel instead of a mutable default argument
        # (data=[]), which would be shared across all calls.
        if data is None:
            data = []
        req_type = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_OUT
        self.handle.controlMsg(
            req_type, request, data, value=value, index=index)

    ################################
    def usb_read(self, request, length=1, index=0, value=0):
        """Request data input from the USB device (i.e., MCU board).

        request: request number to appear as bRequest field on the USB device
        length: number of bytes to read from the USB device
        index: 16-bit value to appear as wIndex field on the USB device
        value: 16-bit value to appear as wValue field on the USB device

        If successful, the method returns a tuple of length specified
        containing data returned from the MCU board.
        """
        req_type = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_IN
        buf = self.handle.controlMsg(
            req_type, request, length, value=value, index=index)
        return buf
####################################
class PeriBoard:
    """High-level access to the peripheral board attached to a Practicum MCU.

    Wraps an McuBoard and exposes the sound-sensor readings for the two
    players.  (Earlier LED/switch/light helpers were removed as dead,
    commented-out code.)
    """

    ################################
    def __init__(self, mcu):
        """Store the McuBoard that performs the raw USB transfers."""
        self.mcu = mcu

    ################################
    def get_sound(self, player):
        """Return the current sound-sensor reading for the given player.

        player: RQ_GET_SOUND_PLAYER_I or RQ_GET_SOUND_PLAYER_II (these are
        also the USB request numbers, so the value is passed straight
        through to usb_read).

        Returns the low byte of the two-byte sensor reading, or None when
        player is not a recognized request number (same implicit behavior
        as the original duplicated branches).
        """
        if player in (RQ_GET_SOUND_PLAYER_I, RQ_GET_SOUND_PLAYER_II):
            sound = self.mcu.usb_read(request=player, length=2)
            # NOTE(review): only the low byte is used; the high byte
            # (sound[1]) is read but discarded, as in the original code.
            return sound[0]
        return None
| [
"poon_arsene_lupin@hotmail.com"
] | poon_arsene_lupin@hotmail.com |
0d5612bcf83e90343b35f237bfbb6536fe5a32fc | 99dbc0388a1396d9d0f636ba6ad4e7ce6b646637 | /app/frontend/views.py | 4ae6ddaba7060c91bb981e1d735c5289f1895cb6 | [] | no_license | thefedoration/tracker-widgets | 9469f27a023cc6c4f3cb1161f39452deb58ce282 | 47bd08030a8ced3b6ddf2c48cc41f8f0b705aa79 | refs/heads/master | 2021-06-22T04:44:47.565674 | 2017-05-15T13:32:27 | 2017-05-15T13:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django.shortcuts import render, redirect
# serves up frontend bundle
def index(request):
    """Serve the bundled frontend app to logged-in users.

    Anonymous visitors are redirected to the login page with a ``?next=``
    parameter pointing back at the path they requested.
    """
    if not request.user.is_authenticated():
        # Preserve the requested path (minus a single leading slash) as
        # the post-login redirect target.
        target = request.path
        if target[0] == '/':
            target = target[1:]
        return redirect('/accounts/login/?next=%s' % target)
    # Authenticated: hand control to the single-page frontend bundle.
    return render(request, 'frontend/index.html')
| [
"fedor@pymetrics.com"
] | fedor@pymetrics.com |
a921a15b368f2785bb530b40113b34630061be52 | 0974dd03a2c169c9186d74bb9c4f80ea68802331 | /bin/f2py2 | 3f101a41698295cedc62c9ebc308c351e8e37718 | [] | no_license | devashah7/instameme | edb4a0cf8e80560eef54e0aa40a19fd4deb0c99c | 8ba27800dc5624f80672fae3f727ece5fcd779a2 | refs/heads/master | 2020-08-09T06:25:03.011987 | 2019-10-09T20:42:57 | 2019-10-09T20:42:57 | 214,018,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/home/dshah/Desktop/insta/insta/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py(w)" / ".exe" wrapper suffix from
    # argv[0] so f2py sees its canonical program name, then exit with
    # the status code returned by f2py2e.main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"dshah@hcn-inc.com"
] | dshah@hcn-inc.com | |
8951afe2b51d654fd469ed7fd936879e3610aa30 | 35894bca47cf0c9a51a05caf7b56a0d69c05b033 | /04_subrotinas_numpy/25_fibonacci.py | 1067f8b8abc1c15bc44a985e9b4f892471d34f46 | [] | no_license | alcebytes/Phyton-Estudo | 0a2d33f5f3e668e6ab2f99e5e4499545a3bc1273 | a3f9a0b3e0a91d71a9359480d6ec17e692572694 | refs/heads/master | 2023-01-14T17:24:16.486956 | 2020-10-08T02:02:02 | 2020-10-08T02:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | import time as time
# Build the Fibonacci sequence up to num_iter terms in the list f, timing
# the run with both wall-clock and CPU counters.
num_iter = int(input("Digitar o valor do número máximo para a sequência de Fibonacci = "))
tempo_inicio = time.time()
# time.clock() is obsolete ("ABSOLETO"); process_time()/perf_counter()
# are its modern replacements.
#tempo_inicio_CPU = time.clock() #ABSOLETO
tempo_inicio_CPU = time.process_time()
tempo_inicio_CPU_2 = time.perf_counter()
# f(0)
f = []
f.append(0)
print(f)
# f(1)
f.append(1)
print(f)
"""
f(n + 2) = f(n) + f(n + 1)
for n in range(0, num_iter - 2, 1)
    f.append(f[n] + f[n + 1] )
"""
# Iteratively append f(n) + f(n+1) until the list holds num_iter terms.
n = 0
while n <= num_iter - 3:
    f.append(f[n] + f[n + 1])
    n = n + 1
print(f)
# Print the last term of f
print(f[-1])
# Alternative way:
print(f[len(f) - 1])
# Elapsed wall-clock time, then the two CPU-time measurements.
tempo_fim = time.time() - tempo_inicio
print("O tempo de execução da aplicação é", tempo_fim, "s")
tempo_fim_CPU_2 = time.perf_counter() - tempo_inicio_CPU_2
print("O tempo de execução da CPU é", tempo_fim_CPU_2)
tempo_fim_CPU = time.process_time() - tempo_inicio_CPU
print("O tempo de execução da CPU é", tempo_fim_CPU)
| [
"x_kata@hotmail.com"
] | x_kata@hotmail.com |
d1963e7cc009082ee066bf6e7b3db7e2a3f62383 | 6472cd640341f4bcc3867c3579a87ee8b763ae15 | /conventionalAI/venv/lib/python3.6/site-packages/rivescript/rivescript.py | 95fdae4013df4e458c3448a8cef0dfdd83a16f8e | [] | no_license | iafjayoza/Machine_Learning | 7e9664cb6da6e0521e3475c9f80acd3ff15d1fc8 | aba8fd939194b839da03f4c1ebd9eac8331d0b90 | refs/heads/master | 2023-06-01T14:28:59.391643 | 2021-06-25T17:12:57 | 2021-06-25T17:12:57 | 380,303,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,656 | py | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 Noah Petherbridge
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from six import text_type
import sys
import os
import re
import string
import random
import pprint
import copy
import codecs
from . import __version__
from . import python
# Common regular expressions.
class RE(object):
    """Pre-compiled regular expressions used throughout the interpreter.

    Grouped roughly by purpose: generic tokenizing, trigger syntax
    checking, UTF-8 mode helpers, *Condition parsing, and in-reply tags.
    """
    # Generic tokenizing / parsing helpers.
    equals = re.compile('\s*=\s*')            # "name = value" separator
    ws = re.compile('\s+')                    # runs of whitespace
    objend = re.compile('^\s*<\s*object')     # "< object" end-of-macro line
    weight = re.compile('\{weight=(\d+)\}')   # {weight=N} trigger tag
    inherit = re.compile('\{inherits=(\d+)\}')
    wilds = re.compile('[\s\*\#\_]+')         # wildcards and whitespace
    nasties = re.compile('[^A-Za-z0-9 ]')     # chars stripped from user input
    crlf = re.compile('<crlf>')               # fake line-break marker
    literal_w = re.compile(r'\\w')
    array = re.compile(r'\@(.+?)\b')          # @array references in triggers
    # Syntax-check patterns (see check_syntax).
    def_syntax = re.compile(r'^.+(?:\s+.+|)\s*=\s*.+?$')
    name_syntax = re.compile(r'[^a-z0-9_\-\s]')
    utf8_trig = re.compile(r'[A-Z\\.]')
    trig_syntax = re.compile(r'[^a-z0-9(\|)\[\]*_#@{}<>=\s]')
    cond_syntax = re.compile(r'^.+?\s*(?:==|eq|!=|ne|<>|<|<=|>|>=)\s*.+?=>.+?$')
    # UTF-8 mode helpers.
    utf8_meta = re.compile(r'[\\<>]')
    utf8_punct = re.compile(r'[.?,!;:@#$%^&*()]')
    # *Condition parsing: "left op right => reply".
    cond_split = re.compile(r'\s*=>\s*')
    cond_parse = re.compile(r'^(.+?)\s+(==|eq|!=|ne|<>|<|<=|>|>=)\s+(.+?)$')
    # In-reply tags.
    topic_tag = re.compile(r'\{topic=(.+?)\}')
    set_tag = re.compile(r'<set (.+?)=(.+?)>')
    bot_tag = re.compile(r'<bot (.+?)>')
    get_tag = re.compile(r'<get (.+?)>')
    star_tags = re.compile(r'<star(\d+)>')
    botstars = re.compile(r'<botstar(\d+)>')
    input_tags = re.compile(r'<input([1-9])>')
    reply_tags = re.compile(r'<reply([1-9])>')
    random_tags = re.compile(r'\{random\}(.+?)\{/random\}')
    redir_tag = re.compile(r'\{@(.+?)\}')
    tag_search = re.compile(r'<([^<]+?)>')
    placeholder = re.compile(r'\x00(\d+)\x00')
    zero_star = re.compile(r'^\*$')
    optionals = re.compile(r'\[(.+?)\]')
# Version of RiveScript we support.
rs_version = 2.0
# Exportable constants.
RS_ERR_MATCH = "[ERR: No reply matched]"
RS_ERR_REPLY = "[ERR: No reply found]"
RS_ERR_DEEP_RECURSION = "[ERR: Deep recursion detected]"
RS_ERR_OBJECT = "[ERR: Error when executing Python object]"
RS_ERR_OBJECT_HANDLER = "[ERR: No Object Handler]"
RS_ERR_OBJECT_MISSING = "[ERR: Object Not Found]"
class RiveScript(object):
"""A RiveScript interpreter for Python 2 and 3."""
# Concatenation mode characters.
_concat_modes = dict(
none="",
space=" ",
newline="\n",
)
############################################################################
# Initialization and Utility Methods #
############################################################################
def __init__(self, debug=False, strict=True, depth=50, log="", utf8=False):
    """Initialize a new RiveScript interpreter.

    bool debug: enable debug output.
    bool strict: treat RiveScript syntax errors as fatal.
    int depth: recursion depth limit for fetching replies.
    str log: optional path of a file to receive debug output
        (instead of STDOUT).
    bool utf8: enable UTF-8 support."""
    # --- User-configurable settings -----------------------------------
    self._debug = debug
    self._log = log
    self._utf8 = utf8
    self.unicode_punctuation = re.compile(r'[.,!?;:]')
    self._strict = strict
    self._depth = depth

    # --- Interpreter state --------------------------------------------
    # Variable stores: globals, bot vars, substitutions, person subs,
    # arrays, per-user data, and frozen snapshots of user data.
    self._gvars = {}
    self._bvars = {}
    self._subs = {}
    self._person = {}
    self._arrays = {}
    self._users = {}
    self._freeze = {}

    # Topic inclusion/inheritance maps and the reply structures
    # (normal triggers, %Previous triggers, sort buffers, and the
    # filename/line-number syntax tracker).
    self._includes = {}
    self._lineage = {}
    self._topics = {}
    self._thats = {}
    self._sorted = {}
    self._syntax = {}

    # Object-macro handlers and the language of each loaded object.
    self._handlers = {}
    self._objlangs = {}

    # Precomputed regexps, keyed by kind, as a speed optimization.
    self._regexc = dict(trigger={}, subs={}, person={})

    # The user ID of the request currently being served.
    self._current_user = None

    # Python object macros are supported out of the box.
    self._handlers["python"] = python.PyRiveObjects()

    self._say("Interpreter initialized.")
@classmethod
def VERSION(self=None):
    """Return the version number of the RiveScript library.

    This may be called either as a class method
    (``RiveScript.VERSION()``) or on an instance; the ``self``
    argument is unused."""
    return __version__
def _say(self, message):
    """Emit a debug message.

    Prints to STDOUT when debug mode is on, and appends to the debug
    log file when one was configured via ``log``.
    """
    if self._debug:
        print("[RS] {}".format(message))
    if self._log:
        # Use a context manager so the log handle is closed even if
        # the write fails (the original leaked the handle on error).
        with open(self._log, 'a') as fh:
            fh.write("[RS] " + message + "\n")
def _warn(self, message, fname='', lineno=0):
    """Print a warning, with file/line context when both are known."""
    # Debug mode gets a more explicit prefix.
    header = "[RS::Warning]" if self._debug else "[RS]"
    if fname and lineno > 0:
        print(header, message, "at", fname, "line", lineno)
    else:
        print(header, message)
############################################################################
# Loading and Parsing Methods #
############################################################################
def load_directory(self, directory, ext=None):
    """Load RiveScript documents from a directory.

    Provide `ext` as a list of extensions to search for. The default
    list is `.rive`, `.rs`. A single string is also accepted for
    backwards compatibility.
    """
    self._say("Loading from directory: " + directory)

    if ext is None:
        # Use the default extensions - .rive is preferable.
        ext = ['.rive', '.rs']
    elif isinstance(ext, (str, text_type)):
        # Backwards compatibility for ext being a string value.
        # isinstance with text_type also catches `unicode` strings on
        # Python 2, which the old `type(ext) == str` check missed.
        ext = [ext]

    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return

    for item in os.listdir(directory):
        for extension in ext:
            if item.lower().endswith(extension):
                # Load this file.
                self.load_file(os.path.join(directory, item))
                break
def load_file(self, filename):
    """Load and parse a single RiveScript document (UTF-8 encoded)."""
    self._say("Loading file: " + filename)

    # Context manager guarantees the handle is closed even if
    # readlines() raises (the original open/close pair leaked it).
    with codecs.open(filename, 'r', 'utf-8') as fh:
        lines = fh.readlines()

    self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
    self._parse(filename, lines)
def stream(self, code):
    """Stream in RiveScript source code dynamically.

    `code` can either be a string containing RiveScript code or an
    array of lines of RiveScript code.
    """
    self._say("Streaming code.")
    # isinstance handles both str and (on Python 2) unicode, and also
    # accepts subclasses, unlike the old `type(code) in [...]` test.
    if isinstance(code, (str, text_type)):
        code = code.split("\n")
    self._parse("stream()", code)
def _parse(self, fname, code):
    """Parse RiveScript code into memory.

    fname: the filename, used only for warnings/syntax errors.
    code: a list of lines of RiveScript source.

    Raises Exception on a syntax error when strict mode is enabled.

    Fixes over the original: the unsupported-version warning no longer
    crashes on str+float concatenation (which a bare except silently
    mislabeled as "not a number"), the array ``\\s``-to-space conversion
    actually takes effect (the old loop only rebound its loop variable),
    and bare excepts are narrowed to the exceptions actually raised.
    """
    self._say("Parsing code")

    # Track temporary parser state.
    topic = 'random'  # Default topic=random
    lineno = 0        # Line numbers for syntax tracking
    comment = False   # In a multi-line comment
    inobj = False     # In an object macro
    objname = ''      # The name of the object we're in
    objlang = ''      # The programming language of the object
    objbuf = []       # Object contents buffer
    ontrig = ''       # The current trigger
    repcnt = 0        # Reply counter
    concnt = 0        # Condition counter
    isThat = ''       # Is a %Previous trigger

    # Local (file scoped) parser options.
    local_options = dict(
        concat="none",  # Concat mode for ^Continue command
    )

    # Read each line.
    for lp, line in enumerate(code):
        lineno += 1

        self._say("Line: " + line + " (topic: " + topic + ") incomment: " + str(inobj))
        if len(line.strip()) == 0:  # Skip blank lines
            continue

        # Inside an object macro: buffer lines verbatim until "< object".
        if inobj:
            if re.match(RE.objend, line):
                # End the object.
                if len(objname):
                    # Call the object's handler.
                    if objlang in self._handlers:
                        self._objlangs[objname] = objlang
                        self._handlers[objlang].load(objname, objbuf)
                    else:
                        self._warn("Object creation failed: no handler for " + objlang, fname, lineno)
                objname = ''
                objlang = ''
                objbuf = []
                inobj = False
            else:
                objbuf.append(line)
            continue

        # Trim excess space. We do it down here so we don't mess up
        # the indentation of python objects!
        line = line.strip()

        # Look for comments.
        if line[:2] == '//':  # A single-line comment.
            continue
        elif line[0] == '#':
            self._warn("Using the # symbol for comments is deprecated", fname, lineno)
        elif line[:2] == '/*':  # Start of a multi-line comment.
            if '*/' not in line:  # Cancel if the end is here too.
                comment = True
            continue
        elif '*/' in line:
            comment = False
            continue
        if comment:
            continue

        # Separate the command character from the rest of the line.
        if len(line) < 2:
            self._warn("Weird single-character line '" + line + "' found.", fname, lineno)
            continue
        cmd = line[0]
        line = line[1:].strip()

        # Ignore inline comments if there's a space before and after
        # the // symbols.
        if " // " in line:
            line = line.split(" // ")[0].strip()

        # Run a syntax check on this line.
        syntax_error = self.check_syntax(cmd, line)
        if syntax_error:
            # There was a syntax error! Are we enforcing strict mode?
            syntax_error = "Syntax error in " + fname + " line " + str(lineno) + ": " \
                + syntax_error + " (near: " + cmd + " " + line + ")"
            if self._strict:
                raise Exception(syntax_error)
            else:
                self._warn(syntax_error)
                return  # Don't try to continue

        # Reset the %Previous state if this is a new +Trigger.
        if cmd == '+':
            isThat = ''

        # Do a lookahead for ^Continue and %Previous commands.
        for i in range(lp + 1, len(code)):
            lookahead = code[i].strip()
            if len(lookahead) < 2:
                continue
            lookCmd = lookahead[0]
            lookahead = lookahead[1:].strip()

            # Only continue if the lookahead line has any data.
            if len(lookahead) != 0:
                # The lookahead command has to be either a % or a ^.
                if lookCmd != '^' and lookCmd != '%':
                    break

                # If the current command is a +, see if the following is
                # a %.
                if cmd == '+':
                    if lookCmd == '%':
                        isThat = lookahead
                        break
                    else:
                        isThat = ''

                # If the current command is a ! and the next command(s) are
                # ^, we'll tack each extension on as a line break (which is
                # useful information for arrays).
                if cmd == '!':
                    if lookCmd == '^':
                        line += "<crlf>" + lookahead
                    continue

                # If the current command is not a ^ and the line after is
                # not a %, but the line after IS a ^, then tack it on to
                # the end of the current line.
                if cmd != '^' and lookCmd != '%':
                    if lookCmd == '^':
                        line += self._concat_modes.get(
                            local_options["concat"], ""
                        ) + lookahead
                    else:
                        break

        self._say("Command: " + cmd + "; line: " + line)

        # Handle the types of RiveScript commands.
        if cmd == '!':
            # ! DEFINE
            halves = re.split(RE.equals, line, 2)
            left = re.split(RE.ws, halves[0].strip(), 2)
            value, type, var = '', '', ''
            if len(halves) == 2:
                value = halves[1].strip()
            if len(left) >= 1:
                type = left[0].strip()
                if len(left) >= 2:
                    var = ' '.join(left[1:]).strip()

            # Remove 'fake' line breaks unless this is an array.
            if type != 'array':
                value = re.sub(RE.crlf, '', value)

            # Handle version numbers.
            if type == 'version':
                # Verify we support it.
                try:
                    if float(value) > rs_version:
                        # str() is required: rs_version is a float and
                        # concatenating it raw raised TypeError, which the
                        # old bare except mislabeled as "not a number".
                        self._warn("Unsupported RiveScript version. We only support " + str(rs_version), fname, lineno)
                        return
                except ValueError:
                    self._warn("Error parsing RiveScript version number: not a number", fname, lineno)
                continue

            # All other types of defines require a variable and value name.
            if len(var) == 0:
                self._warn("Undefined variable name", fname, lineno)
                continue
            elif len(value) == 0:
                self._warn("Undefined variable value", fname, lineno)
                continue

            # Handle the rest of the define types.
            if type == 'local':
                # Local file-scoped parser options.
                self._say("\tSet parser option " + var + " = " + value)
                local_options[var] = value
            elif type == 'global':
                # 'Global' variables.
                self._say("\tSet global " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(self._gvars[var])
                    except KeyError:
                        self._warn("Failed to delete missing global variable", fname, lineno)
                else:
                    self._gvars[var] = value

                # Handle flipping debug and depth vars.
                if var == 'debug':
                    if value.lower() == 'true':
                        value = True
                    else:
                        value = False
                    self._debug = value
                elif var == 'depth':
                    try:
                        self._depth = int(value)
                    except ValueError:
                        self._warn("Failed to set 'depth' because the value isn't a number!", fname, lineno)
                elif var == 'strict':
                    if value.lower() == 'true':
                        self._strict = True
                    else:
                        self._strict = False
            elif type == 'var':
                # Bot variables.
                self._say("\tSet bot variable " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(self._bvars[var])
                    except KeyError:
                        self._warn("Failed to delete missing bot variable", fname, lineno)
                else:
                    self._bvars[var] = value
            elif type == 'array':
                # Arrays.
                self._say("\tArray " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(self._arrays[var])
                    except KeyError:
                        self._warn("Failed to delete missing array", fname, lineno)
                    continue

                # Did this have multiple parts?
                parts = value.split("<crlf>")

                # Process each line of array data: pipe-delimited lines
                # split on |, otherwise on whitespace.
                fields = []
                for val in parts:
                    if '|' in val:
                        fields.extend(val.split('|'))
                    else:
                        fields.extend(re.split(RE.ws, val))

                # Convert any remaining '\s' escape codes into spaces.
                # (Must rebuild the list: the old `for f in fields:
                # f = f.replace(...)` loop discarded its own results.)
                fields = [field.replace('\s', ' ') for field in fields]

                self._arrays[var] = fields
            elif type == 'sub':
                # Substitutions.
                self._say("\tSubstitution " + var + " => " + value)

                if value == '<undef>':
                    try:
                        del(self._subs[var])
                    except KeyError:
                        self._warn("Failed to delete missing substitution", fname, lineno)
                else:
                    self._subs[var] = value
                    # Precompile the regexp.
                    self._precompile_substitution("subs", var)
            elif type == 'person':
                # Person Substitutions.
                self._say("\tPerson Substitution " + var + " => " + value)

                if value == '<undef>':
                    try:
                        del(self._person[var])
                    except KeyError:
                        self._warn("Failed to delete missing person substitution", fname, lineno)
                else:
                    self._person[var] = value
                    # Precompile the regexp.
                    self._precompile_substitution("person", var)
            else:
                self._warn("Unknown definition type '" + type + "'", fname, lineno)
        elif cmd == '>':
            # > LABEL
            temp = re.split(RE.ws, line)
            type = temp[0]
            name = ''
            fields = []
            if len(temp) >= 2:
                name = temp[1]
            if len(temp) >= 3:
                fields = temp[2:]

            # Handle the label types.
            if type == 'begin':
                # The BEGIN block is stored as the topic '__begin__'.
                self._say("\tFound the BEGIN block.")
                type = 'topic'
                name = '__begin__'
            if type == 'topic':
                # Starting a new topic.
                self._say("\tSet topic to " + name)
                ontrig = ''
                topic = name

                # Does this topic include or inherit another one?
                mode = ''  # or 'inherits' or 'includes'
                if len(fields) >= 2:
                    for field in fields:
                        if field == 'includes':
                            mode = 'includes'
                        elif field == 'inherits':
                            mode = 'inherits'
                        elif mode != '':
                            # This topic is either inherited or included.
                            if mode == 'includes':
                                if name not in self._includes:
                                    self._includes[name] = {}
                                self._includes[name][field] = 1
                            else:
                                if name not in self._lineage:
                                    self._lineage[name] = {}
                                self._lineage[name][field] = 1
            elif type == 'object':
                # If a field was provided, it should be the programming
                # language.
                lang = None
                if len(fields) > 0:
                    lang = fields[0].lower()

                # Only try to parse a language we support.
                ontrig = ''
                if lang is None:
                    self._warn("Trying to parse unknown programming language", fname, lineno)
                    lang = 'python'  # Assume it's Python.

                # See if we have a defined handler for this language.
                if lang in self._handlers:
                    # We have a handler, so start loading the code.
                    objname = name
                    objlang = lang
                    objbuf = []
                    inobj = True
                else:
                    # We don't have a handler; consume the object's code
                    # but discard it (objname stays empty).
                    objname = ''
                    objlang = ''
                    objbuf = []
                    inobj = True
            else:
                self._warn("Unknown label type '" + type + "'", fname, lineno)
        elif cmd == '<':
            # < LABEL
            type = line

            if type == 'begin' or type == 'topic':
                self._say("\tEnd topic label.")
                topic = 'random'
            elif type == 'object':
                self._say("\tEnd object label.")
                inobj = False
        elif cmd == '+':
            # + TRIGGER
            self._say("\tTrigger pattern: " + line)
            if len(isThat):
                self._initTT('thats', topic, isThat, line)
                self._initTT('syntax', topic, line, 'thats')
                self._syntax['thats'][topic][line]['trigger'] = (fname, lineno)
            else:
                self._initTT('topics', topic, line)
                self._initTT('syntax', topic, line, 'topic')
                self._syntax['topic'][topic][line]['trigger'] = (fname, lineno)
            ontrig = line
            repcnt = 0
            concnt = 0

            # Pre-compile the trigger's regexp if possible.
            self._precompile_regexp(ontrig)
        elif cmd == '-':
            # - REPLY
            if ontrig == '':
                self._warn("Response found before trigger", fname, lineno)
                continue
            self._say("\tResponse: " + line)
            if len(isThat):
                self._thats[topic][isThat][ontrig]['reply'][repcnt] = line
                self._syntax['thats'][topic][ontrig]['reply'][repcnt] = (fname, lineno)
            else:
                self._topics[topic][ontrig]['reply'][repcnt] = line
                self._syntax['topic'][topic][ontrig]['reply'][repcnt] = (fname, lineno)
            repcnt += 1
        elif cmd == '%':
            # % PREVIOUS
            pass  # This was handled above in the lookahead.
        elif cmd == '^':
            # ^ CONTINUE
            pass  # This was handled above in the lookahead.
        elif cmd == '@':
            # @ REDIRECT
            self._say("\tRedirect response to " + line)
            if len(isThat):
                self._thats[topic][isThat][ontrig]['redirect'] = line
                self._syntax['thats'][topic][ontrig]['redirect'] = (fname, lineno)
            else:
                self._topics[topic][ontrig]['redirect'] = line
                self._syntax['topic'][topic][ontrig]['redirect'] = (fname, lineno)
        elif cmd == '*':
            # * CONDITION
            self._say("\tAdding condition: " + line)
            if len(isThat):
                self._thats[topic][isThat][ontrig]['condition'][concnt] = line
                self._syntax['thats'][topic][ontrig]['condition'][concnt] = (fname, lineno)
            else:
                self._topics[topic][ontrig]['condition'][concnt] = line
                self._syntax['topic'][topic][ontrig]['condition'][concnt] = (fname, lineno)
            concnt += 1
        else:
            self._warn("Unrecognized command \"" + cmd + "\"", fname, lineno)
            continue
def check_syntax(self, cmd, line):
"""Syntax check a RiveScript command and line.
Returns a syntax error string on error; None otherwise."""
# Run syntax checks based on the type of command.
if cmd == '!':
# ! Definition
# - Must be formatted like this:
# ! type name = value
# OR
# ! type = value
match = re.match(RE.def_syntax, line)
if not match:
return "Invalid format for !Definition line: must be '! type name = value' OR '! type = value'"
elif cmd == '>':
# > Label
# - The "begin" label must have only one argument ("begin")
# - "topic" labels must be lowercased but can inherit other topics (a-z0-9_\s)
# - "object" labels must follow the same rules as "topic", but don't need to be lowercase
parts = re.split(" ", line, 2)
if parts[0] == "begin" and len(parts) > 1:
return "The 'begin' label takes no additional arguments, should be verbatim '> begin'"
elif parts[0] == "topic":
match = re.match(RE.name_syntax, line)
if match:
return "Topics should be lowercased and contain only numbers and letters"
elif parts[0] == "object":
match = re.match(RE.name_syntax, line)
if match:
return "Objects can only contain numbers and letters"
elif cmd == '+' or cmd == '%' or cmd == '@':
# + Trigger, % Previous, @ Redirect
# This one is strict. The triggers are to be run through the regexp engine,
# therefore it should be acceptable for the regexp engine.
# - Entirely lowercase
# - No symbols except: ( | ) [ ] * _ # @ { } < > =
# - All brackets should be matched
parens = 0 # Open parenthesis
square = 0 # Open square brackets
curly = 0 # Open curly brackets
angle = 0 # Open angled brackets
# Count brackets.
for char in line:
if char == '(':
parens += 1
elif char == ')':
parens -= 1
elif char == '[':
square += 1
elif char == ']':
square -= 1
elif char == '{':
curly += 1
elif char == '}':
curly -= 1
elif char == '<':
angle += 1
elif char == '>':
angle -= 1
# Any mismatches?
if parens != 0:
return "Unmatched parenthesis brackets"
elif square != 0:
return "Unmatched square brackets"
elif curly != 0:
return "Unmatched curly brackets"
elif angle != 0:
return "Unmatched angle brackets"
# In UTF-8 mode, most symbols are allowed.
if self._utf8:
match = re.match(RE.utf8_trig, line)
if match:
return "Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode."
else:
match = re.match(RE.trig_syntax, line)
if match:
return "Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > ="
elif cmd == '-' or cmd == '^' or cmd == '/':
# - Trigger, ^ Continue, / Comment
# These commands take verbatim arguments, so their syntax is loose.
pass
elif cmd == '*':
# * Condition
# Syntax for a conditional is as follows:
# * value symbol value => response
match = re.match(RE.cond_syntax, line)
if not match:
return "Invalid format for !Condition: should be like '* value symbol value => response'"
return None
def deparse(self):
"""Return the in-memory RiveScript document as a Python data structure.
This would be useful for developing a user interface for editing
RiveScript replies without having to edit the RiveScript code
manually."""
# Data to return.
result = {
"begin": {
"global": {},
"var": {},
"sub": {},
"person": {},
"array": {},
"triggers": {},
"that": {},
},
"topic": {},
"that": {},
"inherit": {},
"include": {},
}
# Populate the config fields.
if self._debug:
result["begin"]["global"]["debug"] = self._debug
if self._depth != 50:
result["begin"]["global"]["depth"] = 50
# Definitions
result["begin"]["var"] = self._bvars.copy()
result["begin"]["sub"] = self._subs.copy()
result["begin"]["person"] = self._person.copy()
result["begin"]["array"] = self._arrays.copy()
result["begin"]["global"].update(self._gvars.copy())
# Topic Triggers.
for topic in self._topics:
dest = {} # Where to place the topic info
if topic == "__begin__":
# Begin block.
dest = result["begin"]["triggers"]
else:
# Normal topic.
if topic not in result["topic"]:
result["topic"][topic] = {}
dest = result["topic"][topic]
# Copy the triggers.
for trig, data in self._topics[topic].iteritems():
dest[trig] = self._copy_trigger(trig, data)
# %Previous's.
for topic in self._thats:
dest = {} # Where to place the topic info
if topic == "__begin__":
# Begin block.
dest = result["begin"]["that"]
else:
# Normal topic.
if topic not in result["that"]:
result["that"][topic] = {}
dest = result["that"][topic]
# The "that" structure is backwards: bot reply, then trigger, then info.
for previous, pdata in self._thats[topic].iteritems():
for trig, data in pdata.iteritems():
dest[trig] = self._copy_trigger(trig, data, previous)
# Inherits/Includes.
for topic, data in self._lineage.iteritems():
result["inherit"][topic] = []
for inherit in data:
result["inherit"][topic].append(inherit)
for topic, data in self._includes.iteritems():
result["include"][topic] = []
for include in data:
result["include"][topic].append(include)
return result
def write(self, fh, deparsed=None):
"""Write the currently parsed RiveScript data into a file.
Pass either a file name (string) or a file handle object.
This uses `deparse()` to dump a representation of the loaded data and
writes it to the destination file. If you provide your own data as the
`deparsed` argument, it will use that data instead of calling
`deparse()` itself. This way you can use `deparse()`, edit the data,
and use that to write the RiveScript document (for example, to be used
by a user interface for editing RiveScript without writing the code
directly)."""
# Passed a string instead of a file handle?
if type(fh) is str:
fh = codecs.open(fh, "w", "utf-8")
# Deparse the loaded data.
if deparsed is None:
deparsed = self.deparse()
# Start at the beginning.
fh.write("// Written by rivescript.deparse()\n")
fh.write("! version = 2.0\n\n")
# Variables of all sorts!
for kind in ["global", "var", "sub", "person", "array"]:
if len(deparsed["begin"][kind].keys()) == 0:
continue
for var in sorted(deparsed["begin"][kind].keys()):
# Array types need to be separated by either spaces or pipes.
data = deparsed["begin"][kind][var]
if type(data) not in [str, text_type]:
needs_pipes = False
for test in data:
if " " in test:
needs_pipes = True
break
# Word-wrap the result, target width is 78 chars minus the
# kind, var, and spaces and equals sign.
width = 78 - len(kind) - len(var) - 4
if needs_pipes:
data = self._write_wrapped("|".join(data), sep="|")
else:
data = " ".join(data)
fh.write("! {kind} {var} = {data}\n".format(
kind=kind,
var=var,
data=data,
))
fh.write("\n")
# Begin block.
if len(deparsed["begin"]["triggers"].keys()):
fh.write("> begin\n\n")
self._write_triggers(fh, deparsed["begin"]["triggers"], indent="\t")
fh.write("< begin\n\n")
# The topics. Random first!
topics = ["random"]
topics.extend(sorted(deparsed["topic"].keys()))
done_random = False
for topic in topics:
if topic not in deparsed["topic"]: continue
if topic == "random" and done_random: continue
if topic == "random": done_random = True
tagged = False # Used > topic tag
if topic != "random" or topic in deparsed["include"] or topic in deparsed["inherit"]:
tagged = True
fh.write("> topic " + topic)
if topic in deparsed["inherit"]:
fh.write(" inherits " + " ".join(deparsed["inherit"][topic]))
if topic in deparsed["include"]:
fh.write(" includes " + " ".join(deparsed["include"][topic]))
fh.write("\n\n")
indent = "\t" if tagged else ""
self._write_triggers(fh, deparsed["topic"][topic], indent=indent)
# Any %Previous's?
if topic in deparsed["that"]:
self._write_triggers(fh, deparsed["that"][topic], indent=indent)
if tagged:
fh.write("< topic\n\n")
return True
def _copy_trigger(self, trig, data, previous=None):
"""Make copies of all data below a trigger."""
# Copied data.
dest = {}
if previous:
dest["previous"] = previous
if "redirect" in data and data["redirect"]:
# @Redirect
dest["redirect"] = data["redirect"]
if "condition" in data and len(data["condition"].keys()):
# *Condition
dest["condition"] = []
for i in sorted(data["condition"].keys()):
dest["condition"].append(data["condition"][i])
if "reply" in data and len(data["reply"].keys()):
# -Reply
dest["reply"] = []
for i in sorted(data["reply"].keys()):
dest["reply"].append(data["reply"][i])
return dest
def _write_triggers(self, fh, triggers, indent=""):
"""Write triggers to a file handle."""
for trig in sorted(triggers.keys()):
fh.write(indent + "+ " + self._write_wrapped(trig, indent=indent) + "\n")
d = triggers[trig]
if "previous" in d:
fh.write(indent + "% " + self._write_wrapped(d["previous"], indent=indent) + "\n")
if "condition" in d:
for cond in d["condition"]:
fh.write(indent + "* " + self._write_wrapped(cond, indent=indent) + "\n")
if "redirect" in d:
fh.write(indent + "@ " + self._write_wrapped(d["redirect"], indent=indent) + "\n")
if "reply" in d:
for reply in d["reply"]:
fh.write(indent + "- " + self._write_wrapped(reply, indent=indent) + "\n")
fh.write("\n")
def _write_wrapped(self, line, sep=" ", indent="", width=78):
"""Word-wrap a line of RiveScript code for being written to a file."""
words = line.split(sep)
lines = []
line = ""
buf = []
while len(words):
buf.append(words.pop(0))
line = sep.join(buf)
if len(line) > width:
# Need to word wrap!
words.insert(0, buf.pop()) # Undo
lines.append(sep.join(buf))
buf = []
line = ""
# Straggler?
if line:
lines.append(line)
# Returned output
result = lines.pop(0)
if len(lines):
eol = ""
if sep == " ":
eol = "\s"
for item in lines:
result += eol + "\n" + indent + "^ " + item
return result
def _initTT(self, toplevel, topic, trigger, what=''):
"""Initialize a Topic Tree data structure."""
if toplevel == 'topics':
if topic not in self._topics:
self._topics[topic] = {}
if trigger not in self._topics[topic]:
self._topics[topic][trigger] = {}
self._topics[topic][trigger]['reply'] = {}
self._topics[topic][trigger]['condition'] = {}
self._topics[topic][trigger]['redirect'] = None
elif toplevel == 'thats':
if topic not in self._thats:
self._thats[topic] = {}
if trigger not in self._thats[topic]:
self._thats[topic][trigger] = {}
if what not in self._thats[topic][trigger]:
self._thats[topic][trigger][what] = {}
self._thats[topic][trigger][what]['reply'] = {}
self._thats[topic][trigger][what]['condition'] = {}
self._thats[topic][trigger][what]['redirect'] = {}
elif toplevel == 'syntax':
if what not in self._syntax:
self._syntax[what] = {}
if topic not in self._syntax[what]:
self._syntax[what][topic] = {}
if trigger not in self._syntax[what][topic]:
self._syntax[what][topic][trigger] = {}
self._syntax[what][topic][trigger]['reply'] = {}
self._syntax[what][topic][trigger]['condition'] = {}
self._syntax[what][topic][trigger]['redirect'] = {}
############################################################################
# Sorting Methods #
############################################################################
def sort_replies(self, thats=False):
"""Sort the loaded triggers."""
# This method can sort both triggers and that's.
triglvl = None
sortlvl = None
if thats:
triglvl = self._thats
sortlvl = 'thats'
else:
triglvl = self._topics
sortlvl = 'topics'
# (Re)Initialize the sort cache.
self._sorted[sortlvl] = {}
self._say("Sorting triggers...")
# Loop through all the topics.
for topic in triglvl:
self._say("Analyzing topic " + topic)
# Collect a list of all the triggers we're going to need to worry
# about. If this topic inherits another topic, we need to
# recursively add those to the list.
alltrig = self._topic_triggers(topic, triglvl)
# Keep in mind here that there is a difference between 'includes'
# and 'inherits' -- topics that inherit other topics are able to
# OVERRIDE triggers that appear in the inherited topic. This means
# that if the top topic has a trigger of simply '*', then *NO*
# triggers are capable of matching in ANY inherited topic, because
# even though * has the lowest sorting priority, it has an automatic
# priority over all inherited topics.
#
# The _topic_triggers method takes this into account. All topics
# that inherit other topics will have their triggers prefixed with
# a fictional {inherits} tag, which would start at {inherits=0} and
# increment if the topic tree has other inheriting topics. So we can
# use this tag to make sure topics that inherit things will have
# their triggers always be on top of the stack, from inherits=0 to
# inherits=n.
# Sort these triggers.
running = self._sort_trigger_set(alltrig)
# Save this topic's sorted list.
if sortlvl not in self._sorted:
self._sorted[sortlvl] = {}
self._sorted[sortlvl][topic] = running
# And do it all again for %Previous!
if not thats:
# This will sort the %Previous lines to best match the bot's last reply.
self.sort_replies(True)
# If any of those %Previous's had more than one +trigger for them,
# this will sort all those +triggers to pair back the best human
# interaction.
self._sort_that_triggers()
# Also sort both kinds of substitutions.
self._sort_list('subs', self._subs)
self._sort_list('person', self._person)
def _sort_that_triggers(self):
"""Make a sorted list of triggers that correspond to %Previous groups."""
self._say("Sorting reverse triggers for %Previous groups...")
if "that_trig" not in self._sorted:
self._sorted["that_trig"] = {}
for topic in self._thats:
if topic not in self._sorted["that_trig"]:
self._sorted["that_trig"][topic] = {}
for bottrig in self._thats[topic]:
if bottrig not in self._sorted["that_trig"][topic]:
self._sorted["that_trig"][topic][bottrig] = []
triggers = self._sort_trigger_set(self._thats[topic][bottrig].keys())
self._sorted["that_trig"][topic][bottrig] = triggers
    def _sort_trigger_set(self, triggers):
        """Sort a group of triggers in optimal sorting order.

        Args:
            triggers (iterable): the raw trigger texts (may carry {weight}
                and fictional {inherits} tags).

        Returns:
            list: triggers in match-priority order — highest {weight} first;
            within a weight, lowest {inherits} level first (untagged triggers
            last); within that, atomic triggers before optionals before
            wildcards (_ then # then *), more words first, longer text first.
        """
        # Create a priority map.
        prior = {
            0: []  # Default priority=0
        }
        for trig in triggers:
            # Group each trigger under its {weight} value (default 0).
            match, weight = re.search(RE.weight, trig), 0
            if match:
                weight = int(match.group(1))
            if weight not in prior:
                prior[weight] = []
            prior[weight].append(trig)
        # Keep a running list of sorted triggers for this topic.
        running = []
        # Sort them by priority.
        for p in sorted(prior.keys(), reverse=True):
            self._say("\tSorting triggers with priority " + str(p))
            # So, some of these triggers may include {inherits} tags, if they
            # came from a topic which inherits another topic. Lower inherits
            # values mean higher priority on the stack.
            inherits = -1  # -1 means no {inherits} tag
            highest_inherits = -1  # highest inheritance number seen
            # Loop through and categorize these triggers.
            track = {
                inherits: self._init_sort_track()
            }
            for trig in prior[p]:
                self._say("\t\tLooking at trigger: " + trig)
                # See if it has an inherits tag.
                match = re.search(RE.inherit, trig)
                if match:
                    inherits = int(match.group(1))
                    if inherits > highest_inherits:
                        highest_inherits = inherits
                    self._say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherits))
                    # Strip the tag before categorizing the bare trigger text.
                    trig = re.sub(RE.inherit, "", trig)
                else:
                    inherits = -1
                # If this is the first time we've seen this inheritance level,
                # initialize its track structure.
                if inherits not in track:
                    track[inherits] = self._init_sort_track()
                # Start inspecting the trigger's contents.
                if '_' in trig:
                    # Alphabetic wildcard included.
                    cnt = self._word_count(trig)
                    self._say("\t\t\tHas a _ wildcard with " + str(cnt) + " words.")
                    if cnt > 1:
                        if cnt not in track[inherits]['alpha']:
                            track[inherits]['alpha'][cnt] = []
                        track[inherits]['alpha'][cnt].append(trig)
                    else:
                        track[inherits]['under'].append(trig)
                elif '#' in trig:
                    # Numeric wildcard included.
                    cnt = self._word_count(trig)
                    self._say("\t\t\tHas a # wildcard with " + str(cnt) + " words.")
                    if cnt > 1:
                        if cnt not in track[inherits]['number']:
                            track[inherits]['number'][cnt] = []
                        track[inherits]['number'][cnt].append(trig)
                    else:
                        track[inherits]['pound'].append(trig)
                elif '*' in trig:
                    # Wildcard included.
                    cnt = self._word_count(trig)
                    self._say("\t\t\tHas a * wildcard with " + str(cnt) + " words.")
                    if cnt > 1:
                        if cnt not in track[inherits]['wild']:
                            track[inherits]['wild'][cnt] = []
                        track[inherits]['wild'][cnt].append(trig)
                    else:
                        track[inherits]['star'].append(trig)
                elif '[' in trig:
                    # Optionals included.
                    cnt = self._word_count(trig)
                    self._say("\t\t\tHas optionals and " + str(cnt) + " words.")
                    if cnt not in track[inherits]['option']:
                        track[inherits]['option'][cnt] = []
                    track[inherits]['option'][cnt].append(trig)
                else:
                    # Totally atomic.
                    cnt = self._word_count(trig)
                    self._say("\t\t\tTotally atomic and " + str(cnt) + " words.")
                    if cnt not in track[inherits]['atomic']:
                        track[inherits]['atomic'][cnt] = []
                    track[inherits]['atomic'][cnt].append(trig)
            # Move the no-{inherits} triggers to the bottom of the stack.
            track[highest_inherits + 1] = track[-1]
            del(track[-1])
            # Add this group to the sort list.
            for ip in sorted(track.keys()):
                self._say("ip=" + str(ip))
                for kind in ['atomic', 'option', 'alpha', 'number', 'wild']:
                    for wordcnt in sorted(track[ip][kind], reverse=True):
                        # Triggers with a matching word count should be sorted
                        # by length, descending.
                        running.extend(sorted(track[ip][kind][wordcnt], key=len, reverse=True))
                running.extend(sorted(track[ip]['under'], key=len, reverse=True))
                running.extend(sorted(track[ip]['pound'], key=len, reverse=True))
                running.extend(sorted(track[ip]['star'], key=len, reverse=True))
        return running
def _sort_list(self, name, items):
"""Sort a simple list by number of words and length."""
def by_length(word1, word2):
return len(word2) - len(word1)
# Initialize the list sort buffer.
if "lists" not in self._sorted:
self._sorted["lists"] = {}
self._sorted["lists"][name] = []
# Track by number of words.
track = {}
# Loop through each item.
for item in items:
# Count the words.
cword = self._word_count(item, all=True)
if cword not in track:
track[cword] = []
track[cword].append(item)
# Sort them.
output = []
for count in sorted(track.keys(), reverse=True):
sort = sorted(track[count], key=len, reverse=True)
output.extend(sort)
self._sorted["lists"][name] = output
def _init_sort_track(self):
"""Returns a new dict for keeping track of triggers for sorting."""
return {
'atomic': {}, # Sort by number of whole words
'option': {}, # Sort optionals by number of words
'alpha': {}, # Sort alpha wildcards by no. of words
'number': {}, # Sort number wildcards by no. of words
'wild': {}, # Sort wildcards by no. of words
'pound': [], # Triggers of just #
'under': [], # Triggers of just _
'star': [] # Triggers of just *
}
############################################################################
# Public Configuration Methods #
############################################################################
def set_handler(self, language, obj):
"""Define a custom language handler for RiveScript objects.
language: The lowercased name of the programming language,
e.g. python, javascript, perl
obj: An instance of a class object that provides the following interface:
class MyObjectHandler:
def __init__(self):
pass
def load(self, name, code):
# name = the name of the object from the RiveScript code
# code = the source code of the object
def call(self, rs, name, fields):
# rs = the current RiveScript interpreter object
# name = the name of the object being called
# fields = array of arguments passed to the object
return reply
Pass in a None value for the object to delete an existing handler (for example,
to prevent Python code from being able to be run by default).
Look in the `eg` folder of the rivescript-python distribution for an example
script that sets up a JavaScript language handler."""
# Allow them to delete a handler too.
if obj is None:
if language in self._handlers:
del self._handlers[language]
else:
self._handlers[language] = obj
def set_subroutine(self, name, code):
"""Define a Python object from your program.
This is equivalent to having an object defined in the RiveScript code, except
your Python code is defining it instead. `name` is the name of the object, and
`code` is a Python function (a `def`) that accepts rs,args as its parameters.
This method is only available if there is a Python handler set up (which there
is by default, unless you've called set_handler("python", None))."""
# Do we have a Python handler?
if 'python' in self._handlers:
self._handlers['python']._objects[name] = code
self._objlangs[name] = 'python'
else:
self._warn("Can't set_subroutine: no Python object handler!")
def set_global(self, name, value):
"""Set a global variable.
Equivalent to `! global` in RiveScript code. Set to None to delete."""
if value is None:
# Unset the variable.
if name in self._gvars:
del self._gvars[name]
self._gvars[name] = value
def set_variable(self, name, value):
"""Set a bot variable.
Equivalent to `! var` in RiveScript code. Set to None to delete."""
if value is None:
# Unset the variable.
if name in self._bvars:
del self._bvars[name]
self._bvars[name] = value
def set_substitution(self, what, rep):
"""Set a substitution.
Equivalent to `! sub` in RiveScript code. Set to None to delete."""
if rep is None:
# Unset the variable.
if what in self._subs:
del self._subs[what]
self._subs[what] = rep
def set_person(self, what, rep):
"""Set a person substitution.
Equivalent to `! person` in RiveScript code. Set to None to delete."""
if rep is None:
# Unset the variable.
if what in self._person:
del self._person[what]
self._person[what] = rep
def set_uservar(self, user, name, value):
"""Set a variable for a user."""
if user not in self._users:
self._users[user] = {"topic": "random"}
self._users[user][name] = value
def get_uservar(self, user, name):
"""Get a variable about a user.
If the user has no data at all, returns None. If the user doesn't have a value
set for the variable you want, returns the string 'undefined'."""
if user in self._users:
if name in self._users[user]:
return self._users[user][name]
else:
return "undefined"
else:
return None
def get_uservars(self, user=None):
"""Get all variables about a user (or all users).
If no username is passed, returns the entire user database structure. Otherwise,
only returns the variables for the given user, or None if none exist."""
if user is None:
# All the users!
return self._users
elif user in self._users:
# Just this one!
return self._users[user]
else:
# No info.
return None
def clear_uservars(self, user=None):
"""Delete all variables about a user (or all users).
If no username is passed, deletes all variables about all users. Otherwise, only
deletes all variables for the given user."""
if user is None:
# All the users!
self._users = {}
elif user in self._users:
# Just this one.
self._users[user] = {}
def freeze_uservars(self, user):
"""Freeze the variable state for a user.
This will clone and preserve a user's entire variable state, so that it can be
restored later with `thaw_uservars`."""
if user in self._users:
# Clone the user's data.
self._freeze[user] = copy.deepcopy(self._users[user])
else:
self._warn("Can't freeze vars for user " + user + ": not found!")
def thaw_uservars(self, user, action="thaw"):
"""Thaw a user's frozen variables.
The `action` can be one of the following options:
discard: Don't restore the user's variables, just delete the frozen copy.
keep: Keep the frozen copy after restoring the variables.
thaw: Restore the variables, then delete the frozen copy (default)."""
if user in self._freeze:
# What are we doing?
if action == "thaw":
# Thawing them out.
self.clear_uservars(user)
self._users[user] = copy.deepcopy(self._freeze[user])
del self._freeze[user]
elif action == "discard":
# Just discard the frozen copy.
del self._freeze[user]
elif action == "keep":
# Keep the frozen copy afterward.
self.clear_uservars(user)
self._users[user] = copy.deepcopy(self._freeze[user])
else:
self._warn("Unsupported thaw action")
else:
self._warn("Can't thaw vars for user " + user + ": not found!")
def last_match(self, user):
"""Get the last trigger matched for the user.
This will return the raw trigger text that the user's last message matched. If
there was no match, this will return None."""
return self.get_uservar(user, "__lastmatch__")
def trigger_info(self, trigger=None, dump=False):
"""Get information about a trigger.
Pass in a raw trigger to find out what file name and line number it appeared at.
This is useful for e.g. tracking down the location of the trigger last matched
by the user via last_match(). Returns a list of matching triggers, containing
their topics, filenames and line numbers. Returns None if there weren't
any matches found.
The keys in the trigger info is as follows:
* category: Either 'topic' (for normal) or 'thats' (for %Previous triggers)
* topic: The topic name
* trigger: The raw trigger text
* filename: The filename the trigger was found in.
* lineno: The line number the trigger was found on.
Pass in a true value for `dump`, and the entire syntax tracking
tree is returned."""
if dump:
return self._syntax
response = None
# Search the syntax tree for the trigger.
for category in self._syntax:
for topic in self._syntax[category]:
if trigger in self._syntax[category][topic]:
# We got a match!
if response is None:
response = list()
fname, lineno = self._syntax[category][topic][trigger]['trigger']
response.append(dict(
category=category,
topic=topic,
trigger=trigger,
filename=fname,
line=lineno,
))
return response
def current_user(self):
"""Retrieve the user ID of the current user talking to your bot.
This is mostly useful inside of a Python object macro to get the user ID of the
person who caused the object macro to be invoked (i.e. to set a variable for
that user from within the object).
This will return None if used outside of the context of getting a reply (i.e.
the value is unset at the end of the `reply()` method)."""
if self._current_user is None:
# They're doing it wrong.
self._warn("current_user() is meant to be used from within a Python object macro!")
return self._current_user
############################################################################
# Reply Fetching Methods #
############################################################################
    def reply(self, user, msg, errors_as_replies=True):
        """Fetch a reply from the RiveScript brain.

        Args:
            user (str): unique ID of the end user sending the message; used
                to key their variables and reply history.
            msg (str): the user's message text.
            errors_as_replies (bool): when True (default), interpreter errors
                raised while fetching the reply are returned as the reply
                text; when False the RiveScriptError is re-raised.

        Returns:
            str: the bot's reply.
        """
        self._say("Get reply to [" + user + "] " + msg)

        # Store the current user in case an object macro needs it.
        self._current_user = user

        # Format their message.
        msg = self._format_message(msg)

        reply = ''

        # If the BEGIN block exists, consult it first.
        if "__begin__" in self._topics:
            begin = self._getreply(user, 'request', context='begin', ignore_object_errors=errors_as_replies)

            # Okay to continue? (The BEGIN reply grants this via an {ok} tag,
            # which is spliced out and replaced with the real reply.)
            if '{ok}' in begin:
                try:
                    reply = self._getreply(user, msg, ignore_object_errors=errors_as_replies)
                except RiveScriptError as e:
                    if not errors_as_replies:
                        raise
                    reply = e.error_message
                begin = begin.replace('{ok}', reply)

            reply = begin

            # Run more tag substitutions.
            reply = self._process_tags(user, msg, reply, ignore_object_errors=errors_as_replies)
        else:
            # Just continue then.
            try:
                reply = self._getreply(user, msg, ignore_object_errors=errors_as_replies)
            except RiveScriptError as e:
                if not errors_as_replies:
                    raise
                reply = e.error_message

        # Save their reply history. (Newest entry first; the [:8] slice caps
        # the history at 9 entries total.)
        oldInput = self._users[user]['__history__']['input'][:8]
        self._users[user]['__history__']['input'] = [msg]
        self._users[user]['__history__']['input'].extend(oldInput)
        oldReply = self._users[user]['__history__']['reply'][:8]
        self._users[user]['__history__']['reply'] = [reply]
        self._users[user]['__history__']['reply'].extend(oldReply)

        # Unset the current user.
        self._current_user = None

        return reply
def _format_message(self, msg, botreply=False):
"""Format a user's message for safe processing."""
# Make sure the string is Unicode for Python 2.
if sys.version_info[0] < 3 and isinstance(msg, str):
msg = msg.decode('utf8')
# Lowercase it.
msg = msg.lower()
# Run substitutions on it.
msg = self._substitute(msg, "subs")
# In UTF-8 mode, only strip metacharacters and HTML brackets
# (to protect from obvious XSS attacks).
if self._utf8:
msg = re.sub(RE.utf8_meta, '', msg)
msg = re.sub(self.unicode_punctuation, '', msg)
# For the bot's reply, also strip common punctuation.
if botreply:
msg = re.sub(RE.utf8_punct, '', msg)
else:
# For everything else, strip all non-alphanumerics.
msg = self._strip_nasties(msg)
return msg
    def _getreply(self, user, msg, context='normal', step=0, ignore_object_errors=True):
        """Internal: fetch a reply for the user (the guts of reply()).

        Args:
            user (str): the user ID.
            msg (str): the already-formatted message text.
            context (str): 'normal', or 'begin' when evaluating the BEGIN block.
            step (int): recursion depth counter, incremented on @Redirects.
            ignore_object_errors (bool): passed through to tag processing.

        Returns:
            str: the chosen reply text.

        Raises:
            RepliesNotSortedError: sort_replies() was never called.
            DeepRecursionError: redirects nested deeper than self._depth.
            NoDefaultRandomTopicError: the 'random' topic is missing.
            NoMatchError: no trigger matched the message.
            NoReplyError: a trigger matched but produced an empty reply.
        """
        # Needed to sort replies?
        if 'topics' not in self._sorted:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")

        # Initialize the user's profile?
        if user not in self._users:
            self._users[user] = {'topic': 'random'}

        # Collect data on the user.
        topic = self._users[user]['topic']
        stars = []
        thatstars = []  # For %Previous's.
        reply = ''

        # Avoid letting them fall into a missing topic.
        if topic not in self._topics:
            self._warn("User " + user + " was in an empty topic named '" + topic + "'")
            topic = self._users[user]['topic'] = 'random'

        # Avoid deep recursion.
        if step > self._depth:
            raise DeepRecursionError

        # Are we in the BEGIN statement?
        if context == 'begin':
            topic = '__begin__'

        # Initialize this user's history.
        if '__history__' not in self._users[user]:
            self._users[user]['__history__'] = {
                'input': [
                    'undefined', 'undefined', 'undefined', 'undefined',
                    'undefined', 'undefined', 'undefined', 'undefined',
                    'undefined'
                ],
                'reply': [
                    'undefined', 'undefined', 'undefined', 'undefined',
                    'undefined', 'undefined', 'undefined', 'undefined',
                    'undefined'
                ]
            }

        # More topic sanity checking.
        if topic not in self._topics:
            # This was handled before, which would mean topic=random and
            # it doesn't exist. Serious issue!
            raise NoDefaultRandomTopicError("no default topic 'random' was found")

        # Create a pointer for the matched data when we find it.
        matched = None
        matchedTrigger = None
        foundMatch = False

        # See if there were any %Previous's in this topic, or any topic related
        # to it. This should only be done the first time -- not during a
        # recursive redirection. This is because in a redirection, "lastreply"
        # is still gonna be the same as it was the first time, causing an
        # infinite loop!
        if step == 0:
            allTopics = [topic]
            if topic in self._includes or topic in self._lineage:
                # Get all the topics!
                allTopics = self._get_topic_tree(topic)

            # Scan them all!
            for top in allTopics:
                self._say("Checking topic " + top + " for any %Previous's.")
                if top in self._sorted["thats"]:
                    self._say("There is a %Previous in this topic!")

                    # Do we have history yet?
                    lastReply = self._users[user]["__history__"]["reply"][0]

                    # Format the bot's last reply the same way as the human's.
                    lastReply = self._format_message(lastReply, botreply=True)
                    self._say("lastReply: " + lastReply)

                    # See if it's a match.
                    for trig in self._sorted["thats"][top]:
                        botside = self._reply_regexp(user, trig)
                        self._say("Try to match lastReply (" + lastReply + ") to " + trig)

                        # Match??
                        match = re.match(botside, lastReply)
                        if match:
                            # Huzzah! See if OUR message is right too.
                            self._say("Bot side matched!")
                            thatstars = match.groups()
                            for subtrig in self._sorted["that_trig"][top][trig]:
                                humanside = self._reply_regexp(user, subtrig)
                                self._say("Now try to match " + msg + " to " + subtrig)

                                match = re.match(humanside, msg)
                                if match:
                                    self._say("Found a match!")
                                    matched = self._thats[top][trig][subtrig]
                                    matchedTrigger = subtrig
                                    foundMatch = True

                                    # Get the stars!
                                    stars = match.groups()
                                    break

                            # Break if we found a match.
                            if foundMatch:
                                break

                # Break if we found a match.
                if foundMatch:
                    break

        # Search their topic for a match to their trigger.
        if not foundMatch:
            for trig in self._sorted["topics"][topic]:
                # Process the triggers.
                regexp = self._reply_regexp(user, trig)
                self._say("Try to match %r against %r (%r)" % (msg, trig, regexp))

                # Python's regular expression engine is slow. Try a verbatim
                # match if this is an atomic trigger.
                isAtomic = self._is_atomic(trig)
                isMatch = False
                if isAtomic:
                    # Only look for exact matches, no sense running atomic triggers
                    # through the regexp engine.
                    if msg == trig:
                        isMatch = True
                else:
                    # Non-atomic triggers always need the regexp.
                    match = re.match(regexp, msg)
                    if match:
                        # The regexp matched!
                        isMatch = True

                        # Collect the stars.
                        stars = match.groups()

                if isMatch:
                    self._say("Found a match!")

                    # We found a match, but what if the trigger we've matched
                    # doesn't belong to our topic? Find it!
                    if trig not in self._topics[topic]:
                        # We have to find it.
                        matched = self._find_trigger_by_inheritance(topic, trig)
                    else:
                        # We do have it!
                        matched = self._topics[topic][trig]

                    foundMatch = True
                    matchedTrigger = trig
                    break

        # Store what trigger they matched on. If their matched trigger is None,
        # this will be too, which is great.
        self._users[user]["__lastmatch__"] = matchedTrigger

        if matched:
            # Single-pass loop: 'break' acts as a jump to the end of this
            # block once a reply source (redirect/condition/reply) is chosen.
            for nil in [1]:
                # See if there are any hard redirects.
                if matched["redirect"]:
                    self._say("Redirecting us to " + matched["redirect"])
                    redirect = self._process_tags(user, msg, matched["redirect"], stars, thatstars, step,
                                                  ignore_object_errors)
                    self._say("Pretend user said: " + redirect)
                    reply = self._getreply(user, redirect, step=(step + 1), ignore_object_errors=ignore_object_errors)
                    break

                # Check the conditionals.
                for con in sorted(matched["condition"]):
                    halves = re.split(RE.cond_split, matched["condition"][con])
                    if halves and len(halves) == 2:
                        condition = re.match(RE.cond_parse, halves[0])
                        if condition:
                            left = condition.group(1)
                            eq = condition.group(2)
                            right = condition.group(3)
                            potreply = halves[1]
                            self._say("Left: " + left + "; eq: " + eq + "; right: " + right + " => " + potreply)

                            # Process tags all around.
                            left = self._process_tags(user, msg, left, stars, thatstars, step, ignore_object_errors)
                            right = self._process_tags(user, msg, right, stars, thatstars, step, ignore_object_errors)

                            # Defaults?
                            if len(left) == 0:
                                left = 'undefined'
                            if len(right) == 0:
                                right = 'undefined'

                            self._say("Check if " + left + " " + eq + " " + right)

                            # Validate it.
                            passed = False
                            if eq == 'eq' or eq == '==':
                                if left == right:
                                    passed = True
                            elif eq == 'ne' or eq == '!=' or eq == '<>':
                                if left != right:
                                    passed = True
                            else:
                                # Gasp, dealing with numbers here...
                                try:
                                    left, right = int(left), int(right)
                                    if eq == '<':
                                        if left < right:
                                            passed = True
                                    elif eq == '<=':
                                        if left <= right:
                                            passed = True
                                    elif eq == '>':
                                        if left > right:
                                            passed = True
                                    elif eq == '>=':
                                        if left >= right:
                                            passed = True
                                except:
                                    self._warn("Failed to evaluate numeric condition!")

                            # How truthful?
                            if passed:
                                reply = potreply
                                break

                # Have our reply yet?
                if len(reply) > 0:
                    break

                # Process weights in the replies. Each reply is entered into
                # the bucket {weight} times, so higher weights win more often.
                bucket = []
                for rep in sorted(matched["reply"]):
                    text = matched["reply"][rep]
                    weight = 1
                    match = re.match(RE.weight, text)
                    if match:
                        weight = int(match.group(1))
                        if weight <= 0:
                            self._warn("Can't have a weight <= 0!")
                            weight = 1
                    for i in range(0, weight):
                        bucket.append(text)

                # Get a random reply.
                reply = random.choice(bucket)
                break

        # Still no reply?
        if not foundMatch:
            raise NoMatchError
        elif len(reply) == 0:
            raise NoReplyError

        self._say("Reply: " + reply)

        # Process tags for the BEGIN block.
        if context == "begin":
            # BEGIN blocks can only set topics and uservars. The rest happen
            # later!
            reTopic = re.findall(RE.topic_tag, reply)
            for match in reTopic:
                self._say("Setting user's topic to " + match)
                self._users[user]["topic"] = match
                reply = reply.replace('{{topic={match}}}'.format(match=match), '')

            reSet = re.findall(RE.set_tag, reply)
            for match in reSet:
                self._say("Set uservar " + str(match[0]) + "=" + str(match[1]))
                self._users[user][match[0]] = match[1]
                reply = reply.replace('<set {key}={value}>'.format(key=match[0], value=match[1]), '')
        else:
            # Process more tags if not in BEGIN.
            reply = self._process_tags(user, msg, reply, stars, thatstars, step, ignore_object_errors)

        return reply
    def _substitute(self, msg, kind):
        """Run a kind of substitution on a message.

        :param str msg: The message to run substitutions against.
        :param str kind: Which substitution map to use: ``"subs"`` (message
            substitutions) or anything else for person substitutions.
        :return str: The substituted message, stripped of surrounding
            whitespace.
        :raises RepliesNotSortedError: if sort_replies() wasn't called first
            (the sorted substitution lists are required here).

        Substituted text is first swapped for \\x00-delimited numeric
        placeholders so that the *result* of one substitution can't be
        re-matched by a later pattern; the placeholders are expanded back at
        the end.
        """
        # Safety checking.
        if 'lists' not in self._sorted:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")
        if kind not in self._sorted["lists"]:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")
        # Get the substitution map.
        subs = None
        if kind == 'subs':
            subs = self._subs
        else:
            subs = self._person
        # Make placeholders each time we substitute something.
        ph = []
        i = 0
        # Patterns are iterated in pre-sorted order (longest first) so that
        # longer substitutions win over their prefixes.
        for pattern in self._sorted["lists"][kind]:
            result = subs[pattern]
            # Make a placeholder.
            ph.append(result)
            placeholder = "\x00%d\x00" % i
            i += 1
            # The four pre-compiled forms cover: whole message, start-of,
            # middle-of and end-of message occurrences of the pattern.
            cache = self._regexc[kind][pattern]
            msg = re.sub(cache["sub1"], placeholder, msg)
            msg = re.sub(cache["sub2"], placeholder + r'\1', msg)
            msg = re.sub(cache["sub3"], r'\1' + placeholder + r'\2', msg)
            msg = re.sub(cache["sub4"], r'\1' + placeholder, msg)
        # Expand the placeholders back into their substitution results.
        placeholders = re.findall(RE.placeholder, msg)
        for match in placeholders:
            i = int(match)
            result = ph[i]
            msg = msg.replace('\x00' + match + '\x00', result)
        # Strip & return.
        return msg.strip()
def _precompile_substitution(self, kind, pattern):
"""Pre-compile the regexp for a substitution pattern.
This will speed up the substitutions that happen at the beginning of
the reply fetching process. With the default brain, this took the
time for _substitute down from 0.08s to 0.02s"""
if pattern not in self._regexc[kind]:
qm = re.escape(pattern)
self._regexc[kind][pattern] = {
"qm": qm,
"sub1": re.compile(r'^' + qm + r'$'),
"sub2": re.compile(r'^' + qm + r'(\W+)'),
"sub3": re.compile(r'(\W+)' + qm + r'(\W+)'),
"sub4": re.compile(r'(\W+)' + qm + r'$'),
}
def _do_expand_array(self, array_name, depth=0):
""" Do recurrent array expansion, returning a set of keywords.
Exception is thrown when there are cyclical dependencies between
arrays or if the @array name references an undefined array."""
if depth > self._depth:
raise Exception("deep recursion detected")
if not array_name in self._arrays:
raise Exception("array '%s' not defined" % (array_name))
ret = list(self._arrays[array_name])
for array in self._arrays[array_name]:
if array.startswith('@'):
ret.remove(array)
expanded = self._do_expand_array(array[1:], depth+1)
ret.extend(expanded)
return set(ret)
def _expand_array(self, array_name):
""" Expand variables and return a set of keywords.
Warning is issued when exceptions occur."""
ret = self._arrays[array_name] if array_name in self._arrays else []
try:
ret = self._do_expand_array(array_name)
except Exception as e:
self._warn("Error expanding array '%s': %s" % (array_name, str(e)))
return ret
    def _reply_regexp(self, user, regexp):
        """Prepare a trigger for the regular expression engine.

        :param str user: The user ID (used to resolve <get> / <input> /
            <reply> tags against that user's data; may be None for triggers
            known not to contain those tags).
        :param str regexp: The raw trigger text.
        :return: A compiled, fully-anchored regular expression object.

        The replacement steps below are order-sensitive: wildcards must be
        converted before optionals (so stars inside optionals can be made
        non-capturing), and dynamic tags are filled in last.
        """
        if regexp in self._regexc["trigger"]:
            # Already compiled this one!
            return self._regexc["trigger"][regexp]
        # If the trigger is simply '*' then the * there needs to become (.*?)
        # to match the blank string too.
        regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)
        # Simple replacements.
        regexp = regexp.replace('*', '(.+?)')  # Convert * into (.+?)
        regexp = regexp.replace('#', '(\d+?)')  # Convert # into (\d+?)
        regexp = regexp.replace('_', '(\w+?)')  # Convert _ into (\w+?)
        regexp = re.sub(r'\{weight=\d+\}', '', regexp)  # Remove {weight} tags
        regexp = regexp.replace('<zerowidthstar>', r'(.*?)')
        # Optionals: [foo|bar] becomes a non-capturing alternation that also
        # matches nothing but still consumes surrounding whitespace.
        optionals = re.findall(RE.optionals, regexp)
        for match in optionals:
            parts = match.split("|")
            new = []
            for p in parts:
                p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p)
                new.append(p)
            # If this optional had a star or anything in it, make it
            # non-matching.
            pipes = '|'.join(new)
            pipes = re.sub(re.escape('(.+?)'), '(?:.+?)', pipes)
            pipes = re.sub(re.escape('(\d+?)'), '(?:\d+?)', pipes)
            pipes = re.sub(re.escape('([A-Za-z]+?)'), '(?:[A-Za-z]+?)', pipes)
            regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
                            '(?:' + pipes + r'|(?:\\s|\\b))', regexp)
        # _ wildcards can't match numbers!
        regexp = re.sub(RE.literal_w, r'[A-Za-z]', regexp)
        # Filter in arrays: @name becomes an alternation of the array's
        # (recursively expanded) keywords, or the empty string if undefined.
        arrays = re.findall(RE.array, regexp)
        for array in arrays:
            rep = ''
            if array in self._arrays:
                rep = r'(?:' + '|'.join(self._expand_array(array)) + ')'
            regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)
        # Filter in bot variables.
        bvars = re.findall(RE.bot_tag, regexp)
        for var in bvars:
            rep = ''
            if var in self._bvars:
                rep = self._strip_nasties(self._bvars[var])
            regexp = regexp.replace('<bot {var}>'.format(var=var), rep)
        # Filter in user variables.
        uvars = re.findall(RE.get_tag, regexp)
        for var in uvars:
            rep = ''
            if var in self._users[user]:
                rep = self._strip_nasties(self._users[user][var])
            regexp = regexp.replace('<get {var}>'.format(var=var), rep)
        # Filter in <input> and <reply> tags. This is a slow process, so only
        # do it if we have to!
        if '<input' in regexp or '<reply' in regexp:
            for type in ['input', 'reply']:
                tags = re.findall(r'<' + type + r'([0-9])>', regexp)
                for index in tags:
                    rep = self._format_message(self._users[user]['__history__'][type][int(index) - 1])
                    regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
                regexp = regexp.replace('<{type}>'.format(type=type),
                                        self._format_message(self._users[user]['__history__'][type][0]))
        # TODO: the Perl version doesn't do just <input>/<reply> in trigs!
        return re.compile(r'^' + regexp + r'$')
def _precompile_regexp(self, trigger):
"""Precompile the regex for most triggers.
If the trigger is non-atomic, and doesn't include dynamic tags like
`<bot>`, `<get>`, `<input>/<reply>` or arrays, it can be precompiled
and save time when matching."""
if self._is_atomic(trigger):
return # Don't need a regexp for atomic triggers.
# Check for dynamic tags.
for tag in ["@", "<bot", "<get", "<input", "<reply"]:
if tag in trigger:
return # Can't precompile this trigger.
self._regexc["trigger"][trigger] = self._reply_regexp(None, trigger)
    def _process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True):
        """Post process tags in a message.

        :param str user: The user ID whose variables/history the tags refer to.
        :param str msg: The user's message (currently unused directly; kept
            for API symmetry with the callers).
        :param str reply: The reply template containing tags to expand.
        :param list st: Trigger wildcard captures (``<star>`` values).
        :param list bst: %Previous wildcard captures (``<botstar>`` values).
        :param int depth: Recursion depth, forwarded to inline {@...}
            redirects.
        :param bool ignore_object_errors: When True, failed <call> objects
            insert an error string instead of raising ObjectError.
        :return str: The reply with all tags expanded.

        NOTE: ``st``/``bst`` use mutable list defaults but are never mutated
        here (they're copied via extend), so the usual shared-default pitfall
        does not apply.
        """
        # Index 0 is a dummy so <star1> maps to stars[1], matching the
        # 1-based tag numbering.
        stars = ['']
        stars.extend(st)
        botstars = ['']
        botstars.extend(bst)
        if len(stars) == 1:
            stars.append("undefined")
        if len(botstars) == 1:
            botstars.append("undefined")
        # Tag shortcuts.
        reply = reply.replace('<person>', '{person}<star>{/person}')
        reply = reply.replace('<@>', '{@<star>}')
        reply = reply.replace('<formal>', '{formal}<star>{/formal}')
        reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}')
        reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}')
        reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}')
        # Weight and <star> tags.
        reply = re.sub(RE.weight, '', reply)  # Leftover {weight}s
        if len(stars) > 0:
            reply = reply.replace('<star>', stars[1])
            reStars = re.findall(RE.star_tags, reply)
            for match in reStars:
                if int(match) < len(stars):
                    reply = reply.replace('<star{match}>'.format(match=match), stars[int(match)])
        if len(botstars) > 0:
            reply = reply.replace('<botstar>', botstars[1])
            reStars = re.findall(RE.botstars, reply)
            for match in reStars:
                if int(match) < len(botstars):
                    reply = reply.replace('<botstar{match}>'.format(match=match), botstars[int(match)])
        # <input> and <reply>
        reply = reply.replace('<input>', self._users[user]['__history__']['input'][0])
        reply = reply.replace('<reply>', self._users[user]['__history__']['reply'][0])
        reInput = re.findall(RE.input_tags, reply)
        for match in reInput:
            reply = reply.replace('<input{match}>'.format(match=match),
                                  self._users[user]['__history__']['input'][int(match) - 1])
        reReply = re.findall(RE.reply_tags, reply)
        for match in reReply:
            reply = reply.replace('<reply{match}>'.format(match=match),
                                  self._users[user]['__history__']['reply'][int(match) - 1])
        # <id> and escape codes.
        reply = reply.replace('<id>', user)
        reply = reply.replace('\\s', ' ')
        reply = reply.replace('\\n', "\n")
        reply = reply.replace('\\#', '#')
        # Random bits.
        reRandom = re.findall(RE.random_tags, reply)
        for match in reRandom:
            output = ''
            # Pipe-delimited alternatives take precedence over
            # space-delimited words.
            if '|' in match:
                output = random.choice(match.split('|'))
            else:
                output = random.choice(match.split(' '))
            reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output)
        # Person Substitutions and String Formatting.
        for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']:
            matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply)
            for match in matcher:
                output = None
                if item == 'person':
                    # Person substitutions.
                    output = self._substitute(match, "person")
                else:
                    output = self._string_format(match, item)
                reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output)
        # Handle all variable-related tags with an iterative regex approach,
        # to allow for nesting of tags in arbitrary ways (think <set a=<get b>>)
        # Dummy out the <call> tags first, because we don't handle them right
        # here.
        reply = reply.replace("<call>", "{__call__}")
        reply = reply.replace("</call>", "{/__call__}")
        while True:
            # This regex will match a <tag> which contains no other tag inside
            # it, i.e. in the case of <set a=<get b>> it will match <get b> but
            # not the <set> tag, on the first pass. The second pass will get the
            # <set> tag, and so on.
            match = re.search(RE.tag_search, reply)
            if not match: break  # No remaining tags!
            match = match.group(1)
            parts = match.split(" ", 1)
            tag = parts[0].lower()
            data = parts[1] if len(parts) > 1 else ""
            insert = ""  # Result of the tag evaluation
            # Handle the tags.
            if tag == "bot" or tag == "env":
                # <bot> and <env> tags are similar.
                target = self._bvars if tag == "bot" else self._gvars
                if "=" in data:
                    # Setting a bot/env variable.
                    parts = data.split("=")
                    self._say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1]))
                    target[parts[0]] = parts[1]
                else:
                    # Getting a bot/env variable.
                    insert = target.get(data, "undefined")
            elif tag == "set":
                # <set> user vars.
                parts = data.split("=")
                self._say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1]))
                self._users[user][parts[0]] = parts[1]
            elif tag in ["add", "sub", "mult", "div"]:
                # Math operator tags.
                parts = data.split("=")
                var = parts[0]
                value = parts[1]
                # Sanity check the value.
                try:
                    value = int(value)
                    if var not in self._users[user]:
                        # Initialize it.
                        self._users[user][var] = 0
                except:
                    insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value)
                # Attempt the operation.
                # NOTE(review): this runs even when the sanity check above
                # failed; the second except then overwrites `insert` — looks
                # intentional (best-effort), but verify against upstream.
                try:
                    orig = int(self._users[user][var])
                    new = 0
                    if tag == "add":
                        new = orig + value
                    elif tag == "sub":
                        new = orig - value
                    elif tag == "mult":
                        new = orig * value
                    elif tag == "div":
                        new = orig / value
                    self._users[user][var] = new
                except:
                    insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, self._users[user][var])
            elif tag == "get":
                insert = self._users[user].get(data, "undefined")
            else:
                # Unrecognized tag.
                insert = "\x00{}\x01".format(match)
            reply = reply.replace("<{}>".format(match), insert)
        # Restore unrecognized tags.
        reply = reply.replace("\x00", "<").replace("\x01", ">")
        # Streaming code. DEPRECATED!
        if '{!' in reply:
            self._warn("Use of the {!...} tag is deprecated and not supported here.")
        # Topic setter.
        reTopic = re.findall(RE.topic_tag, reply)
        for match in reTopic:
            self._say("Setting user's topic to " + match)
            self._users[user]["topic"] = match
            reply = reply.replace('{{topic={match}}}'.format(match=match), '')
        # Inline redirecter.
        reRedir = re.findall(RE.redir_tag, reply)
        for match in reRedir:
            self._say("Redirect to " + match)
            at = match.strip()
            subreply = self._getreply(user, at, step=(depth + 1))
            reply = reply.replace('{{@{match}}}'.format(match=match), subreply)
        # Object caller.
        reply = reply.replace("{__call__}", "<call>")
        reply = reply.replace("{/__call__}", "</call>")
        reCall = re.findall(r'<call>(.+?)</call>', reply)
        for match in reCall:
            parts = re.split(RE.ws, match)
            output = ''
            obj = parts[0]
            args = []
            if len(parts) > 1:
                args = parts[1:]
            # Do we know this object?
            if obj in self._objlangs:
                # We do, but do we have a handler for that language?
                lang = self._objlangs[obj]
                if lang in self._handlers:
                    # We do.
                    try:
                        output = self._handlers[lang].call(self, obj, user, args)
                    except python.PythonObjectError as e:
                        self._warn(str(e))
                        if not ignore_object_errors:
                            raise ObjectError(str(e))
                        output = RS_ERR_OBJECT
                else:
                    if not ignore_object_errors:
                        raise ObjectError(RS_ERR_OBJECT_HANDLER)
                    output = RS_ERR_OBJECT_HANDLER
            else:
                if not ignore_object_errors:
                    raise ObjectError(RS_ERR_OBJECT_MISSING)
                output = RS_ERR_OBJECT_MISSING
            reply = reply.replace('<call>{match}</call>'.format(match=match), output)
        return reply
def _string_format(self, msg, method):
"""Format a string (upper, lower, formal, sentence)."""
if method == "uppercase":
return msg.upper()
elif method == "lowercase":
return msg.lower()
elif method == "sentence":
return msg.capitalize()
elif method == "formal":
return string.capwords(msg)
############################################################################
# Topic inheritance Utility Methods #
############################################################################
    def _topic_triggers(self, topic, triglvl, depth=0, inheritance=0, inherited=False):
        """Recursively scan a topic and return a list of all triggers.

        :param str topic: The topic to scan.
        :param dict triglvl: Reference to ``self._topics`` or ``self._thats``.
        :param int depth: Recursion depth; ++'s with each recursive call.
        :param int inheritance: Inheritance level; ++'s only when crossing an
            ``inherits`` edge.
        :param bool inherited: True on recursive calls made from a topic that
            inherits others; forces the {inherits} priority tag onto triggers.
        :return list: All trigger texts, possibly prefixed with
            ``{inherits=N}`` markers used later for sort priority.
        """
        # Break if we're in too deep.
        # NOTE(review): this only warns and keeps recursing — presumably the
        # sorted-set dedup downstream keeps this safe; confirm vs. upstream.
        if depth > self._depth:
            self._warn("Deep recursion while scanning topic inheritance")
        # Important info about the depth vs inheritance params to this function:
        # depth increments by 1 each time this function recursively calls itself.
        # inheritance increments by 1 only when this topic inherits another
        # topic.
        #
        # This way, '> topic alpha includes beta inherits gamma' will have this
        # effect:
        # alpha and beta's triggers are combined together into one matching
        # pool, and then those triggers have higher matching priority than
        # gamma's.
        #
        # The inherited option is True if this is a recursive call, from a topic
        # that inherits other topics. This forces the {inherits} tag to be added
        # to the triggers. This only applies when the top topic 'includes'
        # another topic.
        self._say("\tCollecting trigger list for topic " + topic + "(depth="
                  + str(depth) + "; inheritance=" + str(inheritance) + "; "
                  + "inherited=" + str(inherited) + ")")
        # topic: the name of the topic
        # triglvl: reference to self._topics or self._thats
        # depth: starts at 0 and ++'s with each recursion
        # Collect an array of triggers to return.
        triggers = []
        # Get those that exist in this topic directly.
        inThisTopic = []
        if topic in triglvl:
            for trigger in triglvl[topic]:
                inThisTopic.append(trigger)
        # Does this topic include others?
        if topic in self._includes:
            # Check every included topic.
            for includes in self._includes[topic]:
                self._say("\t\tTopic " + topic + " includes " + includes)
                triggers.extend(self._topic_triggers(includes, triglvl, (depth + 1), inheritance, True))
        # Does this topic inherit others?
        if topic in self._lineage:
            # Check every inherited topic.
            for inherits in self._lineage[topic]:
                self._say("\t\tTopic " + topic + " inherits " + inherits)
                triggers.extend(self._topic_triggers(inherits, triglvl, (depth + 1), (inheritance + 1), False))
        # Collect the triggers for *this* topic. If this topic inherits any
        # other topics, it means that this topic's triggers have higher
        # priority than those in any inherited topics. Enforce this with an
        # {inherits} tag.
        if topic in self._lineage or inherited:
            for trigger in inThisTopic:
                self._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger)
                triggers.append("{inherits=" + str(inheritance) + "}" + trigger)
        else:
            triggers.extend(inThisTopic)
        return triggers
def _find_trigger_by_inheritance(self, topic, trig, depth=0):
"""Locate the replies for a trigger in an inherited/included topic."""
# This sub was called because the user matched a trigger from the sorted
# array, but the trigger doesn't belong to their topic, and is instead
# in an inherited or included topic. This is to search for it.
# Prevent recursion.
if depth > self._depth:
self._warn("Deep recursion detected while following an inheritance trail!")
return None
# inheritance is more important than inclusion: triggers in one topic can
# override those in an inherited topic.
if topic in self._lineage:
for inherits in sorted(self._lineage[topic]):
# See if this inherited topic has our trigger.
if trig in self._topics[inherits]:
# Great!
return self._topics[inherits][trig]
else:
# Check what THAT topic inherits from.
match = self._find_trigger_by_inheritance(
inherits, trig, (depth + 1)
)
if match:
# Found it!
return match
# See if this topic has an "includes"
if topic in self._includes:
for includes in sorted(self._includes[topic]):
# See if this included topic has our trigger.
if trig in self._topics[includes]:
# Great!
return self._topics[includes][trig]
else:
# Check what THAT topic inherits from.
match = self._find_trigger_by_inheritance(
includes, trig, (depth + 1)
)
if match:
# Found it!
return match
# Don't know what else to do!
return None
def _get_topic_tree(self, topic, depth=0):
"""Given one topic, get the list of all included/inherited topics."""
# Break if we're in too deep.
if depth > self._depth:
self._warn("Deep recursion while scanning topic trees!")
return []
# Collect an array of all topics.
topics = [topic]
# Does this topic include others?
if topic in self._includes:
# Try each of these.
for includes in sorted(self._includes[topic]):
topics.extend(self._get_topic_tree(includes, depth + 1))
# Does this topic inherit others?
if topic in self._lineage:
# Try each of these.
for inherits in sorted(self._lineage[topic]):
topics.extend(self._get_topic_tree(inherits, depth + 1))
return topics
############################################################################
# Miscellaneous Private Methods #
############################################################################
def _is_atomic(self, trigger):
"""Determine if a trigger is atomic or not."""
# Atomic triggers don't contain any wildcards or parenthesis or anything
# of the sort. We don't need to test the full character set, just left
# brackets will do.
special = ['*', '#', '_', '(', '[', '<', '@']
for char in special:
if char in trigger:
return False
return True
def _word_count(self, trigger, all=False):
"""Count the words that aren't wildcards in a trigger."""
words = []
if all:
words = re.split(RE.ws, trigger)
else:
words = re.split(RE.wilds, trigger)
wc = 0 # Word count
for word in words:
if len(word) > 0:
wc += 1
return wc
def _strip_nasties(self, s):
"""Formats a string for ASCII regex matching."""
s = re.sub(RE.nasties, '', s)
return s
def _dump(self):
"""For debugging, dump the entire data structure."""
pp = pprint.PrettyPrinter(indent=4)
print("=== Variables ===")
print("-- Globals --")
pp.pprint(self._gvars)
print("-- Bot vars --")
pp.pprint(self._bvars)
print("-- Substitutions --")
pp.pprint(self._subs)
print("-- Person Substitutions --")
pp.pprint(self._person)
print("-- Arrays --")
pp.pprint(self._arrays)
print("=== Topic Structure ===")
pp.pprint(self._topics)
print("=== %Previous Structure ===")
pp.pprint(self._thats)
print("=== Includes ===")
pp.pprint(self._includes)
print("=== Inherits ===")
pp.pprint(self._lineage)
print("=== Sort Buffer ===")
pp.pprint(self._sorted)
print("=== Syntax Tree ===")
pp.pprint(self._syntax)
################################################################################
# Exception Classes #
################################################################################
class RiveScriptError(Exception):
    """RiveScript base exception class.

    The message (if any) is also kept on the ``error_message`` attribute
    for programmatic access by callers that catch these errors."""
    def __init__(self, error_message=None):
        super(RiveScriptError, self).__init__(error_message)
        self.error_message = error_message
class NoMatchError(RiveScriptError):
    """No trigger could be matched for the user's message.

    Carries the module-level RS_ERR_MATCH text as its message."""
    def __init__(self):
        super(NoMatchError, self).__init__(RS_ERR_MATCH)
class NoReplyError(RiveScriptError):
    """A trigger matched, but no reply could be found for it.

    Carries the module-level RS_ERR_REPLY text as its message."""
    def __init__(self):
        super(NoReplyError, self).__init__(RS_ERR_REPLY)
class ObjectError(RiveScriptError):
    """An error occurred when executing a Python <call> object.

    Raised only when ignore_object_errors is False; defaults to the
    module-level RS_ERR_OBJECT message."""
    def __init__(self, error_message=RS_ERR_OBJECT):
        super(ObjectError, self).__init__(error_message)
class DeepRecursionError(RiveScriptError):
    """Prevented an infinite loop / deep recursion, unable to retrieve a
    reply for this message. Carries the RS_ERR_DEEP_RECURSION text."""
    def __init__(self):
        super(DeepRecursionError, self).__init__(RS_ERR_DEEP_RECURSION)
class NoDefaultRandomTopicError(Exception):
    """No default topic 'random' could be found — a critical error, since
    reply fetching always needs the 'random' topic to fall back on."""
    pass
class RepliesNotSortedError(Exception):
    """sort_replies() was not called after the RiveScript documents were
    loaded — a critical error, since matching relies on the sort buffers."""
    pass
################################################################################
# Interactive Mode #
################################################################################
if __name__ == "__main__":
    # Launch the interactive REPL when this module is run as a script.
    from interactive import interactive_mode
    interactive_mode()
# vim:expandtab
| [
"jaykumar.oza@jeppesen.com"
] | jaykumar.oza@jeppesen.com |
9da01c5fe4850d89d6df0c28383d6624f962e764 | 83179abbad0032fd3c8c38a54260ac4239ba9df3 | /2021/python/day15/day15.py | 24a8f0e5bd6154fc5d2140e760a2d5b58031e146 | [] | no_license | yulrizka/adventofcode | 448ac89ae543c8a7ee46bb5f86abc62887e3a9ee | 74b89528e07ae6282763968d5bb3d8eea38e07ba | refs/heads/master | 2023-01-13T03:57:20.688851 | 2022-12-22T11:11:59 | 2022-12-22T11:11:59 | 225,181,497 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | import queue
import unittest
# Parse the puzzle input into a 2-D grid of single-digit risk levels.
# (Swap in the commented-out path below to run against the sample input.)
# with open("../../input/day15-sample") as f:
with open("../../input/day15") as f:
    raw = [[int(x) for x in y] for y in f.read().strip().split("\n")]
def wrap(x):
while x > 9:
x = x - 9
return x
data2 = raw.copy()
for i in range(4):
row = list(map(lambda x: list(map(lambda y: wrap(y + (i + 1)), x)), raw))
data2 += row
for i, current_row in enumerate(data2):
rr = current_row.copy()
for j in range(4):
row = list(map(lambda y: wrap(y + (j + 1)), current_row))
rr += row
data2[i] = rr
nr = [-1, 0, 1, 0]
nc = [0, 1, 0, -1]
def solve(raw):
    """Dijkstra shortest path over a grid of per-cell entry risks.

    :param raw: rectangular 2-D list of int-convertible cell risks.
    :return: minimum total risk of a path from the top-left to the
        bottom-right cell, moving in the four cardinal directions; entering
        a cell costs its value and the start cell's own value is free.

    Improvements over the previous version: stale priority-queue entries
    are skipped (lazy deletion), the O(R*C*4) pre-built edge dictionary is
    gone (the grid is indexed directly), and the direction offsets are
    local instead of relying on module-level globals.
    """
    n_rows = len(raw)
    n_cols = len(raw[0])
    # 4-neighbourhood offsets: up, right, down, left.
    deltas = ((-1, 0), (0, 1), (1, 0), (0, -1))
    dist = {(r, c): float('inf') for r in range(n_rows) for c in range(n_cols)}
    dist[(0, 0)] = 0
    pq = queue.PriorityQueue()
    pq.put((0, (0, 0)))
    while not pq.empty():
        d, (r, c) = pq.get()
        if d > dist[(r, c)]:
            # Stale entry: a shorter path to this cell was already settled.
            continue
        for dr, dc in deltas:
            rr, cc = r + dr, c + dc
            if 0 <= rr < n_rows and 0 <= cc < n_cols:
                candidate = d + int(raw[rr][cc])
                if candidate < dist[(rr, cc)]:
                    dist[(rr, cc)] = candidate
                    pq.put((candidate, (rr, cc)))
    return dist[(n_rows - 1, n_cols - 1)]
def part1():
    """Part 1 answer: minimum-risk path across the original input grid."""
    return solve(raw)
def part2():
    """Part 2 answer: minimum-risk path across the 5x5 tiled grid."""
    return solve(data2)
class TestSum(unittest.TestCase):
    """Regression tests pinning the puzzle answers.

    NOTE: the expected values are specific to the ../../input/day15 file
    loaded at module import time."""
    def test1(self):
        # Part 1: shortest path on the original grid.
        ans = part1()
        print(ans)
        assert ans == 498
    def test2(self):
        # Part 2: shortest path on the 5x5 tiled grid.
        ans = part2()
        print(ans)
        assert ans == 2901
| [
"yulrizka@users.noreply.github.com"
] | yulrizka@users.noreply.github.com |
ebb0ee33e3d8bde61a40935c59eb8b4e2c250d40 | 9e8a90e8c9bc90d9ea34b79e7553a7ba2fd4e6bf | /models/networkSwitch.py | bded10111bfacc3b5285280e628f5a076988367a | [] | no_license | lwyanne/CPAE | ddae51affcca8db0266bf66f091f165d95bd7837 | e155dfecf3f38ed7121a8a446dc4eeb4067b7e46 | refs/heads/master | 2023-07-28T13:12:22.372796 | 2021-08-27T15:09:58 | 2021-08-27T15:09:58 | 353,564,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185,346 | py | from __future__ import print_function
from torch.nn.utils.rnn import pack_padded_sequence
import inspect
import os, sys
import logging
# add the top-level directory of this project to sys.path so that we can import modules without error
from models.loss import Chimera_loss, record_loss, mask_where, mapping_where, mask_mapping_M
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
logger = logging.getLogger("cpc")
import numpy as np
import torch
import torch.nn as nn
import math
from models.utils import *
from models.datareader import *
from sklearn.metrics import roc_auc_score
from fastai.callbacks import *
from fastai.tabular import *
from fastai import tabular
from models.optimizer import ScheduledOptim
from sklearn.metrics import cohen_kappa_score as kappa, mean_absolute_error as mad, roc_auc_score as auroc
# Force deterministic cuDNN kernels (reproducible runs) at the cost of
# disabling the autotuner's speed benefits.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def auroc_score(input, target):
    """Binary ROC-AUC: column 1 of `input` is taken as the positive-class
    score. Both tensors are moved to CPU/NumPy before scoring."""
    scores = input.cpu().numpy()[:, 1]
    labels = target.cpu().numpy()
    return roc_auc_score(labels, scores)
class AUROC(tabular.Callback):
"""
This is for output AUROC as a metric in fastai training process.
This has a small but acceptable issue. #TODO
"""
_order = -20 # Needs to run before the recorder
def __init__(self, learn, **kwargs):
self.learn = learn
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['AUROC'])
def on_epoch_begin(self, **kwargs):
self.output, self.target = [], []
def on_batch_end(self, last_target, last_output, train, **kwargs):
if not train:
try:
self.output.append(last_output)
except AttributeError:
self.output = []
try:
self.target.append(last_target)
except AttributeError:
self.target = []
def on_epoch_end(self, last_metrics, **kwargs):
if len(self.output) > 0:
output = torch.cat(self.output).cpu()
target = torch.cat(self.target).cpu()
preds = F.softmax(output, dim=1)
metric = roc_auc_score(target, preds, multi_class='ovo')
return add_metrics(last_metrics, [metric])
class biAUROC(tabular.Callback):
    """
    fastai callback reporting *binary* AUROC as a validation metric at the
    end of each epoch (uses auroc_score, i.e. the probability of class 1).

    Original note: "This has a small but acceptable issue. #TODO"
    """
    _order = -20  # Needs to run before the recorder
    def __init__(self, learn, **kwargs):
        self.learn = learn
    def on_train_begin(self, **kwargs):
        # Register the metric column shown by the recorder.
        self.learn.recorder.add_metric_names(['AUROC'])
    def on_epoch_begin(self, **kwargs):
        # Reset the per-epoch accumulators.
        self.output, self.target = [], []
    def on_batch_end(self, last_target, last_output, train, **kwargs):
        # Only accumulate validation batches.
        if not train:
            # NOTE(review): the AttributeError fallback drops this batch's
            # value; harmless since on_epoch_begin initializes both lists.
            try:
                self.output.append(last_output)
            except AttributeError:
                self.output = []
            try:
                self.target.append(last_target)
            except AttributeError:
                self.target = []
    def on_epoch_end(self, last_metrics, **kwargs):
        if len(self.output) > 0:
            output = torch.cat(self.output).cpu()
            target = torch.cat(self.target).cpu()
            # Softmax logits into probabilities; auroc_score uses column 1.
            preds = F.softmax(output, dim=1)
            metric = auroc_score(preds, target)
            return add_metrics(last_metrics, [metric])
class MAD(tabular.Callback):
    """fastai callback reporting mean absolute deviation between the
    argmax-predicted class index and the target, at each epoch end.

    NOTE(review): `mean_absolute_error` is not imported by name here —
    presumably it comes from the `from fastai.tabular import *` star
    import; verify, since sklearn's version is only bound as `mad`.
    """
    _order = -20
    def __init__(self, learn, **kwargs):
        self.learn = learn
    def on_train_begin(self, **kwargs):
        # Register the metric column shown by the recorder.
        self.learn.recorder.add_metric_names(['MAD'])
    def on_epoch_begin(self, **kwargs):
        # Reset the per-epoch accumulators.
        self.output, self.target = [], []
    def on_batch_end(self, last_target, last_output, train, **kwargs):
        # Only accumulate validation batches.
        if not train:
            try:
                self.output.append(last_output)
            except AttributeError:
                self.output = []
            try:
                self.target.append(last_target)
            except AttributeError:
                self.target = []
    def on_epoch_end(self, last_metrics, **kwargs):
        if len(self.output) > 0:
            output = torch.cat(self.output)
            target = torch.cat(self.target)
            # Hard class prediction = argmax over softmax probabilities.
            preds = torch.argmax(F.softmax(output, dim=1), dim=1, keepdim=False)
            metric = mean_absolute_error(preds, target)
            return add_metrics(last_metrics, [metric])
class CPclassifier(nn.Module):
    """Chain a pretrained encoder (CPC / autoencoder) with an MLP head so
    the pair can be fine-tuned on a downstream task.

    Fine-tuning itself is driven externally through a fastai learner. When
    ``freeze`` is True the encoder's weights are excluded from gradient
    updates, so only the MLP head trains.
    """
    def __init__(self, CPmodel, MLP, freeze=False):
        super(CPclassifier, self).__init__()
        self.CPmodel = CPmodel
        self.MLP = MLP
        if freeze:
            # Detach the encoder from gradient updates.
            for weight in self.CPmodel.parameters():
                weight.requires_grad = False
    def forward(self, x):
        # CPC-style and AE_LSTM encoders expose their context through the
        # recurrent output; other autoencoders expose a plain encoding.
        encoder_name = self.CPmodel.__class__.__name__
        if 'CP' in encoder_name or 'AE_LSTM' in encoder_name:
            features = self.CPmodel.get_reg_out(x)
        else:
            features = self.CPmodel.get_encode(x)
        return self.MLP(features)
class CPAE1_S(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CPAE1_S, self).__init__()
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x # output shape: (N,n_points=192,n_features=76)
def recurrent(self, zt):
'''
GRU RNN
'''
batch_size = self.batch_size
# output shape: (N, n_frames, features,1)
hidden = self.init_hidden(batch_size)
output, hidden = self.gru(zt, hidden)
return output, hidden
def gru_to_ct(self, zt):
'''
return the last time_step of GRU result
'''
output, hidden = self.recurrent(zt)
c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
return c_t, hidden
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def get_reg_out(self, x):
    """Encode the whole sequence and return the GRU context vector c_t.

    Used downstream as the learned representation of the full input.
    NOTE(review): assumes x arrives as (N, 1, T, F) (or squeezable to
    (N, T, F)); the squeeze/transpose produces (N, F, T) for the conv
    encoder -- confirm against callers.
    """
    self.batch_size = x.shape[0]
    x = x.squeeze(1).transpose(1, 2)
    self.n_frames = x.shape[2]
    z = self.encode(x).transpose(1, 2)
    z = self.linear(z)  # project conv features down to embedded_features
    forward_seq = z[:, :, :]  # full sequence: no truncation at a sampled anchor t
    c_t, hidden = self.gru_to_ct(forward_seq)
    return c_t
def forward(self, x):
    """CPC-style training pass.

    Encodes x, decodes a reconstruction `d`, samples a random anchor time
    `t_samples`, predicts the next `time_step` embeddings from the GRU
    context, and scores them with the InfoNCE loss.

    :param x: batch transposed to (N, F, T) for the conv encoder --
        presumably arrives as (N, T, F); TODO confirm against callers.
    :return: (d, nce, accuracy) -- reconstruction, InfoNCE loss, last-step
        contrastive accuracy.
    """
    x = x.transpose(1, 2)
    z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
    d = self.decode(z.transpose(1, 2))  # reconstruction from the (untruncated) latents
    self.batch_size = x.shape[0]
    self.n_frames = x.shape[2]
    # Random anchor, kept at least time_step away from both sequence ends.
    t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
        device)
    # Ground-truth future embeddings for steps t+1 .. t+time_step.
    encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
        device)
    z = self.linear(z)  # project conv features down to embedded_features
    for i in np.arange(1, self.time_step + 1):
        encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
    forward_seq = z[:, :int(t_samples) + 1, :]  # context: everything up to and including t
    c_t, hidden = self.gru_to_ct(forward_seq)
    # One linear head per future offset predicts that future embedding from c_t.
    pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
    for i in np.arange(0, self.time_step):
        linear = self.Wk[i]
        pred[i] = linear(c_t)
    if self.beforeNCE:  # optionally transform both sides through the extra FC head (see add_fcs)
        pred = self.fcs(pred)
        encode_samples = self.fcs(encode_samples)
    # d = self.decode(pred.transpose(1,2).transpose(0,2))
    nce, accuracy = self.compute_nce(encode_samples, pred)
    return d, nce, accuracy
class CPAE1_NO_BN(nn.Module):
    """
    Contrastive-predictive autoencoder (CPC-style): a 1-D conv encoder/decoder
    pair plus a GRU that predicts future latent codes, scored with InfoNCE.
    Same architecture as CPAE1_S except BatchNorm is removed from the conv
    blocks (see the commented-out BatchNorm1d line).
    """
    def __init__(
            self,
            embedded_features,
            gru_out,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            time_step=30,
            n_points=192,
            n_features=76,
    ):
        # NOTE(review): mutable default for conv_sizes -- safe only while
        # callers never mutate it.
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        # kernel_sizes=get_kernel_sizes() #TODO
        super(CPAE1_NO_BN, self).__init__()
        self.n_features = n_features
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        # The core conv block: pad-by-one + Conv1d(kernel=2) keeps the sequence
        # length unchanged. BatchNorm intentionally disabled in this variant.
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            # nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        self.encoder = nn.ModuleList(
            [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.decode_channels = self.channels[::-1]
        self.decoder = nn.ModuleList(
            [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.GRU(
            self.embedded_features,
            gru_out,
            num_layers=1,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None
        # input shape: (N,C=1,n_points=192,n_features=76)
        # output shape: (N, C=sizes[-1], )
        # Kaiming-init the recurrent weights (relies on the private
        # _all_weights attribute of nn.GRU -- TODO confirm across torch versions).
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)
    # def relevant_points(n):
    def add_fcs(self, hidden=None):
        """
        This function will add FC layers to the embedded features and then compare the features after FC transformations.
        See NOTION for illustration.
        :param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
        as [n_embedded_features,n_embedded_features]
        :return: None
        NOTE(review): this class's forward() never checks self.beforeNCE, so
        the added FC head is unused here -- confirm intent.
        """
        n = self.embedded_features
        if hidden is None:
            self.fcs = nn.Sequential(
                nn.Linear(n, n),
                nn.ReLU(inplace=True),
                nn.Linear(n, n)
            )
        else:
            if type(hidden) != list:
                hidden = list(hidden)
            layers = []
            for i, j in zip([n] + hidden, hidden + [n]):
                layers.append(nn.Linear(i, j))
                layers.append(nn.ReLU(inplace=True))
            layers.pop()  # We do not want Relu at the last layer
            self.fcs = nn.Sequential(*layers).to(device)
        self.beforeNCE = True
    def _weights_init(self, m):
        # Kaiming init for linear/conv weights; BatchNorm (if any) to identity.
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if isinstance(m, nn.Conv1d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    def init_hidden(self, batch_size, use_gpu=True):
        # Zero initial hidden state; `use_gpu` is accepted but unused.
        return torch.zeros(1, batch_size, self.gru_out).to(device)
    def encode(self, x):
        for i in range(len(self.encoder)):  # input shape: (N,n_features=76,n_points=192)
            x = self.encoder[i](x)
        return x  # output shape: (N,n_features=8,n_points=192)
    def decode(self, x):
        for i in range(len(self.decoder)):  # input shape: (N,n_features=8,n_points=192)
            x = self.decoder[i](x)
        return x  # output shape: (N,n_points=192,n_features=76)
    def recurrent(self, zt):
        '''
        GRU RNN: run zt (batch-first) from a zero hidden state.
        '''
        batch_size = self.batch_size
        # output shape: (N, n_frames, features,1)
        hidden = self.init_hidden(batch_size)
        output, hidden = self.gru(zt, hidden)
        return output, hidden
    def gru_to_ct(self, zt):
        '''
        return the last time_step of GRU result
        '''
        output, hidden = self.recurrent(zt)
        c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
        return c_t, hidden
    def compute_nce(self, encode_samples, pred):
        '''
        Calculate the InfoNCE loss between true and predicted embeddings.
        ...argument:
        ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
        ......pred : Wk[i]( C_t )
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # averaged over time_step and batch (after the loop)
        for i in np.arange(0, self.time_step):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
    def get_reg_out(self, x):
        # Encode the full sequence and return the GRU context vector c_t.
        self.batch_size = x.shape[0]
        x = x.squeeze(1).transpose(1, 2)
        self.n_frames = x.shape[2]
        z = self.encode(x).transpose(1, 2)
        z = self.linear(z)
        forward_seq = z[:, :, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        return c_t
    def forward(self, x):
        """CPC pass: reconstruct, sample an anchor t, predict future embeddings,
        and score with InfoNCE. Returns (d, nce, accuracy)."""
        x = x.transpose(1, 2)
        z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
        d = self.decode(z.transpose(1, 2))
        self.batch_size = x.shape[0]
        self.n_frames = x.shape[2]
        # Random anchor, kept at least time_step away from both sequence ends.
        t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
            device)
        # Ground-truth future embeddings for steps t+1 .. t+time_step.
        encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
            device)  # e.g.
        # size
        z = self.linear(z)
        for i in np.arange(1, self.time_step + 1):
            encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
        forward_seq = z[:, :int(t_samples) + 1, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        # d = self.decode(pred.transpose(1,2).transpose(0,2))
        nce, accuracy = self.compute_nce(encode_samples, pred)
        return d, nce, accuracy
class CPAE1_LSTM(nn.Module):
    """
    CPAE1 variant whose autoregressive model is a 2-layer LSTM instead of a
    GRU (the attribute is still called `gru` for interface compatibility).
    Conv blocks keep BatchNorm.
    """
    def __init__(
            self,
            embedded_features,
            gru_out,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            time_step=30,
            n_points=192,
            n_features=76,
    ):
        # NOTE(review): mutable default for conv_sizes -- safe only while
        # callers never mutate it.
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        # kernel_sizes=get_kernel_sizes() #TODO
        super(CPAE1_LSTM, self).__init__()
        self.n_features = n_features
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        # The core conv block: pad-by-one + Conv1d(kernel=2) keeps the
        # sequence length unchanged.
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        self.encoder = nn.ModuleList(
            [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.decode_channels = self.channels[::-1]
        self.decoder = nn.ModuleList(
            [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.LSTM(
            self.embedded_features,
            hidden_size=gru_out,
            num_layers=2,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None
        # input shape: (N,C=1,n_points=192,n_features=76)
        # output shape: (N, C=sizes[-1], )
        # Kaiming-init the recurrent weights (relies on the private
        # _all_weights attribute -- TODO confirm across torch versions).
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)
    # def relevant_points(n):
    def add_fcs(self, hidden=None):
        """
        This function will add FC layers to the embedded features and then compare the features after FC transformations.
        See NOTION for illustration.
        :param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
        as [n_embedded_features,n_embedded_features]
        :return: None
        NOTE(review): this class's forward() never checks self.beforeNCE, so
        the added FC head is unused here -- confirm intent.
        """
        n = self.embedded_features
        if hidden is None:
            self.fcs = nn.Sequential(
                nn.Linear(n, n),
                nn.ReLU(inplace=True),
                nn.Linear(n, n)
            )
        else:
            if type(hidden) != list:
                hidden = list(hidden)
            layers = []
            for i, j in zip([n] + hidden, hidden + [n]):
                layers.append(nn.Linear(i, j))
                layers.append(nn.ReLU(inplace=True))
            layers.pop()  # We do not want Relu at the last layer
            self.fcs = nn.Sequential(*layers).to(device)
        self.beforeNCE = True
    def _weights_init(self, m):
        # Kaiming init for linear/conv weights; BatchNorm to identity.
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if isinstance(m, nn.Conv1d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    def init_hidden(self, batch_size, use_gpu=True):
        # One layer's worth of zeros; recurrent() duplicates it for the
        # 2-layer LSTM and pairs it into (h_0, c_0). `use_gpu` is unused.
        return torch.zeros(1, batch_size, self.gru_out).to(device)
    def encode(self, x):
        for i in range(len(self.encoder)):  # input shape: (N,n_features=76,n_points=192)
            x = self.encoder[i](x)
        return x  # output shape: (N,n_features=8,n_points=192)
    def decode(self, x):
        for i in range(len(self.decoder)):  # input shape: (N,n_features=8,n_points=192)
            x = self.decoder[i](x)
        return x  # output shape: (N,n_points=192,n_features=76)
    def recurrent(self, zt):
        '''
        LSTM recurrence (the attribute is named `gru` for compatibility).
        '''
        batch_size = self.batch_size
        # output shape: (N, n_frames, features,1)
        hidden = self.init_hidden(batch_size)
        # Duplicate the single-layer zeros for num_layers=2, then pair up as (h_0, c_0).
        hidden = torch.cat((hidden, hidden), dim=0)
        hidden = (hidden, hidden)
        output, hidden = self.gru(zt, hidden)
        return output, hidden
    def gru_to_ct(self, zt):
        '''
        return the last time_step of GRU result
        '''
        output, hidden = self.recurrent(zt)
        c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
        return c_t, hidden
    def compute_nce(self, encode_samples, pred):
        '''
        Calculate the InfoNCE loss between true and predicted embeddings.
        ...argument:
        ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
        ......pred : Wk[i]( C_t )
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # averaged over time_step and batch (after the loop)
        for i in np.arange(0, self.time_step):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
    def get_reg_out(self, x):
        # Encode the full sequence and return the recurrent context vector c_t.
        self.batch_size = x.shape[0]
        x = x.squeeze(1).transpose(1, 2)
        self.n_frames = x.shape[2]
        z = self.encode(x).transpose(1, 2)
        z = self.linear(z)
        forward_seq = z[:, :, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        return c_t
    def forward(self, x):
        """CPC pass: reconstruct, sample an anchor t, predict future embeddings,
        and score with InfoNCE. Returns (d, nce, accuracy)."""
        x = x.transpose(1, 2)
        z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
        d = self.decode(z.transpose(1, 2))
        self.batch_size = x.shape[0]
        self.n_frames = x.shape[2]
        # Random anchor, kept at least time_step away from both sequence ends.
        t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
            device)
        # Ground-truth future embeddings for steps t+1 .. t+time_step.
        encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
            device)  # e.g.
        # size
        z = self.linear(z)
        for i in np.arange(1, self.time_step + 1):
            encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
        forward_seq = z[:, :int(t_samples) + 1, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        # d = self.decode(pred.transpose(1,2).transpose(0,2))
        nce, accuracy = self.compute_nce(encode_samples, pred)
        return d, nce, accuracy
class CPAE1_LSTM_NO_BN(nn.Module):
    """
    CPAE1 variant with a 2-layer LSTM autoregressor (attribute still named
    `gru`) and BatchNorm removed from the conv blocks.
    """
    def __init__(
            self,
            embedded_features,
            gru_out,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            time_step=30,
            n_points=192,
            n_features=76,
    ):
        # NOTE(review): mutable default for conv_sizes -- safe only while
        # callers never mutate it.
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        # kernel_sizes=get_kernel_sizes() #TODO
        super(CPAE1_LSTM_NO_BN, self).__init__()
        self.n_features = n_features
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        # The core conv block: pad-by-one + Conv1d(kernel=2) keeps the sequence
        # length unchanged. BatchNorm intentionally disabled in this variant.
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            # nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        self.encoder = nn.ModuleList(
            [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.decode_channels = self.channels[::-1]
        self.decoder = nn.ModuleList(
            [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.LSTM(
            self.embedded_features,
            hidden_size=gru_out,
            num_layers=2,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None
        # input shape: (N,C=1,n_points=192,n_features=76)
        # output shape: (N, C=sizes[-1], )
        # Kaiming-init the recurrent weights (relies on the private
        # _all_weights attribute -- TODO confirm across torch versions).
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)
    # def relevant_points(n):
    def add_fcs(self, hidden=None):
        """
        This function will add FC layers to the embedded features and then compare the features after FC transformations.
        See NOTION for illustration.
        :param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
        as [n_embedded_features,n_embedded_features]
        :return: None
        NOTE(review): this class's forward() never checks self.beforeNCE, so
        the added FC head is unused here -- confirm intent.
        """
        n = self.embedded_features
        if hidden is None:
            self.fcs = nn.Sequential(
                nn.Linear(n, n),
                nn.ReLU(inplace=True),
                nn.Linear(n, n)
            )
        else:
            if type(hidden) != list:
                hidden = list(hidden)
            layers = []
            for i, j in zip([n] + hidden, hidden + [n]):
                layers.append(nn.Linear(i, j))
                layers.append(nn.ReLU(inplace=True))
            layers.pop()  # We do not want Relu at the last layer
            self.fcs = nn.Sequential(*layers).to(device)
        self.beforeNCE = True
    def _weights_init(self, m):
        # Kaiming init for linear/conv weights; BatchNorm (if any) to identity.
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if isinstance(m, nn.Conv1d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    def init_hidden(self, batch_size, use_gpu=True):
        # One layer's worth of zeros; recurrent() duplicates it for the
        # 2-layer LSTM and pairs it into (h_0, c_0). `use_gpu` is unused.
        return torch.zeros(1, batch_size, self.gru_out).to(device)
    def encode(self, x):
        for i in range(len(self.encoder)):  # input shape: (N,n_features=76,n_points=192)
            x = self.encoder[i](x)
        return x  # output shape: (N,n_features=8,n_points=192)
    def decode(self, x):
        for i in range(len(self.decoder)):  # input shape: (N,n_features=8,n_points=192)
            x = self.decoder[i](x)
        return x  # output shape: (N,n_points=192,n_features=76)
    def recurrent(self, zt):
        '''
        LSTM recurrence (the attribute is named `gru` for compatibility).
        '''
        batch_size = self.batch_size
        # output shape: (N, n_frames, features,1)
        hidden = self.init_hidden(batch_size)
        # Duplicate the single-layer zeros for num_layers=2, then pair up as (h_0, c_0).
        hidden = torch.cat((hidden, hidden), dim=0)
        hidden = (hidden, hidden)
        output, hidden = self.gru(zt, hidden)
        return output, hidden
    def gru_to_ct(self, zt):
        '''
        return the last time_step of GRU result
        '''
        output, hidden = self.recurrent(zt)
        c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
        return c_t, hidden
    def compute_nce(self, encode_samples, pred):
        '''
        Calculate the InfoNCE loss between true and predicted embeddings.
        ...argument:
        ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
        ......pred : Wk[i]( C_t )
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # averaged over time_step and batch (after the loop)
        for i in np.arange(0, self.time_step):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
    def get_reg_out(self, x):
        # Encode the full sequence and return the recurrent context vector c_t.
        self.batch_size = x.shape[0]
        x = x.squeeze(1).transpose(1, 2)
        self.n_frames = x.shape[2]
        z = self.encode(x).transpose(1, 2)
        z = self.linear(z)
        forward_seq = z[:, :, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        return c_t
    def forward(self, x):
        """CPC pass: reconstruct, sample an anchor t, predict future embeddings,
        and score with InfoNCE. Returns (d, nce, accuracy)."""
        x = x.transpose(1, 2)
        z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
        d = self.decode(z.transpose(1, 2))
        self.batch_size = x.shape[0]
        self.n_frames = x.shape[2]
        # Random anchor, kept at least time_step away from both sequence ends.
        t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
            device)
        # Ground-truth future embeddings for steps t+1 .. t+time_step.
        encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
            device)  # e.g.
        # size
        z = self.linear(z)
        for i in np.arange(1, self.time_step + 1):
            encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
        forward_seq = z[:, :int(t_samples) + 1, :]
        c_t, hidden = self.gru_to_ct(forward_seq)
        pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        # d = self.decode(pred.transpose(1,2).transpose(0,2))
        nce, accuracy = self.compute_nce(encode_samples, pred)
        return d, nce, accuracy
class CPAE2_S(CPAE1_S):
    """
    Use conv1dtranspose in CPAE1
    (decoder blocks use ConvTranspose1d(kernel=3, padding=1), which preserves
    the sequence length; all other behavior comes from CPAE1_S).
    """
    def __init__(
            self,
            embedded_features,
            gru_out,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            time_step=30,
            n_points=192,
            n_features=76,
    ):
        # NOTE(review): mutable default for conv_sizes -- safe only while
        # callers never mutate it.
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        # kernel_sizes=get_kernel_sizes() #TODO
        # NOTE(review): the parent is initialised with no arguments; this only
        # works if CPAE1_S.__init__ has defaults for embedded_features/gru_out
        # -- confirm, otherwise this raises TypeError at construction.
        super(CPAE2_S, self).__init__()
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        # Encoder block: pad-by-one + Conv1d(kernel=2) keeps the length unchanged.
        self.enSequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # Decoder block: length-preserving transposed convolution.
        self.deSequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ConvTranspose1d(inChannel, outChannel, kernel_size=3, padding=1),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        # NOTE(review): unlike CPAE1-style classes, encoder/decoder are NOT
        # moved to `device` here -- confirm whether that is intentional.
        self.encoder = nn.ModuleList(
            [self.enSequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        )
        self.decode_channels = self.channels[::-1]
        self.decoder = nn.ModuleList(
            [self.deSequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
        )
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.GRU(
            self.embedded_features,
            gru_out,
            num_layers=1,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None
        # input shape: (N,C=1,n_points=192,n_features=76)
        # output shape: (N, C=sizes[-1], )
        # Kaiming-init the recurrent weights (relies on the private
        # _all_weights attribute -- TODO confirm across torch versions).
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)
    # def relevant_points(n):
    # deconvolution nn. unMaxPool
class CPAE3_S(CPAE2_S):
    """
    Use conv1dtranspose in CPAE1 & Maxpooling & unpooing
    (encoder adds length-preserving MaxPool after the first two conv blocks;
    encoder/decoder are flat nn.Sequential stacks, which the inherited
    encode()/decode() index layer-by-layer).
    """
    def __init__(
            self,
            embedded_features,
            gru_out,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            time_step=30,
            n_points=192,
            n_features=76,
    ):
        # NOTE(review): mutable default for conv_sizes -- safe only while
        # callers never mutate it.
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        # kernel_sizes=get_kernel_sizes() #TODO
        # NOTE(review): the parent is initialised with no arguments; this only
        # works if the ancestors accept a no-arg call -- confirm.
        super(CPAE3_S, self).__init__()
        self.n_features = n_features
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        self.decode_channels = self.channels[::-1]
        encodelist = []
        count = 0
        for i, j in zip(self.channels[:-1], self.channels[1:]):
            encodelist.append(nn.ReflectionPad1d((0, 1)))
            encodelist.append(nn.Conv1d(i, j, kernel_size=2, padding=0))
            encodelist.append(nn.BatchNorm1d(j))
            encodelist.append(nn.ReLU(inplace=True))
            if count < 2:
                # pad-by-one + MaxPool1d(kernel=2, stride=1) keeps the length unchanged
                encodelist.append(nn.ReflectionPad1d((0, 1)))
                encodelist.append(nn.MaxPool1d(2, stride=1))
            count += 1
        self.encoder = nn.Sequential(*encodelist)
        decodelist = []
        count = 0
        for i, j in zip(self.decode_channels[:-1], self.decode_channels[1:]):
            decodelist.append(nn.ConvTranspose1d(i, j, kernel_size=3, padding=1))
            decodelist.append(nn.BatchNorm1d(j))
            decodelist.append(nn.ReLU(inplace=True))
        self.decoder = nn.Sequential(*decodelist)
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.GRU(
            self.embedded_features,
            gru_out,
            num_layers=1,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None
        # input shape: (N,C=1,n_points=192,n_features=76)
        # output shape: (N, C=sizes[-1], )
        # Kaiming-init the recurrent weights (relies on the private
        # _all_weights attribute -- TODO confirm across torch versions).
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)
    # def relevant_points(n):
    # deconvolution nn. unMaxPool
class CPAE4_S(CPAE1_S):
    """
    CPAE1_S variant whose contrastive (NCE) loss compares the *raw* future
    frames against the frames reconstructed by decoding the predicted latent
    codes, instead of comparing latent embeddings directly.
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: this method was originally spelled ``__int__`` (a typo), so
        # it was never called and construction silently fell through to
        # CPAE1_S.__init__.  Forwarding all arguments keeps the effective
        # behavior identical for every existing caller (e.g. CPAE7_S).
        super(CPAE4_S, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        Training pass: sample a random anchor t, predict the next `time_step`
        latent codes from the recurrent context, decode them back to feature
        space, and score them against the true future frames with InfoNCE.

        :param x: input batch; a leading singleton channel dim is squeezed away.
            NOTE(review): n_frames is read from x.shape[2] *before* the
            transpose, unlike CPAE1_S.forward -- confirm the intended layout.
        :return: (accuracy, nce, x) -- note the order differs from CPAE1_S.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        self.batch_size = x.shape[0]
        self.n_frames = x.shape[2]
        x = x.transpose(1, 2)
        z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
        z = self.linear(z)  # project conv features down to embedded_features
        x = x.transpose(1, 2)  # restore original layout for sampling raw frames
        # Random anchor, kept at least time_step away from both sequence ends.
        t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
            device)
        forward_seq = z[:, :int(t_samples) + 1, :]  # context up to and including t
        c_t, hidden = self.gru_to_ct(forward_seq)
        # One linear head per future offset predicts that future embedding from c_t.
        pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        # Ground-truth raw frames for steps t+1 .. t+time_step.
        x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to(
            device)
        for i in np.arange(1, self.time_step + 1):
            x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
        # Decode the predicted latents back into feature space.
        reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2)
        nce, accuracy = self.compute_nce(x_samples, reconstruct_samples)
        return accuracy, nce, x

    def compute_nce(self, encode_samples, pred):
        '''
        Calculate the InfoNCE loss between true future frames and the decoded
        predictions.

        ...argument:
        ......encode_samples : ( time_step, batch_size, n_features ) true frames
        ......pred : reconstructed frames of the same shape
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # accumulated below, then averaged over time_step and batch
        for i in np.arange(0, self.time_step):
            # Similarity matrix between true samples and predictions (batch x batch).
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # A sample is "correct" when its own prediction is its best match.
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
class CPAE4_NO_BN(CPAE1_NO_BN):
    """
    CPAE1_NO_BN variant whose contrastive (NCE) loss compares the *raw* future
    frames against the frames reconstructed by decoding the predicted latent
    codes, instead of comparing latent embeddings directly.
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: this method was originally spelled ``__int__`` (a typo), so
        # it was never called and construction silently fell through to
        # CPAE1_NO_BN.__init__.  Forwarding all arguments keeps the effective
        # behavior identical for every existing caller.
        super(CPAE4_NO_BN, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        Training pass: sample a random anchor t, predict the next `time_step`
        latent codes from the recurrent context, decode them back to feature
        space, and score them against the true future frames with InfoNCE.

        :param x: input batch; a leading singleton channel dim is squeezed away.
            NOTE(review): n_frames is read from x.shape[2] *before* the
            transpose, unlike CPAE1_NO_BN.forward -- confirm the intended layout.
        :return: (accuracy, nce, x) -- note the order differs from CPAE1_NO_BN.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        self.batch_size = x.shape[0]
        self.n_frames = x.shape[2]
        x = x.transpose(1, 2)
        z = self.encode(x).transpose(1, 2)  # z: (batch, n_time, conv[-1])
        z = self.linear(z)  # project conv features down to embedded_features
        x = x.transpose(1, 2)  # restore original layout for sampling raw frames
        # Random anchor, kept at least time_step away from both sequence ends.
        t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
            device)
        forward_seq = z[:, :int(t_samples) + 1, :]  # context up to and including t
        c_t, hidden = self.gru_to_ct(forward_seq)
        # One linear head per future offset predicts that future embedding from c_t.
        pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        # Ground-truth raw frames for steps t+1 .. t+time_step.
        x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to(
            device)
        for i in np.arange(1, self.time_step + 1):
            x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
        # Decode the predicted latents back into feature space.
        reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2)
        nce, accuracy = self.compute_nce(x_samples, reconstruct_samples)
        return accuracy, nce, x

    def compute_nce(self, encode_samples, pred):
        '''
        Calculate the InfoNCE loss between true future frames and the decoded
        predictions.

        ...argument:
        ......encode_samples : ( time_step, batch_size, n_features ) true frames
        ......pred : reconstructed frames of the same shape
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # accumulated below, then averaged over time_step and batch
        for i in np.arange(0, self.time_step):
            # Similarity matrix between true samples and predictions (batch x batch).
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # A sample is "correct" when its own prediction is its best match.
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
class CPAE7_S(CPAE4_S):
    """
    this CPAE simply make `f_i(x_i,x_j)` the chimera_loss function
    (the NCE similarity is computed on feature-weighted raw frames; weights
    combine an MSE term, a mask term and a mapping term from mask_mapping_M).
    """
    def __init__(self, embedded_featrues=8, gru_out=8, Lambda=[1, 1, 3]):
        # NOTE(review): `embedded_featrues` is a (mis-spelled) public parameter
        # name; renaming it would break keyword callers, so it is kept.
        # NOTE(review): mutable default for Lambda -- it is only read, never
        # mutated, so this is currently safe.
        super(CPAE7_S, self).__init__(embedded_featrues,
                                      gru_out)  # to initiate the CPAE4 with embedded_featrues = 8, gru_out = 8
        # Normalise the three loss weights so they sum to 10 (requires a GPU).
        self.Lambda = torch.tensor(Lambda).float().cuda()
        self.Lambda = self.Lambda / sum(self.Lambda) * 10
    def weighted_mask(self, x):
        """
        similar to chimera loss: build a per-element weight from an all-ones
        (MSE) term plus the mask/mapping terms returned by mask_mapping_M.
        :param x: one time step of raw frames, shape (batch, 76)
        """
        # x = x.transpose(0,1)
        # d = d.transpose(0,1)
        assert (x.shape[1] == 76)
        mse_m = torch.ones(x.shape).to(device)
        # mask_mapping_M is an external helper; presumably returns per-element
        # mask and mapping weight tensors of x's shape -- TODO confirm.
        mask_m, mapping_m = mask_mapping_M(x)
        return self.Lambda[0] * mse_m + self.Lambda[1] * mask_m + self.Lambda[2] * mapping_m
    def compute_nce(self, x, d):
        '''
        Calculate the InfoNCE loss on feature-weighted raw frames.
        ...argument:
        ......x : x_samples , ( time_step, batch_size, conv_sizes[-1] )
        ......d : reconstruct_samples , the same shape as x, self.decode(z_hat)
        NOTE(review): `correct` keeps only the last time step's value, so
        `accuracy` reflects the final step only. Requires a GPU (`.cuda()`).
        '''
        nce = 0  # averaged over time_step and batch (after the loop)
        for i in np.arange(0, self.time_step):
            # Weight the true frames before computing the similarity matrix.
            x_w = self.weighted_mask(x[i]) * x[i]
            total = torch.mm(x_w, torch.transpose(d[i], 0, 1))  # e.g. size 8*8
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
class CPLSTM(nn.Module):
    """
    Bi-directional LSTM
    (CPC-style model: a bi-directional first LSTM feeds a uni-directional
    second LSTM; future cell states are predicted from the prefix state and
    scored with InfoNCE.)
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5):
        # Smart way to filter the args
        # NOTE(review): bn, task, depth and num_classes are stored but never
        # used inside this class -- presumably consumed by callers.
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        super(CPLSTM, self).__init__()
        # Bi-directional first layer: hidden dim//2 per direction -> output size dim.
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim // 2,
            dropout=self.drop,
            bidirectional=True,
            batch_first=True
        )
        # NOTE(review): lstm2's input_size is input_dim, but it is fed lstm1's
        # output whose feature size is dim -- this only works when
        # dim == input_dim. Confirm the configurations used.
        self.lstm2 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            dropout=self.drop,
            bidirectional=False,
            batch_first=True
        )
        # One prediction head per future offset (CPC's W_k matrices).
        self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
    def encodeRegress(self, x):
        """Run x through both LSTMs; return the outputs and lstm2's final (h, c)."""
        x, _ = self.lstm1(x)
        x, state = self.lstm2(x)
        ht, ct = state
        return x, ht, ct
    def forward(self, x):
        """
        CPC-style pass: encode a random-length prefix, predict the next
        `time_step` hidden states with the Wk heads, and score them against
        the actually-encoded future steps with InfoNCE.
        :return: (accuracy, nce, c) -- last-step contrastive accuracy,
            InfoNCE loss, and the final cell state of the last future step.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)  # normalise layout to (N, T, F)
        # Random prefix length; at least 20, leaving room for the future steps.
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_preds = [0] * self.time_step
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t, :])
        h, c = ht, ct
        for i in range(1, self.time_step + 1):
            # All future offsets are predicted from the same prefix state ht.
            c_preds[i - 1] = self.Wk[i - 1](ht)
            # Encode each single future frame; its cell state is the target.
            _, h, c = self.encodeRegress(x[:, t + i, :])
            c_latent.append(c)
        nce = 0
        for i in np.arange(0, self.time_step):
            # Similarity matrix between target states and predictions (batch x batch).
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        # NOTE(review): `correct` is the last iteration's value only.
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, c
    def get_reg_out(self, x):
        """Encode the full sequence and return the flattened lstm2 outputs."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :])
        # print(ht.shape)
        return xt.reshape((x.shape[0], -1))
class CPLSTM2(nn.Module):
    """Two-layer unidirectional LSTM encoder trained with a CPC-style
    InfoNCE objective.

    ``lstm1`` encodes the raw input; ``lstm2`` consumes lstm1's hidden
    sequence.  ``Wk`` holds one linear head per future step, used to
    predict future latent (cell-state) codes from the current summary
    state.  ``forward`` relies on the module-level ``device``.

    Args:
        dim: hidden size of both LSTMs.
        bn, task, depth, num_classes: stored for interface compatibility;
            not used inside this class.
        dropout: dropout prob passed to the LSTMs (no-op for 1 layer).
        input_dim: number of input features (default 76).
        time_step: number of future steps predicted for the NCE loss.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5):
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        super(CPLSTM2, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            dropout=self.drop,
            bidirectional=False,
            batch_first=True
        )
        # BUG FIX: lstm2 consumes lstm1's output, whose feature size is
        # `dim`, not `input_dim`.  The original `input_size=self.input_dim`
        # only worked when dim == input_dim (as CPLSTM3/CPLSTM4 already do
        # correctly); behavior is unchanged in that case.
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            dropout=self.drop,
            bidirectional=False,
            batch_first=True
        )
        self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
    def encodeRegress(self, x):
        """Encode x with both LSTMs; return (outputs, h_T, c_T) of lstm2."""
        x, _ = self.lstm1(x)
        x, state = self.lstm2(x)
        ht, ct = state
        return x, ht, ct
    def forward(self, x):
        """CPC training step; returns (accuracy, nce, last cell state).

        ``accuracy`` reflects only the last prediction step (``correct``
        is overwritten each iteration); ``nce`` accumulates over all steps.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        self.bs = x.shape[0]
        c_preds = [0] * self.time_step
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        h, c = ht, ct
        for i in range(1, self.time_step + 1):
            c_preds[i - 1] = self.Wk[i - 1](ht)
            # Each future frame is re-encoded from zero initial states.
            _, h, c = self.encodeRegress(x[:, t + i, :])
            c_latent.append(c)
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, c
    def get_reg_out(self, x):
        """Return the full lstm2 output sequence, shape (B, T, dim).

        (The per-call debug print of the output shape was removed.)
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        xt, ht, ct = self.encodeRegress(x[:, :, :])
        return xt
class CPLSTM3(nn.Module):
    """
    CPLSTM2 with dropout in non-recurrent layers and FC added.

    Two stacked unidirectional LSTMs (with input dropout) encode the
    sequence; per-step linear heads ``Wk`` followed by the shared MLP
    ``fcs`` predict future latent codes, scored with an InfoNCE loss.
    Linear weights are Xavier-initialized; recurrent weights orthogonal.
    Relies on a module-level ``device``.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5):
        # Smart way to filter the args
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        super(CPLSTM3, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        # Dropout applied to the inputs (non-recurrent) only.
        self.dropout = nn.Dropout(self.drop)
        self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.fcs = nn.Sequential(
            nn.Linear(self.dim, self.dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.dim, self.dim)
        )
        for model in [self.lstm1, self.lstm2, self.fcs]:
            self.initialize_weights(model)
        for model in self.Wk:
            self.initialize_weights(model)
    def init_hidden(self, bs, dim):
        """Fresh zero (h0, c0) pair on the global ``device``."""
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)
    def initialize_weights(self, model):
        """Xavier for Linear; orthogonal/Xavier weights + zero biases for RNNs."""
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)
    def encodeRegress(self, x, warm=False):
        """Dropout + both LSTMs; returns (outputs, h_T, c_T) of lstm2.

        With ``warm``, states are first warmed up on the leading 5 frames,
        then the full sequence is re-run from those warmed states.
        """
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.dim)
        if warm:
            x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
            _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
            # print([i.shape for i in state1],h0.shape,c0.shape)
            x, state1 = self.lstm1(x[:, :, :], state1)
            x, state2 = self.lstm2(x[:, :, :], state2)
            ht, ct = state2
        else:
            x, state1 = self.lstm1(x[:, :, :], (h0, c0))
            x, state2 = self.lstm2(x[:, :, :], (h0, c0))
            ht, ct = state2
        return x, ht, ct
    #
    #
    def forward(self, x):
        """CPC step; returns (accuracy, nce, c).

        ``accuracy`` uses only the last prediction step (``correct`` is
        overwritten per iteration); ``nce`` averages over all steps.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_preds = [0] * self.time_step
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t, :])
        h, c = ht, ct
        for i in range(1, self.time_step + 1):
            c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht))
            # Each future frame is re-encoded from zero initial states.
            _, h, c = self.encodeRegress(x[:, t + i, :])
            c_latent.append(self.fcs(c))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, c
    def get_reg_out(self, x, stack=False, warm=False, conti=False):
        """Downstream representation.

        ``stack``: flattened output sequence concatenated with c_T,
        shape (B, T*dim + dim); otherwise last-step output, shape (B, dim).
        ``conti`` is accepted but unused in this class.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :], warm)
        # print(ht.shape)
        # return xt.reshape((x.shape[0],-1))
        if stack: return torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)
        return xt[:, -1, :].squeeze(1)
class CPLSTM4(nn.Module):
    """
    CPLSTM4------use lstm as Wk
    mode=1 use hidden states when predict. else use cell states

    Two stacked LSTM encoders plus a third LSTM (``lstm3``) that rolls the
    context forward autoregressively in latent space, replacing the
    per-step linear heads of earlier variants.  ``switch`` selects the
    softmax axis of the InfoNCE scores (dim 0 when True, dim 1 otherwise).
    Relies on a module-level ``device``.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False, switch=True):
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        self.mode = mode
        self.noct = noct
        super(CPLSTM4, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm3 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        # Width of the stacked representation from get_reg_out; the 192
        # presumably is the fixed sequence length -- TODO confirm.
        if self.noct:
            self.stack_dim = self.dim * 192
        else:
            self.stack_dim = self.dim * 193
        self.dropout = nn.Dropout(self.drop)
        # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.switch = switch
        if self.switch == False:
            self.softmax = nn.Softmax(dim=1)
            self.lsoftmax = nn.LogSoftmax(dim=1)
        else:
            self.softmax = nn.Softmax(dim=0)
            self.lsoftmax = nn.LogSoftmax(dim=0)
        self.fcs = nn.Sequential(
            nn.Linear(self.dim, self.dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.dim, self.dim)
        )
        for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
            self.initialize_weights(model)
    def init_hidden(self, bs, dim):
        """Fresh zero (h0, c0) pair on the global ``device``."""
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)
    def freeze_encode(self):
        """Freeze the first encoder LSTM's parameters."""
        for param in self.lstm1.parameters():
            param.requires_grad = False
    def initialize_weights(self, model):
        """Xavier for Linear; orthogonal/Xavier weights + zero biases for RNNs."""
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)
    def encodeRegress(self, x, warm=False, conti=False):
        """Dropout + lstm1 + lstm2; returns (outputs, h_T, c_T) of lstm2.

        ``conti``: let PyTorch default-initialize states; ``warm``: warm
        up states on the first 5 frames then re-run the full sequence;
        otherwise start from explicit zero states.
        """
        bs = x.shape[0]
        x = self.dropout(x)
        if conti:
            x, state1 = self.lstm1(x)
            x, state2 = self.lstm2(x)
            ht, ct = state2
            return x, ht, ct
        (h0, c0) = self.init_hidden(bs, self.dim)
        if warm:
            x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
            _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
            # print([i.shape for i in state1],h0.shape,c0.shape)
            x, state1 = self.lstm1(x[:, :, :], state1)
            x, state2 = self.lstm2(x[:, :, :], state2)
            ht, ct = state2
        else:
            x, state1 = self.lstm1(x[:, :, :], (h0, c0))
            x, state2 = self.lstm2(x[:, :, :], (h0, c0))
            ht, ct = state2
        return x, ht, ct
    #
    def predict(self, z, hz, cz, ts, mode=1):
        """Roll lstm3 forward ``ts`` steps from states (hz, cz), feeding its
        own output back as the next input, starting from ``z``.

        mode==1 collects hidden states, else cell states.  Buffer is sized
        by ``self.time_step`` -- assumes ts <= time_step.
        """
        h, c = hz, cz
        x_previous = z
        c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
        for i in range(ts):
            x_pred, (h, c) = self.lstm3(x_previous, (h, c))
            if mode:
                c_preds[i, :, :] = h
            else:
                c_preds[i, :, :] = c  # mode = 0
            x_previous = x_pred
        return c_preds
    def forward(self, x):
        """CPC step with lstm3 rollout predictions; returns (accuracy, nce, c).

        ``accuracy`` reflects only the last step (``correct`` overwritten
        per iteration).
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        for i in range(1, self.time_step + 1):
            _, h, c = self.encodeRegress(x[:, t + i, :])  # init with zeros
            c_latent.append(self.fcs(c))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, c
    def get_reg_out(self, x, stack=False, warm=False, conti=False):
        """Downstream representation.

        stack & noct: dropout(flattened outputs); stack: flattened outputs
        concatenated with c_T (then dropout); default: last-step output.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
        # print(ht.shape)
        # return xt.reshape((x.shape[0],-1))
        if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
        if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
        return xt[:, -1, :].squeeze(1)
class CPLSTM4C(nn.Module):
    """
    re-init hidden at time point t
    mode=1 use hidden states when predict. else use cell states

    Variant of CPLSTM4: future frames are encoded frame-by-frame with
    encoder states carried across steps (re-initialized to zeros at the
    split point t), instead of re-encoding each frame from scratch.
    Relies on a module-level ``device``.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False):
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        self.mode = mode
        self.noct = noct
        super(CPLSTM4C, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm3 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        # Width of the stacked representation from get_reg_out; the 192
        # presumably is the fixed sequence length -- TODO confirm.
        if self.noct:
            self.stack_dim = self.dim * 192
        else:
            self.stack_dim = self.dim * 193
        self.dropout = nn.Dropout(self.drop)
        # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.fcs = nn.Sequential(
            nn.Linear(self.dim, self.dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.dim, self.dim)
        )
        for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
            self.initialize_weights(model)
    def init_hidden(self, bs, dim):
        """Fresh zero (h0, c0) pair on the global ``device``."""
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)
    def initialize_weights(self, model):
        """Xavier for Linear; orthogonal/Xavier weights + zero biases for RNNs."""
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)
    def encodeRegress(self, x, warm=False, conti=False):
        """Dropout + lstm1 + lstm2; returns (outputs, h_T, c_T) of lstm2.

        ``conti``: PyTorch default state init; ``warm``: warm up on the
        first 5 frames then re-run the full sequence; else zero states.
        """
        bs = x.shape[0]
        x = self.dropout(x)
        if conti:
            x, state1 = self.lstm1(x)
            x, state2 = self.lstm2(x)
            ht, ct = state2
            return x, ht, ct
        (h0, c0) = self.init_hidden(bs, self.dim)
        if warm:
            x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
            _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
            # print([i.shape for i in state1],h0.shape,c0.shape)
            x, state1 = self.lstm1(x[:, :, :], state1)
            x, state2 = self.lstm2(x[:, :, :], state2)
            ht, ct = state2
        else:
            x, state1 = self.lstm1(x[:, :, :], (h0, c0))
            x, state2 = self.lstm2(x[:, :, :], (h0, c0))
            ht, ct = state2
        return x, ht, ct
    #
    def predict(self, z, hz, cz, ts, mode=1):
        """Roll lstm3 forward ``ts`` steps from (hz, cz), feeding its own
        output back in.  mode==1 collects hidden states, else cell states.
        Buffer sized by ``self.time_step`` -- assumes ts <= time_step.
        """
        h, c = hz, cz
        x_previous = z
        c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
        for i in range(ts):
            x_pred, (h, c) = self.lstm3(x_previous, (h, c))
            if mode:
                c_preds[i, :, :] = h
            else:
                c_preds[i, :, :] = c
            x_previous = x_pred
        return c_preds
    def forward(self, x):
        """CPC step with state-carrying future encoding; returns
        (accuracy, nce, None).  ``accuracy`` is from the last step only."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        (h0, c0) = self.init_hidden(self.bs, self.dim)
        h1, c1 = h0, c0
        h2, c2 = h0, c0
        for i in range(1, self.time_step + 1):
            # NOTE(review): the original author left a "# BUG : self.time_step ? i"
            # marker here without detail; verify the indexing/state handling
            # of this loop before relying on it.
            tmp, (h1, c1) = self.lstm1(x[:, t + i, :], (h1, c1))
            _, (h2, c2) = self.lstm2(tmp, (h2, c2))
            c_latent.append(self.fcs(c2))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, None
    def get_reg_out(self, x, stack=False, warm=False, conti=False):
        """Downstream representation; same variants as CPLSTM4.get_reg_out."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
        # print(ht.shape)
        # return xt.reshape((x.shape[0],-1))
        if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
        if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
        return xt[:, -1, :].squeeze(1)
class CPLSTM3H(CPLSTM3):
    """CPLSTM3 variant that contrasts hidden states (h) instead of cell
    states (c) as the latent targets; everything else is inherited."""
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5):
        super(CPLSTM3H, self).__init__(dim, bn, dropout, task,
                                       depth, num_classes,
                                       input_dim, time_step)
    def forward(self, x):
        """Same CPC step as CPLSTM3.forward, but appends fcs(h) rather than
        fcs(c) as the positive sample.  Returns (accuracy, nce, c);
        accuracy from the last step only."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_preds = [0] * self.time_step
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t, :])
        h, c = ht, ct
        for i in range(1, self.time_step + 1):
            c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht))
            _, h, c = self.encodeRegress(x[:, t + i, :])
            c_latent.append(self.fcs(h))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, c
class CPLSTM4H(CPLSTM4):
    """CPLSTM4 variant that contrasts lstm1 frame embeddings directly.

    lstm1 embeds the whole sequence once; lstm2 summarizes the prefix up
    to a random t; lstm3 rolls predictions forward, which are scored
    against the lstm1 embeddings of the next ``time_step`` frames."""
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1):
        super(CPLSTM4H, self).__init__(dim, bn, dropout, task,
                                       depth, num_classes,
                                       input_dim, time_step, mode)
    def forward(self, x):
        """Returns (accuracy, nce, None); accuracy from the last step only."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        # (Earlier per-step re-encoding approach kept below for reference.)
        # xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        # c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        #
        # # for i in range(1, self.time_step + 1):
        # x, (h, c) = self.lstm1(x[:, t + 1:t+self.time_step+1, :])
        # c_latent=self.fcs(x)
        z_embeds, _ = self.lstm1(x)
        _, (hidden_ct, cell_ct) = self.lstm2(z_embeds[:, :t + 1, :])
        z_preds_time_step = self.fcs(
            self.predict(hidden_ct.transpose(0, 1), hidden_ct, cell_ct, self.time_step, self.mode))
        z_embeds_time_step = z_embeds[:, t + 1:t + self.time_step + 1, :]
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(z_embeds_time_step[:, i, :].squeeze(0),
                             torch.transpose(z_preds_time_step[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, None
class CPAELSTM41(nn.Module):
    """
    CPLSTM4------use lstm as Wk
    mode=1 use hidden states when predict. else use cell states

    Autoencoding variant: lstm3 rollout predictions are mapped back to the
    input feature space by ``de_fc`` and contrasted (via the shared MLP
    ``fcs``) against the raw future input frames.  Relies on a
    module-level ``device``.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False):
        self.dim = dim
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        self.mode = mode
        self.noct = noct
        super(CPAELSTM41, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.lstm3 = nn.LSTM(
            input_size=dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True)
        self.dropout = nn.Dropout(self.drop)
        # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        # Maps latent (dim) back to input feature space (input_dim).
        self.de_fc = nn.Sequential(nn.Linear(self.dim, self.input_dim),
                                   nn.ReLU(inplace=True),
                                   nn.Linear(self.input_dim, self.input_dim),
                                   nn.ReLU(inplace=True),
                                   )
        # Shared projection applied to both predictions and raw frames.
        self.fcs = nn.Sequential(
            nn.Linear(self.input_dim, self.input_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.input_dim, self.input_dim)
        )
        for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
            self.initialize_weights(model)
    def init_hidden(self, bs, dim):
        """Fresh zero (h0, c0) pair on the global ``device``."""
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)
    def initialize_weights(self, model):
        """Xavier for Linear; orthogonal/Xavier weights + zero biases for RNNs."""
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)
    def encodeRegress(self, x, warm=False, conti=False):
        """Dropout + lstm1 + lstm2; returns (outputs, h_T, c_T) of lstm2.

        ``conti``: PyTorch default state init; ``warm``: warm up on the
        first 5 frames then re-run the full sequence; else zero states.
        """
        bs = x.shape[0]
        x = self.dropout(x)
        if conti:
            x, state1 = self.lstm1(x)
            x, state2 = self.lstm2(x)
            ht, ct = state2
            return x, ht, ct
        (h0, c0) = self.init_hidden(bs, self.dim)
        if warm:
            x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
            _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
            # print([i.shape for i in state1],h0.shape,c0.shape)
            x, state1 = self.lstm1(x[:, :, :], state1)
            x, state2 = self.lstm2(x[:, :, :], state2)
            ht, ct = state2
        else:
            x, state1 = self.lstm1(x[:, :, :], (h0, c0))
            x, state2 = self.lstm2(x[:, :, :], (h0, c0))
            ht, ct = state2
        return x, ht, ct
    #
    #
    def predict(self, z, hz, cz, ts, mode=1):
        """Roll lstm3 forward ``ts`` steps from (hz, cz), feeding its own
        output back in.  mode==1 collects hidden states, else cell states.
        Buffer sized by ``self.time_step`` -- assumes ts <= time_step.
        """
        h, c = hz, cz
        x_previous = z
        c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
        for i in range(ts):
            x_pred, (h, c) = self.lstm3(x_previous, (h, c))
            if mode:
                c_preds[i, :, :] = h
            else:
                c_preds[i, :, :] = c
            x_previous = x_pred
        return c_preds
    def forward(self, x):
        """CPC step in input space: predictions are decoded via de_fc and
        contrasted against fcs-projected raw future frames.  Returns
        (accuracy, nce, None); accuracy from the last step only."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        x_preds = self.fcs(self.de_fc(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, None
    def get_reg_out(self, x, stack=False, warm=False, conti=False):
        """Downstream representation; same variants as CPLSTM4.get_reg_out."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
        # print(ht.shape)
        # return xt.reshape((x.shape[0],-1))
        if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
        if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
        return xt[:, -1, :].squeeze(1)
class CPAELSTM42(CPAELSTM41):
    """
    two layer lstm as decoder to reconstruct x.

    Overrides lstm3 to operate in input space (input_dim -> input_dim)
    and de_fc to a single Linear+ReLU that maps latent states into that
    space before the rollout.  NOTE: ``encodeRegress``/``get_reg_out``
    here drop the warm/conti options of the parent class.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1):
        super(CPAELSTM42, self).__init__(dim, bn, dropout, task,
                                         depth, num_classes,
                                         input_dim, time_step, mode)
        # Redefined decoder LSTM working in input feature space.
        self.lstm3 = nn.LSTM(
            input_size=self.input_dim,
            num_layers=1,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True)
        #
        # self.dropout=nn.Dropout(self.drop)
        # # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
        # self.softmax = nn.Softmax(dim=0)
        # self.lsoftmax = nn.LogSoftmax(dim=0)
        self.de_fc = nn.Sequential(
            nn.Linear(self.dim, self.input_dim),
            nn.ReLU(inplace=True)
        )
        # self.fcs=nn.Sequential(
        #     nn.Linear(self.input_dim,self.input_dim),
        #     nn.ReLU(inplace=True),
        #
        #     nn.Linear(self.input_dim,self.input_dim)
        # )
        # for model in [self.lstm1,self.lstm2,self.lstm3,self.fcs]:
        #     self.initialize_weights(model)
    def init_hidden(self, bs, dim):
        """Fresh zero (h0, c0) pair on the global ``device``."""
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)
    # NOTE(review): the original author flagged "# BUG" here without detail.
    # This initialize_weights duplicates the parent implementation -- verify
    # whether the flag refers to the redefined lstm3/de_fc never being
    # re-initialized (the init loop above is commented out).
    def initialize_weights(self, model):
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)
    def encodeRegress(self, x):
        """Dropout + lstm1 + lstm2 from zero states; returns (out, h_T, c_T)."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.dim)
        x, _ = self.lstm1(x, (h0, c0))
        x, state = self.lstm2(x, (h0, c0))
        ht, ct = state
        return x, ht, ct
    #
    #
    def predict(self, z, hz, cz, ts, mode=1):
        """Roll lstm3 in input space; states/seed are mapped there by de_fc.

        mode==1 collects hidden states, else cell states.  Buffer sized
        by ``self.time_step`` -- assumes ts <= time_step.
        """
        h, c = self.de_fc(hz), self.de_fc(cz)
        x_previous = self.de_fc(z)
        x_preds = torch.empty((self.time_step, self.bs, self.input_dim)).to(device)
        for i in range(ts):
            x_pred, (h, c) = self.lstm3(x_previous, (h, c))
            if mode:
                x_preds[i, :, :] = h
            else:
                x_preds[i, :, :] = c
            x_previous = x_pred
        return x_preds
    def forward(self, x):
        """CPC step in input space; returns (accuracy, nce, None).
        ``accuracy`` is from the last step only."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        x_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        # for i in range(1,self.time_step+1):
        #     _, h,c=self.encodeRegress(x[:,t+i,:])
        #     c_latent.append(self.fcs(c))
        nce = 0
        for i in np.arange(0, self.time_step):
            total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.bs).to(device)))
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.bs * self.time_step
        accuracy = 1. * correct.item() / self.bs
        return accuracy, nce, None
    def get_reg_out(self, x):
        """Return the last-step lstm2 output, shape (B, dim)."""
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # print('reshape x to ',x.shape)
        xt, ht, ct = self.encodeRegress(x[:, :, :])
        # print(ht.shape)
        # return xt.reshape((x.shape[0],-1))
        return xt[:, -1, :].squeeze(1)
class CPAELSTM43(CPLSTM4H):
    """
    add decoder constraint in loss function

    Adds ``lstm4`` (latent -> input space) so the caller can combine the
    InfoNCE loss with a reconstruction loss on the returned ``x_hat``.
    NOTE: ``compute_nce`` uses ``.cuda()`` directly rather than the
    module-level ``device`` used elsewhere.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1):
        super(CPAELSTM43, self).__init__(dim, bn, dropout, task,
                                         depth, num_classes,
                                         input_dim, time_step, mode)
        # Decoder LSTM: latent (dim) back to input feature space.
        self.lstm4 = nn.LSTM(
            input_size=self.dim,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True)
    def compute_nce(self, encode_samples, pred):
        '''
        -----------------------------------------------------------------------------------
        --------------Calculate NCE loss--------------
        -----------------------------------------------------------------------------------
        ...argument:
        ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
        ......pred : Wk[i]( C_t )

        Returns (nce, accuracy); accuracy reflects the last step only,
        since ``correct`` is overwritten per iteration.
        '''
        nce = 0  # average over time_step and batch
        self.batch_size = self.bs
        for i in np.arange(0, self.time_step):
            try:
                total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            except IndexError:
                print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
                raise AssertionError
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
    def encode(self, x):
        """Dropout + lstm1 only; returns the embedding sequence."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.dim)
        x, _ = self.lstm1(x, (h0, c0))
        return x
    def decode(self, x):
        """Dropout + lstm4; maps latent sequence back to input space."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.input_dim)
        x, _ = self.lstm4(x, (h0, c0))
        return x
    def forward(self, x):
        """Returns (x_hat, nce, acc): a reconstruction of the full input
        plus the InfoNCE loss/accuracy over the ``time_step`` predictions.
        Note the return order differs from the (accuracy, nce, ...) order
        used by the other CPLSTM variants.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        x_ori = x
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        # for i in range(1, self.time_step + 1):
        #     x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :])
        z, (h, c) = self.lstm1(x)
        c_latent = self.fcs(z[:, t + 1:t + self.time_step + 1, :])  # with memory
        x_hat = self.decode(z)
        nce, acc = self.compute_nce(c_latent.transpose(0, 1), c_preds)
        return x_hat, nce, acc
class CPAELSTM44(CPLSTM4):
    """
    add decoder constraint in loss function
    sim: similarity function. 'dot' for dot product, 'cosine' for cosine similarity

    Combines the CPLSTM4 contrastive objective with an lstm4 decoder that
    reconstructs the input from the latent sequence.  ``sym`` enables a
    symmetric (two-direction) InfoNCE term.  NOTE(review): ``pred_future``
    references ``self.check_input``/``self.max_len``/``self.cal_att2``,
    none of which are defined in this class or its visible bases, and
    unpacks 4 values from ``encodeRegress`` (which returns 3) -- it will
    raise if called as written.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, t_range=None,mode=1,sym=False, sim='dot',temperature=1,pred_mode='step'):
        super(CPAELSTM44, self).__init__(dim, bn, dropout, task,
                                         depth, num_classes,
                                         input_dim, time_step, mode)
        # Decoder LSTM: latent (dim) back to input feature space.
        self.lstm4 = nn.LSTM(
            input_size=self.dim,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True)
        self.sym=sym
        self.sim=sim
        self.temperature=temperature
        self.t_range=t_range
        self.pred_mode = pred_mode
        if self.pred_mode=='future':
            self.W_pred = nn.Linear(self.dim, self.dim)
    def sim_func(self,a,b):
        """Row-wise similarity matrix: temperature-scaled cosine or dot.

        NOTE(review): the prints fire on every call (every training step).
        """
        if self.sim=='cosine':
            print('use cosine')
            a=a/a.norm(dim=-1,keepdim=True)
            b=b/b.norm(dim=-1,keepdim=True)
            a=self.temperature*a
            b=self.temperature*b
            return torch.mm(a,b.T)
        elif self.sim=='dot':
            print('use dot')
            return torch.mm(a,b.T)
    def compute_nce(self, encode_samples, pred):
        '''
        -----------------------------------------------------------------------------------
        --------------Calculate NCE loss--------------
        -----------------------------------------------------------------------------------
        ...argument:
        ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
        ......pred : Wk[i]( C_t )

        Returns (nce, accuracy); accuracy reflects the last step only.
        With ``sym``, averages log-softmax over both axes of the score matrix.
        '''
        nce = 0  # average over time_step and batch
        self.batch_size = self.bs
        for i in np.arange(0, self.time_step):
            try:
                total = self.sim_func(encode_samples[i], pred[i])  # e.g. size 8*8
            except IndexError:
                print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
                raise AssertionError
            # print(total)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            if self.sym:
                nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor
            else:
                nce += torch.sum(torch.diag(self.lsoftmax(total)))
        nce /= -1. * self.batch_size * self.time_step
        accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy
    def encode(self, x):
        """Dropout + lstm1 only; returns the embedding sequence."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.dim)
        x, _ = self.lstm1(x, (h0, c0))
        return x
    def decode(self, x):
        """Dropout + lstm4; maps latent sequence back to input space."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.input_dim)
        x, _ = self.lstm4(x, (h0, c0))
        return x
    def forward(self, x):
        """Returns (x_hat, nce, acc): reconstruction + InfoNCE over the
        ``time_step`` rollout predictions.

        NOTE(review): ``fcs`` is applied twice to the future codes (once
        when building z_after_t, again for c_embeds) -- confirm intended.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        # print('shape of x is ' ,x.shape)
        # Assumes 76 input features: (B, 76, T) -> (B, T, 76). TODO confirm.
        if x.shape[1] == 76: x = x.transpose(1, 2)
        x_ori = x
        t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
        # print('reshape x to ',x.shape)
        self.bs = x.shape[0]
        c_latent = []
        xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
        c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
        # for i in range(1, self.time_step + 1):
        #     x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :])
        z_after_t = []
        for i in range(1, self.time_step + 1):
            _, h, c = self.encodeRegress(x[:, t + i, :])
            z_after_t.append(self.fcs(c))
        z_after_t = torch.cat(z_after_t, 0)
        c_embeds = self.fcs(z_after_t)
        z_all = torch.cat((xt, z_after_t.transpose(0, 1)), 1)
        x_hat = self.decode(z_all)
        nce, acc = self.compute_nce(c_embeds, c_preds)
        return x_hat, nce, acc
    def pred_future(self, x):
        """Past-predicts-future objective.

        NOTE(review): appears unfinished -- see class docstring; the calls
        below reference members not defined in this class hierarchy and
        mismatch encodeRegress's 3-value return.
        """
        x=self.check_input(x)
        # print(self.t_range)
        # print(self.max_len)
        t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1])
        # print(t_range)
        # t_range = (self.max_len *2// 3, 4 * self.max_len // 5)
        # print(x.shape)
        x_ori = x
        if self.max_len>192: t=192
        else:
            t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series
        # print('t is %s'%t)
        # self.bs = x.shape[0]
        latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
        latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
        del x
        hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
        latent_all = torch.cat((latent_past, latent_future), 1)
        del latent_future,latent_past
        latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
        del latent_all
        x_hat = self.decode(latent_all_attention)
        nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
        return x_hat, nce, acc
class CPAELSTM44_AT(CPLSTM4):
    """CPC-style LSTM autoencoder with linear attention gates.

    A decoder-reconstruction constraint is added alongside the InfoNCE loss.
    pred_mode:
        'step'   -- predict the next `time_step` latents (CPC style)
        'future' -- split the series at a random point and predict the
                    future summary from the past summary
    """

    def __init__(self, dim, bn, dropout, task, t_range=None,
                 depth=2, num_classes=1,
                 input_dim=76, flat_attention=False, time_step=5, sim='dot',
                 temperature=1, mode=1, switch=True, pred_mode='step', sym=False):
        super(CPAELSTM44_AT, self).__init__(dim, bn, dropout, task,
                                            depth, num_classes,
                                            input_dim, time_step, mode, switch)
        # Decoder LSTM: maps latents (dim) back to the input feature space.
        self.lstm4 = nn.LSTM(
            input_size=self.dim,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True)
        self.att1 = nn.Linear(self.dim, self.dim)  # gate before the regressor
        self.att2 = nn.Linear(self.dim, self.dim)  # gate before the decoder
        self.flat_attention = flat_attention
        self.sim = sim                  # NCE similarity: 'dot' or 'cosine'
        self.temperature = temperature  # scale used by the cosine similarity
        self.t_range = t_range          # (low, high) split fractions of max_len
        self.pred_mode = pred_mode
        if self.pred_mode == 'future':
            self.W_pred = nn.Linear(self.dim, self.dim)
        self.sym = sym  # whether to use the symmetric NCE loss

    def cal_att1(self, x):
        """Linear attention gate applied before the regressor LSTM."""
        if self.flat_attention:
            x = self.att1(x)
            assert x.shape[-1] == self.dim
            # NOTE(review): the original constructed nn.Softmax(dim=-1) here
            # without applying it (a no-op); the dead statement was removed.
            # Confirm whether `x = nn.Softmax(dim=-1)(x)` was intended.
        else:
            x = self.att1(x)
        return x

    def cal_att2(self, x):
        """Linear attention gate applied before the decoder."""
        if self.flat_attention:
            x = self.att2(x)
            # NOTE(review): as in cal_att1, the original's bare
            # nn.Softmax(dim=-1) was a no-op and has been removed.
        else:
            x = self.att2(x)
        return x

    def sim_func(self, a, b):
        """Pairwise similarity matrix between two (B, dim) batches -> (B, B).

        Debug prints were removed from this hot path.
        """
        if self.sim == 'cosine':
            a = a / a.norm(dim=-1, keepdim=True)
            b = b / b.norm(dim=-1, keepdim=True)
            # NOTE(review): both operands are scaled, so scores carry a
            # temperature**2 factor -- confirm this is intended.
            a = self.temperature * a
            b = self.temperature * b
            return torch.mm(a, b.T)
        elif self.sim == 'dot':
            return torch.mm(a, b.T)

    def compute_nce(self, encode_samples, pred):
        """InfoNCE loss and in-batch accuracy.

        :param encode_samples: (time_step, batch, dim) target latents
        :param pred: (time_step, batch, dim) predicted latents
        :returns: (nce, accuracy)
        """
        nce = 0  # averaged over time_step and batch
        self.batch_size = self.bs
        if self.pred_mode == 'step':
            for i in np.arange(0, self.time_step):
                try:
                    total = self.sim_func(encode_samples[i], pred[i])  # (B, B)
                except IndexError:
                    print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
                    raise AssertionError
                # BUG FIX: `correct` was never assigned in this branch, so the
                # accuracy line below raised NameError; mirror the sibling
                # classes and count diagonal argmax hits.
                correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                             torch.arange(0, self.batch_size).cuda()))
                if self.sym:
                    nce += 1 / 2 * (torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total))))
                                    + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))  # nce is a tensor
                else:
                    nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
            nce /= -1. * self.batch_size * self.time_step
            # accuracy reflects the last predicted step only
            accuracy = 1. * correct.item() / self.batch_size
        elif self.pred_mode == 'future':
            total = self.sim_func(encode_samples[0], pred[0])
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            if self.sym:
                nce += 1 / 2 * (torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total))))
                                + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))  # nce is a tensor
            else:
                nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
            nce /= -1. * self.batch_size
            accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy

    def encodeRegress(self, x, warm=False, conti=False):
        """Encode x with lstm1, gate with att1, regress with lstm2.

        :returns: (latents, regs, ht, ct)
        """
        bs = x.shape[0]
        x = self.dropout(x)
        latents, state1 = self.lstm1(x)
        del x
        latents_to_pred = torch.mul(latents, self.cal_att1(latents))
        regs, state2 = self.lstm2(latents_to_pred)
        del latents_to_pred
        ht, ct = state2
        return latents, regs, ht, ct

    def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
        """Return the last-step regressor output (no attention gate)."""
        bs = x.shape[0]
        x = self.dropout(x)
        latents, state1 = self.lstm1(x)
        regs, state2 = self.lstm2(latents)
        ht, ct = state2
        return regs[:, -1, :].squeeze(1)

    def encode(self, x):
        """Run the encoder LSTM only; returns (outputs, h, c)."""
        bs = x.shape[0]
        x = self.dropout(x)
        x, (h, c) = self.lstm1(x)
        return x, h, c

    def decode(self, x):
        """Decode latents back to the input feature space via lstm4."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.input_dim)
        x, _ = self.lstm4(x, (h0, c0))
        return x

    def check_input(self, x):
        """Normalise input to (batch, time, features); set self.bs / self.max_len."""
        if type(x) == dict:
            dic = x
            x = dic['data'].squeeze(0)
            self.max_len = min(dic['length'])
            self.bs = x.shape[0]
        elif len(x.shape) == 4:
            x = x.squeeze(1)
            self.bs = x.shape[0]
            self.max_len = x.shape[1]
        elif x.shape[1] == 76:
            # (batch, features, time) -> (batch, time, features)
            x = x.transpose(1, 2)
            self.bs = x.shape[0]
            self.max_len = x.shape[1]
        else:
            self.max_len = x.shape[1]
            self.bs = x.shape[0]
        return x

    def pred_future(self, x):
        """Split at a random t, predict the future summary from the past,
        and decode a reconstruction.  Returns (x_hat, nce, acc)."""
        x = self.check_input(x)
        # Split-point range as fractions of the (batch-min) sequence length.
        t_range = (self.max_len * self.t_range[0], self.max_len * self.t_range[1])
        if self.max_len > 192:
            t = 192  # cap the split for very long sequences
        else:
            t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long()
        latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
        latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
        del x
        hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
        latent_all = torch.cat((latent_past, latent_future), 1)
        del latent_future, latent_past
        latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
        del latent_all
        x_hat = self.decode(latent_all_attention)
        nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
        return x_hat, nce, acc

    def pred_timestep(self, x):
        """CPC step prediction: predict `time_step` future latents from the
        past and decode a reconstruction.  Returns (x_hat, nce, acc)."""
        x = self.check_input(x)
        t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long()
        latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
        latent_preds = self.fcs(
            self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
        latent_future = []
        for i in range(1, self.time_step + 1):
            _, h, c = self.encode(x[:, t + i, :])
            latent_future.append(self.fcs(c[-1]))
        latent_future = torch.stack(latent_future, 0)
        latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
        latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
        x_hat = self.decode(latent_all_attention)
        nce, acc = self.compute_nce(latent_future, latent_preds)
        return x_hat, nce, acc

    def forward(self, x):
        """Dispatch on pred_mode; returns (x_hat, nce, acc)."""
        if self.pred_mode == 'future':
            return self.pred_future(x)
        return self.pred_timestep(x)
class SelfAttention(nn.Module):
    """Self-attention over the feature (last) dimension.

    For x of shape (batch, length, in_dim): energy = K^T Q / sqrt(in_dim)
    has shape (batch, in_dim, in_dim); a softmax over the last dim gives the
    attention, and the output V @ attention keeps the input shape.
    (Note this attends across *features*, not across time steps.)
    """

    def __init__(self, in_dim):
        super(SelfAttention, self).__init__()
        self.chanel_in = in_dim  # (sic) name kept for checkpoint compatibility
        self.Wq = nn.Linear(in_dim, in_dim)
        self.Wk = nn.Linear(in_dim, in_dim)
        self.Wv = nn.Linear(in_dim, in_dim)
        self.gamma = in_dim  # scaling: scores are divided by sqrt(gamma)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """x: (batch, length, in_dim) -> (batch, length, in_dim).

        The original docstring claimed a 4-D B x C x W x H input, which the
        code never handled; it also unpacked x.size() into unused locals --
        both removed.
        """
        proj_query = self.Wq(x)
        proj_key = self.Wk(x)
        # (B, D, L) @ (B, L, D) -> (B, D, D): attention across feature dims.
        energy = torch.matmul(proj_key.transpose(1, 2), proj_query) / (self.gamma ** 0.5)
        attention = self.softmax(energy)
        proj_value = self.Wv(x)
        return torch.matmul(proj_value, attention)
class CPAELSTM44_selfAT(CPLSTM4):
    """CPC-style LSTM autoencoder using SelfAttention modules.

    Adds a decoder-reconstruction constraint alongside the InfoNCE loss.
    pred_mode:
        'step'   -- per-timestep CPC prediction
        'future' -- predict the future summary from the past summary
    """

    def __init__(self, dim, bn, dropout, task, t_range=None,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, switch=True, pred_mode='step'):
        super(CPAELSTM44_selfAT, self).__init__(dim, bn, dropout, task,
                                                depth, num_classes,
                                                input_dim, time_step, mode, switch)
        # Decoder LSTM: maps latents (dim) back to the input feature space.
        self.lstm4 = nn.LSTM(
            input_size=self.dim,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True)
        self.att1 = SelfAttention(self.dim)  # before the regressor
        self.att2 = SelfAttention(self.dim)  # before the decoder
        self.t_range = t_range               # (low, high) split fractions
        self.pred_mode = pred_mode
        if self.pred_mode == 'future':
            # BUG FIX: pred_future() applies self.W_pred, but the original
            # never created it for this class (only CPAELSTM44_AT did), so
            # pred_mode='future' raised AttributeError.
            self.W_pred = nn.Linear(self.dim, self.dim)

    def compute_nce(self, encode_samples, pred):
        """InfoNCE loss and in-batch accuracy.

        :param encode_samples: (time_step, batch, dim) target latents
        :param pred: (time_step, batch, dim) predicted latents
        :returns: (nce, accuracy)
        """
        nce = 0  # averaged over time_step and batch
        self.batch_size = self.bs
        if self.pred_mode == 'step':
            for i in np.arange(0, self.time_step):
                try:
                    total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # (B, B)
                except IndexError:
                    print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
                    raise AssertionError
                correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                             torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
                nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
            nce /= -1. * self.batch_size * self.time_step
            # accuracy reflects the last predicted step only
            accuracy = 1. * correct.item() / self.batch_size
        elif self.pred_mode == 'future':
            total = torch.mm(encode_samples[0], torch.transpose(pred[0], 0, 1))
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
            nce = torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
            nce /= -1. * self.batch_size
            accuracy = 1. * correct.item() / self.batch_size
        return nce, accuracy

    def encodeRegress(self, x, warm=False, conti=False):
        """Encode with lstm1, self-attend, regress with lstm2.

        :returns: (latents, regs, ht, ct)
        """
        bs = x.shape[0]
        x = self.dropout(x)
        latents, state1 = self.lstm1(x)
        del x
        latents_to_pred = self.att1(latents)
        regs, state2 = self.lstm2(latents_to_pred)
        del latents_to_pred
        ht, ct = state2
        return latents, regs, ht, ct

    def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
        """Return the last-step regressor output (no attention)."""
        bs = x.shape[0]
        x = self.dropout(x)
        latents, state1 = self.lstm1(x)
        regs, state2 = self.lstm2(latents)
        ht, ct = state2
        return regs[:, -1, :].squeeze(1)

    def encode(self, x):
        """Run the encoder LSTM only; returns (outputs, h, c)."""
        bs = x.shape[0]
        x = self.dropout(x)
        x, (h, c) = self.lstm1(x)
        return x, h, c

    def decode(self, x):
        """Decode latents back to the input feature space via lstm4."""
        bs = x.shape[0]
        x = self.dropout(x)
        (h0, c0) = self.init_hidden(bs, self.input_dim)
        x, _ = self.lstm4(x, (h0, c0))
        return x

    def check_input(self, x):
        """Normalise input to (batch, time, features); set self.bs / self.max_len."""
        if type(x) == dict:
            dic = x
            x = dic['data'].squeeze(0)
            self.max_len = min(dic['length'])
            self.bs = x.shape[0]
        elif len(x.shape) == 4:
            x = x.squeeze(1)
            self.bs = x.shape[0]
            self.max_len = x.shape[1]
        elif x.shape[1] == 76:
            # (batch, features, time) -> (batch, time, features)
            x = x.transpose(1, 2)
            self.bs = x.shape[0]
            self.max_len = x.shape[1]
        else:
            self.max_len = x.shape[1]
            self.bs = x.shape[0]
        return x

    def pred_future(self, x):
        """Split at a random t, predict the future summary from the past,
        and decode a reconstruction.  Returns (x_hat, nce, acc)."""
        x = self.check_input(x)
        # Split-point range as fractions of the (batch-min) sequence length.
        t_range = (self.max_len * self.t_range[0], self.max_len * self.t_range[1])
        if self.max_len > 192:
            t = 192  # cap the split for very long sequences
        else:
            t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long()
        latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
        latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
        del x
        hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
        latent_all = torch.cat((latent_past, latent_future), 1)
        del latent_future, latent_past
        latent_all_attention = self.att2(latent_all)
        del latent_all
        x_hat = self.decode(latent_all_attention)
        nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
        return x_hat, nce, acc

    def pred_timestep(self, x):
        """CPC step prediction: predict `time_step` future latents from the
        past and decode a reconstruction.  Returns (x_hat, nce, acc)."""
        x = self.check_input(x)
        t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long()
        latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
        latent_preds = self.fcs(
            self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
        latent_future = []
        for i in range(1, self.time_step + 1):
            _, h, c = self.encode(x[:, t + i, :])
            latent_future.append(self.fcs(c[-1]))
        latent_future = torch.stack(latent_future, 0)
        latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
        latent_all_attention = self.att2(latent_all)
        x_hat = self.decode(latent_all_attention)
        nce, acc = self.compute_nce(latent_future, latent_preds)
        return x_hat, nce, acc

    def forward(self, x):
        """Dispatch on pred_mode; returns (x_hat, nce, acc)."""
        if self.pred_mode == 'future':
            return self.pred_future(x)
        return self.pred_timestep(x)
# class CPAELSTM45(CPLSTM4):
# """
# CPLSTM4+ CPAE4
# """
#
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1):
# super(CPAELSTM45, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.fcs3 = nn.Sequential(
# nn.Linear(self.input_dim, self.input_dim),
# nn.ReLU(inplace=True),
# nn.Linear(self.input_dim, self.input_dim)
# )
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.dim)
# x, _ = self.lstm1(x, (h0, c0))
# return x
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def forward(self, x):
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# x_ori = x
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# self.bs = x.shape[0]
# xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
#
# z_after_t = []
#
# for i in range(1, self.time_step + 1):
# _, h, c = self.encodeRegress(x[:, t + i, :])
# z_after_t.append(c)
# z_after_t = torch.cat(z_after_t, 0)
#
# x_hat = self.decode(z_after_t)
# nce, acc = self.compute_nce(self.fcs3(x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1),
# self.fcs3(x_hat))
# nce2, acc2 = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat)
# print('acc after fc', acc)
# print('acc before fc', acc2)
# return acc, nce, None
#
#
# class CPAELSTM46(CPLSTM4):
# """
# CPLSTM4+ CPAE4
# """
#
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1):
# super(CPAELSTM46, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.dim)
# x, _ = self.lstm1(x, (h0, c0))
# return x
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def forward(self, x):
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# x_ori = x
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# self.bs = x.shape[0]
# xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
#
# z_after_t = []
#
# for i in range(1, self.time_step + 1):
# _, h, c = self.encodeRegress(x[:, t + i, :])
# z_after_t.append(c)
# z_after_t = torch.cat(z_after_t, 0)
#
# x_hat = self.decode(z_after_t)
# nce, acc = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat)
# return acc, nce, None
#
#
# class CPAELSTM4_AT(CPLSTM4):
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1, switch=True):
# super(CPAELSTM4_AT, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.lstm1 = nn.LSTM(
# input_size=self.input_dim,
# hidden_size=self.dim,
# num_layers=3,
# bidirectional=False,
# batch_first=True
# )
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
# self.switch = switch
# if self.switch == False:
# self.softmax = nn.Softmax(dim=1)
# self.lsoftmax = nn.LogSoftmax(dim=1)
# self.att1 = nn.Linear(self.dim, self.dim) # attend to decoder
# self.att2 = nn.Linear(self.dim, self.dim) # attend to predictor
#
# def encodeRegress(self, x, warm=False, conti=False):
# bs = x.shape[0]
# x = self.dropout(x)
# latents, state1 = self.lstm1(x)
# latents_to_pred = torch.mul(latents, self.att1(latents))
# regs, state2 = self.lstm2(latents_to_pred)
# ht, ct = state2
# return latents, regs, ht, ct
#
# def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
# # TODO:
# bs = x.shape[0]
# x = self.dropout(x)
# latents, state1 = self.lstm1(x)
#
# # latents_to_pred = torch.mul(latents, self.att1(latents))
# regs, state2 = self.lstm2(latents)
# ht, ct = state2
# return regs[:, -1, :].squeeze(1)
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# x, (h, c) = self.lstm1(x)
# return x, h, c
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def forward(self, x):
# # check shape
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# self.bs = x.shape[0]
#
# # randomly choose a time point
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
#
# # encode the past and put into regressor
# latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
# latent_preds = self.fcs(
# self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
#
# latent_future = []
# for i in range(1, self.time_step + 1):
# _, h, c = self.encode(x[:, t + i, :])
# latent_future.append(self.fcs(c[-1]))
#
# latent_future = torch.stack(latent_future, 0)
#
# latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
# latent_all_attention = torch.mul(latent_all, self.att2(latent_all))
# x_hat = self.decode(latent_all_attention)
# nce, acc = self.compute_nce(latent_future, latent_preds)
#
# return x_hat, nce, acc
class CDCK3_S(nn.Module):
    """CPC model: stacked Conv1d encoder + GRU autoregressor.

    The encoder is a stack of [ReflectionPad1d, Conv1d(k=2), BatchNorm1d,
    ReLU] blocks over the feature channels; `linear` projects conv features
    to `embedded_features`; `gru` summarises the past; the `Wk` heads predict
    future embeddings for the InfoNCE objective.
    """

    def __init__(
            self,
            embedded_features,
            gru_out,
            n_points=192,
            n_features=76,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            kernel_sizes=[(2, i) for i in [76, 32, 64, 64, 128, 256, 512, 1024, 512, 128, 64]],
            time_step=30):
        # Call Module.__init__ first: the original assigned plain attributes
        # before super().__init__(), which happens to work for non-module
        # values but is fragile.
        super(CDCK3_S, self).__init__()
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        self.conv_sizes = conv_sizes
        self.time_step = time_step
        self.n_features = n_features
        # channels: input features followed by each conv stage's width
        self.channels = [n_features] + conv_sizes
        # Factory for one encoder stage; ReflectionPad (0, 1) compensates the
        # kernel-size-2 convolution so the sequence length is preserved.
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        self.encoder = nn.ModuleList(
            [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        ).to(device)
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
        self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
            device)
        # dim=0: scores are compared across the batch (in-batch NCE)
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.gru = nn.GRU(
            self.embedded_features,
            gru_out,
            num_layers=1,
            bidirectional=False,
            batch_first=True).to(device)
        self.beforeNCE = None  # set truthy by add_fcs()
        # Kaiming-init the GRU weight matrices, then everything else.
        for layer_p in self.gru._all_weights:
            for p in layer_p:
                if 'weight' in p:
                    nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
        self.apply(self._weights_init)

    def add_fcs(self, hidden=None):
        """Attach an MLP (`self.fcs`) applied to embeddings before the NCE
        comparison.

        :param hidden: list of hidden layer sizes per layer, e.g. [100, 100];
            defaults to [embedded_features, embedded_features].
        """
        n = self.embedded_features
        if hidden is None:
            # BUG FIX: this branch did not move the new layers to `device`
            # (the other branch did), causing a CPU/GPU device mismatch when
            # self.fcs is applied in forward().
            self.fcs = nn.Sequential(
                nn.Linear(n, n),
                nn.ReLU(inplace=True),
                nn.Linear(n, n)
            ).to(device)
        else:
            if type(hidden) != list:
                hidden = list(hidden)
            layers = []
            # Linear sizes: n -> hidden[0] -> ... -> hidden[-1] -> n
            for i, j in zip([n] + hidden, hidden + [n]):
                layers.append(nn.Linear(i, j))
                layers.append(nn.ReLU(inplace=True))
            layers.pop()  # We do not want Relu at the last layer
            self.fcs = nn.Sequential(*layers).to(device)
        self.beforeNCE = True

    def _weights_init(self, m):
        """Kaiming-init Linear/Conv2d weights; BatchNorm2d -> weight 1, bias 0."""
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def init_hidden(self, batch_size, use_gpu=True):
        """Zero initial GRU hidden state of shape (1, batch, gru_out).

        NOTE(review): use_gpu is ignored; the state is always placed on
        `device`.
        """
        return torch.zeros(1, batch_size, self.gru_out).to(device)

    def forward(self, x):
        """Sample a random anchor t, encode, and compute InfoNCE over the
        next `time_step` embeddings.

        :returns: (accuracy, nce, hidden)
        """
        batch_size = x.shape[0]
        # normalise to (N, n_features, n_points) for Conv1d
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 192: x = x.transpose(1, 2)
        for i in range(len(self.encoder)):
            x = self.encoder[i](x)
        self.n_frames = x.shape[2]
        t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
        encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
            device)
        hidden = self.init_hidden(batch_size, use_gpu=True)
        # (N, conv_sizes[-1], n_frames) -> (N, n_frames, conv_sizes[-1])
        x = x.view(batch_size, self.n_frames, self.conv_sizes[-1])
        x = self.linear(x)  # -> (N, n_frames, embedded_features)
        # Targets: the time_step embeddings following t_samples.
        for i in np.arange(1, self.time_step + 1):
            encode_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
        # Context: GRU over the past.  Hoisted out of the loop above -- the
        # original recomputed the identical GRU pass once per step.
        forward_seq = x[:, :int(t_samples) + 1, :]  # (N, t+1, embedded_features)
        output, hidden = self.gru(forward_seq, hidden)
        c_t = output[:, -1, :].view(batch_size, self.gru_out)
        pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        if self.beforeNCE:  # compare after the extra MLP (see add_fcs)
            pred = self.fcs(pred)
            encode_samples = self.fcs(encode_samples)
        # -----------------------------------------------------------------
        # InfoNCE over in-batch negatives
        # -----------------------------------------------------------------
        nce = 0  # average over time_step and batch
        for i in np.arange(0, self.time_step):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # (B, B)
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, batch_size).to(device)))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * batch_size * self.time_step
        # accuracy reflects the last predicted step only
        accuracy = 1. * correct.item() / batch_size
        return accuracy, nce, hidden

    def sub_forward(self, x):
        # NOTE(review): this method references self.convs / self.bns, which
        # CDCK3_S never defines (they belong to CDCK2), so calling it raises
        # AttributeError.  Kept as-is pending confirmation it is dead code.
        f = iter(self.convs)
        g = iter(self.bns)
        for i in range(len(self.conv_sizes)):
            x = next(f)(x)
            x = next(g)(x)
            x = nn.ReLU(inplace=True)(x)
        x = x.transpose(1, 3)
        return x

    def get_reg_out(self, x, every=False):
        """Run encoder + full-sequence GRU; return the final context c_t."""
        batch_size = x.shape[0]
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 192: x = x.transpose(1, 2)
        for i in range(len(self.encoder)):
            x = self.encoder[i](x)
        self.n_frames = x.shape[2]
        # NOTE(review): t_samples/encode_samples are unused here, but
        # torch.randint advances the global RNG; both are kept so downstream
        # random streams are unchanged.
        t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
        encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
            device)
        hidden = self.init_hidden(batch_size)
        x = x.view(batch_size, self.n_frames, self.conv_sizes[-1])
        x = self.linear(x)  # (N, n_frames, embedded_features)
        output, hidden = self.gru(x, hidden)
        c_t = output[:, -1, :].view(batch_size, self.gru_out)
        return c_t
class CDCK2(nn.Module):
    def __init__(self,
                 time_step,
                 batch_size,
                 frame_size,
                 fix_frame=True,
                 n_frames=None,
                 conv_sizes=[64, 128, 512, 128, 64, 32, 16],
                 n_flat_features_per_frame=None,
                 embedded_features=22,
                 gru_out=32
                 ):
        """CPC model over framed input.

        Data should be formatted as (batch_size, n_frames, frame_size, features).
        If frame_size and n_frames are identical for every batch, set
        fix_frame=True and provide n_frames.

        :param time_step: number of future frames predicted for InfoNCE
        :param n_flat_features_per_frame: flattened encoder output size per
            frame; when None, forward() discovers it on the first pass and
            the heads are built later via update_flat_features()
        :type conv_sizes: list
        """
        super(CDCK2, self).__init__()
        self.beforeNCE = False  # becomes True once add_fcs() attaches self.fcs
        self.frame_size = frame_size
        self.batch_size = batch_size
        self.time_step = time_step
        self.fix_frame = fix_frame
        self.n_frames = n_frames
        self.n_flat_features_per_frame = n_flat_features_per_frame
        self.embedded_features = embedded_features
        self.gru_out = gru_out
        # Variable-length frames: a shared conv encoder applied per frame.
        if not self.fix_frame:
            self.encoder = nn.Sequential(
                nn.MaxPool2d(4, stride=1),
                nn.Conv2d(1, 4, kernel_size=2, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(4),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(3, stride=1),
                nn.Conv2d(4, 8, kernel_size=2, stride=4, padding=2, bias=False),
                nn.BatchNorm2d(8),
                nn.ReLU(inplace=True),
                nn.Conv2d(8, self.embedded_features, kernel_size=2, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(self.embedded_features),
                nn.ReLU(inplace=True),
                nn.Flatten()
            )
        # Fixed frames: grouped convolutions with one group per frame.
        if self.fix_frame:
            self.convs = nn.ModuleList([nn.Conv2d(self.n_frames, conv_sizes[0], kernel_size=2, stride=1, padding=2,
                                                  bias=False, groups=self.n_frames)]
                                       + [
                                           nn.Conv2d(i, j, kernel_size=2, stride=1, padding=2, bias=False,
                                                     groups=self.n_frames)
                                           for i, j in zip(conv_sizes[:-1], conv_sizes[1:])
                                       ]
                                       )
            self.bns = nn.ModuleList(
                [nn.BatchNorm2d(i) for i in conv_sizes]
            )
        self.maxpooling = nn.MaxPool2d(2, stride=1)
        self.ReLU = nn.ReLU(inplace=True)
        # NOTE(review): Softmax/LogSoftmax constructed without dim= -- relies
        # on the deprecated implicit-dim behaviour; confirm the intended axis.
        self.softmax = nn.Softmax()
        self.lsoftmax = nn.LogSoftmax()
        # The GRU/prediction heads can only be built once the flattened
        # per-frame feature count is known.
        if n_flat_features_per_frame:
            self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features)
            self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False,
                              batch_first=True).to(device)
            self.Wk = nn.ModuleList(
                [nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
                device)
            # initialize gru
            for layer_p in self.gru._all_weights:
                for p in layer_p:
                    if 'weight' in p:
                        nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
            self.apply(self._weights_init)
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers)
self.beforeNCE = True
def update_flat_features(self, n_flat_features_per_frame):
self.n_flat_features_per_frame = n_flat_features_per_frame
self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features).to(device)
self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False, batch_first=True).to(
device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# initialize gru
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
if self.fix_frame:
if use_gpu:
return torch.zeros(1, batch_size, self.gru_out).to(device)
else:
return torch.zeros(1, batch_size, self.gru_out)
if not self.fix_frame:
if use_gpu:
return torch.zeros(1, 1, self.gru_out).to(device)
else:
return torch.zeros(1, 1, self.gru_out)
    def forward(self, x):
        """One CPC training pass.

        Frames the input, embeds every frame, rolls the GRU up to a randomly
        chosen time point, predicts the next `time_step` frame embeddings with
        the Wk heads and scores them against the batch with an InfoNCE loss.

        :param x: raw input of shape (N, 1, n_points, features)
        :return: (accuracy, nce, hidden) normally; on a probing call (while
            self.n_flat_features_per_frame is still None) returns that integer
            instead so the caller can rebuild the size-dependent layers.
        """
        # Convert into frames
        # shape of x:(N,1,n_points,features)
        x, frame_ends = makeFrameDimension(x, self.frame_size,
                                           self.n_frames)  # shape of x:(batch_size,n_frames,frame_size, n_features)
        # shape of x:(N,n_frames,points_per_frame,features)
        batch_size = x.shape[0]
        # !warning!!!!! The last batch in the dataset may have batch_size < self.batch_size.
        # !!!!!!!!!!!!!! So cannot use self.batch_size here
        self.n_frames = x.shape[1]
        # -----------------------------------------------------------------------------------
        # --------------Pick a random time point------------------------------------------------
        # -----------------------------------------------------------------------------------
        if not self.fix_frame:
            # variable-length samples: draw one start point per sample, inside
            # that sample's own valid frame range
            t_samples = torch.empty((batch_size, 1))
            for i in range(batch_size):
                try:
                    t_samples[i] = torch.randint(int((frame_ends[i] - self.time_step - 1).item()),
                                                 size=(1,)).long()  # randomly pick time stamps
                except RuntimeError:  # some patients have very few frames so we have to choose the first frame to start
                    frame_ends[i] = self.time_step + 3
                    t_samples[i] = 1
        if self.fix_frame:
            # fixed-length samples: one shared start point for the whole batch
            t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
        # -----------------------------------------------------------------------------------
        # --------------DO THE EMBEDDING------------------------------------------------
        # ------------------------------------------------------------------------------------
        if not self.fix_frame:
            z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device)
            for i in range(self.n_frames):
                y = (x[:, i, :, :].unsqueeze(1)).clone().to(device)
                y = self.encoder(y)  # ------>SHAPE: (N,n_flat_features_per_frame)
                # calculate n_flat_features_per_frame if it is unkown
                if self.n_flat_features_per_frame == None:
                    self.n_flat_features_per_frame = y.shape[1]
                    logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
                    return self.n_flat_features_per_frame
                y = self.linear(y)  # ----->SHAPE: (N,embedded_features)
                z[:, i, :] = y.squeeze(1)  # --->SHAPE: (N, 1, embedded_features)
            del x, y
        if self.fix_frame:
            # x:(8,24,8,76) (N,n_frames,points_per_frame,features)
            f = iter(self.convs)
            g = iter(self.bns)
            for i in range(len(self.convs)):
                x = next(f)(x)
                try:
                    x = nn.MaxPool2d(2, stride=2)(x)
                except RuntimeError:
                    # feature map already too small to pool; skip for this layer
                    pass
                x = next(g)(x)
                x = self.ReLU(x)
            x = nn.Flatten(start_dim=2, end_dim=-1)(x)
            z = x
            del x
            # z: (8,144) (N,flat_features)
            # calculate n_flat_features_per_frame if it is unkown
            if self.n_flat_features_per_frame == None:
                self.n_flat_features_per_frame = int(z.shape[2] * z.shape[1] / self.n_frames)
                logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
                return self.n_flat_features_per_frame
            z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
            # ---->SHAPE: (N,n_frames,n_flat_features_per_frame)
            z = self.linear(z)  # ----->SHAPE: (N,n_frames,embedded_features)
        encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
            device)  # e.g. size
        # ----->SHAPE: (T,N,embedded_features)
        c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device)
        # output of GRU,------>SHAPE:(N, n_gru_out)
        # -----------------------------------------------------------------------------------
        # --------------GET GRU OUTPUT------------------------------------------------
        # -----------------------------------------------------------------------------------
        forward_seq = []
        hidden = self.init_hidden(len(z), use_gpu=True)
        init_hidden = hidden
        if not self.fix_frame:
            # run the GRU sample-by-sample: every sample has its own start time
            for j in range(batch_size):
                hidden = init_hidden
                t = t_samples[j]
                for i in np.arange(1, self.time_step + 1):
                    encode_samples[i - 1][j] = z[j, int(t_samples[j].item()) + i, :]
                forward_seq.append(z[j, :int(t_samples[j].item()) + 1, :])
                output, hidden = self.gru(forward_seq[j].unsqueeze(0), hidden)
                c_t[j] = output[:, -1, :].view(1, self.gru_out)
        if self.fix_frame:
            for i in np.arange(1, self.time_step + 1):
                hidden = init_hidden
                encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
            forward_seq = z[:, :int(t_samples) + 1, :]
            # ----->SHAPE: (N,t_samples+1,embedded_features)
            output, hidden = self.gru(forward_seq, hidden)
            c_t = output[:, -1, :].view(batch_size, self.gru_out)
        pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device)
        for i in np.arange(0, self.time_step):
            # one prediction head (Wk) per future step
            linear = self.Wk[i]
            pred[i] = linear(c_t)
        if self.beforeNCE:  # ADD FC layers
            pred = self.fcs(pred)
            encode_samples = self.fcs(encode_samples)
        # -----------------------------------------------------------------------------------
        # --------------Calculate NCE loss------------------------------------------------
        # -----------------------------------------------------------------------------------
        nce = 0  # average over time_step and batch
        for i in np.arange(0, self.time_step):
            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))  # e.g. size 8*8
            # print(total)
            # NOTE(review): `correct` is overwritten each iteration, so the
            # accuracy returned below reflects only the last predicted step.
            correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                         torch.arange(0, batch_size).to(device)))  # correct is a tensor
            nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
        nce /= -1. * batch_size * self.time_step
        accuracy = 1. * correct.item() / batch_size
        return accuracy, nce, hidden
    def get_reg_out(self, x, every=False):
        """
        Get the output of the regression model (GRU).

        batch_size could be different from the batch_size used in training.
        When fix_frame=True, n_flat_features_per_frame must already be known
        (only the variable-length branch probes it here).

        :param x: raw input, (N, 1, n_points, features)
        :param every: if True return the GRU output for every frame,
            shape (N, n_frames, n_gru_out); otherwise only the last frame's
            output, shape (N, n_gru_out).
        """
        x, _ = makeFrameDimension(x, self.frame_size, x.shape[1])
        self.n_frames = x.shape[1]
        batch_size = x.size()[0]
        if self.fix_frame:
            # fixed-length path: push the whole batch through the conv stack
            f = iter(self.convs)
            g = iter(self.bns)
            for i in range(len(self.convs)):
                x = next(f)(x)
                try:
                    x = nn.MaxPool2d(2, stride=2)(x)
                except RuntimeError:
                    # feature map too small to pool; skip for this layer
                    pass
                x = next(g)(x)
                x = self.ReLU(x)
            x = nn.Flatten(start_dim=2, end_dim=-1)(x)
            z = x
            # self.n_flat_features_per_frame=z.shape[1]/self.n_frames
            z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
            # ---->SHAPE: (N,n_frames,embedded_features)
            z = self.linear(z)  # ----->SHAPE: (N,n_frames,embedded_features)
            hidden = self.init_hidden(batch_size)
            output, hidden = self.gru(z, hidden)  # output size e.g. 8*128*256
            # ---->SHAPE: (N,n_frames,n_gru_out)
        else:
            # variable-length path: embed frame-by-frame, then run the GRU
            # once per sample
            z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device)
            for i in range(self.n_frames):
                y = (x[:, i, :, :].unsqueeze(1)).clone().to(device)
                y = self.encoder(y)  # ------>SHAPE: (N,n_flat_features_per_frame)
                # calculate n_flat_features_per_frame if it is unkown
                if self.n_flat_features_per_frame == None:
                    self.n_flat_features_per_frame = y.shape[1]
                    logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
                    return self.n_flat_features_per_frame
                y = self.linear(y)  # ----->SHAPE: (N,embedded_features)
                z[:, i, :] = y.squeeze(1)  # --->SHAPE: (N, 1, embedded_features)
            del x, y
            c = torch.zeros(size=(batch_size, self.n_frames, self.gru_out)).float().to(device)
            for j in range(batch_size):
                hidden = self.init_hidden(batch_size)
                output, hidden = self.gru(z[j, :, :].unsqueeze(0), hidden)
                c[j, :, :] = output[:, :, :].view(1, self.n_frames, self.gru_out)
            output = c
        if every:
            return output  # return output from gru of every frame
            # ---->SHAPE: (N,n_frames,n_gru_out)
        else:
            return output[:, -1, :]  # only return the last output
            # ---->SHAPE: (N,n_gru_out)
def get_latent(self, x, every=True):
"""
Get the latent vectors of each frame
"""
batch_size = x.size()[0]
x, _ = makeFrameDimension(x, self.frame_size, x.shape[1])
z = self.encoder(x)
self.n_flat_features_per_frame = z.shape[1] / self.n_frames
z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
return z
class AE1(nn.Module):
    """
    Trivial convolutional autoencoder.

    Stacks Conv2d(kernel_size=2) blocks that each shrink H/W by one, and a
    mirrored ConvTranspose2d stack that grows them back, so the output has
    the same spatial shape as the input.  forward() returns
    (-1, mse_loss, reconstruction); the -1 is an accuracy placeholder that
    keeps the return signature consistent with the contrastive models.
    """

    def __init__(
            self,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
    ):
        super(AE1, self).__init__()
        self.conv_sizes = conv_sizes
        enChannels = [1] + conv_sizes
        encode_layers = []
        for i in range(len(enChannels) - 1):
            # each kernel-2 conv shrinks H and W by 1
            encode_layers.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2))
            encode_layers.append(nn.BatchNorm2d(enChannels[i + 1]))
            encode_layers.append(nn.ReLU(inplace=True))
        deChannels = enChannels[::-1]
        decode_layers = []
        for i in range(len(deChannels) - 1):
            # each kernel-2 transposed conv restores H and W by 1
            decode_layers.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2))
            decode_layers.append(nn.BatchNorm2d(deChannels[i + 1]))
            decode_layers.append(nn.ReLU(inplace=True))
        self.encoder = nn.Sequential(*encode_layers)
        self.decoder = nn.Sequential(*decode_layers)

    def forward(self, x):
        """Encode and decode x; return (-1, MSE(x_hat, x), x_hat)."""
        target = x
        added_channel = x.dim() == 3
        if added_channel:
            # Bug fix: x.unsqueeze(1) was previously called without assigning
            # the result, so 3-D inputs never got their channel dimension.
            x = x.unsqueeze(1)
        x = self.encoder(x)
        torch.cuda.empty_cache()
        x = self.decoder(x)
        torch.cuda.empty_cache()
        if added_channel:
            # mirror fix: drop the channel dim we added so shapes match input
            x = x.squeeze(1)
        loss = nn.MSELoss(reduction='mean')(x, target)
        torch.cuda.empty_cache()
        return -1, loss, x  # make sure it is consistent with other models training function
class AE2_S(nn.Module):
    """
    Auto encoder that convolves only along the time direction (same design
    as in CPAE1).

    Input (N, n_points, n_features); every conv block right-reflect-pads the
    time axis by one and applies a kernel-2 Conv1d, so the sequence length is
    preserved through the whole encoder/decoder.
    """

    def __init__(
            self,
            embedded_features,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            n_points=192,
            n_features=76,
    ):
        self.conv_sizes = conv_sizes
        super(AE2_S, self).__init__()
        self.embedded_features = embedded_features
        # . If is int, uses the same padding in all boundaries.
        # If a 4-tuple, uses (left ,right ,top ,bottom )
        self.channels = [n_features] + conv_sizes
        # the core part of model list
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # ** minded the length should be 1 element shorter than # of channels
        self.encoder = nn.ModuleList(
            [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
        )
        self.decode_channels = self.channels[::-1]
        self.decoder = nn.ModuleList(
            [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
        )
        # bottleneck projections between conv feature size and embedding size
        self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features)
        self.delinear = nn.Linear(self.embedded_features, self.conv_sizes[-1])

    def forward(self, x):
        """Reconstruct x; return (-1, mse_loss, reconstruction).

        The -1 is an accuracy placeholder kept for interface consistency with
        the contrastive models.
        """
        # input (batch,192,76)
        if len(x.shape) == 4: x = x.squeeze(1)
        y = x
        x = x.transpose(1, 2)  # (b,76,192)
        x = self.encode(x).transpose(1, 2)  # x: (batch, n_time, conv[-1])
        x = self.linear(x)  # (batch, time,embedded_features)
        # NOTE(review): these BatchNorm1d/ReLU modules are re-created on every
        # forward call, so the BatchNorm affine weights are never trained and
        # the running statistics are discarded each call — confirm intended.
        x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2)
        x = nn.ReLU(inplace=True).to(device)(x)
        x = self.delinear(x)  # (batch, time, conv[-1])
        x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2)
        x = nn.ReLU(inplace=True).to(device)(x)
        x = self.decode(x.transpose(1, 2))  # (batch,76,192)
        x = x.transpose(1, 2)
        loss = nn.MSELoss(reduction='mean')(x, y)
        return -1, loss, x

    def encode(self, x):
        # input shape: (N,n_features=76,n_points=192)
        for i in range(len(self.encoder)):
            x = self.encoder[i](x)
        return x  # output shape: (N,n_features=8,n_points=192)

    def decode(self, x):
        # input shape: (N,n_features=8,n_points=192)
        for i in range(len(self.decoder)):
            x = self.decoder[i](x)
        return x

    def get_encode(self, x):
        """Return the flattened encoder features, shape (N, n_points * conv_sizes[-1])."""
        if len(x.shape) == 4: x = x.squeeze(1)
        x = x.transpose(1, 2)
        x = self.encode(x).transpose(1, 2)
        x = nn.Flatten()(x)
        return x  # output shape: (N,192*12)
class CAE1(AE1):
    """
    Contrastive Auto-encoder based on AE1.

    Reuses AE1's reconstruction pass but replaces the MSE objective with an
    InfoNCE loss matching each reconstruction to its own input in the batch.
    """

    def __init__(self):
        super(CAE1, self).__init__()
        # softmax over dim 0 normalizes each column of the similarity matrix
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)

    def forward(self, x):
        """Reconstruct x with AE1, then score with NCE.

        :return: (accuracy, nce_loss, reconstruction)
        """
        target = x
        # AE1 returns (-1, mse, reconstruction); the MSE is discarded here
        _, _, recon = super().forward(x)
        loss, acc = self.compute_nce(recon, target)
        del target
        return acc, loss, recon

    def compute_nce(self, x_hat, x):
        """InfoNCE between reconstructions and originals within the batch.

        :return: (nce, acc) — nce is the (positive) loss tensor, acc the
            fraction of samples whose best match is their own reconstruction.
        """
        bs = x.shape[0]
        assert x.shape == x_hat.shape
        x = x.view(bs, -1)
        x_hat = x_hat.view(bs, -1)
        total = torch.mm(x_hat, x.T)  # (bs, bs) similarity matrix
        # Fix: build the comparison target on the same device as `total`
        # (was hard-coded .cuda(), which crashed CPU-only runs).
        correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                     torch.arange(0, bs, device=total.device)))
        nce = torch.sum(torch.diag(self.lsoftmax(total)))
        nce /= -1. * bs
        acc = 1. * correct.item() / bs
        torch.cuda.empty_cache()
        del x, x_hat
        return nce, acc
class CAE11(nn.Module):
    """
    Self-contained contrastive convolutional autoencoder: a Conv2d encoder /
    ConvTranspose2d decoder (each layer shifts H/W by one) scored with an
    InfoNCE loss — an inheritance-free merge of AE1 + CAE1.
    """

    def __init__(
            self,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
    ):
        super(CAE11, self).__init__()
        self.conv_sizes = conv_sizes
        enChannels = [1] + conv_sizes
        encode_layers = []
        for i in range(len(enChannels) - 1):
            # each kernel-2 conv shrinks H and W by 1
            encode_layers.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2))
            encode_layers.append(nn.BatchNorm2d(enChannels[i + 1]))
            encode_layers.append(nn.ReLU(inplace=True))
        deChannels = enChannels[::-1]
        decode_layers = []
        for i in range(len(deChannels) - 1):
            # each kernel-2 transposed conv restores H and W by 1
            decode_layers.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2))
            decode_layers.append(nn.BatchNorm2d(deChannels[i + 1]))
            decode_layers.append(nn.ReLU(inplace=True))
        self.encoder = nn.Sequential(*encode_layers)
        self.decoder = nn.Sequential(*decode_layers)
        # softmax over dim 0 normalizes each column of the similarity matrix
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)

    def forward(self, x):
        """Encode/decode x and score the reconstruction with InfoNCE.

        :return: (accuracy, nce_loss, reconstruction)
        """
        target = x
        added_channel = x.dim() == 3
        if added_channel:
            # Bug fix: x.unsqueeze(1) was previously called without assigning
            # the result, so 3-D inputs never got their channel dimension.
            x = x.unsqueeze(1)
        x = self.encoder(x)
        torch.cuda.empty_cache()
        x = self.decoder(x)
        torch.cuda.empty_cache()
        if added_channel:
            # mirror fix: drop the channel dim we added so shapes match input
            x = x.squeeze(1)
        torch.cuda.empty_cache()
        loss, acc = self.compute_nce(x, target)
        del target
        return acc, loss, x

    def compute_nce(self, x_hat, x):
        """InfoNCE between reconstructions and originals within the batch.

        :return: (nce, acc) — nce is the (positive) loss tensor, acc the
            fraction of samples whose best match is their own reconstruction.
        """
        bs = x.shape[0]
        assert x.shape == x_hat.shape
        x = x.view(bs, -1)
        x_hat = x_hat.view(bs, -1)
        total = torch.mm(x_hat, x.T)  # (bs, bs) similarity matrix
        # Fix: build the comparison target on the same device as `total`
        # (was hard-coded .cuda(), which crashed CPU-only runs).
        correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                     torch.arange(0, bs, device=total.device)))
        nce = torch.sum(torch.diag(self.lsoftmax(total)))
        nce /= -1. * bs
        acc = 1. * correct.item() / bs
        torch.cuda.empty_cache()
        del x, x_hat
        return nce, acc
class CAE2_S(AE2_S):
    """
    Contrastive auto-encoder based on AE2_S.

    Shares the AE2_S time-direction encoder/decoder but replaces the MSE
    reconstruction objective with an InfoNCE loss that matches every
    reconstruction against its own input within the batch.
    """

    def __init__(
            self,
            embedded_features,
            conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
            n_points=192,
            n_features=76,
    ):
        self.conv_sizes = conv_sizes
        self.embedded_features = embedded_features
        # Bug fix: n_points/n_features are now forwarded to AE2_S — previously
        # they were silently dropped, so a non-default n_features was ignored
        # and the encoder stayed sized for 76 input features.
        super(CAE2_S, self).__init__(self.embedded_features, self.conv_sizes, n_points, n_features)
        # softmax over dim 0 normalizes each column of the similarity matrix
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)

    def forward(self, x):
        """Encode -> bottleneck -> decode, then score with InfoNCE.

        Input/output shape (N, n_points, n_features); a leading channel dim
        of 1 is squeezed away.  :return: (accuracy, nce_loss, reconstruction)
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        y = x
        x = x.transpose(1, 2)  # (b,76,192)
        x = self.encode(x).transpose(1, 2)  # x: (batch, n_time, conv[-1])
        x = self.linear(x)  # (batch, time,embedded_features)
        # NOTE(review): these BatchNorm1d modules are created fresh on every
        # forward call, so their affine weights are never trained and running
        # stats are discarded each time — confirm this is intended.
        x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2)
        x = nn.ReLU(inplace=True).to(device)(x)
        x = self.delinear(x)  # (batch, time, conv[-1])
        x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2)
        x = nn.ReLU(inplace=True).to(device)(x)
        x = self.decode(x.transpose(1, 2))  # (batch,76,192)
        x = x.transpose(1, 2)
        loss, acc = self.compute_nce(x, y)
        return acc, loss, x

    def compute_nce(self, x_hat, x):
        """InfoNCE between reconstructions and originals within the batch.

        :return: (nce, acc) — nce is the (positive) loss tensor, acc the
            fraction of samples whose best match is their own reconstruction.
        """
        bs = x.shape[0]
        assert x.shape == x_hat.shape
        x = x.view(bs, -1)
        x_hat = x_hat.reshape(bs, -1)  # reshape: x_hat may be non-contiguous after transpose
        total = torch.mm(x_hat, x.T)  # (bs, bs) similarity matrix
        # Fix: build the comparison target on the same device as `total`
        # (was hard-coded .cuda(), which crashed CPU-only runs).
        correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                     torch.arange(0, bs, device=total.device)))
        nce = torch.sum(torch.diag(self.lsoftmax(total)))
        nce /= -1. * bs
        acc = 1. * correct.item() / bs
        return nce, acc
class Basic_Cnn(nn.Module):
    """1-D CNN classifier over per-timestep features.

    Expects input of shape (N, n_features, n_points).  Every conv block
    right-reflect-pads the time axis by one before a kernel-2 Conv1d, so the
    sequence length is preserved; the classifier head reads the features of
    the final timestep and emits log-probabilities over `out` classes.
    """

    def __init__(self, seed, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], n_features=76, out=2):
        # make weight initialization reproducible across runs
        random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
        super(Basic_Cnn, self).__init__()
        torch.manual_seed(seed)
        self.out = out
        self.channels = [n_features] + conv_sizes
        # one conv block: reflect-pad right by 1 so the kernel-2 conv keeps
        # the sequence length unchanged
        self.sequential = lambda inChannel, outChannel: nn.Sequential(
            nn.ReflectionPad1d((0, 1)),
            nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
            nn.BatchNorm1d(outChannel),
            nn.ReLU(inplace=True)
        )
        # build one block per consecutive channel pair
        blocks = []
        for c_in, c_out in zip(self.channels[:-1], self.channels[1:]):
            blocks.append(self.sequential(c_in, c_out))
        self.encoder = nn.ModuleList(blocks)
        self.fc = nn.Sequential(
            nn.Linear(self.channels[-1], 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, self.out),
            nn.LogSoftmax(dim=1)
        )
        # dim = 1 !!!
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        self.apply(self._weights_init)

    def _weights_init(self, m):
        """Kaiming-normal init for Linear/Conv1d; gamma=1, beta=0 for BatchNorm1d."""
        if isinstance(m, (nn.Linear, nn.Conv1d)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Map (N, n_features, n_points) to log-probabilities (N, out)."""
        for block in self.encoder:
            x = block(x)  # stays (N, channels[k], n_points)
        # classify from the last timestep only
        return self.fc(x[:, :, -1])
def train(args, model, device, train_loader, optimizer, epoch, batch_size, lr=None):
    """Run one training epoch for the CPC / autoencoder family of models.

    Dispatches on args['model_type']:
    * non-CPAE models (plus CPAE4/CPAE7/CPAELSTM41/CPAELSTM42): the model is
      expected to return (acc, loss, hidden).  If it instead returns a bare
      int (the n_flat_features_per_frame probing pass), that int is returned
      immediately so the caller can rebuild the model.
    * remaining CPAE models: the model returns (decoded, nce, accuracy) and
      the loss is assembled by Chimera_loss (defined elsewhere in this file).

    :param lr: if None, the learning rate is advanced via
        optimizer.update_learning_rate(); otherwise the given value is used
        for logging only.
    """
    # turn on the training mode
    model.train()
    logger = logging.getLogger("cpc")
    if 'CPAE' not in args['model_type'] or 'CPAE4' in args['model_type'] or (
            'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type'] or 'CPAELSTM42' in args['model_type']:
        for batch_idx, sample in enumerate(train_loader):
            if sample == 1: continue  # my_collate returns 1 for size-1 batches
            sigs, labels = zip(*sample)
            sigs = torch.stack(sigs)
            labels = torch.stack(labels)
            data = sigs.float().unsqueeze(1).to(device)  # add channel dimension
            data.requires_grad = True
            optimizer.zero_grad()
            # If n_flat_features_per_frame is not provided, then the forward() of the above sentence will return
            # n_flat_features_per_frame and the below sentence will raise TypeError.
            # Then get the n_flat_features_per_frame and update this to the model
            # DO the forward again
            result = model(data)
            try:
                acc, loss, hidden = result
            except TypeError:
                n_flat_features_per_frame = result
                return result
            loss.backward()
            optimizer.step()
            if lr is None:
                # NOTE(review): once assigned, lr is no longer None, so the
                # scheduler advances only on the first batch — confirm intended.
                lr = optimizer.update_learning_rate()  # See optimizer.py
                # print(lr)
            if batch_idx % args['log_interval'] == 0:
                logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tlr:{:.5f}\tAccuracy: {:.4f}\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), lr, acc, loss.item()))
            del sigs, labels, sample, data, hidden, acc, loss
            torch.cuda.empty_cache()
    elif 'CPAE' in args['model_type']:
        model.train()
        logger.info('\n --------------------------- epoch {} ------------------------- \n'.format(epoch))
        if args.get('lambda'): logger.info('weights are %s' % args['lambda'])
        local_loss = []
        for ii, batch in enumerate(train_loader):
            if batch == 1:
                continue
            X, y = zip(*batch)
            X = torch.stack(X).to(device)
            X.requires_grad = True
            # y = torch.tensor(y).long().to('cuda') # y is not used here in autoencoder
            optimizer.zero_grad()
            D, nce, accuracy = model(X)  # decoded
            # NOTE(review): training reads args['Lambda'] while validation
            # reads args['lambda'] — confirm which key is intended.
            l = args.get('Lambda')
            if l:
                loss = Chimera_loss(D, X, nce, l)
            else:
                loss = Chimera_loss(D, X, nce)
            loss.backward()
            optimizer.step()
            local_loss.append(loss.item())
            if ii % 100 == 0:  # verbose
                new_lr = optimizer.update_learning_rate()
                logger.info('\t {:.5f} {:.5f}'.format(loss.item(), new_lr))
            del X, y, batch, D, nce, accuracy, loss, ii
            torch.cuda.empty_cache()
        logger.info('\n ---------------------- mean loss : {:.5f} ---------------------- \n'.format(
            np.mean(local_loss)))
        torch.cuda.empty_cache()
    torch.cuda.empty_cache()
def validation(model, args, device, validation_loader):
    """Evaluate the model on the validation set.

    Mirrors train()'s dispatch on args['model_type'] and returns
    (total_acc, total_loss); in the first branch both are batch-size-weighted
    sums, in the CPAE branch total_loss is a running numpy sum of
    Chimera_loss values and a per-component loss table is logged.
    """
    logger = logging.getLogger("cpc")
    logger.info("Starting Validation")
    if 'CPAE' not in args['model_type'] or 'CPAELSTM42' in args['model_type'] or ('CPAE4' in args['model_type']) or (
            'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type']:
        model.eval()
        total_loss = 0
        total_acc = 0
        with torch.no_grad():
            for _, sample in enumerate(validation_loader):
                if sample == 1: continue  # skip size-1 batches (see my_collate)
                sigs, _ = zip(*sample)
                sigs = torch.stack(sigs)
                data = sigs.float().unsqueeze(1).to(device)
                acc, loss, hidden = model(data)
                total_loss += len(data) * loss  # batch-size-weighted sums
                total_acc += len(data) * acc
                torch.cuda.empty_cache()
                del sigs, sample
        return total_acc, total_loss
    else:
        model.eval()
        loss_ls = []
        total_loss = 0
        total_acc = 0
        for ii, batch in enumerate(validation_loader):
            if batch == 1: continue
            X, y = zip(*batch)
            X = torch.stack(X).to('cuda')
            D, nce, accuracy = model(X)  # decoded
            if args.get('lambda'):
                total_loss += Chimera_loss(D, X, nce, args['lambda']).detach().cpu().numpy()
            else:
                total_loss += Chimera_loss(D, X, nce).detach().cpu().numpy()
            loss_ls.append(record_loss(D, X, nce))
            total_acc += len(X) * accuracy
            torch.cuda.empty_cache()
            del X, y, batch, D, nce, accuracy
        # NOTE(review): this branch runs without torch.no_grad() (gradients
        # are accumulated during evaluation) and the format() below receives
        # `ii` although the message has no placeholder — confirm intended.
        loss_ls = np.stack(loss_ls)
        logger.info('\n ------- validation ------- \n'.format(ii))
        logger.info('\t NCE \t MSE \t MASK MSE \t MAPPING MSE')
        logger.info('\t {:.4f} \t {:.4f} \t {:.4f} \t {:.4f}'.format(*np.mean(loss_ls, axis=0)))
        return total_acc, total_loss
def define_model(args_json, Model, train_loader):
    """Instantiate Model from the JSON config and prepare its lazy layers.

    For CDCK2, when n_flat_features_per_frame is missing from the config, a
    probing training pass discovers it, the model is rebuilt, and the
    size-dependent layers are created via update_flat_features.

    :return: (model moved to `device`, optimizer)
    """
    model_args = filter_args(args_json, Model)
    model = Model(**model_args)
    # SECURITY NOTE: eval() executes the configured optimizer expression
    # verbatim — the config file must be trusted input.
    optimizer = eval(args_json['optimizer'])
    if args_json.get('n_flat_features_per_frame') is None and Model == CDCK2:
        # probing pass: train() returns the discovered feature count
        args_json['n_flat_features_per_frame'] = train(args_json, model, device, train_loader, optimizer, 2,
                                                       args_json['batch_size'])
        del model
        model_args = filter_args(args_json, Model)
        model = Model(**model_args)
    model.update_flat_features(args_json['n_flat_features_per_frame'])
    if args_json.get('fcs') is not None:
        model.add_fcs(args_json['fcs'])  # add fc layers if required
    return model.to(device), optimizer
def save_intermediate(Model, args_json, device):
    """Dump the trained model's GRU context vectors for all three data splits.

    Loads the best checkpoint, runs model.get_reg_out over the train /
    validation / test loaders, and saves context vectors and labels as .npy
    files in args_json['logging_dir'] (named '<setting>-x_train',
    '<setting>-y_train', ...) for downstream classifiers.
    """
    setting_name = get_setting_name(args_json['model_best'])
    logging_dir = args_json['logging_dir']
    checkpoint_path = os.path.join(
        args_json['top_path'],
        'logs/cpc/',
        args_json['model_type'],
        args_json['model_best']
    )
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    print('Starting to generate intermediate data\n')
    train_loader, validation_loader, test_loader = split_Structure_Inhospital(
        args_json, percentage=1)  # BUG every data sample is the same!!!
    model, optimizer = define_model(args_json, Model, train_loader)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    context_train = []
    context_val = []
    context_test = []
    y_train = []
    y_test = []
    y_val = []
    model.eval()
    with torch.no_grad():
        # -------- training split --------
        for _, sample in enumerate(train_loader):
            # NOTE(review): size-1 batches `break` here but `continue` in
            # train()/validation() — confirm the early stop is intended.
            if sample == 1: break
            x, y = zip(*sample)
            out = model.get_reg_out(
                (
                    torch.stack(x).float().unsqueeze(1).to(device)
                )
            ).cpu()
            context_train.append(out)
            torch.cuda.empty_cache()
            y_train.append((torch.stack(y)))
            del sample, x, y, out
        context_train = torch.cat(context_train).cpu().numpy()
        y_train = torch.cat(y_train).cpu().numpy()
        np.save(os.path.join(logging_dir, setting_name + '-x_train'), context_train)
        np.save(os.path.join(logging_dir, setting_name + '-y_train'), y_train)
        print('Getting training intermediate vectors done. saved in %s' % logging_dir)
        torch.cuda.empty_cache()
        del context_train, y_train
        # -------- validation split --------
        for _, sample in enumerate(validation_loader):
            if sample == 1: break
            x, y = zip(*sample)
            context_val.append(model.get_reg_out(
                (
                    torch.stack(
                        x
                    ).float().unsqueeze(1).to(device)
                )
            )
            )
            y_val.append((torch.stack(y)))
            del sample, x, y
        context_val = torch.cat(context_val).cpu().numpy()
        y_val = torch.cat(y_val).cpu().numpy()
        np.save(os.path.join(logging_dir, setting_name + '-x_val'), context_val)
        np.save(os.path.join(logging_dir, setting_name + '-y_val'), y_val)
        print('Getting validation intermediate vectors done. saved in %s' % logging_dir)
        torch.cuda.empty_cache()
        del context_val, y_val
        # -------- test split --------
        for _, sample in enumerate(test_loader):
            if sample == 1: break
            x, y = zip(*sample)
            context_test.append(model.get_reg_out(
                (
                    torch.stack(
                        x
                    ).float().unsqueeze(1).to(device)
                )
            )
            )
            y_test.append((torch.stack(y)))
            del sample, x, y
        context_test = torch.cat(context_test).cpu().numpy()
        y_test = torch.cat(y_test).cpu().numpy()
        np.save(os.path.join(logging_dir, setting_name + '-x_test'), context_test)
        np.save(os.path.join(logging_dir, setting_name + '-y_test'), y_test)
        print('Getting test intermediate vectors done. saved in %s' % logging_dir)
        torch.cuda.empty_cache()
        del context_test, y_test
def snapshot(dir_path, run_name, state):
    """Save a checkpoint dict as '<run_name>-model_best.pth' under dir_path."""
    target = os.path.join(dir_path, run_name + '-model_best.pth')
    # torch.save serializes any picklable object (a dict in our case)
    torch.save(state, target)
    logger.info("Snapshot saved to {}\n".format(target))
def my_collate(batch):
    """Pad the samples in one batch to a common length and stack them.

    Each sample is a dict with keys 'patient' (2-D array: time x features),
    'death' (label tensor) and 'duration' (length tensor).  The batch is
    sorted by duration (longest first) and each patient is zero-padded up to
    the longest duration in the batch.

    Args:
        batch: list of sample dicts produced by the dataset.

    Returns:
        1 if the batch holds a single sample (the NCE loss needs >= 2
        samples, so the training loop skips such batches), otherwise a tuple
        (data, labels, durations) with shapes:
        data: (batch_size, max_len, num_features)
        labels: (batch_size,)
        durations: (batch_size,)
    """
    if len(batch) == 1:
        # cannot compute a contrastive loss from one sample; caller skips it
        return 1
    if len(batch) > 1:
        # longest sample first, so data[0].shape gives the padding target
        batch = sorted(batch, key=lambda x: x['duration'], reverse=True)
        data = [sample['patient'] for sample in batch]
        labels = [sample['death'] for sample in batch]
        durations = [sample['duration'] for sample in batch]
        max_len, n_feats = data[0].shape
        data = [torch.from_numpy(np.array(s, dtype=float)).float() for s in data]
        # zero-pad every shorter sample on the time axis up to max_len
        data = [torch.cat((s, torch.zeros(max_len - s.shape[0], n_feats)), 0)
                if s.shape[0] != max_len else s for s in data]
        data = torch.stack(data, 0)  # shape:[24,2844,462]
        labels = torch.stack(labels, 0)
        durations = torch.stack(durations, 0)  # max:2844
        return data, labels, durations
class MLP(nn.Module):
    """Fully-connected classifier head over fixed-size context vectors.

    Architecture: Linear blocks of the given hidden sizes, each non-final
    block followed by ReLU, BatchNorm1d and (optionally) Dropout(0.2); the
    final Linear maps to `out` raw scores with no activation.
    """

    def __init__(self, hidden_sizes, seed, in_features=8, out=2, dropout=True):
        torch.manual_seed(seed)
        super(MLP, self).__init__()
        hidden_sizes = [in_features] + hidden_sizes + [out]
        l = []
        torch.manual_seed(seed)
        fcs = [nn.Linear(i, j, bias=True) for i, j in zip(hidden_sizes[:-1], hidden_sizes[1:])]
        relu = nn.ReLU(inplace=True)
        drop = nn.Dropout(p=0.2)
        torch.manual_seed(seed)
        bns = [nn.BatchNorm1d(i) for i in hidden_sizes[1:]]
        # apply(_weights_init)
        for i in range(len(hidden_sizes) - 1):
            l.append(fcs[i])
            if i != len(hidden_sizes) - 2:  # no activation block after the output layer
                l.append(relu)
                l.append(bns[i])
                if dropout: l.append(drop)
        self.mymodules = nn.Sequential(*l)
        for model in self.mymodules:
            self.initialize_weights(model)

    def initialize_weights(self, model):
        # Xavier for Linear weights, zeros for biases; other layer types untouched
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)

    def forward(self, x):
        # print(x.shape)
        if len(x.shape) == 4:
            x = x.squeeze(1)  # fastai has a strange issue here.
        x = self.mymodules(x)
        # print (x)
        # print(x.shape)
        return x

    def _weights_init(self, m):
        # NOTE(review): never invoked (initialize_weights is used in __init__
        # instead) — dead code unless wired up via self.apply.
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def valid(self, data_loader, iterations='all', metrics=None):
        # NOTE(review): reads self.model and self.metrics, which MLP never
        # defines — this method looks misplaced (it belongs to a wrapper
        # class) and would raise AttributeError on a plain MLP.  Also `loss`
        # is deleted each iteration but re-indexed on the next, which would
        # raise NameError on the second batch, and the same list object is
        # appended repeatedly to overall_loss.
        if metrics == None: metrics = self.metrics
        loss = [None] * len(metrics)
        overall_loss = []
        self.model.eval()
        with torch.no_grad():
            for i, batch in enumerate(data_loader):
                if iterations != 'all':
                    if i >= iterations: return overall_loss
                ct, y = zip(*batch)
                ct = torch.stack(ct).squeeze(1).to(device)
                y = torch.stack(y).cpu()
                pred = self.model(ct).cpu()  # forward
                for i, metric in enumerate(metrics):
                    loss[i] = metric(pred, y)  # loss
                overall_loss.append((loss))
                del loss, ct, y, pred
        return overall_loss
class LR(nn.Module):
    """Logistic-regression classifier: one Linear layer plus log-softmax.

    Seeds every relevant RNG in __init__ so that identical seeds yield
    identical weights across runs.
    """

    def __init__(self, seed, in_features=8, out=2):
        # reproducible initialization
        random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
        super(LR, self).__init__()
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_features, out)

    def forward(self, x):
        """Return per-class log-probabilities of shape (N, out)."""
        logits = self.linear(x)
        return F.log_softmax(logits, dim=1)
def load_intermediate(top_path, setting_name, model_type):
    """Load the cached intermediate (context) arrays for all three splits.

    Reads '<setting_name>-{x,y}_{train,val,test}.npy' from
    <top_path>/logs/imp/<model_type> and returns them keyed by
    'x_train', 'y_train', 'x_val', 'y_val', 'x_test', 'y_test'.
    """
    middata_dir = os.path.join(top_path, 'logs', 'imp', model_type)
    arrays = {}
    for split in ('train', 'val', 'test'):
        for prefix in ('x', 'y'):
            key = '%s_%s' % (prefix, split)
            arrays[key] = np.load(os.path.join(middata_dir, setting_name + '-' + key + '.npy'))
    return arrays
def tabular_frame(args_json):
    """Build train/val/test DataFrames from cached intermediate arrays.

    Columns are the 8 context dimensions (0..7) plus the label column 'y'.
    Assumes x arrays have 8 columns and y arrays are 2-D so they hstack.
    """
    data = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
    columns = list(range(8)) + ['y']
    frames = []
    for split in ('train', 'val', 'test'):
        stacked = np.hstack((data['x_%s' % split], data['y_%s' % split]))
        frames.append(pd.DataFrame(stacked, columns=columns))
    return tuple(frames)
def dataset_intermediate(args_json):
    """Wrap the cached intermediate arrays of each split in a TrivialDataset.

    :return: (train_set, val_set, test_set)
    """
    data = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
    return tuple(
        TrivialDataset(data['x_%s' % split], data['y_%s' % split])
        for split in ('train', 'val', 'test')
    )
def data_loader_intermediate(args_json):
    """Build DataLoaders over the cached intermediate arrays.

    Train and validation loaders shuffle; the test loader does not.
    All three share batch_size, num_workers and the my_collate_fix collation.

    :return: (train_loader, val_loader, test_loader)
    """
    data = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
    batch_size = args_json['batch_size']
    num_workers = args_json['num_workers']

    def _make_loader(split, shuffle):
        # one dataset + loader per split, sharing the common settings
        dataset = TrivialDataset(data['x_%s' % split], data['y_%s' % split])
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                          collate_fn=my_collate_fix, num_workers=num_workers)

    return _make_loader('train', True), _make_loader('val', True), _make_loader('test', False)
def binary_acc(y_pred, y_test):
    """Percentage of correct binary predictions, rounded, returned as a tensor.

    :param y_pred: raw logits; thresholded at 0.5 after a sigmoid
    :param y_test: ground-truth 0/1 labels (same leading dimension)
    """
    predicted_labels = torch.sigmoid(y_pred).round()
    n_correct = torch.eq(predicted_labels, y_test).sum().float()
    return torch.round(100 * n_correct / y_test.shape[0])
def fastai_dl(train_set, val_set, test_set, device, batch_size=64, num_workers=24):
    """Wrap the three datasets into a single fastai DataBunch.

    :param device: torch device the batches should be moved to
    :return: tabular.DataBunch holding all three splits
    """
    bunch = tabular.DataBunch.create(
        train_ds=train_set, valid_ds=val_set, test_ds=test_set,
        bs=batch_size, num_workers=num_workers, device=device,
    )
    return bunch
def train_mlp(model, train_loader, val_loader, epoch, lr, optimizer):
    """Cross-entropy training loop with a validation pass per epoch.

    Each batch is a sequence of (ct, y) pairs that get stacked into tensors.
    NOTE(review): relies on module-level globals `device` and `verbose`;
    the `lr` argument is unused (the optimizer is passed in pre-configured).
    """
    lossfn = nn.CrossEntropyLoss()
    # NOTE: the loop variable shadows the `epoch` argument (count of epochs)
    for epoch in range(epoch):
        train_loss = []
        train_acc = []
        val_loss = []
        val_acc = []
        model.train()
        for i, batch in enumerate(train_loader):
            # batch is a list of (input, label) pairs -> two stacked tensors
            ct, y = zip(*batch)
            ct = torch.stack(ct).squeeze(1).to(device)
            y = torch.stack(y).to(device)
            # ---------- train mlp ---------
            optimizer.zero_grad()
            pred = model(ct)  # forward
            loss = lossfn(pred, y)  # loss
            # accuracy in percent over this batch
            acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100
            train_acc.append(acc)
            loss.backward()  # compute loss
            optimizer.step()  # update
            # free cached GPU memory aggressively between batches
            torch.cuda.empty_cache()
            train_loss.append(loss.item())
            del pred, loss, acc, ct, y
        model.eval()
        with torch.no_grad():
            for i, batch in enumerate(val_loader):
                ct, y = zip(*batch)
                ct = torch.stack(ct).squeeze(1).to(device)
                y = torch.stack(y).to(device)
                # ---------- validation predicted by mlp ---------
                pred = model(ct)  # forward
                loss = lossfn(pred, y)  # loss
                acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100
                val_acc.append(acc)
                val_loss.append(loss.item())
                torch.cuda.empty_cache()
                del pred, loss, acc, ct, y
        # print out statistics
        verbose(epoch, train_loss, train_acc, val_loss, val_acc)
class Basic_LSTM(nn.Module):
    """Single-layer LSTM encoder followed by a two-layer log-softmax head.

    Binary tasks ('ihm', 'dd') get 2 output classes, everything else gets 10.
    Extra constructor arguments (bn, dropout, depth, ...) are accepted for
    signature compatibility with the sibling models but are not used here.
    """

    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False):
        self.out = 2 if task in ['ihm', 'dd'] else 10
        super(Basic_LSTM, self).__init__()
        self.lstm1 = nn.LSTM(
            input_size=input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        self.fc = nn.Sequential(
            nn.Linear(dim, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, self.out),
            nn.LogSoftmax(dim=1)
        )
        # NOTE: initialize_weights matches on exact module type, so the
        # nn.Sequential head keeps PyTorch's default init; only lstm1 is re-seeded.
        for sub_module in (self.lstm1, self.fc):
            self.initialize_weights(sub_module)

    def initialize_weights(self, model):
        """Xavier/orthogonal re-init (zero biases) for Linear and recurrent layers."""
        kind = type(model)
        if kind in (nn.Linear,):
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif kind in (nn.LSTM, nn.RNN, nn.GRU):
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)

    def forward(self, x):
        """Classify from the last time step's hidden state; returns log-probs."""
        encoded, _ = self.lstm1(x)
        return self.fc(encoded[:, -1, :])
class AE_LSTM(nn.Module):
    """
    LSTM auto-encoder (CPLSTM4 -- uses an LSTM as Wk).
    mode=1 use hidden states when predicting, otherwise use cell states.

    forward() returns (-1, reconstruction_loss, x) so its result shape is
    consistent with the training function used by the other models.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False):
        self.dim = dim  # hidden dimension of the encoder
        self.bn = bn
        self.drop = dropout
        self.task = task
        self.depth = depth
        self.time_step = time_step
        self.num_classes = num_classes
        self.input_dim = input_dim
        self.mode = mode
        self.noct = noct
        super(AE_LSTM, self).__init__()
        # encoder: input_dim -> dim
        self.lstm1 = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=dim,
            bidirectional=False,
            batch_first=True
        )
        # decoder: dim -> input_dim (note the swapped hidden size)
        self.lstm2 = nn.LSTM(
            input_size=dim,
            hidden_size=self.input_dim,
            bidirectional=False,
            batch_first=True
        )
        # width of the stacked representation from get_reg_out(stack=True)
        # (192/193 appear tied to the expected sequence length -- TODO confirm)
        if self.noct:
            self.stack_dim = self.dim * 192
        else:
            self.stack_dim = self.dim * 193
        self.dropout = nn.Dropout(self.drop)
        self.softmax = nn.Softmax(dim=0)
        self.lsoftmax = nn.LogSoftmax(dim=0)
        for model in [self.lstm1, self.lstm2]:
            self.initialize_weights(model)

    def init_hidden(self, bs, dim):
        """Zero (h, c) initial states.

        NOTE(review): relies on a module-level `device` global -- confirm it is
        defined wherever this method is called.
        """
        cell_states = torch.zeros(1, bs, dim).to(device)
        hidden_states = torch.zeros(1, bs, dim).to(device)
        return (hidden_states, cell_states)

    def initialize_weights(self, model):
        """Xavier/orthogonal re-init (zero biases) for Linear and recurrent layers."""
        if type(model) in [nn.Linear]:
            nn.init.xavier_uniform_(model.weight)
            nn.init.zeros_(model.bias)
        elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
            nn.init.orthogonal_(model.weight_hh_l0)
            nn.init.xavier_uniform_(model.weight_ih_l0)
            nn.init.zeros_(model.bias_hh_l0)
            nn.init.zeros_(model.bias_ih_l0)

    def get_reg_out(self, x, stack=False, warm=False, conti=False):
        """Encoder representation for a downstream regressor/classifier.

        Accepts (bs, 1, T, F) and channel-first (bs, 76, T) inputs as well.
        """
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        xt, (ht, ct) = self.lstm1(x)
        if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
        if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
        return xt[:, -1, :].squeeze(1)

    def get_encode(self, x):
        """Flattened encoder outputs, shape (bs, T * dim)."""
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        # Fix: nn.LSTM returns a 2-tuple (output, (h_n, c_n)); the previous
        # `x, _, _ = self.lstm1(x)` raised ValueError on every call.
        x, _ = self.lstm1(x)
        x = nn.Flatten()(x)
        return x

    def forward(self, x):
        """Auto-encode x and return (-1, mean MSE reconstruction loss, x)."""
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 76: x = x.transpose(1, 2)
        self.bs = x.shape[0]
        x_t, state1 = self.lstm1(x)  # encoder part : zeros init
        x_hat, state2 = self.lstm2(x_t)  # decoder part : zeros init
        loss = nn.MSELoss(reduction='mean')(x, x_hat)
        return -1, loss, x  # make sure it is consistent with other models training function
class CAE_LSTM(AE_LSTM):
    """
    Contrastive auto-encoder with an LSTM backbone.

    Same encoder/decoder as AE_LSTM (get_reg_out is inherited unchanged), but
    the reconstruction is scored with an InfoNCE objective over the batch
    instead of a plain MSE.
    """
    def __init__(self, dim, bn, dropout, task,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, noct=False):
        super(CAE_LSTM, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode, noct)

    def forward(self, x):
        """Encode/decode x and return (nce_accuracy, nce_loss, x)."""
        if len(x.shape) == 4:
            x = x.squeeze(1)
        if x.shape[1] == 76:
            x = x.transpose(1, 2)
        self.bs = x.shape[0]
        x_t, state1 = self.lstm1(x)  # encoder part : zeros init
        x_hat, state2 = self.lstm2(x_t)  # decoder part : zeros init
        loss, acc = self.compute_nce(x_hat, x)
        return acc, loss, x

    def compute_nce(self, x_hat, x):
        """InfoNCE over the batch: each reconstruction should score highest
        against its own input among all inputs in the batch.

        :return: (nce_loss, fraction of samples matched to themselves)
        """
        bs = x.shape[0]
        assert x.shape == x_hat.shape
        x = x.view(bs, -1)
        x_hat = x_hat.reshape(bs, -1)
        total = torch.mm(x_hat, x.T)
        # Fix: build the target indices on the same device as the score matrix
        # instead of hard-coding .cuda(), so the model also runs on CPU.
        targets = torch.arange(0, bs, device=total.device)
        correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), targets))
        nce = torch.sum(torch.diag(self.lsoftmax(total)))
        nce /= -1. * bs
        acc = 1. * correct.item() / bs
        return nce, acc
| [
"anonymousparti28@gmail.com"
] | anonymousparti28@gmail.com |
86c976754fbba24178415faffcb3f295036aef07 | b4e9c9927f4839dcf2e03b26d51cc08f1ad5c362 | /el7toel8/acme/actors/acmestoragemigrator/actor.py | 9a312a6f0840ebb57cf8e6aa7e8d83031aa43db0 | [] | no_license | shaded-enmity/isv-repositories | cbde5baacf49029a4122541987ec30f634c9c85f | 8f52c44cc6c0d663c5ceef2eea92aea759d20899 | refs/heads/master | 2022-06-21T00:11:25.417412 | 2020-05-06T15:05:18 | 2020-05-06T15:05:18 | 261,727,873 | 0 | 0 | null | 2020-05-06T15:05:19 | 2020-05-06T10:39:07 | null | UTF-8 | Python | false | false | 1,380 | py | from leapp.actors import Actor
from leapp.models import Report, AcmeStorageInfo
from leapp.tags import FirstBootPhaseTag, IPUWorkflowTag
from leapp import reporting
import os
class AcmeStorageMigrator(Actor):
    """
    Migrate ACME Storage device from old location to the new one
    """

    name = 'acme_storage_migrator'
    consumes = (AcmeStorageInfo,)
    produces = (Report,)
    tags = (FirstBootPhaseTag, IPUWorkflowTag)

    def process(self):
        # first AcmeStorageInfo message produced by an earlier actor
        # NOTE(review): falls back to None but the attribute access below does
        # not guard against it -- confirm the message is always produced
        acme_storage_info = next(self.consume(AcmeStorageInfo),None)
        # Rename the device
        if acme_storage_info.has_device and acme_storage_info.has_kernel_module:
            os.rename('/dev/acme0', '/dev/acme')
            # Emit a report message informing the system administrator that the device
            # path has been changed
            reporting.create_report([
                reporting.Title('ACME Storage device path migrated'),
                reporting.Summary('ACME Storage device path has been changed to /dev/acme'),
                reporting.Severity(reporting.Severity.INFO),
                reporting.Tags([reporting.Tags.OS_FACTS]),
                reporting.RelatedResource('device', '/dev/acme'),
                reporting.ExternalLink(
                    url='https://acme.corp/storage-rhel',
                    title='ACME Storage on RHEL'
                )
            ])
| [
"noreply@github.com"
] | shaded-enmity.noreply@github.com |
ee4ec4d60d0e2809301b28150cd9934a01d330c6 | bffa0938e70732e992a5d5dc5fb30559fd0ceb7b | /Zadanie9/main.py | 7be093c8309014ad5a6351c1465cc96dc7c501d2 | [] | no_license | LenovoDobrynin/zadanie9.py | 281d5d4850dfb374a273701f588a323f0c524b48 | 54d75ff2645d447b6f31a9327feaadb1fbf0f21e | refs/heads/master | 2023-08-28T00:35:01.091714 | 2021-10-14T20:38:18 | 2021-10-14T20:38:18 | 417,274,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | a1 = float(input('Введите число а1: '))
# Solve a 3x3 linear system by Cramer's rule; the coefficients of
# a*x + b*y + c*z = d are read one by one (a1 is read just above).
a2 = float(input('Введите число a2: '))
a3 = float(input('Введите число a3: '))
b1 = float(input('Введите число b1: '))
b2 = float(input('Введите число b2: '))
b3 = float(input('Введите число b3: '))
c1 = float(input('Введите число c1: '))
c2 = float(input('Введите число c2: '))
c3 = float(input('Введите число c3: '))
d1 = float(input('Введите число d1: '))
d2 = float(input('Введите число d2: '))
d3 = float(input('Введите число d3: '))
# determinant of the coefficient matrix (Sarrus expansion)
delta = a1*b2*c3+b1*c2*a3+c1*a2*b3-c1*b2*a3-a1*c2*b3-b1*a2*c3
# NOTE(review): exact float comparison with 0 -- near-singular systems pass through
if delta == 0:
    print('Главный определитель системы равен нулю')
else:
    # determinants with the right-hand side substituted for each column
    delta1 = d1*b2*c3+b1*c2*d3+c1*d2*b3-c1*b2*d3-d1*c2*b3-b1*d2*c3
    delta2 = a1*d2*c3+d1*c2*a3+c1*a2*d3-c1*d2*a3-a1*c2*d3-d1*a2*c3
    delta3 = a1*b2*d3+b1*d2*a3+d1*a2*b3-d1*b2*a3-a1*d2*b3-b1*a2*d3
    x = delta1/delta
    y = delta2/delta
    z = delta3/delta
print(x,y,z) | [
"noreply@github.com"
] | LenovoDobrynin.noreply@github.com |
d008e616c943f18e5f7f5c090bc112e713db99cf | c4b7b5a9c56a9b6394a14704d2faf76754175473 | /rooms/templatetags/is_booked.py | da615b5d82465d9cb146e16beb8eeaefaf53bbc4 | [] | no_license | seungjinhan/airbnb_clone_django | 71a15e5242bad28fd96d5f47652a049a77f12f61 | 4c38780746409ea1ed9b4f5b02abca60326752c2 | refs/heads/master | 2022-12-02T15:14:39.341441 | 2020-08-23T13:50:42 | 2020-08-23T13:50:42 | 280,878,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import datetime
from django import template
from reservations import models as reservation_model
register = template.Library()
@register.simple_tag
def is_booked(room, day):
    """Template tag: True when `room` has a BookedDay on the given calendar day.

    Day number 0 marks a calendar padding cell and is never booked.
    Fix: removed the leftover debug `print(date)` / `print(room)` calls that
    wrote to stdout for every booked day rendered.
    """
    if day.number == 0:
        return False
    try:
        date = datetime.datetime(
            year=day.year, month=day.month, day=day.number)
        # raises DoesNotExist when there is no booking for that exact day
        reservation_model.BookedDay.objects.get(
            day=date, reservation__room=room)
        return True
    except reservation_model.BookedDay.DoesNotExist:
        return False
| [
"hanblues@gmail.com"
] | hanblues@gmail.com |
a6d6d50572836ba4614154dce36cf5e2c21f9c51 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02679/s613915096.py | fec86a56bc93ae2efcf62264eb570f7a448a4ed4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import math, collections
# Count, modulo 1e9+7, the non-empty subsets of the N pairs (a, b) in which no
# two chosen pairs are "orthogonal" (a1*a2 + b1*b2 == 0).
# NOTE(review): appears to be AtCoder ABC168 E "Dot" -- confirm.
N = int(input())
AB = [[int(_) for _ in input().split()] for _ in range(N)]
mod = 10**9 + 7
C = collections.Counter()
gcd = math.gcd
a0 = 0  # (0, 0) pairs conflict with everything; each can only be chosen alone
for a, b in AB:
    if a == b == 0:
        a0 += 1
    elif a == 0:
        # pure-vertical direction gets the canonical key (0, -1)
        C[0, -1] += 1
    else:
        # reduce by the gcd and force a > 0 so each direction has a unique key
        g = gcd(a, b)
        a //= g
        b //= g
        if a < 0:
            a *= -1
            b *= -1
        C[a, b] += 1
ans = 1
for a, b in C:
    if C[b, -a]:
        # handled when the orthogonal partner key (b, -a) is visited
        continue
    elif C[-b, a]:
        # orthogonal group pair: choose any subset from one side only;
        # 2^s + 2^t - 1 avoids counting the empty choice twice
        ans *= (pow(2, C[a, b], mod) + pow(2, C[-b, a], mod) - 1) % mod
        ans %= mod
    else:
        # unconstrained group: any subset works
        ans *= pow(2, C[a, b], mod)
        ans %= mod
ans += a0 - 1  # add the a0 single-(0,0) selections, subtract the all-empty one
ans %= mod
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8bb434118d8a33e713f63403eaff1c739d22dc81 | 144c0ec2098c1a79f277729550262ab953a34aed | /logisticka.py | 32ff6f38f03be4fd21104c731494fc99743dc6be | [] | no_license | mkonjikovac/logistickaRegresija | ad7142d38824c70e992213ae8d4f6bb6ab2d74c7 | 37e58a40526a3679aeeaacbe49d5ad0e06d8881c | refs/heads/master | 2022-03-07T03:30:00.813775 | 2019-11-08T13:57:02 | 2019-11-08T13:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,989 | py | # logisticka
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sn
def standardization(x):
    """Return a copy of x with every column except the first (bias) z-scored.

    Fixes: the shape is now taken from ``x`` itself instead of the module-level
    globals ``m``/``n`` (so the function works on any 2-D array), and the
    per-element Python loops are replaced with a vectorized computation.

    :param x: (m, n) array whose column 0 is the all-ones bias column
    :return: standardized copy; the input is not modified
    """
    xs = np.copy(x)
    mean = np.mean(xs[:, 1:], axis=0)
    std = np.std(xs[:, 1:], axis=0)
    xs[:, 1:] = (xs[:, 1:] - mean) / std
    return xs
def h(theta, x):
    """Logistic hypothesis: sigmoid of theta^T x."""
    z = theta.T.dot(x)
    return 1 / (1 + math.exp(-z))
# batch gradient descent (ascent on the log-likelihood), likelihood maximization
def gradient(x, y, theta):
    """Gradient of the logistic log-likelihood with respect to theta.

    Fixes: the problem size is derived from ``x`` instead of the module-level
    globals ``m``/``n``, and the O(m*n) Python double loop is vectorized.

    :param x: (m, n) design matrix
    :param y: (m,) vector of 0/1 labels
    :param theta: (n, 1) parameter column vector
    :return: (n, 1) gradient column vector
    """
    probs = 1.0 / (1.0 + np.exp(-x.dot(theta)))      # sigmoid(theta^T x_i), shape (m, 1)
    residual = np.asarray(y).reshape(-1, 1) - probs  # y_i - h_theta(x_i)
    return x.T.dot(residual)
def gradient_loss(x, y):
    """Plot the loss history of batch gradient ascent for three learning rates."""
    rates = (0.01, 0.02, 0.04)
    histories = [gradient_descent_j(x, y, rate, 1)[1] for rate in rates]
    for history in histories:
        plt.plot(range(len(history)), history)
    plt.legend(['alpha = 0.01', 'alpha = 0.02', 'alpha = 0.04'], loc='upper right')
    plt.xlabel('iter')
    plt.ylabel('J')
    plt.show()
def gradient_descent_j(x, y, alpha=0.02, flag=0):
    """Batch gradient ascent on the logistic log-likelihood.

    Fixes: ``theta`` is sized from ``x.shape`` instead of the global ``n``, and
    the loss is computed over the ``x`` argument instead of the module-level
    ``xs`` (a latent bug that only worked because the caller passed ``xs``).

    :param x: (m, n) design matrix
    :param y: (m,) vector of 0/1 labels
    :param alpha: learning rate
    :param flag: when truthy, record the negative log-likelihood after each step
    :return: (theta, J) -- fitted (n, 1) parameters and the loss history list
    """
    m, n = x.shape
    theta = np.zeros((n, 1))
    bound = 2e-2  # stop once the gradient norm drops below this threshold
    J = []
    dl = gradient(x, y, theta)
    while np.linalg.norm(dl) > bound:
        theta = theta + alpha * dl
        dl = gradient(x, y, theta)
        if flag:
            dJ = 0
            for i in range(m):
                ht = h(theta, x[i].T)
                dJ = dJ + y[i] * math.log(ht) + (1 - y[i]) * math.log(1 - ht)
            J.append(-dJ)
    return theta, J
def gradient_descent(x, y):
    """Run gradient ascent and return only the fitted parameter vector."""
    theta, _ = gradient_descent_j(x, y)
    return theta
# softmax
def delta(x, y, theta):
    """Gradient of the softmax log-likelihood with respect to theta.

    The last class's row of theta is held at zero (the usual softmax
    over-parameterisation), so only the first k-1 rows receive updates.
    Fix: class/feature counts are taken from ``theta.shape`` instead of the
    module-level globals ``k``/``n``.

    :param x: (m, n) batch of samples
    :param y: (m,) integer class labels in [0, k)
    :param theta: (k, n) parameter matrix
    :return: (k, n) gradient matrix (last row all zeros)
    """
    m = x.shape[0]
    k, n = theta.shape
    deltaJ = np.zeros((k, n))
    for r in range(k - 1):
        for i in range(m):
            # partition function for sample i
            s = 0
            for j in range(k):
                s = s + math.exp(theta[j].dot(x[i].T))
            deltaJ[r] = deltaJ[r] + ((y[i] == r) - math.exp(theta[r].dot(x[i].T)) / s) * x[i]
    return deltaJ
def gauss(x, my, sigma):
    """Univariate normal pdf N(my, sigma^2) evaluated at x.

    Fix: the exponent previously computed ``-(x-my)^2 / 2 * sigma2`` which,
    by operator precedence, *multiplies* by the variance instead of dividing
    by ``2 * sigma2`` (correct only when sigma == 1).
    """
    sigma2 = math.pow(sigma, 2)
    return 1 / math.sqrt(2 * math.pi * sigma2) * math.exp(-math.pow(x - my, 2) / (2 * sigma2))
def gnb(x, my1, sigma1, my0, sigma0):
    """Exponential of the difference of the two Gaussian quadratic forms.

    Used as a class-likelihood ratio term; note the determinant normalisers
    of the two covariances are not included.
    """
    inv1 = np.linalg.inv(sigma1)
    inv0 = np.linalg.inv(sigma0)
    quad1 = 0.5 * x.T.dot(inv1).dot(x) - my1.T.dot(inv1).dot(x) + 0.5 * my1.T.dot(inv1).dot(my1)
    quad0 = 0.5 * x.T.dot(inv0).dot(x) - my0.T.dot(inv0).dot(x) + 0.5 * my0.T.dot(inv0).dot(my0)
    return math.exp(quad1 - quad0)
def plot_conf(conf, reg, train):
    """Print a confusion matrix and render it as an annotated seaborn heatmap.

    NOTE(review): uses the module-level ``k`` for the axis labels.

    :param conf: (k, k) confusion matrix
    :param reg: label printed before the train matrix (e.g. model name)
    :param train: 1 for the training matrix, anything else for the test matrix
    """
    if train == 1:
        print(reg)
        print('conf_train:')
    else:
        print('conf_test:')
    print(conf)
    frame = pd.DataFrame(conf, range(k), range(k))
    axes = sn.heatmap(frame, annot=True, annot_kws={"size": 12})
    y_lo, y_hi = axes.get_ylim()
    # widen the y-limits by half a cell (common fix for cropped heatmap rows)
    axes.set_ylim(y_lo + 0.5, y_hi - 0.5)
    plt.show()
# ---- data preparation ----
df = pd.read_csv('multiclass_data.csv', header=None)
df.columns = ['x1', 'x2', 'x3', 'x4', 'x5', 'y']
df.insert(0, 'one', 1)  # bias column of ones
boundary_index = round(df.shape[0] * 0.8)  # 80/20 train/test split point
df = df.sample(frac=1)  # shuffle the rows
y = df['y'].to_numpy()
x = df.iloc[:, 0:6].to_numpy()
m, n, k = x.shape[0], x.shape[1], len(np.unique(y))  # n includes the column of ones
xs = standardization(x)
# logistic regression: one-vs-rest with three binary classifiers
y0, y1, y2 = np.copy(y), np.copy(y), np.copy(y)
# y0 becomes the indicator of class 0 (1/2 -> 2 -> 0, 0 -> 1)
y0[y0 >= 1], y0[y0 == 0], y0[y0 > 1] = 2, 1, 0
y1[y1 != 1] = 0  # indicator of class 1
y2[y2 <= 1], y2[y2 == 2] = 0, 1  # indicator of class 2
theta0, theta1, theta2 = gradient_descent(xs, y0), gradient_descent(xs, y1), gradient_descent(xs, y2)
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
y_guess = np.zeros((m, 1), int)
for i in range(m):
    # pick the class whose binary classifier is most confident
    h0, h1, h2 = h(theta0, xs[i].T), h(theta1, xs[i].T), h(theta2, xs[i].T)
    if h0 > h1 and h0 > h2:
        y_guess[i] = 0
    elif h1 > h0 and h1 > h2:
        y_guess[i] = 1
    else:
        y_guess[i] = 2
    if i < boundary_index:
        conf_train[y[i], y_guess[i]] = conf_train[y[i], y_guess[i]] + 1
    else:
        conf_test[y[i], y_guess[i]] = conf_test[y[i], y_guess[i]] + 1
plot_conf(conf_train, 'LOGISTIČKA:', 1)
plot_conf(conf_test, 'LOGISTIČKA:', 0)
gradient_loss(xs, y1)  # loss curve as a function of the learning rate
# ---- softmax regression trained with mini-batch gradient ascent ----
shuffle = np.arange(m)
row_num = [5, 10, 20]  # mini-batch sizes to compare
row_size = row_num[1]  # the batch size whose parameters are kept
for row in row_num:
    alpha, step, cnt = 0.02, 0, 1000
    theta_row, J = np.zeros((k, n)), []
    for i in range(cnt):
        # one gradient step on the current mini-batch [step, step+row)
        theta_row = theta_row + alpha * delta(xs[step:min(m, step + row)], y[step:min(m, step + row)], theta_row)
        # full-dataset negative log-likelihood after this step
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # iteration counter (harmless in Python's for, but confusing)
        dJ = 0
        for i in range(m):
            y_guess = 0
            for j in range(k):
                y_guess = y_guess + math.exp(theta_row[j].dot(xs[i].T))
            dJ = dJ + (theta_row[y[i]].dot(xs[i].T) - math.log(y_guess))
        J.append(-dJ)
        step = (step + row) % m
        if step < row:
            # finished an epoch: restart and reshuffle the data
            step = 0
            np.random.shuffle(shuffle)
            xs, y = xs[shuffle], y[shuffle]
    if row == row_size:
        theta = theta_row
    plt.plot(range(len(J)), J)
plt.legend(['šarža = 5', 'šarža = 10', 'šarža = 20'], loc='upper right')
plt.xlabel('iter')
plt.ylabel('J')
plt.show()
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
for i in range(m):
    # softmax probabilities for sample i under the kept parameters
    phi, s = np.zeros((k, 1)), 0
    for r in range(k):
        phi[r] = math.exp(theta[r].dot(xs[i].T))
        s = s + math.exp(theta[r].dot(xs[i].T))
    phi = phi / s
    phi_max_index = np.argmax(phi)
    if i < boundary_index:
        conf_train[y[i], phi_max_index] = conf_train[y[i], phi_max_index] + 1
    else:
        conf_test[y[i], phi_max_index] = conf_test[y[i], phi_max_index] + 1
plot_conf(conf_train, 'SOFTMAX:', 1)
plot_conf(conf_test, 'SOFTMAX:', 0)
# ---- GDA - Gaussian discriminant analysis ----
# NOTE(review): this section uses per-feature univariate Gaussians (i.e. a
# diagonal/naive model) while the "GNB" section below builds full covariance
# matrices -- the two labels appear swapped; confirm intent.
xs = xs[:, 1:]  # drop the bias column
xs = np.c_[xs, y]  # append the labels as the last column
n = n - 1  # the column of ones is no longer needed
xs0, xs1, xs2 = xs[np.where(xs[:, n] == 0)], xs[np.where(xs[:, n] == 1)], xs[np.where(xs[:, n] == 2)]
xs0, xs1, xs2 = xs0[:, :-1], xs1[:, :-1], xs2[:, :-1]
x_sep = [xs0, xs1, xs2]
my, sigma = np.zeros((k, n)), np.zeros((k, n))
# compute my (mean) and sigma (standard deviation) per class and feature
for i in range(k):
    for j in range(n):
        my[i, j] = np.mean(x_sep[i][:, j])
        sigma[i, j] = np.std(x_sep[i][:, j])
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
for i in range(m):
    gm, p = np.zeros((k, n)), np.zeros(k)  # gauss matrix
    total = 0
    for l in range(k):
        # per-feature likelihoods, then their product per class
        for j in range(n):
            gm[l, j] = gauss(xs[i, j], my[l, j], sigma[l, j])
        p[l] = np.prod(gm[l])
        total = total + p[l]
    p = p / total  # normalise to posterior-like scores (uniform prior)
    if i < boundary_index:
        conf_train[y[i], np.argmax(p)] = conf_train[y[i], np.argmax(p)] + 1
    else:
        conf_test[y[i], np.argmax(p)] = conf_test[y[i], np.argmax(p)] + 1
plot_conf(conf_train, 'GDA:', 1)
plot_conf(conf_test, 'GDA:', 0)
# ---- GNB - naive Bayes ----
# NOTE(review): despite the name, this section estimates full per-class
# covariance matrices (that is classical GDA); see the note in the GDA section.
# The hard-coded 5 below matches n (number of features) -- TODO confirm.
MY0 = np.ones((5, xs0.shape[0]))
MY1 = np.ones((5, xs1.shape[0]))
MY2 = np.ones((5, xs2.shape[0]))
for j in range(n):
    # broadcast each class mean across that class's samples
    MY0[j] = my[0, j]
    MY1[j] = my[1, j]
    MY2[j] = my[2, j]
# NOTE(review): leftover debug output
print(xs0, my)
# unbiased per-class sample covariance matrices
SIGMA0 = 1 / (xs0.shape[0] - 1) * (xs0.T - MY0).dot((xs0.T - MY0).T)
SIGMA1 = 1 / (xs1.shape[0] - 1) * (xs1.T - MY1).dot((xs1.T - MY1).T)
SIGMA2 = 1 / (xs2.shape[0] - 1) * (xs2.T - MY2).dot((xs2.T - MY2).T)
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
# removing the column of ones
# NOTE(review): this actually strips the label column appended in the GDA section
xs = xs[:, :-1]
for i in range(m):
    # posterior-style scores from pairwise likelihood ratios via gnb()
    p = np.zeros(k)
    p[0] = 1 / (1 + gnb(xs[i].T, my[1].T, SIGMA1, my[0].T, SIGMA0) + gnb(xs[i].T, my[2].T, SIGMA2, my[0].T, SIGMA0))
    p[1] = 1 / (1 + gnb(xs[i].T, my[0].T, SIGMA0, my[1].T, SIGMA1) + gnb(xs[i].T, my[2].T, SIGMA2, my[1].T, SIGMA1))
    p[2] = 1 / (1 + gnb(xs[i].T, my[0].T, SIGMA0, my[2].T, SIGMA2) + gnb(xs[i].T, my[1].T, SIGMA1, my[2].T, SIGMA2))
    if i < boundary_index:
        conf_train[y[i], np.argmax(p)] = conf_train[y[i], np.argmax(p)] + 1
    else:
        conf_test[y[i], np.argmax(p)] = conf_test[y[i], np.argmax(p)] + 1
plot_conf(conf_train, 'GNB:', 1)
plot_conf(conf_test, 'GNB:', 0)
| [
"mkonjikovac12@gmail.com"
] | mkonjikovac12@gmail.com |
e9811e3794478cb96fb7f9d5165286664ef1e3d4 | 67b8c98b89f45780b1a153b2a06ed9b76626df23 | /pyparrot_modified/pyparrot/networking/bleConnection.py | 95cec165f89ea2033ec44f380e408329ea22c022 | [
"MIT"
] | permissive | Hollyqui/PyStalk | fe37dccb583f083b284134d0787dc9ef5dfeee5e | 98abdaeb6194e79b402304a619182cec63074f2d | refs/heads/master | 2020-06-02T15:51:40.106253 | 2020-02-11T08:00:47 | 2020-02-11T08:00:47 | 191,217,500 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 27,229 | py | from bluepy.btle import Peripheral, UUID, DefaultDelegate, BTLEException
from pyparrot_modified.pyparrot.utils.colorPrint import color_print
import struct
import time
from pyparrot_modified.pyparrot.commandsandsensors.DroneSensorParser import get_data_format_and_size
from datetime import datetime
class MinidroneDelegate(DefaultDelegate):
    """
    Handle BLE notififications

    Routes each incoming notification to the sensor parser or to the
    command-ack bookkeeping, based on which receive channel it arrived on.
    """
    def __init__(self, handle_map, minidrone, ble_connection):
        # handle_map: BLE handle -> 2-char characteristic suffix ('0e', '0f', '1b', '1c')
        DefaultDelegate.__init__(self)
        self.handle_map = handle_map
        self.minidrone = minidrone
        self.ble_connection = ble_connection
        color_print("initializing notification delegate", "INFO")

    def handleNotification(self, cHandle, data):
        """Dispatch one notification.

        The first two bytes of every packet are (packet_type, sequence number);
        the remainder is the channel-specific payload.
        """
        #print "handling notificiation from channel %d" % cHandle
        #print "handle map is %s " % self.handle_map[cHandle]
        #print "channel map is %s " % self.minidrone.characteristic_receive_uuids[self.handle_map[cHandle]]
        #print "data is %s " % data
        channel = self.ble_connection.characteristic_receive_uuids[self.handle_map[cHandle]]
        (packet_type, packet_seq_num) = struct.unpack('<BB', data[0:2])
        raw_data = data[2:]

        if channel == 'ACK_DRONE_DATA':
            # data received from drone (needs to be ack on 1e)
            #color_print("calling update sensors ack true", "WARN")
            self.minidrone.update_sensors(packet_type, None, packet_seq_num, raw_data, ack=True)
        elif channel == 'NO_ACK_DRONE_DATA':
            # data from drone (including battery and others), no ack
            #color_print("drone data - no ack needed")
            self.minidrone.update_sensors(packet_type, None, packet_seq_num, raw_data, ack=False)
        elif channel == 'ACK_COMMAND_SENT':
            # ack 0b channel, SEND_WITH_ACK
            #color_print("Ack! command received!")
            self.ble_connection._set_command_received('SEND_WITH_ACK', True)
        elif channel == 'ACK_HIGH_PRIORITY':
            # ack 0c channel, SEND_HIGH_PRIORITY
            #color_print("Ack! high priority received")
            self.ble_connection._set_command_received('SEND_HIGH_PRIORITY', True)
        else:
            color_print("unknown channel %s sending data " % channel, "ERROR")
            color_print(cHandle)
class BLEConnection:
    def __init__(self, address, minidrone):
        """
        Initialize with its BLE address - if you don't know the address, call findMinidrone
        and that will discover it for you.

        :param address: unique address for this minidrone
        :param minidrone: the Minidrone object for this minidrone (needed for callbacks for sensors)
        """
        self.address = address
        # bluepy connection object; the actual BLE connect happens later in connect()
        self.drone_connection = Peripheral()
        self.minidrone = minidrone

        # the following UUID segments come from the Minidrone and from the documenation at
        # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3
        # the 3rd and 4th bytes are used to identify the service
        self.service_uuids = {
            'fa00': 'ARCOMMAND_SENDING_SERVICE',
            'fb00': 'ARCOMMAND_RECEIVING_SERVICE',
            'fc00': 'PERFORMANCE_COUNTER_SERVICE',
            'fd21': 'NORMAL_BLE_FTP_SERVICE',
            'fd51': 'UPDATE_BLE_FTP',
            'fe00': 'UPDATE_RFCOMM_SERVICE',
            '1800': 'Device Info',
            '1801': 'unknown',
        }
        # the following characteristic UUID segments come from the documentation at
        # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3
        # the 4th bytes are used to identify the characteristic
        # the usage of the channels are also documented here
        # http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2
        self.characteristic_send_uuids = {
            '0a': 'SEND_NO_ACK',  # not-ack commandsandsensors (PCMD only)
            '0b': 'SEND_WITH_ACK',  # ack commandsandsensors (all piloting commandsandsensors)
            '0c': 'SEND_HIGH_PRIORITY',  # emergency commandsandsensors
            '1e': 'ACK_COMMAND'  # ack for data sent on 0e
        }
        # counters for each packet (required as part of the packet)
        self.characteristic_send_counter = {
            'SEND_NO_ACK': 0,
            'SEND_WITH_ACK': 0,
            'SEND_HIGH_PRIORITY': 0,
            'ACK_COMMAND': 0,
            'RECEIVE_WITH_ACK': 0
        }
        # the following characteristic UUID segments come from the documentation at
        # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3
        # the 4th bytes are used to identify the characteristic
        # the types of commandsandsensors and data coming back are also documented here
        # http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2
        self.characteristic_receive_uuids = {
            '0e': 'ACK_DRONE_DATA',  # drone data that needs an ack (needs to be ack on 1e)
            '0f': 'NO_ACK_DRONE_DATA',  # data from drone (including battery and others), no ack
            '1b': 'ACK_COMMAND_SENT',  # ack 0b channel, SEND_WITH_ACK
            '1c': 'ACK_HIGH_PRIORITY',  # ack 0c channel, SEND_HIGH_PRIORITY
        }
        # these are the FTP incoming and outcoming channels
        # the handling characteristic seems to be the one to send commandsandsensors to (per the SDK)
        # information gained from reading ARUTILS_BLEFtp.m in the SDK
        self.characteristic_ftp_uuids = {
            '22': 'NORMAL_FTP_TRANSFERRING',
            '23': 'NORMAL_FTP_GETTING',
            '24': 'NORMAL_FTP_HANDLING',
            '52': 'UPDATE_FTP_TRANSFERRING',
            '53': 'UPDATE_FTP_GETTING',
            '54': 'UPDATE_FTP_HANDLING',
        }
        # FTP commandsandsensors (obtained via ARUTILS_BLEFtp.m in the SDK)
        self.ftp_commands = {
            "list": "LIS",
            "get": "GET"
        }
        # need to save for communication (but they are initialized in connect)
        self.services = None
        self.send_characteristics = dict()
        self.receive_characteristics = dict()
        self.handshake_characteristics = dict()
        self.ftp_characteristics = dict()
        # frame types used as the first byte of every outgoing packet
        self.data_types = {
            'ACK': 1,
            'DATA_NO_ACK': 2,
            'LOW_LATENCY_DATA': 3,
            'DATA_WITH_ACK': 4
        }
        # store whether a command was acked
        self.command_received = {
            'SEND_WITH_ACK': False,
            'SEND_HIGH_PRIORITY': False,
            'ACK_COMMAND': False
        }
        # instead of parsing the XML file every time, cache the results
        self.command_tuple_cache = dict()
        self.sensor_tuple_cache = dict()
        # maximum number of times to try a packet before assuming it failed
        self.max_packet_retries = 3
def connect(self, num_retries):
"""
Connects to the drone and re-tries in case of failure the specified number of times
:param: num_retries is the number of times to retry
:return: True if it succeeds and False otherwise
"""
# first try to connect to the wifi
try_num = 1
connected = False
while (try_num < num_retries and not connected):
try:
self._connect()
connected = True
except BTLEException:
color_print("retrying connections", "INFO")
try_num += 1
# fall through, return False as something failed
return connected
def _reconnect(self, num_retries):
"""
Reconnect to the drone (assumed the BLE crashed)
:param: num_retries is the number of times to retry
:return: True if it succeeds and False otherwise
"""
try_num = 1
success = False
while (try_num < num_retries and not success):
try:
color_print("trying to re-connect to the minidrone at address %s" % self.address, "WARN")
self.drone_connection.connect(self.address, "random")
color_print("connected! Asking for services and characteristics", "SUCCESS")
success = True
except BTLEException:
color_print("retrying connections", "WARN")
try_num += 1
if (success):
# do the magic handshake
self._perform_handshake()
return success
    def _connect(self):
        """
        Connect to the minidrone to prepare for flying - includes getting the services and characteristics
        for communication

        :return: throws an error if the drone connection failed. Returns void if nothing failed.
        """
        color_print("trying to connect to the minidrone at address %s" % self.address, "INFO")
        self.drone_connection.connect(self.address, "random")
        color_print("connected! Asking for services and characteristics", "SUCCESS")

        # re-try until all services have been found
        allServicesFound = False

        # used for notifications
        handle_map = dict()

        while not allServicesFound:
            # get the services
            self.services = self.drone_connection.getServices()

            # loop through the services
            for s in self.services:
                hex_str = self._get_byte_str_from_uuid(s.uuid, 3, 4)

                # store the characteristics for receive & send
                if (self.service_uuids[hex_str] == 'ARCOMMAND_RECEIVING_SERVICE'):
                    # only store the ones used to receive data
                    for c in s.getCharacteristics():
                        hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4)
                        if hex_str in self.characteristic_receive_uuids:
                            self.receive_characteristics[self.characteristic_receive_uuids[hex_str]] = c
                            # remember which BLE handle maps to which channel for the delegate
                            handle_map[c.getHandle()] = hex_str

                elif (self.service_uuids[hex_str] == 'ARCOMMAND_SENDING_SERVICE'):
                    # only store the ones used to send data
                    for c in s.getCharacteristics():
                        hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4)
                        if hex_str in self.characteristic_send_uuids:
                            self.send_characteristics[self.characteristic_send_uuids[hex_str]] = c

                elif (self.service_uuids[hex_str] == 'UPDATE_BLE_FTP'):
                    # store the FTP info
                    for c in s.getCharacteristics():
                        hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4)
                        if hex_str in self.characteristic_ftp_uuids:
                            self.ftp_characteristics[self.characteristic_ftp_uuids[hex_str]] = c

                elif (self.service_uuids[hex_str] == 'NORMAL_BLE_FTP_SERVICE'):
                    # store the FTP info
                    for c in s.getCharacteristics():
                        hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4)
                        if hex_str in self.characteristic_ftp_uuids:
                            self.ftp_characteristics[self.characteristic_ftp_uuids[hex_str]] = c

                # need to register for notifications and write 0100 to the right handles
                # this is sort of magic (not in the docs!) but it shows up on the forum here
                # http://forum.developer.parrot.com/t/minimal-ble-commands-to-send-for-take-off/1686/2
                # Note this code snippet below more or less came from the python example posted to that forum (I adapted it to my interface)
                for c in s.getCharacteristics():
                    if self._get_byte_str_from_uuid(c.uuid, 3, 4) in \
                            ['fb0f', 'fb0e', 'fb1b', 'fb1c', 'fd22', 'fd23', 'fd24', 'fd52', 'fd53', 'fd54']:
                        self.handshake_characteristics[self._get_byte_str_from_uuid(c.uuid, 3, 4)] = c

            # check to see if all 8 characteristics were found
            allServicesFound = True
            for r_id in self.characteristic_receive_uuids.values():
                if r_id not in self.receive_characteristics:
                    color_print("setting to false in receive on %s" % r_id)
                    allServicesFound = False

            for s_id in self.characteristic_send_uuids.values():
                if s_id not in self.send_characteristics:
                    color_print("setting to false in send")
                    allServicesFound = False

            for f_id in self.characteristic_ftp_uuids.values():
                if f_id not in self.ftp_characteristics:
                    color_print("setting to false in ftp")
                    allServicesFound = False

            # and ensure all handshake characteristics were found
            if len(self.handshake_characteristics.keys()) != 10:
                color_print("setting to false in len")
                allServicesFound = False

        # do the magic handshake
        self._perform_handshake()

        # initialize the delegate to handle notifications
        self.drone_connection.setDelegate(MinidroneDelegate(handle_map, self.minidrone, self))
def _perform_handshake(self):
"""
Magic handshake
Need to register for notifications and write 0100 to the right handles
This is sort of magic (not in the docs!) but it shows up on the forum here
http://forum.developer.parrot.com/t/minimal-ble-commandsandsensors-to-send-for-take-off/1686/2
:return: nothing
"""
color_print("magic handshake to make the drone listen to our commandsandsensors")
# Note this code snippet below more or less came from the python example posted to that forum (I adapted it to my interface)
for c in self.handshake_characteristics.values():
# for some reason bluepy characteristic handle is two lower than what I need...
# Need to write 0x0100 to the characteristics value handle (which is 2 higher)
self.drone_connection.writeCharacteristic(c.handle + 2, struct.pack("<BB", 1, 0))
    def disconnect(self):
        """
        Disconnect the BLE connection. Always call this at the end of your programs to
        cleanly disconnect.

        :return: void
        """
        # delegate the teardown to the bluepy Peripheral
        self.drone_connection.disconnect()
def _get_byte_str_from_uuid(self, uuid, byte_start, byte_end):
"""
Extract the specified byte string from the UUID btle object. This is an ugly hack
but it was necessary because of the way the UUID object is represented and the documentation
on the byte strings from Parrot. You give it the starting byte (counting from 1 since
that is how their docs count) and the ending byte and it returns that as a string extracted
from the UUID. It is assumed it happens before the first - in the UUID.
:param uuid: btle UUID object
:param byte_start: starting byte (counting from 1)
:param byte_end: ending byte (counting from 1)
:return: string with the requested bytes (to be used as a key in the lookup tables for services)
"""
uuid_str = format("%s" % uuid)
idx_start = 2 * (byte_start - 1)
idx_end = 2 * (byte_end)
my_hex_str = uuid_str[idx_start:idx_end]
return my_hex_str
    def send_turn_command(self, command_tuple, degrees):
        """
        Build the packet for turning and send it

        :param command_tuple: command tuple from the parser
        :param degrees: how many degrees to turn (packed as a signed 16-bit value, 'h')
        :return: True if the command was sent and False otherwise
        """
        self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256
        # "<BBBBHh": data type, sequence id, 3-part command id, degrees as signed short
        packet = struct.pack("<BBBBHh", self.data_types['DATA_WITH_ACK'],
                             self.characteristic_send_counter['SEND_WITH_ACK'],
                             command_tuple[0], command_tuple[1], command_tuple[2],
                             degrees)
        return self.send_command_packet_ack(packet)
def send_auto_takeoff_command(self, command_tuple):
"""
Build the packet for auto takeoff and send it
:param command_tuple: command tuple from the parser
:return: True if the command was sent and False otherwise
"""
# print command_tuple
self.characteristic_send_counter['SEND_WITH_ACK'] = (
self.characteristic_send_counter[
'SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBBHB", self.data_types['DATA_WITH_ACK'],
self.characteristic_send_counter['SEND_WITH_ACK'],
command_tuple[0], command_tuple[1], command_tuple[2],
1)
return self.send_command_packet_ack(packet)
    def send_command_packet_ack(self, packet):
        """
        Sends the actual packet on the ack channel. Internal function only.

        Retries up to max_packet_retries times, sleeping briefly between tries,
        until the ack notification flips command_received['SEND_WITH_ACK'].

        :param packet: packet constructed according to the command rules (variable size, constructed elsewhere)
        :return: True if the command was sent and False otherwise
        """
        try_num = 0
        self._set_command_received('SEND_WITH_ACK', False)
        while (try_num < self.max_packet_retries and not self.command_received['SEND_WITH_ACK']):
            color_print("sending command packet on try %d" % try_num, 2)
            self._safe_ble_write(characteristic=self.send_characteristics['SEND_WITH_ACK'], packet=packet)
            #self.send_characteristics['SEND_WITH_ACK'].write(packet)
            try_num += 1
            color_print("sleeping for a notification", 2)
            #notify = self.drone.waitForNotifications(1.0)
            # smart_sleep services BLE notifications, which is what sets the ack flag
            self.smart_sleep(0.5)
            #color_print("awake %s " % notify, 2)
        return self.command_received['SEND_WITH_ACK']
    def send_pcmd_command(self, command_tuple, roll, pitch, yaw, vertical_movement, duration):
        """
        Repeatedly send the PCMD (piloting) command, without ack, for *duration* seconds.

        :param command_tuple: command tuple per the parser
        :param roll: packed as a signed byte
        :param pitch: packed as a signed byte
        :param yaw: packed as a signed byte
        :param vertical_movement: packed as a signed byte
        :param duration: seconds to keep re-sending; nothing is sent when <= 0
        """
        start_time = time.time()
        while (time.time() - start_time < duration):
            self.characteristic_send_counter['SEND_NO_ACK'] = (
                self.characteristic_send_counter['SEND_NO_ACK'] + 1) % 256
            # "<BBBBHBbbbbI": header, a constant 1 flag byte (presumably
            # "pilot active" — confirm against Parrot docs), four signed byte
            # axes, then a trailing 32-bit field sent as 0
            packet = struct.pack("<BBBBHBbbbbI", self.data_types['DATA_NO_ACK'],
                                 self.characteristic_send_counter['SEND_NO_ACK'],
                                 command_tuple[0], command_tuple[1], command_tuple[2],
                                 1, int(roll), int(pitch), int(yaw), int(vertical_movement), 0)
            self._safe_ble_write(characteristic=self.send_characteristics['SEND_NO_ACK'], packet=packet)
            # self.send_characteristics['SEND_NO_ACK'].write(packet)
            # service incoming notifications between sends (return value unused)
            notify = self.drone_connection.waitForNotifications(0.1)
def send_noparam_command_packet_ack(self, command_tuple):
"""
Send a command on the ack channel - where all commandsandsensors except PCMD go, per
http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2
the id of the last command sent (for use in ack) is the send counter (which is incremented before sending)
Ensures the packet was received or sends it again up to a maximum number of times.
:param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte
:return: True if the command was sent and False otherwise
"""
self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBBH", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'],
command_tuple[0], command_tuple[1], command_tuple[2])
return self.send_command_packet_ack(packet)
    def send_enum_command_packet_ack(self, command_tuple, enum_value, usb_id=None):
        """
        Send a command on the ack channel with enum parameters as well (most likely a flip).
        All commandsandsensors except PCMD go on the ack channel per
        http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2

        the id of the last command sent (for use in ack) is the send counter (which is incremented before sending)

        :param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte
        :param enum_value: the enum index
        :param usb_id: optional usb id packed before the enum value
        :return: True if the command was sent and False otherwise
        """
        self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256
        if (usb_id is None):
            # NOTE(review): this branch packs the third command byte as "B" plus a
            # 0 pad ("<BBBBBB...") while the usb branch below and every other
            # sender use "<BBBBH". Looks inconsistent — verify against the Parrot
            # packet layout before changing.
            packet = struct.pack("<BBBBBBI", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'],
                                 command_tuple[0], command_tuple[1], command_tuple[2], 0,
                                 enum_value)
        else:
            color_print((self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'],
                         command_tuple[0], command_tuple[1], command_tuple[2], 0, usb_id, enum_value), 1)
            packet = struct.pack("<BBBBHBI", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'],
                                 command_tuple[0], command_tuple[1], command_tuple[2],
                                 usb_id, enum_value)
        return self.send_command_packet_ack(packet)
def send_param_command_packet(self, command_tuple, param_tuple=None, param_type_tuple=0, ack=True):
"""
Send a command packet with parameters. Ack channel is optional for future flexibility,
but currently commands are always send over the Ack channel so it defaults to True.
Contributed by awm102 on github. Edited by Amy McGovern to work for BLE commands also.
:param: command_tuple: the command tuple derived from command_parser.get_command_tuple()
:param: param_tuple (optional): the parameter values to be sent (can be found in the XML files)
:param: param_size_tuple (optional): a tuple of strings representing the data type of the parameters
e.g. u8, float etc. (can be found in the XML files)
:param: ack (optional): allows ack to be turned off if required
:return:
"""
# Create lists to store the number of bytes and pack chars needed for parameters
# Default them to zero so that if no params are provided the packet size is correct
param_size_list = [0] * len(param_tuple)
pack_char_list = [0] * len(param_tuple)
if param_tuple is not None:
# Fetch the parameter sizes. By looping over the param_tuple we only get the data
# for requested parameters so a mismatch in params and types does not matter
for i, param in enumerate(param_tuple):
pack_char_list[i], param_size_list[i] = get_data_format_and_size(param, param_type_tuple[i])
if ack:
ack_string = 'SEND_WITH_ACK'
data_ack_string = 'DATA_WITH_ACK'
else:
ack_string = 'SEND_NO_ACK'
data_ack_string = 'DATA_NO_ACK'
# Construct the base packet
self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256
# TODO: Amy changed this to match the BLE packet structure but needs to fully test it
packet = struct.pack("<BBBBH", self.data_types[data_ack_string],
self.characteristic_send_counter[ack_string],
command_tuple[0], command_tuple[1], command_tuple[2])
if param_tuple is not None:
# Add in the parameter values based on their sizes
for i, param in enumerate(param_tuple):
packet += struct.pack(pack_char_list[i], param)
# TODO: Fix this to not go with ack always
return self.send_command_packet_ack(packet)
    def _set_command_received(self, channel, val):
        """
        Set the command received on the specified channel to the specified value (used for acks)

        :param channel: channel name (e.g. 'SEND_WITH_ACK')
        :param val: True or False
        :return:
        """
        self.command_received[channel] = val
def _safe_ble_write(self, characteristic, packet):
"""
Write to the specified BLE characteristic but first ensure the connection is valid
:param characteristic:
:param packet:
:return:
"""
success = False
while (not success):
try:
characteristic.write(packet)
success = True
except BTLEException:
color_print("reconnecting to send packet", "WARN")
self._reconnect(3)
    def ack_packet(self, buffer_id, packet_id):
        """
        Ack the packet id specified by the argument on the ACK_COMMAND channel

        :param buffer_id: currently unused (the ack always goes out on ACK_COMMAND)
        :param packet_id: the packet id to ack
        :return: nothing
        """
        #color_print("ack last packet on the ACK_COMMAND channel", "INFO")
        self.characteristic_send_counter['ACK_COMMAND'] = (self.characteristic_send_counter['ACK_COMMAND'] + 1) % 256
        # "<BBB": ack data type, sequence id, the packet id being acknowledged
        packet = struct.pack("<BBB", self.data_types['ACK'], self.characteristic_send_counter['ACK_COMMAND'],
                             packet_id)

        #color_print("sending packet %d %d %d" % (self.data_types['ACK'], self.characteristic_send_counter['ACK_COMMAND'],
        #                                        packet_id), "INFO")
        self._safe_ble_write(characteristic=self.send_characteristics['ACK_COMMAND'], packet=packet)
        #self.send_characteristics['ACK_COMMAND'].write(packet)
def smart_sleep(self, timeout):
"""
Sleeps the requested number of seconds but wakes up for notifications
Note: NEVER use regular time.sleep! It is a blocking sleep and it will likely
cause the BLE to disconnect due to dropped notifications. Always use smart_sleep instead!
:param timeout: number of seconds to sleep
:return:
"""
start_time = datetime.now()
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
while (diff < timeout):
try:
notify = self.drone_connection.waitForNotifications(0.1)
except:
color_print("reconnecting to wait", "WARN")
self._reconnect(3)
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
| [
"noreply@github.com"
] | Hollyqui.noreply@github.com |
a34d40955607245d7cd5152f6173d3ba1d85f7cd | 0e867a76d0263d40f78a643d466ebfc10b0ac4f0 | /activities/nmz/nmz_setup.py | bd9401b72f5fcf89e245c9ae516efefd4d687f25 | [
"MIT"
] | permissive | anordin95/replay_mouse | b49d29b0ce0c72ed347e178b982c96b93af678b9 | 569abe771cac3b639317b1ca97c98b0c486a4714 | refs/heads/master | 2022-07-04T18:28:57.299865 | 2020-05-16T14:33:41 | 2020-05-16T14:33:41 | 259,782,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from pathlib import Path
from primitives.potion_tracker import setup_potions_tracker
from primitives.get_quick_pray_location import get_quick_pray_location
from primitives.record import record
# Pickled state shared between this setup script and the main NMZ routine
PICKLE_FOLDER = Path('pickled_objects')
PRAYER_POTS_FILENAME = PICKLE_FOLDER / 'prayer_pots.pkl'
RANGE_POTS_FILENAME = PICKLE_FOLDER / 'ranging_pots.pkl'
ABSORPTION_POTS_FILENAME = PICKLE_FOLDER / 'absorption_pots.pkl'
QUICK_PRAY_LOC_FILE = PICKLE_FOLDER / 'quick_pray_loc.pkl'
ROCK_CAKE_ACTION_LIST_FILE = PICKLE_FOLDER / 'rock_cake_action_list.pkl'
import logging
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
log_level = logging.INFO
logging.basicConfig(level=log_level, format=log_format)
# for use with prayer pots
# def setup():
#     setup_potions_tracker(filename=RANGE_POTS_FILENAME, potion_type='range')
#     setup_potions_tracker(filename=PRAYER_POTS_FILENAME, potion_type='prayer')
# for use with absorption pots
logger = logging.getLogger('__name__')
def setup():
logger.info("Record guzzling a rock cake. When done, press esc.")
record(use_potions=False, filename=ROCK_CAKE_ACTION_LIST_FILE)
get_quick_pray_location(filename=QUICK_PRAY_LOC_FILE)
setup_potions_tracker(filename=RANGE_POTS_FILENAME, potion_type='range')
setup_potions_tracker(filename=ABSORPTION_POTS_FILENAME, potion_type='absorption')
if __name__ == '__main__':
setup() | [
"anordin@butterflynetinc.com"
] | anordin@butterflynetinc.com |
428efb464a06b53657e381f76bf9e07b3382ba40 | 6fddeb3fb4be07e4c1063a0c49d1f25606fa78c2 | /WebProject1/primeiroPrograma.py | a6f6ef9bb480d32170e34644221afe8968666b18 | [] | no_license | RuanNunes/Logica-de-Programa-o-com-Python | d3f663881c2e51888608d9cf5f51c0956cdd10dd | 18b2b41e485d3e58ce2d5cf923e389cd146d7f18 | refs/heads/master | 2020-03-17T16:27:40.661667 | 2018-06-03T20:06:29 | 2018-06-03T20:06:29 | 133,749,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | class primeiroPrograma(object):
print('Meu Primeiro Programa')
num1 = int(input('Digite Um Numero:'))
num2 = int(input('Digite o Segundo Numero:'))
print('A soma dos numeros é:', num1 + num2)
pass
| [
"ruan.nunes@el.com.br"
] | ruan.nunes@el.com.br |
c77f59e3b90ce19c50bd0a77c092b148f74acab0 | 9d30a8c8620640b5e18c6aa5aa4bca6c01a60182 | /Code/utils/inference1.py | 13954a3246b767b1a5ad098f07456a4cfcff6c9b | [
"MIT"
] | permissive | zhouzhiyuan1/RADANet | 00ed5e2380007b53f918788d9e44fcec26c9ce21 | f0db67e5b16b5b566efd40402b7b2b2a5342d5ad | refs/heads/main | 2023-08-31T18:50:05.560253 | 2021-11-02T11:37:59 | 2021-11-02T11:37:59 | 423,765,661 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,803 | py | #!/usr/bin/env python3
# coding: utf-8
__author__ = 'cleardusk'
import numpy as np
from math import sqrt
import scipy.io as sio
import matplotlib.pyplot as plt
from .ddfa import reconstruct_vertex
def get_suffix(filename):
    """Return the extension of *filename* including the dot ('a.jpg' -> '.jpg'),
    or '' when the name contains no dot."""
    pos = filename.rfind('.')
    return '' if pos == -1 else filename[pos:]
def crop_img(img, roi_box):
    """Crop *img* to *roi_box* = [sx, sy, ex, ey] (rounded to ints),
    zero-padding any part of the box that falls outside the image."""
    img_h, img_w = img.shape[:2]
    sx, sy, ex, ey = [int(round(v)) for v in roi_box]
    out_h, out_w = ey - sy, ex - sx

    if len(img.shape) == 3:
        res = np.zeros((out_h, out_w, 3), dtype=np.uint8)
    else:
        res = np.zeros((out_h, out_w), dtype=np.uint8)

    # Clip the source box to the image; the destination offsets absorb the
    # amount clipped on each side so the copied region lands where it should.
    dsx = -sx if sx < 0 else 0
    sx = max(sx, 0)
    dex = out_w - (ex - img_w) if ex > img_w else out_w
    ex = min(ex, img_w)
    dsy = -sy if sy < 0 else 0
    sy = max(sy, 0)
    dey = out_h - (ey - img_h) if ey > img_h else out_h
    ey = min(ey, img_h)

    res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]
    return res
def calc_hypotenuse(pts):
    """Return one third of the diagonal of the square box that encloses the
    landmark points *pts* (a 2xN array of x/y coordinates)."""
    bounds = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]
    center_x = (bounds[0] + bounds[2]) / 2
    center_y = (bounds[1] + bounds[3]) / 2
    radius = max(bounds[2] - bounds[0], bounds[3] - bounds[1]) / 2
    square = [center_x - radius, center_y - radius, center_x + radius, center_y + radius]
    diagonal = sqrt((square[2] - square[0]) ** 2 + (square[3] - square[1]) ** 2)
    return diagonal / 3
def parse_roi_box_from_landmark(pts):
    """Build a square ROI box [sx, sy, ex, ey] around the landmark points
    *pts* (2xN array), sized by the diagonal of their square bounding box."""
    left, top = min(pts[0, :]), min(pts[1, :])
    right, bottom = max(pts[0, :]), max(pts[1, :])
    center_x = (left + right) / 2
    center_y = (top + bottom) / 2
    radius = max(right - left, bottom - top) / 2
    square = [center_x - radius, center_y - radius, center_x + radius, center_y + radius]
    llength = sqrt((square[2] - square[0]) ** 2 + (square[3] - square[1]) ** 2)
    cx = (square[2] + square[0]) / 2
    cy = (square[3] + square[1]) / 2
    sx = cx - llength / 2
    sy = cy - llength / 2
    return [sx, sy, sx + llength, sy + llength]
def parse_roi_box_from_bbox(bbox):
    """Convert a face-detector box [left, top, right, bottom] to the square
    ROI box used by the model."""
    left, top, right, bottom = bbox
    old_size = (right - left + bottom - top) / 2
    center_x = right - (right - left) / 2.0
    # shift the center downward a bit so the ROI covers the whole face
    center_y = bottom - (bottom - top) / 2.0 + old_size * 0.14
    size = int(old_size * 1.58)
    sx = center_x - size / 2
    sy = center_y - size / 2
    return [sx, sy, sx + size, sy + size]
def dump_to_ply(vertex, tri, wfp):
    """Write vertices (3 x N) and triangle indices (3 x M, 1-based) to an
    ASCII PLY file at *wfp*; indices are converted to 0-based on output."""
    header = """ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
element face {}
property list uchar int vertex_indices
end_header"""

    n_vertex = vertex.shape[1]
    n_face = tri.shape[1]
    with open(wfp, 'w') as f:
        f.write(header.format(n_vertex, n_face) + '\n')
        for i in range(n_vertex):
            f.write('{:.4f} {:.4f} {:.4f}\n'.format(vertex[0, i], vertex[1, i], vertex[2, i]))
        for i in range(n_face):
            f.write('3 {} {} {}\n'.format(tri[0, i] - 1, tri[1, i] - 1, tri[2, i] - 1))
    print('Dump tp {}'.format(wfp))
def dump_vertex(vertex, wfp):
    """Save *vertex* to a MATLAB .mat file at *wfp* under the key 'vertex'."""
    sio.savemat(wfp, {'vertex': vertex})
    print('Dump to {}'.format(wfp))
def _predict_vertices(param, roi_bbox, dense, transform=True):
    """Reconstruct vertices from the model parameters and map them from the
    120x120 crop space back into original-image coordinates via the ROI box.

    NOTE(review): *transform* is accepted but never used — confirm whether it
    was meant to gate the rescaling below.
    """
    verts = reconstruct_vertex(param, dense=dense)
    sx, sy, ex, ey = roi_bbox
    x_scale = (ex - sx) / 120
    y_scale = (ey - sy) / 120
    verts[0, :] = verts[0, :] * x_scale + sx
    verts[1, :] = verts[1, :] * y_scale + sy
    # depth is scaled by the average of the two in-plane scales
    verts[2, :] *= (x_scale + y_scale) / 2
    return verts
def predict_68pts(param, roi_box):
    """Sparse prediction: reconstruct the 68 landmark points in image space."""
    return _predict_vertices(param, roi_box, dense=False)
def predict_dense(param, roi_box):
    """Dense prediction: reconstruct the full vertex set in image space."""
    return _predict_vertices(param, roi_box, dense=True)
def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
    """Draw landmarks on top of an image using matplotlib.

    :param img: image array; channels are reversed for display (BGR input
        presumed — OpenCV convention; confirm with callers)
    :param pts: one 2xN landmark array, or a tuple/list of them
    :param style: 'simple' (plain green dots) or 'fancy' (wireframe whose
        segment boundaries match the 68-landmark face scheme)
    :param wfp: if given, save the figure to this path at 200 dpi
    :param show_flg: if True, display the figure interactively
    :param kwargs: 'color' / 'markeredgecolor' overrides for the fancy style
    """
    height, width = img.shape[:2]
    plt.figure(figsize=(12, height / width * 12))
    plt.imshow(img[:, :, ::-1])
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    plt.axis('off')

    if not type(pts) in [tuple, list]:
        pts = [pts]
    for i in range(len(pts)):
        if style == 'simple':
            plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')
        elif style == 'fancy':
            alpha = 0.8
            markersize = 10
            lw = 1.5
            color = kwargs.get('color', 'r')
            markeredgecolor = kwargs.get('markeredgecolor', 'red')
            # indices where each facial-feature polyline starts/ends
            nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
            # close eyes and mouths
            plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],
                                                 color=color, lw=lw, alpha=alpha - 0.1)
            plot_close(41, 36)
            plot_close(47, 42)
            plot_close(59, 48)
            plot_close(67, 60)
            for ind in range(len(nums) - 1):
                l, r = nums[ind], nums[ind + 1]
                plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
                plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,
                         color=color,
                         markeredgecolor=markeredgecolor, alpha=alpha)
    if wfp is not None:
        plt.savefig(wfp, dpi=200)
        print('Save visualization result to {}'.format(wfp))
    if show_flg:
        plt.show()
def get_colors(image, vertices):
    """Sample the image color at each vertex position (rounded and clamped to
    the image bounds). Note: clamps *vertices* in place.

    :param image: h x w x 3 array
    :param vertices: 2+ x n array; row 0 is x, row 1 is y
    :return: n x 3 array of sampled colors
    """
    h, w, _ = image.shape
    vertices[0, :] = np.minimum(np.maximum(vertices[0, :], 0), w - 1)  # clamp x
    vertices[1, :] = np.minimum(np.maximum(vertices[1, :], 0), h - 1)  # clamp y
    idx = np.round(vertices).astype(np.int32)
    return image[idx[1, :], idx[0, :], :]
def write_obj_with_colors(obj_name, vertices, triangles, colors):
    """Write a Wavefront OBJ with per-vertex colors appended to each 'v' line.

    :param obj_name: output path; '.obj' is appended when missing
    :param vertices: 3 x n array; rows 0 and 1 are swapped on output
    :param colors: n x 3 array; written in reversed channel order (cols 2, 1, 0)
    :param triangles: 3 x m array of vertex indices, written as-is
    """
    triangles = triangles.copy() # meshlab start with 1
    # NOTE(review): the copy above is never modified before writing — whatever
    # "+1 for meshlab" adjustment the comment implies never happens. Verify that
    # *triangles* is already 1-based as OBJ requires.
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'

    # write obj
    with open(obj_name, 'w') as f:
        # write vertices & colors
        for i in range(vertices.shape[1]):
            s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\n'.format(vertices[1, i], vertices[0, i], vertices[2, i], colors[i, 2],
                                                           colors[i, 1], colors[i, 0])
            f.write(s)

        # write f: ver ind/ uv ind
        for i in range(triangles.shape[1]):
            s = 'f {} {} {}\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])
            f.write(s)
def main():
    """Placeholder entry point (no behavior yet)."""
    pass
if __name__ == '__main__':
    main()
| [
"zy980203123@163.com"
] | zy980203123@163.com |
344ac01479164503dbab03b95cd598cba2744ea4 | 047ddbf7dc154786da48f4b5ab8968a7abcad24b | /genprimo.py | 51c4484a5e8c896c48e9515a294848b3c6413007 | [] | no_license | algoritmos-2019-2/clase-1-JAAD300 | ecea409136bcc63e4c778b43c5b339cbb4a718de | 0ab81d11a6532956636e8999a2f9ae11d75b8977 | refs/heads/master | 2020-04-19T20:03:19.553368 | 2019-04-01T05:40:22 | 2019-04-01T05:40:22 | 168,404,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | #!/usr/bin/env python3
def checador(n):
    """Print whether *n* is prime, using the script's Spanish output format.

    The original returned inside the first loop iteration, so it only ever
    tested divisibility by 2 (e.g. 9 was reported prime) and printed nothing
    at all for n <= 2.
    """
    # numbers below 2 are not prime by definition
    if n < 2:
        print(n, "no es primo")
        return
    for i in range(2, n):
        if (n % i) == 0:
            print(n, "no es primo")
            return
    print(n, "es primo")
print("ingrese número")  # prompt the user for a number
checador(int(input()))  # classify the entered value
| [
"jorge@Jorge-PC.localdomain"
] | jorge@Jorge-PC.localdomain |
6a27868511bae2d8a9d10a768aa6fea1b3b93397 | 7e246c308597762dccb129883706fb5f827b1f05 | /examples/cli_debug.py | 7cf1bec440e4396e14220a1389a6e98210f17e55 | [] | no_license | NGnius/casl | b54bdd26003e582d77bb04b4e80e13c34074b4ad | db5bc4fbf6819ba89d0258e4c24a7fa85273d145 | refs/heads/master | 2023-03-01T08:52:31.681391 | 2021-02-05T03:12:43 | 2021-02-05T03:12:43 | 330,711,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | #!/usr/bin/python3
import sys
import json
data = input()  # read one line of JSON from stdin
payload = json.loads(data)  # parse it (validates the input)
# NOTE(review): `payload` is never consulted — the response is fixed.
# Presumably this stub only exercises the caller's plumbing; confirm.
response = json.dumps({"error": "debug error", "action": {"type": "Custom"}})
print(response)
| [
"ngniusness@gmail.com"
] | ngniusness@gmail.com |
ac65b404dace4784df733dfdfafadcc28eb379aa | c135da511684bfb267a8bac5a84b3f032b2d7b26 | /algorithms/delete-columns-to-make-sorted/DeleteColumnsToMakeSorted.py | b25bbcf217bb13ce0ca196ee9f751ee7ef66193b | [] | no_license | hieudtrinh/coursera_python | 0bc9841a8be7ea38b5fdf9bf1d2bcd6e40387e60 | 6bd01f898eca617ec3c6ad215f47f7f41265dd4f | refs/heads/main | 2023-02-16T20:59:46.617531 | 2021-01-19T19:05:35 | 2021-01-19T19:05:35 | 319,189,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | import sys
from typing import List
class DeleteColumnsToMakeSorted:
    """Count the columns of a list of equal-length strings that are not
    sorted top-to-bottom (LeetCode 944). Four equivalent implementations
    are kept for comparison."""

    def minDeletionSize(self, A: List[str]) -> int:
        """Transpose with zip and compare each column to its sorted form."""
        unsorted_cols = 0
        for column in zip(*A):
            if column != tuple(sorted(column)):
                unsorted_cols += 1
        return unsorted_cols

    def minDeletionSize1(self, A: List[str]) -> int:
        """Explicit scan: a column counts once any adjacent pair is out of order."""
        deletions = 0
        for col in range(len(A[0])):
            if any(A[row][col] > A[row + 1][col] for row in range(len(A) - 1)):
                deletions += 1
        return deletions

    def minDeletionSize2(self, A: List[str]) -> int:
        """Build each column as a string and compare it to its sorted copy."""
        bad = 0
        for col in range(len(A[0])):
            column = "".join(word[col] for word in A)
            if "".join(sorted(column)) != column:
                bad += 1
        return bad

    def minDeletionSize3(self, A: List[str]) -> int:
        """Collect each column's characters into a list and test sortedness."""
        unsorted_indices = []
        for col in range(len(A[0])):
            chars = [A[row][col] for row in range(len(A))]
            if chars != sorted(chars):
                unsorted_indices.append(col)
        return len(unsorted_indices)
def main(argv, arc):
    """Tiny smoke test: run one example through the solution class.

    (argv/arc are accepted but unused.)"""
    A = ["cba", "daf", "ghi"]
    solution = DeleteColumnsToMakeSorted()
    solution.minDeletionSize(A)
if __name__ == '__main__':
    main(sys.argv, len(sys.argv))
| [
"user.namecd"
] | user.namecd |
d1716fb4ec493d1b89f08262b63cd4a0ccee5a05 | af8cb7ec280573b58a16ae6e92a938828ffc052d | /Recurrent_network/Recurrent_network2.py | d0ec9b7584d15643b4ca53b345c7e20dda0a2df4 | [] | no_license | ninetailskim/Tensorflow_Tutorial | 65e44ecce976fdd469fc8c34b0d1ed975e5b9989 | cb7d8fcd12e57de80f5cded091e014b425e3467f | refs/heads/master | 2021-07-02T22:46:55.503318 | 2017-09-23T08:31:03 | 2017-09-23T08:31:03 | 104,552,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | '''
static_rnn
input :A length T list of inputs, each a Tensor of shape [batch_size, input_size], or a nested tuple of such elements.
x = tf.unstack(x, timesteps, 1)
output : A list of outputs (one for each input), or a nested tuple of such elements.
output[-1]
'''
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
# Training hyper-parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200
# Network shape: each 28x28 MNIST image is fed as 28 time steps of 28 pixels
num_input = 28
timesteps = 28
num_hidden = 128
num_classes = 10
# Placeholders for a batch of images and their one-hot labels
X = tf.placeholder(tf.float32, [None, timesteps, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
# Output projection from the LSTM's hidden state to class logits
weights={
    'out':tf.get_variable('weight_out', [num_hidden, num_classes], tf.float32)
}
biases = {
    'out':tf.get_variable('biases_out', [num_classes], tf.float32)
}
def RNN(x, weight, biases):
    """Run a single-layer LSTM over the time steps and classify from the
    final step's output.

    :param x: batch tensor of shape (batch, timesteps, num_input)
    :param weight: dict with 'out' projection matrix (num_hidden x num_classes)
    :param biases: dict with 'out' bias vector (num_classes)
    :return: unscaled logits of shape (batch, num_classes)
    """
    # static_rnn expects a length-`timesteps` list of (batch, num_input) tensors
    x = tf.unstack(x, timesteps, 1)
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    #h0 = lstm_cell.zero_state(batch_size, tf.float32)
    output, state = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # output[-1] is the LSTM output at the last time step
    out = tf.nn.bias_add(tf.matmul(output[-1], weight['out']), biases['out'])
    return out
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Loss takes the raw logits (softmax is applied inside the op)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # reshape flat 784-pixel rows into (batch, timesteps, num_input)
        batch_x = batch_x.reshape((-1, timesteps, num_input))
        _, loss, acc = sess.run([train_op, loss_op, accuracy],feed_dict={X:batch_x, Y:batch_y})
        if step % display_step == 0:
            print("Step:", step, "loss:", loss, "Accuracy:", acc)
    print("Training finished!")
    # evaluate on a fixed slice of the test set
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Test:", sess.run(accuracy, feed_dict={X:test_data, Y:test_label}))
"ninetailsyeon@163.com"
] | ninetailsyeon@163.com |
e5f7852757d20d9217562defb3d22da0c1893cb6 | 5e809acc62b080f1adad2c34e647241cdc5ad297 | /myenv/bin/markdown_py | fa2c63491a1647ccda5e1725538898c521cfc6a8 | [
"MIT"
] | permissive | RootenberG/My-blog-project | f520af79a2f3eb416b3dadee46813a812ce9d53d | 7ef4670cfa9d54d9345d52ca008aae5fed5605bc | refs/heads/master | 2020-08-15T20:04:29.478049 | 2020-02-08T21:57:46 | 2020-02-08T21:57:46 | 215,400,930 | 0 | 0 | MIT | 2019-10-30T20:54:38 | 2019-10-15T21:34:30 | Python | UTF-8 | Python | false | false | 255 | #!/home/den/devspace/My-blog-project/myenv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
    # Normalize argv[0] (strip the -script.py / .exe suffixes setuptools adds
    # on Windows) and hand off to markdown's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"savichevdenis244@gmail.com"
] | savichevdenis244@gmail.com | |
ed8716a26481360b16a530a1bada6c0c07139b62 | b7dc309c2870431ea90710daf829fd364cf2d578 | /牛客聪明编辑.py | aa96c3286b912ed9c91a11d94b2d70ef75d0fcfe | [] | no_license | KaGen1999/Leetcode-by-Python | 7d65e7890279a2910aae297929a33f52001ad287 | ef10b1aa9b7060f949dcd392d62ddaba5fbcdbb8 | refs/heads/master | 2021-05-20T21:02:44.904731 | 2020-09-13T16:23:50 | 2020-09-13T16:23:50 | 252,415,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | n = int(input())
# Appears to implement the "smart editor" dedup: in each of the n lines read,
# a run of 3+ equal characters is collapsed to 2 (AAA -> AA) and a double
# immediately followed by another double loses the second pair's repeat
# (AABB -> AAB).
# State machine: b = previous char; state 0 = normal, 1 = just kept a double,
# 2 = first char after a double (whose own repeat must be dropped).
for i in range(n):
    s = input()
    b = ''
    new_s = ''
    count = 1  # NOTE(review): never used — leftover from an earlier approach?
    state = 0
    for each in s:
        if each != b:
            if state == 1:
                # different char right after a double: keep it, but remember
                # its own repeat must be suppressed (state 2)
                new_s = new_s + each
                state = 2
            elif state == 2:
                new_s = new_s + each
                state = 0
            else:
                new_s = new_s + each
        else:
            if state == 0:
                # first repeat: doubles are allowed, keep it and note it
                state = 1
                new_s = new_s + each
            # in states 1 and 2 a repeated char is silently dropped
        b = each
    print(new_s)
"1032336124@qq.com"
] | 1032336124@qq.com |
5dcf1531f3266b5a1c867bd6a62ba36a36b2bbc2 | 7b08ceb8c901a09e41d4a67804e2adf94142cb17 | /description2process/__init__.py | 2f99a8019b7c0dace78658a646cc5d28bfb7d318 | [] | no_license | simrit1/Description2Process | 1e7cfcc4dc6bb762d69f27bbe1eedd4e0cef6a38 | 223372f3588f7ac67537eae3012667951b5543e0 | refs/heads/master | 2023-08-25T23:12:50.838804 | 2019-05-16T16:51:51 | 2019-05-16T16:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,370 | py | import tensorflow as tf
# We need to enable eager execution for inference at the end of this notebook.
tfe = tf.contrib.eager
tfe.enable_eager_execution()
# Pin the TF version string into the environment for tooling that reads it
TFVERSION='1.13'
import os
os.environ['TFVERSION'] = TFVERSION
# Import library
from description2process import data_generation
from description2process import contraction_expansion
from description2process import coreference_resolution
from description2process import clause_extraction
from description2process import activity_recognition
from description2process import activity_extraction
from description2process import structured_description
from description2process import xml_model
from description2process import visualization
from description2process import evaluation
# Returns the visualisation of a process description
# INPUT: process description in string format
def description2model(description, png = False):
    """Run the full 8-step pipeline: raw description text -> visualized model.

    :param description: process description as a single string
    :param png: forwarded to the visualizer (render as PNG when True)
    :return: the visualized process model
    """
    # step1 : contraction expansion
    description = contraction_expansion.expand_contractions(description)
    print("Step 1/8 DONE: contraction expansion")
    # step2 : coreference resolution
    description = coreference_resolution.resolve_coreferences(description)
    print("Step 2/8 DONE: coreference resolution")
    # step3 : clause extraction
    subsentences = clause_extraction.get_clauses(description)
    print("Step 3/8 DONE: extracted clauses ")
    # step4: label clauses
    labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
    print("Step 4/8 DONE: labeled clauses ")
    # step5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
    print("Step 5/8 DONE: extracted activities ")
    # step6: get a structured_descriptions
    str_descr = structured_description.get_structured_description(description, df_activities)
    print("Step 6/8 DONE: semi-structured descriptions")
    # step7: get XML format of models
    xml = xml_model.structured2xml(str_descr)
    print("Step 7/8 DONE: model in XML")
    # step8: Visualize the model in xml
    model = visualization.xml2model(xml, png)
    print("Step 8/8 DONE: Visualize model")
    return model
# Returns the xml format of the process description
# INPUT: process description in string format
def description2xml(description):
    """Run the pipeline through step 7 and return the model as XML
    (same steps as description2model, minus visualization).

    :param description: process description as a single string
    :return: process model in XML format
    """
    # step1 : contraction expansion
    description = contraction_expansion.expand_contractions(description)
    print("Step 1/7 DONE: contraction expansion")
    # step2 : coreference resolution
    description = coreference_resolution.resolve_coreferences(description)
    print("Step 2/7 DONE: coreference resolution")
    # step3 : clause extraction
    subsentences = clause_extraction.get_clauses(description)
    print("Step 3/7 DONE: extracted clauses ")
    # step4: label clauses
    labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
    print("Step 4/7 DONE: labeled clauses ")
    # step5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
    print("Step 5/7 DONE: extracted activities ")
    # step6: get a structured_descriptions
    str_descr = structured_description.get_structured_description(description, df_activities)
    print("Step 6/7 DONE: semi-structured descriptions")
    # step7: get XML format of models
    xml = xml_model.structured2xml(str_descr)
    print("Step 7/7 DONE: model in XML")
    return xml
# returns the structured description of raw process descriptions
# Input: pandas dataframe of process descriptions
def description2structured_df(description_df):
    """Batch version of the pipeline: clean, split, label, and structure a
    whole DataFrame of process descriptions.

    :param description_df: pandas DataFrame of process descriptions
    :return: DataFrame of structured descriptions
    """
    # step1 : contraction expansion
    description_df = contraction_expansion.expand_contractions_df(description_df)
    print("Step 1/6 DONE: contraction expansion")
    # step2 : coreference resolution
    description_df = coreference_resolution.resolve_coreferences_df(description_df)
    print("Step 2/6 DONE: coreference resolution")
    # step3 : clause extraction
    description_df = clause_extraction.get_clauses_df(description_df)
    print("Step 3/6 DONE: extracted clauses ")
    # step4: label clauses
    labeled_clauses = activity_recognition.contains_activity_df(description_df)
    print("Step 4/6 DONE: labeled clauses ")
    # step5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses)
    print("Step 5/6 DONE: extracted activities ")
    # step6: get a structured_descriptions
    str_descr = structured_description.get_structured_description_df(description_df, df_activities)
    print("Step 6/6 DONE: returned structured descriptions")
    return str_descr
# return the descripition after contraction expansion and coreference resolution.
# This type of description can be seen as a cleaned version of the original one.
# Input: pandas dataframe of process descriptions
def description2referenceresolved_df(description_df):
    """Expand contractions and resolve coreferences for every description.

    The result can be seen as a cleaned version of the original descriptions.
    :param description_df: pandas DataFrame of process descriptions
    :return: DataFrame with the cleaned descriptions
    """
    cleaned = contraction_expansion.expand_contractions_df(description_df)
    cleaned = coreference_resolution.resolve_coreferences_df(cleaned)
    return cleaned
# Return the description with a list containing the description's extracted clauses
# Input: pandas dataframe of process description
def description2clauses_df(description_df):
    """Clean the descriptions and extract each description's clauses.

    :param description_df: pandas DataFrame of process descriptions
    :return: DataFrame with a list of extracted clauses per description
    """
    df = contraction_expansion.expand_contractions_df(description_df)
    df = coreference_resolution.resolve_coreferences_df(df)
    return clause_extraction.get_clauses_df(df)
# Return the description with a list containg the descriptions's extracted clauses
# + an extra dataframe with all its labeled clauses
# Input: pandas dataframe of process descriptions
def description2labeledclauses_df(description_df):
    """Clean the descriptions, extract their clauses, and label each clause.

    :param description_df: pandas DataFrame of process descriptions
    :return: tuple of (labeled clauses DataFrame,
             descriptions DataFrame with their extracted clauses)
    """
    df = contraction_expansion.expand_contractions_df(description_df)
    df = coreference_resolution.resolve_coreferences_df(df)
    df = clause_extraction.get_clauses_df(df)
    labeled = activity_recognition.contains_activity_df(df)
    return labeled, df
| [
"noreply@github.com"
] | simrit1.noreply@github.com |
886183df918841571dc3a1914dbf86b3af70ee3d | 9ce345af50e58596564a942471c19b17fec5b1b7 | /venv/Scripts/pip-script.py | 8ff319223bd8fc27e410ac0fa90fd31b50f27fd7 | [] | no_license | ArsenTrynko/Python_lab10 | 2f6a4379a53c66f365a85f9db6c818128690d17f | 8da5281ef60e40e43b31e7a38e1d3739d926b552 | refs/heads/master | 2020-05-31T00:47:41.813171 | 2019-06-03T16:26:38 | 2019-06-03T16:26:38 | 190,041,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #!C:\Users\MI\PycharmProjects\Lab10\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"jarkodara@gmail.com"
] | jarkodara@gmail.com |
ff06d12c1f57c1abcc60e770b2ba9375591bfd04 | 7ba5e9e271f1199582500bc40334ce4dfff03698 | /manage.py | e1e56792c1b390bb6ae5ff85c7019e487c5a3838 | [] | no_license | R1Ng0-1488/four-a-docker | 9ffc0cd2004b06ea9b9871eb2aad778854083bf5 | 2b66ed5baa6df777391343f82c5512b90689b981 | refs/heads/master | 2023-04-13T05:04:34.600023 | 2021-04-27T09:23:25 | 2021-04-27T09:23:25 | 357,534,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility (manage.py)."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fourArest.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        # Surface a friendly hint when Django itself cannot be imported.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from err
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"57253219+R1Ng0-1488@users.noreply.github.com"
] | 57253219+R1Ng0-1488@users.noreply.github.com |
d3d6757ce1df376dff4c92caaad8942329c824b0 | 801e30ca6313e09ae19c2109604325556edf7e11 | /validate_hcp_release.py | 295ec2b714eeaf5286237d8780121fdcfc0be382 | [] | no_license | MHouse/validate_hcp_release | 5335dbb531564e52e38b10bf538cced6bc2b1265 | 0c9f98fcd51b5c7e7c64f962c6393019b67790ec | refs/heads/master | 2021-01-16T18:18:22.110563 | 2013-02-22T21:17:38 | 2013-02-22T21:17:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,747 | py | #! /usr/bin/env python
__author__ = 'mhouse01'
import requests
import json
import os
import csv
from lxml import etree
from sys import exit
from operator import itemgetter, attrgetter
import argparse
import ConfigParser
from seriesDetails import seriesDetails, csvOrder, seriesLabels, scanIsPackage
# --- Script setup (Python 2): argument parsing, config, REST session. ---
# Declare the XNAT Namespace for use in XML parsing
xnatNS = "{http://nrg.wustl.edu/xnat}"
xmlFormat = {'format': 'xml'}
jsonFormat = {'format': 'json'}
# PARSE INPUT
parser = argparse.ArgumentParser(description="Alpha program to pull Subject session parameters from XNAT for verification")
parser.add_argument("-c", "--config", dest="configFile", default="validate_hcp_release.cfg", type=str, help="config file must be specified")
parser.add_argument("-P", "--project", dest="Project", default="HCP_Phase2", type=str, help="specify project")
parser.add_argument("-S", "--subject", dest="Subject", type=str, help="specify subject of interest")
parser.add_argument("-D", "--destination_dir", dest="destDir", default='/tmp', type=str, help="specify the directory for output")
parser.add_argument("-M", "--output_map", dest="outputMap", default='all', type=str, help="specify the output mapping: all, public, package")
parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true", help="show more verbose output")
parser.add_argument('--version', action='version', version='%(prog)s: v0.1')
args = parser.parse_args()
args.destDir = os.path.normpath( args.destDir )
# Read the config file (credentials and server location live outside the script)
config = ConfigParser.ConfigParser()
try:
    config.read( args.configFile )
    username = config.get('Credentials', 'username')
    password = config.get('Credentials', 'password')
    restServerName = config.get('Server', 'server')
    restSecurity = config.getboolean('Server', 'security')
except ConfigParser.Error as e:
    print "Error reading configuration file:"
    print "  " + str( e )
    exit(1)
# HTTPS when security is on; plain HTTP on port 8080 otherwise.
if restSecurity:
    print "Using only secure connections"
    restRoot = "https://" + restServerName
else:
    print "Security turned off for all connections"
    restRoot = "http://" + restServerName + ":8080"
# If we find an OS certificate bundle, use it instead of the built-in bundle
if requests.utils.get_os_ca_bundle_path() and restSecurity:
    os.environ['REQUESTS_CA_BUNDLE'] = requests.utils.get_os_ca_bundle_path()
    print "Using CA Bundle: %s" % requests.utils.DEFAULT_CA_BUNDLE_PATH
# Establish a Session ID
try:
    r = requests.get( restRoot + "/data/JSESSION", auth=(username, password) )
    # If we don't get an OK; code: requests.codes.ok
    r.raise_for_status()
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
    print "Failed to retrieve REST Session ID:"
    print "  " + str( e )
    exit(1)
restSessionID = r.content
print "Rest Session ID: %s " % (restSessionID)
# All later calls reuse the session cookie instead of re-authenticating.
restSessionHeader = {"Cookie": "JSESSIONID=" + restSessionID}
mrSessions = {"xsiType": "xnat:mrSessionData"}
# Get the list of MR Sessions for each Experiment
# Create a URL pointing to the Experiments for this Subject
restExperimentsURL = restRoot + "/data/archive/projects/" + args.Project + "/subjects/" + args.Subject + "/experiments/"
# Get the list of MR Sessions for the Subject in JSON format
try:
    # Create a dictionary of parameters for the rest call
    restParams = mrSessions.copy()
    restParams.update(jsonFormat)
    # Make the rest call
    r = requests.get( restExperimentsURL, params=restParams, headers=restSessionHeader)
    # If we don't get an OK; code: requests.codes.ok
    r.raise_for_status()
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
    print "Failed to retrieve MR Session list: %s" % e
    exit(1)
# Parse the JSON from the GET
experimentJSON = json.loads( r.content )
# Strip off the trash that comes back with it and store it as a list of name/value pairs
experimentResultsJSON = experimentJSON.get('ResultSet').get('Result')
# List Comprehensions Rock! http://docs.python.org/tutorial/datastructures.html
# Create a stripped down version of the results with a new field for seriesList; Store it in the experimentResults object
# (labels/dates are coerced to ASCII; the two extra fields are filled in later)
experimentResults = [ {'label': experimentItem.get('label').encode('ascii', 'ignore'),
                       'date': experimentItem.get('date').encode('ascii', 'ignore'),
                       'subjectSessionNum': None,
                       'seriesList': None } for experimentItem in experimentResultsJSON ]
# Loop over the MR Experiment Results
for experiment in experimentResults:
print "Gathering results for " + experiment['label']
# Compose a rest URL for this Experiment
restSingleExperimentURL = restExperimentsURL + experiment['label']
# Make a rest request to get the complete XNAT Session XML
try:
r = requests.get( restSingleExperimentURL, params=xmlFormat, headers=restSessionHeader, timeout=10.0 )
# If we don't get an OK; code: requests.codes.ok
r.raise_for_status()
# Check if the REST Request fails
except requests.Timeout as e:
print "Timed out while attempting to retrieve XML:"
print " " + str( e )
if not args.restSecurity:
print "Note that insecure connections are only allowed locally"
exit(1)
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
print "Failed to retrieve XML: %s" % e
exit(1)
# Parse the XML result into an Element Tree
root = etree.fromstring(r.text.encode(r.encoding))
# Extract the Study Date for the session
if experiment['date'] == "":
experiment['date'] = "2013-01-01"
print "Assuming study date of " + experiment['date']
# Start with an empty series list
seriesList = list()
# Iterate over 'scan' records that contain an 'ID' element
for element in root.iterfind(".//" + xnatNS + "scan[@ID]"):
# Create an empty seriesDetails record
currentSeries = seriesDetails()
#Record some basic experiment level info in each scan
currentSeries.subjectName = args.Subject
currentSeries.sessionLabel = experiment['label']
currentSeries.sessionDate = experiment['date']
currentSeries.fromScanXML( element )
# Add the current series to the end of the list
seriesList.append( currentSeries )
# Sort the series list by DateTime
seriesList.sort( key=attrgetter('DateTime') )
# Store the subjectSessionNum extracted from the first item (first acquired scan) in the sorted list
experiment['subjectSessionNum'] = iter(seriesList).next().subjectSessionNum
# Store the series list along with the experiment label
experiment['seriesList'] = seriesList
# --- Write the collected scan details out as a CSV, one row per scan. ---
# Sort the Experiment Results list by the Subject Session Number
experimentResults.sort( key=itemgetter('subjectSessionNum') )
# Name the CSV file by the Subject name
csvFile = args.destDir + os.sep + args.Subject + "_" + args.outputMap + ".csv"
# Create an empty Series Notes object. This can be populated with field specific notes for each Experiment
seriesNotes = seriesDetails()
# Open the CSV file for write/binary
with open( csvFile, 'wb' ) as f:
    # Create a CSV Writer for dictionary formatted objects. Give it the Dictionary order for output.
    csvWriter = csv.DictWriter( f, csvOrder( args.outputMap ) )
    # Write out the series labels as a Header ("package" maps are headerless)
    if args.outputMap != "package":
        csvWriter.writerow( seriesLabels(args.outputMap) )
    # Loop over all experiment results
    for experiment in experimentResults:
        # Populate the Series Notes for this Experiment with the Experiment Date and Label
        seriesNotes.scan_ID = experiment['label']
        seriesNotes.startTime = experiment['date']
        # Write out the notes only on 'all' maps
        if args.outputMap == "all":
            csvWriter.writerow( seriesNotes.asDictionary(args.outputMap) )
        # Loop over all scans in each experiment
        for scan in experiment['seriesList']:
            # Write each scan by converting it to a Dictionary and pulling the relevant Mapping subset
            nextRow = scan.asDictionary(args.outputMap)
            # But only if this row should be included for the chosen map
            if args.outputMap == "all" or \
                (args.outputMap == "release" and scan.targetForRelease == "1") or \
                (args.outputMap == "release" and restServerName == "hcpx-demo.humanconnectome.org") or \
                (args.outputMap == "package" and scanIsPackage(scan.dbDesc)):
                csvWriter.writerow( nextRow )
print "Subject details written to: " + csvFile
| [
"mdhouse@gmail.com"
] | mdhouse@gmail.com |
fecb95f2df1a15ec0d1133aa0f186e37532e7f1c | 357ce8dbb7e2ebab438ae90a8f598ba625ee74a1 | /perticks/api/models.py | b2bf85e2e850ce59d32e170e83561f33d1a78fcd | [] | no_license | HealthHackAu2016/per-ticks | 899870f0c3915bb8d0aed9fcfe609674934b1a76 | 03eeaf57ea7e8c1efc07a8ff48c59edc058f7b4d | refs/heads/master | 2021-01-11T02:50:00.246122 | 2016-10-16T07:07:42 | 2016-10-16T07:07:42 | 70,917,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | from django.db import models
from django.contrib import admin
from django.core.validators import RegexValidator, EmailValidator
class BiteReport(models.Model):
    """A user-submitted tick-bite report with contact, location and symptom data."""
    # Validators
    # NOTE(review): named "alphanumeric" but the regex accepts digits only,
    # matching its error message — consider renaming to "numeric".
    alphanumeric = RegexValidator(r'^[0-9]*$', 'Only numeric characters are allowed.')
    validate_email = EmailValidator()
    # Fields
    id = models.AutoField(primary_key=True)
    # Credentials the reporter can use to look the report up again later.
    auth_id = models.CharField(max_length=20)
    auth_code = models.CharField(max_length=20)
    email = models.CharField(max_length=200, blank=True, validators=[validate_email])
    phone = models.CharField(max_length=11, blank=True, validators=[alphanumeric])
    allows_follow_up = models.BooleanField(default=False)
    wants_reminder = models.BooleanField(default=False)
    symptom_comments = models.TextField()
    submission_date = models.DateField(auto_now_add=True)
    bite_date = models.DateField()
    # Geographic coordinates of the bite location.
    lat = models.FloatField()
    lon = models.FloatField()
    bitten_before = models.BooleanField(default=False)
    number_of_bites = models.IntegerField(default=1)
    # travel
admin.site.register(BiteReport)
class HospitalData(models.Model):
    """Contact details for a hospital that can treat reported bites."""
    numeric = RegexValidator(r'^[0-9]*$', 'Only numeric characters are allowed.')
    hospital_name = models.CharField(max_length=128)
    hospital_address = models.CharField(max_length=512)
    hospital_telephone = models.CharField(max_length=11, blank=True, validators=[numeric])
admin.site.register(HospitalData)
class Reminders(models.Model):
    """A scheduled follow-up reminder attached to one bite report."""
    report = models.ForeignKey(BiteReport)
    reminder_date = models.DateField()
    # Flipped to True once the reminder has been delivered.
    reminder_sent = models.BooleanField(default=False)
admin.site.register(Reminders)
| [
"mail@trisreed.com"
] | mail@trisreed.com |
af319737ac47b4c0bdc71afb813cb1635135868b | 8e8f09667b7aae2e8e35e6c130e426aedbe3d565 | /apps/destination/migrations/0005_auto_20170719_1338.py | 251620743aee30278a95641ae10effdf2bac21ae | [] | no_license | TripHub/_API | c33e8b08f43cc45b5d7ed788aaaaed714fdcf802 | dad85e34e826d951a971088bc77c8e63b403f01f | refs/heads/master | 2021-06-24T05:50:28.964085 | 2017-08-06T11:01:50 | 2017-08-06T11:01:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-19 13:38
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Replace Destination's address/latitude/longitude columns with one JSON blob.

    Auto-generated by Django 1.11; do not edit once applied to a database.
    """
    dependencies = [
        ('destination', '0004_auto_20170705_1325'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='destination',
            name='address',
        ),
        migrations.RemoveField(
            model_name='destination',
            name='latitude',
        ),
        migrations.RemoveField(
            model_name='destination',
            name='longitude',
        ),
        migrations.AddField(
            model_name='destination',
            name='data',
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
            preserve_default=False,
        ),
    ]
| [
"Ben@hadfieldfamily.co.uk"
] | Ben@hadfieldfamily.co.uk |
1af07f337196fda10e15701549e6243804b7e233 | 9467b65606bdeb2ff1417267728f95aac96e2bd9 | /p24.py | ab13db0c38f7d99139ac1511ae52bfb7916bcb43 | [] | no_license | prince3453/python | a7d1e46f0669f50ac4ca74aa11a393a3f69c9471 | ca31d46dd885b619e4d7cefbf83b813684afad93 | refs/heads/master | 2020-12-06T13:31:13.314451 | 2020-05-16T05:53:00 | 2020-05-16T05:53:00 | 232,474,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | class Bank:
def __init__(self, balance):
self.balance = balance
self.methods = [self.printBalance,self.deposite,self.withdraw]
def printBalance(self):
print("Current Balance:",self.balance)
def inputAmount(self):
return float(input("Enter Amount:"))
def deposite(self):
amount = self.inputAmount()
self.balance += amount
self.printBalance()
def withdraw(self):
amount = self.inputAmount()
if self.balance - amount <= 500:
print("The Account Does Not Has Sufficient Balance.")
else:
self.balance -= amount
self.printBalance()
# Interactive teller loop: dispatch menu choices 1-3 through Bank.methods; 4 exits.
var = Bank(10000)
while True:
    choice = int(input("select \n1. for checking balance.\n2. for deposite.\n3. for withdrawal.\n4. for exit."))
    if choice == 4: break
    else:
        # NOTE(review): no validation — a choice outside 1-4 raises IndexError,
        # and non-numeric input raises ValueError at the int() above.
        var.methods[choice-1]()
"noreply@github.com"
] | prince3453.noreply@github.com |
9800c7757cdf7213dc56c1006e976f8cfdd3b3f5 | 19e84b3ea7944811b6fd113309b8a7c7b5ae33ba | /oec/db_data/views.py | cf69e179543b30faae5c5f12887affeaeba22e82 | [] | no_license | fxcebx/oec | cf9c4cfaa3b4d92d4cbd3539ff94b7f910209167 | cbba5d7513f63cdb5dc761146db784f2a9879ea7 | refs/heads/master | 2020-12-06T20:41:17.105920 | 2015-10-10T03:01:56 | 2015-10-10T03:01:56 | 44,028,188 | 0 | 0 | null | 2015-10-25T22:06:13 | 2015-10-10T21:01:46 | CSS | UTF-8 | Python | false | false | 5,007 | py | from flask import Blueprint, request, jsonify, make_response, g
from oec import db
from oec.utils import make_query
from oec.db_attr.models import Yo as Attr_yo
from oec.db_data import hs92_models
from oec.db_data import hs96_models
from oec.db_data import hs02_models
from oec.db_data import hs07_models
from oec.db_data import sitc_models
from oec.decorators import crossdomain
mod = Blueprint('data', __name__, url_prefix='/<any("sitc","hs","hs92","hs96","hs02","hs07"):classification>')
@mod.url_value_preprocessor
def get_product_classification_models(endpoint, values):
    """Resolve the classification URL segment to its model module before each view.

    Stores the classification string, the matching models module, and the
    requested output depth on flask's per-request ``g`` object.
    """
    g.locale = "en"
    classification = values.pop('classification')
    g.prod_classification = classification
    models_by_classification = {
        "hs": hs92_models,  # bare "hs" is an alias for the 1992 revision
        "hs92": hs92_models,
        "hs96": hs96_models,
        "hs02": hs02_models,
        "hs07": hs07_models,
        "sitc": sitc_models,
    }
    if classification in models_by_classification:
        g.prod_models = models_by_classification[classification]
    g.output_depth = request.args.get("output_depth")
############################################################
# ----------------------------------------------------------
# 2 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/all/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/all/all/')
@mod.route('/<trade_flow>/<year>/show/all/all/')
@crossdomain(origin='*')
def yo(**kwargs):
    """Year-origin view: join the attribute and data Yo tables on origin and year."""
    q = db.session.query(Attr_yo, getattr(g.prod_models, "Yo")) \
            .filter(Attr_yo.origin_id == getattr(g.prod_models, "Yo").origin_id) \
            .filter(Attr_yo.year == getattr(g.prod_models, "Yo").year)
    return make_response(make_query(q, request.args, g.locale, getattr(g.prod_models, "Yo"), **kwargs))
@mod.route('/<trade_flow>/all/all/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/all/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/all/show/all/')
@crossdomain(origin='*')
def yd(**kwargs):
    """Year-destination view over the classification's Yd table."""
    return make_response(make_query(getattr(g.prod_models, "Yd"), request.args, g.locale, **kwargs))
@mod.route('/<trade_flow>/all/all/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/all/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/all/all/show/')
@crossdomain(origin='*')
def yp(**kwargs):
    """Year-product view; passes classification and output depth for product IDs."""
    return make_response(make_query(getattr(g.prod_models, "Yp"), \
        request.args, g.locale, classification=g.prod_classification, \
        output_depth=g.output_depth, **kwargs))
############################################################
# ----------------------------------------------------------
# 3 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/show/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/show/all/')
@crossdomain(origin='*')
def yod(**kwargs):
    """Year-origin-destination view over the classification's Yod table."""
    return make_response(make_query(getattr(g.prod_models, "Yod"), request.args, g.locale, **kwargs))
@mod.route('/<trade_flow>/all/<origin_id>/all/show/')
@mod.route('/<trade_flow>/<year>/<origin_id>/all/show/')
@crossdomain(origin='*')
def yop(**kwargs):
    """Year-origin-product view: all products for one origin."""
    return make_response(make_query(getattr(g.prod_models, "Yop"), \
        request.args, g.locale, classification=g.prod_classification, \
        output_depth=g.output_depth, **kwargs))
@mod.route('/<trade_flow>/all/show/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/show/all/<prod_id>/')
@crossdomain(origin='*')
def yop_dest(**kwargs):
    """Year-origin-product view keyed by product: all origins for one product."""
    return make_response(make_query(getattr(g.prod_models, "Yop"), \
        request.args, g.locale, classification=g.prod_classification, **kwargs))
@mod.route('/<trade_flow>/all/all/<dest_id>/show/')
@mod.route('/<trade_flow>/<year>/all/<dest_id>/show/')
@crossdomain(origin='*')
def ydp(**kwargs):
    """Year-destination-product view: all products for one destination."""
    return make_response(make_query(getattr(g.prod_models, "Ydp"), \
        request.args, g.locale, classification=g.prod_classification, \
        output_depth=g.output_depth, **kwargs))
############################################################
# ----------------------------------------------------------
# 4 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/<dest_id>/all/')
@mod.route('/<trade_flow>/all/<origin_id>/<dest_id>/show/')
@mod.route('/<trade_flow>/<year>/<origin_id>/<dest_id>/show/')
@crossdomain(origin='*')
def yodp(**kwargs):
    """Year-origin-destination-product view: full bilateral product detail."""
    return make_response(make_query(getattr(g.prod_models, "Yodp"), \
        request.args, g.locale, classification=g.prod_classification, \
        output_depth=g.output_depth, **kwargs))
@mod.route('/<trade_flow>/all/<origin_id>/show/<prod_id>/')
@mod.route('/<trade_flow>/<year>/<origin_id>/show/<prod_id>/')
@crossdomain(origin='*')
def yodp_dest(**kwargs):
    """Year-origin-destination-product view keyed by product: all destinations."""
    return make_response(make_query(getattr(g.prod_models, "Yodp"), \
        request.args, g.locale, classification=g.prod_classification, **kwargs))
"alexandersimoes@gmail.com"
] | alexandersimoes@gmail.com |
a3832a608ada34da7f6cc1b6ee7f96711396596b | 00b2e5b0e600dccf0857e00b5710005062df92e3 | /Fatima/fatima_raman.py | 3abb8f47a90baebde4c4833d5c9befe4a28ee767 | [] | no_license | NMI-BMNT/auswertung | bf933046df3db729a3769fc50ce8c047d8a86177 | b9017ac6745764fc4ddf63c9d982a21e30777885 | refs/heads/master | 2022-01-10T12:41:10.416401 | 2018-05-23T11:48:55 | 2018-05-23T11:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,036 | py | import os
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import scipy.optimize as opt
from scipy.optimize import curve_fit, basinhopping
import scipy.sparse as sparse
from scipy.special import *
from plotsettings import *
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import seaborn as sns
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from skimage.filters import threshold_otsu
import re
import scipy.signal as signal
import peakutils as pu
def lorentz(x, amplitude, x0, sigma):
    """Area-normalised Lorentzian line shape centred at *x0* with FWHM *sigma*.

    Returns the profile flattened to a 1-D array.
    """
    u = (x - x0) / sigma
    peak = amplitude * 2 / (np.pi * sigma)
    g = peak / (1 + 4 * np.square(u))
    return g.ravel()
def gauss(x, amplitude, x0, sigma):
    """Area-normalised Gaussian line shape centred at *x0* with FWHM *sigma*.

    Returns the profile flattened to a 1-D array.
    """
    four_ln2 = 4 * np.log(2)
    u = (x - x0) / sigma
    g = (amplitude / sigma) * np.sqrt(four_ln2 / np.pi) * np.exp(-four_ln2 * np.square(u))
    return g.ravel()
# https://www.webpages.uidaho.edu/brauns/vibspect1.pdf
def asymvoigt(x, amplitude, x0, sigma, a , f):
sigma = 2 * sigma/(1 + np.exp(a*(x-x0)) )
g = f*lorentz(x,amplitude,x0,sigma)+(1-f)*gauss(x,amplitude,x0,sigma)
return g.ravel()
def fit_fun(x, amp, x0, sigma,a,f):
    """Model function handed to the optimiser; thin alias for asymvoigt."""
    return asymvoigt(x, amp, x0, sigma,a,f)
# --- Script setup: locate spectra files and allocate result arrays. ---
path = '/home/sei/Raman/Fatima3/'
savedir = path + 'plots/'
# Raman peaks of interest and the half-window searched around each one.
peak_pos = [1085,1590]
search_width = 100 # cm^-1
try:
    os.mkdir(savedir)
# NOTE(review): bare except silently ignores all errors, not just "exists".
except:
    pass
files = []
for file in os.listdir(path):
    if re.search(r"\.(txt)$", file) is not None:
        files.append(file)
print(files)
#file = files[0]
# Per file and per peak: wavenumber and intensity of the local maximum.
k_max = np.zeros((len(files),len(peak_pos)))
c_max = np.zeros((len(files),len(peak_pos)))
labels = np.array([])
# Per-file pass: smooth, baseline-subtract, plot, and record the peak maxima.
for i,file in enumerate(files):
    print(file)
    k, counts = np.loadtxt(path + file, unpack=True)
    # Savitzky-Golay smoothing (window 31, polynomial order 1).
    counts = signal.savgol_filter(counts, 31, 1, mode='interp')
    # Estimate and subtract the fluorescence baseline.
    base = pu.baseline(counts, 11, max_it=10000, tol=0.00001)
    counts -= base
    #newfig(0.9)
    plt.plot(k, counts, linewidth=1)
    # plt.plot(k, bl, linewidth=1)
    # plt.plot(wl[mask], filtered[mask], color="black", linewidth=0.6)
    plt.ylabel(r'$I_{\nu}\, /\, counts$')
    plt.xlabel(r'$wavenumber\, /\, cm^{-1}$')
    # plt.xlim((minwl, maxwl))
    # plt.plot(wl, counts)
    plt.tight_layout()
    #plt.show()
    plt.savefig(savedir + file[:-4] + ".pdf", dpi=300)
    plt.close()
    # Record the strongest point within +/- search_width of each target peak.
    for j,peak in enumerate(peak_pos):
        mask = (k <= peak + search_width) & (k >= peak - search_width)
        c1 = counts[mask]
        k1 = k[mask]
        max_ind = np.argmax(c1)
        k_max[i,j] = k1[max_ind]
        c_max[i,j] = c1[max_ind]
    # Group label: file name with the trailing index + ".txt" stripped.
    labels = np.append(labels,file[:-6])
# --- Sort results by sample label, then produce scatter and bar summaries. ---
print(c_max)
sort = np.argsort(labels)
labels = labels[sort]
k_max = k_max[sort,:]
c_max = c_max[sort,:]
print(labels)
label = np.unique(labels)
print(label)
# Scatter: measured peak positions vs. intensities, one colour per sample.
for l in label:
    mask = labels == l
    plt.scatter(k_max[mask], c_max[mask])
plt.savefig(path + "scatter.pdf", dpi=300)
plt.close()
# Per-sample mean and standard deviation of each peak intensity.
mean = np.zeros((len(label),len(peak_pos)))
err = np.zeros((len(label),len(peak_pos)))
for i,l in enumerate(label):
    mask = labels == l
    for j in range(len(peak_pos)):
        mean[i,j] = np.mean(c_max[mask,j])
        err[i,j] = np.std(c_max[mask,j])
print(mean)
print(mean[:,0].ravel())
print(np.arange(0,mean.shape[0],1))
# Grouped bar chart: one cluster per sample, one bar per peak, with error bars.
for i in range(mean.shape[1]):
    plt.bar(np.arange(0,mean.shape[0],1)*mean.shape[1]+(i+1),mean[:,i].ravel(),yerr=err[:,i].ravel())
plt.xticks((np.arange(0,mean.shape[0],1)*mean.shape[1]+(mean.shape[1]+1)/2), label)
plt.savefig(path + "bar.pdf", dpi=300)
plt.close()
# Dump the per-sample peak statistics as a simple CSV (CRLF line endings).
print('-> Writing measured values to file')
with open(path + "raman.csv", 'w') as f:
    f.write("label,")
    for j in range(mean.shape[1]):
        f.write("mean"+str(peak_pos[j])+",err"+str(peak_pos[j])+",")
    f.write("\r\n")
    for i in range(len(label)):
        f.write( label[i] + ",")
        for j in range(mean.shape[1]):
            f.write( str(mean[i,j])+ "," + str(err[i,j])+"," )
        f.write("\r\n")
# --- Mean raw spectrum per sample, plotted stacked with an error band. ---
# NOTE(review): array length comes from `counts` of the LAST file processed
# above — this assumes every spectrum has the same number of points.
mean = np.zeros((len(label),len(counts)))
err = np.zeros((len(label),len(counts)))
for i, l in enumerate(label):
    buf = []
    for j,file in enumerate(files):
        if file[:-6] == l:
            k, counts = np.loadtxt(path + file, unpack=True)
            #counts = signal.savgol_filter(counts, 31, 1, mode='interp')
            #base = pu.baseline(counts, 11, max_it=10000, tol=0.00001)
            #counts -= base
            buf.append(counts)
    buf = np.array(buf)
    print(buf.shape)
    mean[i, :] = np.mean(buf,axis=0)
    err[i, :] = np.std(buf,axis=0)
fig, ax = newfig(0.9)
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
legend = ["A: 30 min","B: 30 min","C: 90 min","D: 90 min"]
print(label)
print(legend)
# Each sample is offset vertically by 1000*i; the polygon is the +/- std band.
for i, l in enumerate(label):
    poly = np.array((k,mean[i,:]+err[i,:]+1000*i))
    poly = np.hstack((poly,np.fliplr(np.array((k, mean[i,:] - err[i,:]+1000*i)))))
    poly = poly.T
    ax.add_patch(Polygon(poly, closed=True,fill=True,alpha = 0.3,facecolor=colors[i]))
    #plt.plot(wl, mean_spec, linewidth=0.8)
    plt.plot(k,mean[i,:]+1000*i, linewidth=0.8)
plt.ylabel(r'$I_{\nu}\, /\, counts$')
plt.xlabel(r'$\Delta\widetilde{\nu}\, /\, cm^{-1}$')
plt.legend(legend)
plt.tight_layout()
plt.savefig(path + "overview.pdf", dpi=300)
plt.close()
# width = 100
# max_ind = np.argmax(counts)
# indices = np.arange(0, len(k), 1)
# mask = (indices <= max_ind + width) & (indices >= max_ind - width)
# # inds = np.arange(max_ind-width,max_ind+width,1)
# k1 = k[mask]
# counts1 = counts[mask]
# def err_fun(p):
# fit = fit_fun(k1, *p)
# diff = np.abs(counts1 - fit)
# return np.sum(diff)
#
# #def fit_fun(x, amp, x0, sigma,a,f,b,c):
# b = 0# ( np.mean(counts1[20:])-np.mean(counts1[:-20]) )/( np.mean(k1[20:])-np.mean(k1[:-20]) )
# c = 0#np.mean(k1[20:])
# start = [counts[max_ind]*3,k[max_ind],150,0.01,0.1]
# upper = [counts[max_ind]*10, k[max_ind]+width, 500, 1,1]
# lower = [ 0, k[max_ind]-width, 10, 0,0]
# bnds = []
# for i in range(len(upper)):
# bnds.append((lower[i], upper[i]))
#
# #minimizer_kwargs = {"method": "SLSQP","bounds": bnds,"tol":1e-10}
# #res = basinhopping(err_fun, start, minimizer_kwargs=minimizer_kwargs, niter=1000,disp=False)
# res = opt.minimize(err_fun, start, method='SLSQP', options={'disp': True, 'maxiter': 10000},tol=1e-10)
# #res = opt.minimize(err_fun, start, method='L-BFGS-B', options={'disp': True, 'maxiter': 5000})
# #res = opt.minimize(err_fun, start, method='Nelder-Mead', options={'disp': True, 'maxiter': 5000})
#
# popt = res.x
#
# print(popt)
# plt.plot(k1, counts1, linewidth=1)
# plt.plot(k1, fit_fun(k1,popt[0],popt[1],popt[2],popt[3],popt[4]), linewidth=1)
# #plt.plot(k1, popt[5]*k1+popt[6])
# plt.ylabel(r'$I_{\nu}\, /\, counts$')
# plt.xlabel(r'$wavenumber\, /\, cm^{-1}$')
# plt.savefig(savedir + file[:-4] + "fit.pdf", dpi=300)
# #plt.show()
# plt.close()
#
# fit = fit_fun(k1,popt[0],popt[1],popt[2],popt[3],popt[4])
# print(np.max(fit))
| [
"Simon.Dickreuter@uni-tuebingen.de"
] | Simon.Dickreuter@uni-tuebingen.de |
c35a45aa07d805a1a36b6c9ba503f843f82fe68e | 3554cedeca0e21a015534290a95d0a3930ff1cc1 | /spider/spideOnDelegation.py | f6be5c0879481f57b3fb6d95aa692fb56bf285b8 | [] | no_license | baolintian/EZTrade | 27d2329468f44bbedc610e0f8ab75be05ccfb247 | 72ee63fdcbfd37574a7734bd0991cff114481f79 | refs/heads/main | 2023-09-01T04:08:20.086976 | 2021-09-23T08:47:32 | 2021-09-23T08:47:32 | 390,328,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,650 | py | import requests
import json
import time
import datetime
def get_token():
    """Log in to the DWF backend and return a fresh auth token string.

    NOTE(review): credentials are hard-coded in the URL and a static JWT is
    sent in the Authorization header — consider moving both to configuration.
    """
    login_url = ("http://i-2o0wkhxv.cloud.nelbds.org.cn:8180"
                 "/api/app//dwf/v1/app/login?password=31c15919&userName=admin")
    request_headers = {
        'accept': '*/*',
        'Authorization': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJhZG1pbiIsImV4cCI6MTYyNTY3NTgyNH0.LkYBQnKfeDoEYJAMs4HOZae_Gq9nyu8kqOVP3T_qkkdmHb9pgRJbw4dlbxjEO69tFh7NQ3-vT-EHLTYo6b8Nyw'
    }
    response = requests.request("GET", login_url, headers=request_headers, data={})
    return json.loads(response.text)["data"]
def get_delegation_info():
    """Fetch the first page (10 entries) of pending DelegateInfo orders."""
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/DelegateInfo/objects"
    body = json.dumps({
        "condition": "and 1=1",
        "pageSize": 10,
        "startIndex": 0
    })
    headers = {
        'accept': '*/*',
        'Authorization': get_token(),
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    reply = requests.request("POST", url, headers=headers, data=body)
    return json.loads(reply.text)
def get_coin_info(class_name, condition):
    """Query up to 100 objects of *class_name* matching *condition*."""
    url = ("http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/"
           + class_name + "/objects")
    body = json.dumps({
        "condition": condition,
        "pageSize": 100,
        "startIndex": 0
    })
    headers = {
        'accept': '*/*',
        'Authorization': get_token(),
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    reply = requests.request("POST", url, headers=headers, data=body)
    return json.loads(reply.text)
def delete_delegation_by_oid(class_name, oid):
    """Delete a single object of *class_name* identified by *oid* (fire-and-forget)."""
    url = ("http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/"
           + class_name + "/objects-delete")
    headers = {
        'accept': '*/*',
        'Authorization': get_token(),
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    requests.request("POST", url, headers=headers, data=json.dumps([oid]))
def create_transaction(class_name, message):
    """Create one transaction record on the server.

    Args:
        class_name: entity class to create the object under (e.g. "TransactionHistory").
        message: dict describing the transaction; serialized as a one-element list.

    Returns:
        bool: True if the server answered HTTP 200, False otherwise.

    The previous local ``import requests``/``import json`` lines shadowed the
    module-level imports and have been removed.
    """
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-create"
    payload = json.dumps([message])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    # Only an exact 200 counts as success (other 2xx codes are treated as failure,
    # matching the original behavior).
    return response.status_code == 200
def get_instance_by_oid(class_name, oid):
    """Fetch a single object of *class_name* by its OID.

    Returns:
        The decoded JSON response from the server.

    The previous local ``import requests``/``import json`` lines shadowed the
    module-level imports and have been removed.
    """
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects/oids"
    payload = json.dumps([oid])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return json.loads(response.text)
def edit_VirtualAccount_by_oid(class_name, obj):
    """Update one existing object (forceUpdate=false) and echo the server reply."""
    url = ("http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/"
           + class_name + "/objects-update?forceUpdate=false")
    headers = {
        'accept': '*/*',
        'Authorization': get_token(),
        'Content-Type': 'application/json'
    }
    reply = requests.request("POST", url, headers=headers, data=json.dumps([obj]))
    print(reply.text)
def get_single_coin_info(class_name, condition):
    """Query objects of *class_name* matching *condition* (page size 100)."""
    url = ("http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/"
           + class_name + "/objects")
    body = json.dumps({
        "condition": condition,
        "pageSize": 100
    })
    headers = {
        'accept': '*/*',
        'Authorization': get_token(),
        'Content-Type': 'application/json'
    }
    reply = requests.request("POST", url, headers=headers, data=body)
    return json.loads(reply.text)
def edit_single_coin_hold(class_name, obj):
    """Update a single coin-holding record via the objects-update endpoint.

    Args:
        class_name: Entity class name (e.g. "SingleCoinInfo").
        obj: dict containing "oid" plus the fields to change.

    Returns:
        True if the server answered HTTP 200, False otherwise.  The original
        code captured the response and silently discarded it; returning a
        success flag is backward-compatible (existing callers ignore it).
    """
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-update?forceUpdate=false"
    payload = json.dumps([
        obj
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return response.status_code == 200
# Main polling loop: every 2 seconds fetch the open delegations (limit
# orders), execute any whose limit price has been reached by the current
# market price, update the delegator's account and per-coin holdings, write
# a transaction record, and finally delete the fulfilled delegation.
while(True):
    delegation_info = get_delegation_info()
    # print(delegation_info)
    # print(len(delegation_info["data"]))
    delegation_info = delegation_info["data"]
    # Fetch the current price of every coin.
    coin_info = get_coin_info("CoinInfo", "and 1=1")
    coin_info = coin_info["data"]
    # print(coin_info)
    # coin name -> latest market price
    coin_dict = {}
    for i in range(len(coin_info)):
        coin_name = coin_info[i]["coinName"]
        coin_price = coin_info[i]["coinPrice"]
        coin_dict[coin_name] = coin_price
    # Process every open delegation.
    for i in range(len(delegation_info)):
        print(delegation_info[i])
        delegate_coin_name = delegation_info[i]["delegateCoinName"]
        delegate_price = delegation_info[i]["delegatePrice"]
        delegate_action = delegation_info[i]["delegateAction"]
        delegate_number = delegation_info[i]["delegateAmount"]
        delegate_oid = delegation_info[i]["oid"]
        delegator_oid = delegation_info[i]["delegatorOID"]
        # delegate_type = delegation_info[i]["delegateType"]
        # Malformed action: drop the delegation outright.
        # NOTE(review): the loop still falls through and processes this
        # (now deleted) delegation below — confirm this is intended.
        if delegate_action != "BUY" and delegate_action != "SELL":
            delete_delegation_by_oid("DelegateInfo", delegate_oid)
        if delegate_coin_name in coin_dict.keys():
            # Limit not reached yet: a BUY waits for the market to fall to
            # the limit price, a SELL waits for it to rise to it.
            if delegate_action == "BUY" and delegate_price < coin_dict[delegate_coin_name]:
                continue
            if delegate_action == "SELL" and delegate_price > coin_dict[delegate_coin_name]:
                continue
            if delegate_action == "BUY":
                transaction_message = {
                    "transactionCoinName": delegate_coin_name,
                    "transactionAmount": delegate_number,
                    "transactionPrice": delegate_price,
                    "transactionPersonOID": delegator_oid,
                    "transactionAction": delegate_action,
                    # Epoch milliseconds, stored as a string.
                    "transactionTime": str(int(time.mktime(datetime.datetime.now().timetuple()))*1000)
                }
                print("BUY")
                print(transaction_message)
                result = create_transaction("TransactionHistory", transaction_message)
                if result:
                    # Update the virtual account's balances.
                    user = get_instance_by_oid("VirtualAccount", delegator_oid)["data"][0]
                    user_oid = user["oid"]
                    user_tot = user["asset"]
                    user_coin_asset = user["coinAsset"]
                    user_cash = user["cash"]
                    user_frozenAsset = user["frozenAsset"]
                    user_usableAsset = user["usableAsset"]
                    # Release frozen cash: cost of the buy plus a 0.1% fee.
                    user_frozenAsset = user_frozenAsset - delegate_number*delegate_price*(1+0.001)
                    # NOTE(review): coin asset is valued at the current market
                    # price while cash was debited at the limit price — confirm
                    # this asymmetry is intended.
                    user_coin_asset = user_coin_asset + delegate_number*coin_dict[delegate_coin_name]
                    user_cash = user_frozenAsset+user_usableAsset
                    user_tot = user_cash+user_coin_asset
                    # TODO: update the rate of return
                    obj = {
                        "oid": user_oid,
                        "asset": user_tot,
                        "coinAsset": user_coin_asset,
                        "cash": user_cash,
                        "frozenAsset": user_frozenAsset,
                        "usableAsset": user_usableAsset
                        # "delegatorOID": delegator_oid
                    }
                    edit_VirtualAccount_by_oid("VirtualAccount", obj)
                    # Add or update the per-coin holding record.
                    hold_info = get_single_coin_info(r"SingleCoinInfo", "and obj.coinHolderOID = '"+str(user_oid)+r"'")
                    hold_info = hold_info["data"]
                    hold_coin_dict = {}
                    flag = False
                    for j in range(len(hold_info)):
                        if hold_info[j]["coinName"] == delegate_coin_name:
                            print("real update")
                            flag = True
                            coin_number = hold_info[j]["coinAmount"]
                            hold_price = hold_info[j]["coinHoldPrice"]
                            avg_price = hold_info[j]["coinAveragePrice"]
                            transaction_time = hold_info[j]["coinTime"]
                            usable_amount = hold_info[j]["coinUsableAmount"]
                            # Weighted-average holding price over old + new units.
                            hold_price = (hold_price * coin_number + delegate_number * delegate_price) / (
                                        coin_number + delegate_number)
                            # NOTE(review): this averages the *new* hold_price with the
                            # trade price instead of the previous avg_price — confirm.
                            avg_price = (hold_price*transaction_time+delegate_price)/(1+transaction_time)
                            transaction_time = transaction_time+1
                            coin_number = coin_number+delegate_number
                            usable_amount = usable_amount+delegate_number
                            obj = {
                                "oid": hold_info[j]["oid"],
                                "coinAmount": coin_number,
                                "coinHoldPrice": hold_price,
                                "coinAveragePrice": avg_price,
                                "coinTime": transaction_time,
                                "coinUsableAmount": usable_amount
                            }
                            edit_single_coin_hold("SingleCoinInfo", obj)
                            break
                    if flag == False:
                        # No existing holding for this coin: create one.
                        obj = {
                            "coinAmount": delegate_number,
                            "coinHoldPrice": delegate_price,
                            "coinAveragePrice": delegate_price,
                            "coinTime": 1,
                            "coinName": delegate_coin_name,
                            "coinHolderOID": delegator_oid,
                            "coinUsableAmount": delegate_number
                        }
                        create_transaction("SingleCoinInfo", obj)
            # Remove the fulfilled delegation.
            delete_delegation_by_oid("DelegateInfo", delegate_oid)
            if delegate_action == "SELL":
                # Steps: update the user's funds, update/delete the holding,
                # create a transaction record, delete the delegation.
                transaction_message = {
                    # Covers SELL and AUTO SELL
                    "transactionAction": delegate_action,
                    "transactionCoinName": delegate_coin_name,
                    "transactionAmount": delegate_number,
                    "transactionPrice": delegate_price,
                    "transactionPersonOID": delegator_oid,
                    "transactionTime": str(int(time.mktime(datetime.datetime.now().timetuple())) * 1000)
                }
                print("SELL")
                print(transaction_message)
                result = create_transaction("TransactionHistory", transaction_message)
                if result:
                    # Update the virtual account's balances.
                    user = get_instance_by_oid("VirtualAccount", delegator_oid)["data"][0]
                    user_oid = user["oid"]
                    user_tot = user["asset"]
                    user_coin_asset = user["coinAsset"]
                    user_cash = user["cash"]
                    user_frozenAsset = user["frozenAsset"]
                    user_usableAsset = user["usableAsset"]
                    user_coin_asset = user_coin_asset - delegate_number * coin_dict[delegate_coin_name]
                    # Proceeds of the sale minus a 0.1% fee.
                    user_usableAsset = user_usableAsset+delegate_number * delegate_price*(1-0.001)
                    user_cash = user_frozenAsset + user_usableAsset
                    user_tot = user_cash + user_coin_asset
                    # TODO: update the rate of return
                    obj = {
                        "oid": user_oid,
                        "asset": user_tot,
                        "coinAsset": user_coin_asset,
                        "cash": user_cash,
                        "frozenAsset": user_frozenAsset,
                        "usableAsset": user_usableAsset
                    }
                    edit_VirtualAccount_by_oid("VirtualAccount", obj)
                    # Update (or delete) the per-coin holding record.
                    hold_info = get_single_coin_info(r"SingleCoinInfo",
                                                     "and obj.coinHolderOID = '" + str(user_oid) + r"'")
                    hold_info = hold_info["data"]
                    hold_coin_dict = {}
                    for j in range(len(hold_info)):
                        if hold_info[j]["coinName"] == delegate_coin_name:
                            print("real update")
                            # flag = True
                            coin_number = hold_info[j]["coinAmount"]
                            hold_price = hold_info[j]["coinHoldPrice"]
                            avg_price = hold_info[j]["coinAveragePrice"]
                            transaction_time = hold_info[j]["coinTime"]
                            usable_amount = hold_info[j]["coinUsableAmount"]
                            # Remove the sold units from the weighted holding price;
                            # guard against dividing by zero when fully sold.
                            if(coin_number - delegate_number != 0):
                                hold_price = (hold_price * coin_number - delegate_number * delegate_price) / (
                                            coin_number - delegate_number)
                            else:
                                hold_price = 0
                            avg_price = (hold_price * transaction_time + delegate_price) / (1 + transaction_time)
                            transaction_time = transaction_time+1
                            coin_number = coin_number - delegate_number
                            print("剩余币种")
                            print(coin_number)
                            # Dust threshold: anything at or below 0.0001 counts as emptied.
                            if(coin_number <= 0.0001):
                                # Holding emptied: delete the record outright.
                                delete_delegation_by_oid("SingleCoinInfo", hold_info[j]["oid"])
                            else:
                                # NOTE(review): no-op — the usable amount is never
                                # reduced by the sold quantity; confirm intended.
                                usable_amount = usable_amount
                                obj = {
                                    "oid": hold_info[j]["oid"],
                                    "coinAmount": coin_number,
                                    "coinHoldPrice": hold_price,
                                    "coinAveragePrice": avg_price,
                                    "coinTime": transaction_time,
                                    "coinUsableAmount": usable_amount
                                }
                                edit_single_coin_hold("SingleCoinInfo", obj)
                            break
            # Remove the fulfilled delegation.
            delete_delegation_by_oid("DelegateInfo", delegate_oid)
    # Poll again after 2 seconds.
    time.sleep(2)
"tianbaolin1@gmail.com"
] | tianbaolin1@gmail.com |
bcaf5aa98c8edf969dc67d07dbc2b241654d3d1d | fb7f04ffbdcdf4f5aa7c0e6ccf83f7671ef10770 | /server.py | cadfd74d1f7a4b805332831f5238f54f81f48b2d | [] | no_license | Bthelisma/LandingPage | 8ce348ece186c57e98d00cb5fdde8149587accae | 87fdf0bed1ad0e7a978095c47d9ba3ea860b74b7 | refs/heads/master | 2020-03-22T18:59:26.266574 | 2018-07-10T23:14:03 | 2018-07-10T23:14:03 | 140,496,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from flask import Flask, render_template
# WSGI application instance; the routes below are registered against it.
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page."""
    page = "index.html"
    return render_template(page)
@app.route('/ninjas')
def ninjas():
    """Serve the ninjas page."""
    page = "ninjas.html"
    return render_template(page)
@app.route('/dojos')
def dojos():
    """Serve the dojos page."""
    page = "dojos.html"
    return render_template(page)
# Start Flask's development server (debug reloader on; not for production).
app.run(debug=True)
| [
"contactjerseysure@gmail.com"
] | contactjerseysure@gmail.com |
dfab9c98e6e8c2274dad941069669ae7f05d9833 | 15f438d029528a978383f24f85035c911e314b72 | /scripts/tile.py | 4265e245943caf8bebb807cdee81181b01d0187c | [
"MIT"
] | permissive | rg314/autoballs | 91d11315a61d4c088b099744301b3f1b68eecc93 | 21fab5c810f18c0d50c23051928d3bb86fbc6941 | refs/heads/main | 2023-05-30T11:48:52.901933 | 2021-06-23T14:48:27 | 2021-06-23T14:48:27 | 341,683,921 | 1 | 0 | MIT | 2021-03-18T23:28:23 | 2021-02-23T20:39:55 | Python | UTF-8 | Python | false | false | 2,397 | py | import os
import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
# Tiling parameters: square tiles of `size` px; the step equals the tile
# size, so tiles do not overlap.
size = 512
tile_size = (size, size)
offset = (size, size)
image_types = 'data'
origin_path = os.path.abspath(os.path.join(f'train_data/{image_types}/*', os.pardir))
images = glob.glob(origin_path+'/imgs/*.tif')
# Derive each mask path from its image path by swapping the directory.
masks = [x.replace(f'imgs/img_', 'masks/img_') for x in images]
# Keep only (image, mask) pairs where both files exist.
data = [(x, y) for (x, y) in list(zip(images, masks)) if os.path.exists(x) and os.path.exists(y)]
images, masks = zip(*data)
# NOTE(review): x and y are never used below — dead variables?
x = []
y = []
idx = 0       # running tile index (used for output file names)
non_zero = 0  # tiles whose mask contains at least one positive pixel
for img_n, mask_n in list(zip(images, masks)):
    mask = cv2.imread(mask_n)
    # Binarize the mask to {0, 255}.
    mask = (mask > 255//2) * 255
    img = cv2.imread(img_n)
    mask = np.asarray(mask).astype('uint8')
    # Keep a single channel.
    mask = mask[:,:,0]
    img_shape = img.shape
    # cv2.imwrite('test.tif', mask)
    # print(mask)
    if mask.shape[:2] == img.shape[:2]:
        # Walk the image in tile-sized steps; edge tiles are clipped to the
        # image bounds, so they may be smaller than tile_size.
        for i in range(int(math.ceil(img_shape[0]/(offset[1] * 1.0)))):
            for j in range(int(math.ceil(img_shape[1]/(offset[0] * 1.0)))):
                cropped_img = img[offset[1]*i:min(offset[1]*i+tile_size[1], img_shape[0]), offset[0]*j:min(offset[0]*j+tile_size[0], img_shape[1])]
                cropped_mask = mask[offset[1]*i:min(offset[1]*i+tile_size[1], img_shape[0]), offset[0]*j:min(offset[0]*j+tile_size[0], img_shape[1])]
                #
                path = os.getcwd() + f'/train_data/data_tile_{size}/imgs'
                if not os.path.exists(path):
                    os.makedirs(path)
                imtgt = 'img_'+str(idx).zfill(5)+'.tif'
                img_target = os.path.join(path, imtgt)
                path = os.getcwd() + f'/train_data/data_tile_{size}/masks'
                if not os.path.exists(path):
                    os.makedirs(path)
                mskgt = imtgt
                mask_target = os.path.join(path, mskgt)
                # # print(cropped_img.shape, img_target)
                # # print(cropped_mask.shape, mask_target)
                cv2.imwrite(img_target, cropped_img)
                # Masks are written inverted.
                cv2.imwrite(mask_target, ~cropped_mask)
                if np.sum(cropped_mask) > 0:
                    non_zero += 1
                idx += 1
# NOTE(review): raises ZeroDivisionError if no tiles were produced (idx == 0).
print(f'Total {non_zero} out of {idx} which is {(non_zero*100/idx):.2f} %')
| [
"ryan.greenhalgh@hotmail.co.uk"
] | ryan.greenhalgh@hotmail.co.uk |
50317930bb9698c10a56bf2f5e1c9bf9b3f6f36b | 38f619c6210d77d156c6a9ae2850b30b1d96fd79 | /gen_winning_paths.py | 3a4c2ee73521727270a7dd6a1f62200d8b43fc07 | [] | no_license | ofraam/GTTT | 3c2d0fc55e17c794b9e4f7078640e86d73b780fc | 83e4c4f0d4c667bc719239d79daa1ab9417e7d1e | refs/heads/master | 2021-01-20T04:46:15.095923 | 2019-08-08T15:57:04 | 2019-08-08T15:57:04 | 89,724,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | if __name__ == "__main__":
    # Generate every winning line (horizontal, vertical and both diagonals)
    # of length `streak` on a dimension x dimension board whose cells are
    # numbered 1..dimension^2 row-major, and write them to a text file.
    # NOTE(review): this is Python 2 code (print statement at the end).
    dimension = 10
    streak = 5
    filename = "examples/board_"+str(dimension)+"_"+str(streak)+".txt"
    row = 1
    col = 1
    winning_paths = []
    #check horizontal
    for row in range(1,dimension+1):
        for col in range(1, dimension + 1):
            # Cell number of (row, col), 1-based, row-major.
            i = (row-1)*dimension+col
            if (i+(streak-1))<=(dimension*row): #horizontal paths
                path = []
                for s in range(0,streak):
                    path.append(i+s)
                winning_paths.append(path)
            if (i+(streak-1)*dimension)<=dimension*(dimension-1)+col: #vertical paths
                path = []
                for s in range(0,streak):
                    path.append(i+(s)*dimension)
                winning_paths.append(path)
            if (i+(streak-1)*(dimension+1))<=dimension*dimension: #diagonal right paths
                if (i + (streak - 1) * (dimension + 1)) <= (row + (streak - 1)) * dimension: # diagonal right paths
                    path = []
                    for s in range(0,streak):
                        path.append(i+(s)*(dimension+1))
                    winning_paths.append(path)
            if (i+(streak-1)*(dimension-1))<=dimension*dimension: #diagonal left paths (step dimension-1)
                if (i + (streak - 1) * (dimension - 1)) > ((row-1) + (streak - 1)) * dimension: # diagonal left paths
                    path = []
                    for s in range(0,streak):
                        path.append(i+(s)*(dimension-1))
                    winning_paths.append(path)
    with open(filename, "w") as text_file:
        # First line: total number of cells.
        text_file.write(str(dimension*dimension))
        text_file.write("\n")
        for path in winning_paths:
            for i in range(len(path)):
                text_file.write(str(path[i]))
                if i<len(path)-1:
                    text_file.write(" ")
                # NOTE(review): this newline is written after EVERY number,
                # not once per path — confirm the intended file format.
                text_file.write("\n")
    print winning_paths
| [
"oamir@seas.harvard.edu"
] | oamir@seas.harvard.edu |
cb87f2390f4328b284144e4fa1564341cb8bdcf7 | c27c51f5c33e0431dbe7db6e18c21b249d476cfa | /OpenSource_Python_Code/nova-2013.2/nova/tests/api/ec2/test_faults.py | 36cee0663bf4ff4b4c640f0b081a869d016d26a6 | [
"Apache-2.0"
] | permissive | bopopescu/Python_Stuff | 9bef74e0db17bb5e3ba2d908ced01ee744820d80 | 9aa94a0fa5e4e802090c7b29ec88b840e304d9e5 | refs/heads/master | 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null | UTF-8 | Python | false | false | 1,914 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import webob
from nova.api.ec2 import faults
from nova import test
from nova import wsgi
class TestFaults(test.NoDBTestCase):
    """Tests covering ec2 Fault class."""
    def test_fault_exception(self):
        # Ensure the wrapped exception is preserved on faults.
        fault = faults.Fault(webob.exc.HTTPBadRequest(
            explanation='test'))
        self.assertTrue(isinstance(fault.wrapped_exc,
                                   webob.exc.HTTPBadRequest))
    def test_fault_exception_status_int(self):
        # Ensure the status_int is set correctly on faults.
        fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
        self.assertEquals(fault.wrapped_exc.status_int, 404)
    def test_fault_call(self):
        # Ensure proper EC2 response on faults.
        message = 'test message'
        ex = webob.exc.HTTPNotFound(explanation=message)
        fault = faults.Fault(ex)
        req = wsgi.Request.blank('/test')
        req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
        # Record the expected ec2_error_response call, then replay: invoking
        # the fault must emit exactly this EC2 error response.
        self.mox.StubOutWithMock(faults, 'ec2_error_response')
        faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
                                  message=message, status=ex.status_int)
        self.mox.ReplayAll()
        fault(req)
| [
"thelma1944@gmail.com"
] | thelma1944@gmail.com |
49ef89ed9847f6370bab12ee14d5b37c98c4382f | 8cf211cabe8c5169b2c9c3c6b38f69ac6c93b93e | /flask_bootcamp/section_6/venv/lib/python3.6/os.py | ac0bdb84664fdfc380e6a61aeee8932f5167c92b | [] | no_license | bopopescu/Python_Personal_Projects | 020256cb6835438b1b776eacb1a39d4cb5bc2efc | 025145130da5ac846b8aa14764783739ff68f64c | refs/heads/master | 2022-11-21T04:47:17.253558 | 2018-10-25T22:12:07 | 2018-10-25T22:12:07 | 281,146,690 | 0 | 0 | null | 2020-07-20T14:57:05 | 2020-07-20T14:57:04 | null | UTF-8 | Python | false | false | 44 | py | /home/vinicius/anaconda3/lib/python3.6/os.py | [
"vinicius.yosiura@live.com"
] | vinicius.yosiura@live.com |
68e6812af340c1592f989fbc771b1033a152cf91 | 582660ae9d3c21010042bd6262e421a2a6e94e61 | /python/introduction/python_if-else/python_if_else.py | a8c09703dd210c9a51a596580200c33614db93c6 | [] | no_license | tim-mccabe/hacker-rank | 5e12bcd9baabb94c98bca8ef906063092279f4a2 | 61480d2f7b4d567ac48d526417afd7dbc5a2329e | refs/heads/master | 2023-01-30T11:57:23.041756 | 2020-12-10T20:50:52 | 2020-12-10T20:50:52 | 320,064,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # n is already an int; the original re-converted it with int(n) in every
    # condition.  The four independent ifs are mutually exclusive, so an
    # if/elif chain is exactly equivalent (even n < 2 still prints nothing).
    n = int(input().strip())
    if n % 2 == 1:
        print('Weird')
    elif 2 <= n <= 5:
        print('Not Weird')
    elif 6 <= n <= 20:
        print('Weird')
    elif n > 20:
        print('Not Weird')
| [
"timmccabe44@gmail.com"
] | timmccabe44@gmail.com |
79dbeaf0b944d391662e5119f73dae9367fe504f | bd26284c804ded76f21d25b9c7a355304428e4d7 | /2/2-4. Cartpole.py | e5c1e31c1bf0433c30db2eb054cf87b7c840057f | [
"Apache-2.0"
] | permissive | Wonjuseo/Project101 | 0c93171bbd6ab86dfbc32f474e12e7b7229db4da | 8c49601e34f56035acd198a09428fa71f6606ca7 | refs/heads/master | 2021-01-19T23:25:40.658736 | 2018-07-02T13:45:03 | 2018-07-02T13:45:03 | 88,979,764 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # Cart Pole example
import gym
# Environment: classic CartPole control task.
env = gym.make('CartPole-v0')
env.reset()
# Parameters
random_episodes = 0  # episodes completed so far (run 10 in total)
reward_sum = 0       # cumulative reward within the current episode
# Play 10 episodes using uniformly random actions and report each episode's
# total reward.
while random_episodes < 10:
    # Rendering
    env.render()
    # Sample a random action from the action space.
    action = env.action_space.sample()
    # Step the environment; `done` flags the end of the episode.
    observation, reward, done, _ = env.step(action)
    print(observation,reward,done)
    # Add reward
    reward_sum += reward
    # Episode over (pole fell / limits exceeded): report and reset.
    if done:
        random_episodes += 1
        print("Reward for this episode was:", reward_sum)
        reward_sum = 0
        env.reset()
| [
"noreply@github.com"
] | Wonjuseo.noreply@github.com |
4da9c1e6ca004b93d1f275e2bd86ea3be8e69b31 | 52bb1d25a8c146b81b876343f861025e034fa964 | /roglick/dungeon/utils.py | fcf6a2a864c5ae7cc6c50f2c302b33b63529bf23 | [
"MIT"
] | permissive | Kromey/roglick | b3fc7a6bce7e60a150c9a9ed1cc825ef3988cf8a | b76202af71df0c30be0bd5f06a3428c990476e0e | refs/heads/master | 2020-12-14T15:49:53.163385 | 2016-05-24T16:29:06 | 2016-05-24T16:29:06 | 21,549,421 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | from roglick.engine import random
from roglick.utils import clamp
def smoothstep(a, b, x):
    """Basic Hermite S-curve interpolation of ``x`` between edges ``a`` and ``b``.

    Based on the reference implementation available at
    https://en.wikipedia.org/wiki/Smoothstep
    """
    t = clamp((x - a)/(b - a), 0.0, 1.0)
    return t*t*(3 - 2*t)
def smootherstep(a, b, x):
    """Improved S-curve interpolation function.

    Based on the reference implementation of the improved algorithm proposed
    by Ken Perlin that is available at https://en.wikipedia.org/wiki/Smoothstep
    (zero first and second derivatives at both edges).
    """
    x = clamp((x - a)/(b - a), 0.0, 1.0)
    # Dropped the stray trailing semicolon from the original return line.
    return x*x*x*(x*(x*6 - 15) + 10)
def lerp(a, b, x):
    """Linearly interpolate between ``a`` and ``b`` by fraction ``x``."""
    span = b - a
    return a + x * span
class PerlinNoise2D(object):
    """2D Perlin noise generator backed by a seeded, shuffled permutation table."""

    def __init__(self, seed=None):
        # Permutation table: the integers 0..255 shuffled by a seeded RNG.
        self.p = list(range(256))
        if seed is None:
            seed = random.get_int()
        random.Random(seed).shuffle(self.p)

    def octave(self, x, y, octaves=5, persistence=0.5):
        """Sum several noise layers, scaling amplitude by ``persistence`` and
        doubling the frequency each octave; the result stays in [0, 1]."""
        total = 0
        max_val = 0
        amplitude = 1
        frequency = 1
        for _ in range(octaves):
            total += self.noise(x*frequency, y*frequency) * amplitude
            max_val += amplitude
            amplitude *= persistence
            frequency *= 2
        return total / max_val

    def noise(self, x, y):
        """Raw Perlin noise at (x, y), rescaled into [0, 1]."""
        cell_x = int(x)
        cell_y = int(y)
        fx = x - cell_x
        fy = y - cell_y
        u = self.fade(fx)
        v = self.fade(fy)
        # Hash the four corners of the containing lattice cell.
        h00 = self.p_hash(self.p_hash(cell_x) + cell_y)
        h01 = self.p_hash(self.p_hash(cell_x) + cell_y + 1)
        h10 = self.p_hash(self.p_hash(cell_x + 1) + cell_y)
        h11 = self.p_hash(self.p_hash(cell_x + 1) + cell_y + 1)
        bottom = lerp(self.grad(h00, fx, fy), self.grad(h10, fx - 1, fy), u)
        top = lerp(self.grad(h01, fx, fy - 1), self.grad(h11, fx - 1, fy - 1), u)
        # Map the classic [-1, 1] range onto [0, 1].
        return (lerp(bottom, top, v) + 1) / 2

    def fade(self, t):
        """Smoothing curve applied to the fractional coordinates."""
        return smootherstep(0.0, 1.0, t)

    def p_hash(self, i):
        """Permutation-table lookup; the index wraps at 256."""
        return self.p[i & 255]

    def grad(self, h, x, y):
        """Dot product with one of four diagonal gradients, selected by hash.

        This gradient function is based on Riven's optimization.
        Source: http://riven8192.blogspot.com/2010/08/calculate-perlinnoise-twice-as-fast.html
        """
        variant = h % 4
        if variant == 0:
            return x + y
        elif variant == 1:
            return -x + y
        elif variant == 2:
            return x - y
        elif variant == 3:
            return -x - y
        # Unreachable: variant is always in 0..3.
        return 0
| [
"travisvz@gmail.com"
] | travisvz@gmail.com |
955bb168de6e1ab256033fbf68a95eb968b92146 | 180d93304e80e485be81dd06dbbc8a3be0c34365 | /exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py | 222a94d60b3b85e025d4607da7ff392a7c43b338 | [
"Apache-2.0"
] | permissive | marcosflobo/opentelemetry-python | dbb26b04dbbc813696dbc3f8b3db4543af8cf68c | 81d80aab5d4fd23d0d75b223d482d491ac86f006 | refs/heads/main | 2023-05-04T21:51:24.754989 | 2021-05-06T01:51:26 | 2021-05-06T01:51:26 | 365,263,246 | 1 | 0 | Apache-2.0 | 2021-05-07T14:40:16 | 2021-05-07T14:40:15 | null | UTF-8 | Python | false | false | 11,824 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
import grpc
from google.protobuf.timestamp_pb2 import Timestamp
from opencensus.proto.trace.v1 import trace_pb2
import opentelemetry.exporter.opencensus.util as utils
from opentelemetry import trace as trace_api
from opentelemetry.exporter.opencensus.trace_exporter import (
OpenCensusSpanExporter,
translate_to_collector,
)
from opentelemetry.sdk import trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SpanExportResult
from opentelemetry.trace import TraceFlags
# pylint: disable=no-member
class TestCollectorSpanExporter(unittest.TestCase):
    """Unit tests for the OpenCensus span exporter and its translation helpers."""
    def test_constructor(self):
        """Constructor stores client/endpoint and builds the node from the resource service name."""
        mock_get_node = mock.Mock()
        patch = mock.patch(
            "opentelemetry.exporter.opencensus.util.get_node",
            side_effect=mock_get_node,
        )
        trace_api.set_tracer_provider(
            TracerProvider(
                resource=Resource.create({SERVICE_NAME: "testServiceName"})
            )
        )
        host_name = "testHostName"
        client = grpc.insecure_channel("")
        endpoint = "testEndpoint"
        with patch:
            exporter = OpenCensusSpanExporter(
                host_name=host_name,
                endpoint=endpoint,
                client=client,
            )
        self.assertIs(exporter.client, client)
        self.assertEqual(exporter.endpoint, endpoint)
        # The node must be built from the resource's service name + host name.
        mock_get_node.assert_called_with("testServiceName", host_name)
    def test_get_collector_span_kind(self):
        """SERVER/CLIENT map to their protobuf kinds; everything else is UNSPECIFIED."""
        result = utils.get_collector_span_kind(trace_api.SpanKind.SERVER)
        self.assertIs(result, trace_pb2.Span.SpanKind.SERVER)
        result = utils.get_collector_span_kind(trace_api.SpanKind.CLIENT)
        self.assertIs(result, trace_pb2.Span.SpanKind.CLIENT)
        result = utils.get_collector_span_kind(trace_api.SpanKind.CONSUMER)
        self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
        result = utils.get_collector_span_kind(trace_api.SpanKind.PRODUCER)
        self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
        result = utils.get_collector_span_kind(trace_api.SpanKind.INTERNAL)
        self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
    def test_proto_timestamp_from_time_ns(self):
        """Nanosecond ints convert to protobuf Timestamps."""
        result = utils.proto_timestamp_from_time_ns(12345)
        self.assertIsInstance(result, Timestamp)
        self.assertEqual(result.nanos, 12345)
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    def test_translate_to_collector(self):
        """End-to-end check of translate_to_collector over three linked spans.

        Builds one CLIENT span (with event, link, attributes, OK status), one
        SERVER parent span (ERROR status) and one child span, then verifies
        every translated protobuf field.
        """
        trace_id = 0x6E0C63257DE34C926F9EFCD03927272E
        span_id = 0x34BF92DEEFC58C92
        parent_id = 0x1111111111111111
        base_time = 683647322 * 10 ** 9  # in ns
        start_times = (
            base_time,
            base_time + 150 * 10 ** 6,
            base_time + 300 * 10 ** 6,
        )
        durations = (50 * 10 ** 6, 100 * 10 ** 6, 200 * 10 ** 6)
        end_times = (
            start_times[0] + durations[0],
            start_times[1] + durations[1],
            start_times[2] + durations[2],
        )
        span_context = trace_api.SpanContext(
            trace_id,
            span_id,
            is_remote=False,
            trace_flags=TraceFlags(TraceFlags.SAMPLED),
            trace_state=trace_api.TraceState([("testkey", "testvalue")]),
        )
        parent_span_context = trace_api.SpanContext(
            trace_id, parent_id, is_remote=False
        )
        other_context = trace_api.SpanContext(
            trace_id, span_id, is_remote=False
        )
        event_attributes = {
            "annotation_bool": True,
            "annotation_string": "annotation_test",
            "key_float": 0.3,
        }
        event_timestamp = base_time + 50 * 10 ** 6
        event = trace.Event(
            name="event0",
            timestamp=event_timestamp,
            attributes=event_attributes,
        )
        link_attributes = {"key_bool": True}
        link_1 = trace_api.Link(
            context=other_context, attributes=link_attributes
        )
        link_2 = trace_api.Link(
            context=parent_span_context, attributes=link_attributes
        )
        span_1 = trace._Span(
            name="test1",
            context=span_context,
            parent=parent_span_context,
            events=(event,),
            links=(link_1,),
            kind=trace_api.SpanKind.CLIENT,
        )
        span_2 = trace._Span(
            name="test2",
            context=parent_span_context,
            parent=None,
            kind=trace_api.SpanKind.SERVER,
        )
        span_3 = trace._Span(
            name="test3",
            context=other_context,
            links=(link_2,),
            parent=span_2.get_span_context(),
        )
        otel_spans = [span_1, span_2, span_3]
        otel_spans[0].start(start_time=start_times[0])
        otel_spans[0].set_attribute("key_bool", False)
        otel_spans[0].set_attribute("key_string", "hello_world")
        otel_spans[0].set_attribute("key_float", 111.22)
        otel_spans[0].set_attribute("key_int", 333)
        otel_spans[0].set_status(trace_api.Status(trace_api.StatusCode.OK))
        otel_spans[0].end(end_time=end_times[0])
        otel_spans[1].start(start_time=start_times[1])
        otel_spans[1].set_status(
            trace_api.Status(
                trace_api.StatusCode.ERROR,
                {"test", "val"},
            )
        )
        otel_spans[1].end(end_time=end_times[1])
        otel_spans[2].start(start_time=start_times[2])
        otel_spans[2].end(end_time=end_times[2])
        output_spans = translate_to_collector(otel_spans)
        self.assertEqual(len(output_spans), 3)
        # IDs are serialized as big-endian bytes.
        self.assertEqual(
            output_spans[0].trace_id, b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''."
        )
        self.assertEqual(
            output_spans[0].span_id, b"4\xbf\x92\xde\xef\xc5\x8c\x92"
        )
        self.assertEqual(
            output_spans[0].name, trace_pb2.TruncatableString(value="test1")
        )
        self.assertEqual(
            output_spans[1].name, trace_pb2.TruncatableString(value="test2")
        )
        self.assertEqual(
            output_spans[2].name, trace_pb2.TruncatableString(value="test3")
        )
        self.assertEqual(
            output_spans[0].start_time.seconds,
            int(start_times[0] / 1000000000),
        )
        self.assertEqual(
            output_spans[0].end_time.seconds, int(end_times[0] / 1000000000)
        )
        self.assertEqual(output_spans[0].kind, trace_api.SpanKind.CLIENT.value)
        self.assertEqual(output_spans[1].kind, trace_api.SpanKind.SERVER.value)
        self.assertEqual(
            output_spans[0].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11"
        )
        self.assertEqual(
            output_spans[2].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11"
        )
        self.assertEqual(
            output_spans[0].status.code,
            trace_api.StatusCode.OK.value,
        )
        # Trace state entries survive translation.
        self.assertEqual(len(output_spans[0].tracestate.entries), 1)
        self.assertEqual(output_spans[0].tracestate.entries[0].key, "testkey")
        self.assertEqual(
            output_spans[0].tracestate.entries[0].value, "testvalue"
        )
        # Attribute values keep their types (bool/string/double/int).
        self.assertEqual(
            output_spans[0].attributes.attribute_map["key_bool"].bool_value,
            False,
        )
        self.assertEqual(
            output_spans[0]
            .attributes.attribute_map["key_string"]
            .string_value.value,
            "hello_world",
        )
        self.assertEqual(
            output_spans[0].attributes.attribute_map["key_float"].double_value,
            111.22,
        )
        self.assertEqual(
            output_spans[0].attributes.attribute_map["key_int"].int_value, 333
        )
        # Events become time_event annotations.
        self.assertEqual(
            output_spans[0].time_events.time_event[0].time.seconds, 683647322
        )
        self.assertEqual(
            output_spans[0]
            .time_events.time_event[0]
            .annotation.description.value,
            "event0",
        )
        self.assertEqual(
            output_spans[0]
            .time_events.time_event[0]
            .annotation.attributes.attribute_map["annotation_bool"]
            .bool_value,
            True,
        )
        self.assertEqual(
            output_spans[0]
            .time_events.time_event[0]
            .annotation.attributes.attribute_map["annotation_string"]
            .string_value.value,
            "annotation_test",
        )
        self.assertEqual(
            output_spans[0]
            .time_events.time_event[0]
            .annotation.attributes.attribute_map["key_float"]
            .double_value,
            0.3,
        )
        # Links keep IDs; a link to the parent is typed PARENT_LINKED_SPAN.
        self.assertEqual(
            output_spans[0].links.link[0].trace_id,
            b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''.",
        )
        self.assertEqual(
            output_spans[0].links.link[0].span_id,
            b"4\xbf\x92\xde\xef\xc5\x8c\x92",
        )
        self.assertEqual(
            output_spans[0].links.link[0].type,
            trace_pb2.Span.Link.Type.TYPE_UNSPECIFIED,
        )
        self.assertEqual(
            output_spans[1].status.code,
            trace_api.StatusCode.ERROR.value,
        )
        self.assertEqual(
            output_spans[2].links.link[0].type,
            trace_pb2.Span.Link.Type.PARENT_LINKED_SPAN,
        )
        self.assertEqual(
            output_spans[0]
            .links.link[0]
            .attributes.attribute_map["key_bool"]
            .bool_value,
            True,
        )
    def test_export(self):
        """export() sends the spans through the client and reports SUCCESS."""
        mock_client = mock.MagicMock()
        mock_export = mock.MagicMock()
        mock_client.Export = mock_export
        host_name = "testHostName"
        collector_exporter = OpenCensusSpanExporter(
            client=mock_client, host_name=host_name
        )
        trace_id = 0x6E0C63257DE34C926F9EFCD03927272E
        span_id = 0x34BF92DEEFC58C92
        span_context = trace_api.SpanContext(
            trace_id,
            span_id,
            is_remote=False,
            trace_flags=TraceFlags(TraceFlags.SAMPLED),
        )
        otel_spans = [
            trace._Span(
                name="test1",
                context=span_context,
                kind=trace_api.SpanKind.CLIENT,
            )
        ]
        result_status = collector_exporter.export(otel_spans)
        self.assertEqual(SpanExportResult.SUCCESS, result_status)
        # Inspect the request generator handed to the gRPC Export call.
        # pylint: disable=unsubscriptable-object
        export_arg = mock_export.call_args[0]
        service_request = next(export_arg[0])
        output_spans = getattr(service_request, "spans")
        output_node = getattr(service_request, "node")
        self.assertEqual(len(output_spans), 1)
        self.assertIsNotNone(getattr(output_node, "library_info"))
        self.assertIsNotNone(getattr(output_node, "service_info"))
        output_identifier = getattr(output_node, "identifier")
        self.assertEqual(
            getattr(output_identifier, "host_name"), "testHostName"
        )
| [
"noreply@github.com"
] | marcosflobo.noreply@github.com |
e62a576701748974d99d413ad69f0fa9b0b33e9b | 21c77c2ff4d5fbb982943a22abd46a18a804621c | /flow_control/your_name.py | 68fec85da56a05006fdeaef5eb410d09972eb812 | [] | no_license | foleymd/boring-stuff | 56592f576da19238de5c742b78c34d86688b6319 | d81f10f801a512c38a713344a2fe1d8b5b7e5a09 | refs/heads/main | 2023-01-05T19:21:31.248420 | 2020-10-22T20:24:10 | 2020-10-22T20:24:10 | 302,201,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # break example
name = ''
while True:
print('Please type your name.')
name = input()
if name == 'your name':
break
print('Thank you!')
# continue example
spam = 0
while spam < 5:
spam = spam + 1
if spam == 3:
continue
print('spam is ' + str(spam))
| [
"foleymd@gmail.com"
] | foleymd@gmail.com |
ed3e3d70a1fd13a1e41fa4985818c02092340a95 | 413fb29b62fe9ba07362d614ba49e7200482216d | /fraud_detection/src/com/mr/data_analysis_python/sampleFraudData.py | 4a77df03d96dbcfeca3f64da411e6f1ddb5ee5a5 | [] | no_license | cash2one/fraud_detection | ff2cc0a151b16cd3151c584839a227a384716ca7 | 6097e47800394f8659c5d14ab6a6538b2af8d444 | refs/heads/master | 2021-01-19T04:46:32.710395 | 2016-07-09T11:58:05 | 2016-07-09T11:58:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | __author__ = 'TakiyaHideto'
import sys
import random
class SampleFraudData:
    """Down-sample the majority (label "0") rows of a labelled data file.

    Lines starting with "1" (fraud) are always kept; lines starting with
    "0" (normal) are kept with probability ``sampleRatio``; all other lines
    are dropped.
    """
    def __init__(self, input, output):
        self.input = input
        self.output = output
        # Desired fraud/normal ratio, taken from the command line.
        self.sampleRatio = float(sys.argv[3])/float(sys.argv[4])
    def __sample(self):
        with open(self.output, "w") as sink:
            with open(self.input, "r") as source:
                for record in source:
                    if record.startswith("1"):
                        sink.write(record)
                    elif record.startswith("0") and random.random() < self.sampleRatio:
                        sink.write(record)
    def runMe(self):
        self.__sample()
if __name__ == "__main__":
if len(sys.argv) != 5:
print "<inputFile> <outputSampledFile> <fraudDataQuant> <normalDataQuant>"
exit(1)
job = SampleFraudData(sys.argv[1], sys.argv[2])
job.runMe() | [
"TakiyaHideto@iekoumatoMacBook-Pro.local"
] | TakiyaHideto@iekoumatoMacBook-Pro.local |
9ae24c0c1c39be6cfa372b401d1b1ebdd5bd2035 | d15be7017a8d28ad351d2872fdf36b8638a60abd | /Solutions/week01/word_counter.py | a35bc79b42b58ad6bf3ccc3b99120b527f4f46df | [] | no_license | shadydealer/Python-101 | 60ebdd098d38de45bede35905a378e8311e6891a | 9ec2dccd61f54f4ff8f86fe6dd26cd7dd06f570d | refs/heads/master | 2021-04-15T11:42:57.712723 | 2018-06-04T10:32:30 | 2018-06-04T10:32:30 | 126,197,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py |
# Read the target word, then the grid dimensions ("rows cols") from stdin.
word = input()
rows, cols = map(int, input().split())
def get_input():
    """Read ``rows`` lines from stdin, splitting each into a row of tokens."""
    return [input().split() for _ in range(rows)]
# Direction offsets; combining move_x with move_y yields the 8 neighbour
# directions plus (0, 0), which is not a real move.
move_x =[-1,0,1]
move_y =[-1,0,1]
def is_valid_index(x, y):
    """Return True when (x, y) lies inside the cols x rows grid."""
    return 0 <= x < cols and 0 <= y < rows
def count_occurance(matrix, x, y, str_ind, target=None):
    """Count straight-line occurrences of *target* in *matrix* (8 directions).

    Bug fix: the original iterated ``range(matrix)`` / ``range(matrix[i])``
    over lists (TypeError) and returned nothing.  This version performs the
    intended word search.  ``x``, ``y`` and ``str_ind`` are kept for
    interface compatibility but the whole grid is scanned; *target*
    defaults to the module-level ``word`` read from stdin.
    """
    if target is None:
        target = word
    if not matrix or not target:
        return 0
    height, width = len(matrix), len(matrix[0])
    # All 8 straight-line directions (dx, dy), excluding staying in place.
    directions = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                  if (dx, dy) != (0, 0)]
    total = 0
    for start_y in range(height):
        for start_x in range(width):
            for dx, dy in directions:
                cx, cy, k = start_x, start_y, 0
                # Walk while characters keep matching and we stay in bounds.
                while (k < len(target) and 0 <= cx < width and 0 <= cy < height
                       and matrix[cy][cx] == target[k]):
                    cx += dx
                    cy += dy
                    k += 1
                if k == len(target):
                    total += 1
    return total
matrix = get_input() | [
"shady"
] | shady |
bf4f8be8ccdd998f8098cbf3f6605a7b524c9816 | f92722620b74644ee0f2e1a7461d4330ea3374da | /blog/migrations/0001_initial.py | 7ade040810ee75b4aa619f2bb513df02743ad060 | [] | no_license | designwithabhishek/mywebsite | c01e2784b733a681f215cac1c449a98554ca8cb0 | 4aa0593cb750330921de4367e2a389c4918845a1 | refs/heads/master | 2023-05-11T10:12:24.617089 | 2019-06-25T17:36:08 | 2019-06-25T17:36:08 | 200,085,766 | 0 | 0 | null | 2023-04-21T20:35:26 | 2019-08-01T16:37:09 | HTML | UTF-8 | Python | false | false | 835 | py | # Generated by Django 2.2.2 on 2019-06-23 04:53
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the blog `Post` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('author', models.CharField(max_length=50)),
                # NOTE(review): verbose_name holds a datetime here (generator
                # artefact); it is normally a human-readable label string.
                ('created', models.DateTimeField(verbose_name=datetime.datetime(2019, 6, 23, 4, 53, 36, 984687, tzinfo=utc))),
                ('content', models.TextField()),
                ('image', models.ImageField(upload_to='')),
            ],
        ),
    ]
| [
"designwithabhishek1996.com"
] | designwithabhishek1996.com |
019e5dd75cb9ff5d93826aea9822c1224063626a | 3a7b0262d408c8faad77d0710d0bee91e27643b9 | /Major_Requirements.py | a91f9661064cb93b0d8670f6a5c2951172039976 | [] | no_license | fmixson/testfulldegree | c1a13eb89001b017e7800304a3197d042a7e234b | de013a7f2171d66ab6a9fd4ed6a1604b53ef79d5 | refs/heads/main | 2023-06-27T23:08:22.016373 | 2021-08-08T20:07:37 | 2021-08-08T20:07:37 | 394,053,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,400 | py | import pandas as pd
class MajorRequirements:
    """Track how a student's completed courses satisfy major requirements.

    revised_course_list: mapping of completed course key -> unit value.
    completed_ge_courses: mapping of GE area -> completed course key
        (proficiency entries are excluded from double-count checks).
    major_requirements: path to a CSV whose columns are requirement areas
        and whose rows list the courses that can satisfy each area.
    """

    def __init__(self, revised_course_list, completed_ge_courses, major_requirements):
        self.major_requirements = major_requirements
        self.revised_course_list = revised_course_list
        self.completed_ge_courses = completed_ge_courses
        self.major_course_dict = {}        # area -> list of courses applied to it
        self.major_courses_list = []       # every course applied to any area
        self.major_courses_list2 = []      # courses applied to the current area
        self.major_units_list = []         # units not double-counted with GE
        self.major_units_dict = {}
        self.area_units_dict = {}          # area -> units accumulated
        self.major_requirements_dict = {}  # area -> required units
        self.major_no_courses_requirement_dict = {}  # area -> required course count

    def _two_disciplines(self, course_key, total_area_units, total_units):
        """Return True when course_key must be skipped because the area's
        final 3-unit slot would repeat the only discipline used so far.

        Side effect: records the course's discipline in self.discipline_list.
        """
        discipline = course_key.split()[0]
        disc = False
        # Only enforce the rule on the area's final 3-unit slot.
        if total_area_units == (total_units - 3):
            unique_disciplines = set(self.discipline_list)
            if len(unique_disciplines) < 2:
                if discipline in unique_disciplines:
                    disc = True  # would repeat the single discipline used
                else:
                    self.discipline_list.append(discipline)
            else:
                self.discipline_list.append(discipline)
        return disc

    def _three_disciplines(self, course_key, total_area_units, total_units):
        """Same idea as _two_disciplines, but requiring three distinct
        disciplines across the area's final two course slots."""
        discipline = course_key.split()[0]
        disc = False
        # Enforce once the area is within its last two 3-unit slots.
        if total_area_units >= (total_units - 6):
            unique_disciplines = set(self.discipline_list)
            if len(unique_disciplines) < 3:
                if len(unique_disciplines) == 2:
                    self.discipline_list.append(discipline)
                # Bug fix: the original compared the *set* itself to 1
                # (`unique_disciplines == 1`), which is always False, so a
                # repeat of the sole discipline used was never rejected.
                elif len(unique_disciplines) == 1:
                    if discipline in unique_disciplines:
                        disc = True
                    else:
                        self.discipline_list.append(discipline)
                else:
                    self.discipline_list.append(discipline)
        return disc

    def major_courses_completed(self, area_name, total_units, number_of_disciplines=1):
        """Apply the student's completed courses to one requirement area.

        area_name: CSV column to read candidate courses from.
        total_units: units required for the area.  '' marks an area with no
            unit figure — TODO confirm callers never combine '' with course
            rows, since the `total_area_units < total_units` comparison
            below would then raise TypeError on Python 3.
        number_of_disciplines: 2 or 3 activates the distinct-discipline
            rules for the area's final slots.

        Returns (major_requirements_dict, major_course_dict,
        major_no_courses_requirement_dict) with this area filled in.
        """
        proficiency_list = ['Writing_Proficiency', 'Math_Proficiency', 'Health_Proficiency', 'Reading_Proficiency']
        major_requirements_dataframe = pd.read_csv(self.major_requirements)
        self.major_courses_list2 = []
        total_area_units = 0
        area_units_list = []
        ge_course_list = []
        self.major_requirements_dict[area_name] = total_units
        if total_units == '':
            pass
        else:
            # Roughly one course per 3 units, with a minimum of one course.
            if total_units < 3:
                self.major_no_courses_requirement_dict[area_name] = 1
            else:
                self.major_no_courses_requirement_dict[area_name] = total_units / 3
        disc = False
        self.discipline_list = []
        self.discipline_set = set()
        # GE courses already used (proficiencies aside): their units are kept
        # out of major_units_list to avoid double counting with GE.
        for key in self.completed_ge_courses:
            if key not in proficiency_list:
                ge_course_list.append(self.completed_ge_courses[key])
        for i in range(len(major_requirements_dataframe[area_name])):
            ge_course = False
            major_course = False
            if total_area_units < total_units:
                for course_key in self.revised_course_list:
                    if course_key == major_requirements_dataframe.loc[i, area_name]:
                        if course_key in ge_course_list:
                            ge_course = True
                        if course_key in self.major_courses_list:
                            major_course = True  # already applied to another area
                        if not major_course:
                            if number_of_disciplines > 1:
                                if number_of_disciplines == 2:
                                    disc = self._two_disciplines(course_key=course_key,
                                                                 total_area_units=total_area_units,
                                                                 total_units=total_units)
                                elif number_of_disciplines == 3:
                                    disc = self._three_disciplines(course_key=course_key,
                                                                   total_area_units=total_area_units,
                                                                   total_units=total_units)
                            if not disc:
                                self.area_units_dict[area_name] = self.revised_course_list[course_key]
                                self.major_courses_list.append(course_key)
                                self.major_courses_list2.append(course_key)
                                self.major_course_dict[area_name] = self.major_courses_list2
                                area_units_list.append(self.revised_course_list[course_key])
                                if not ge_course:
                                    self.major_units_list.append(self.revised_course_list[course_key])
                                total_area_units = sum(area_units_list)
        self.area_units_dict[area_name] = total_area_units
        return self.major_requirements_dict, self.major_course_dict, self.major_no_courses_requirement_dict
| [
"noreply@github.com"
] | fmixson.noreply@github.com |
4e303dd29190aa0d67c81ae34f62c8efeeaa0d0a | b17af89152a7305efb915da7c856c744e7dbd4f0 | /Dict_Project01_NumericIntegrasjon/F6_SendData.py | da51e5ba8dbeb3ed8133bee03f93e93dbcebc84e | [] | no_license | EspenEig/Bachelor2021-vedlegg | 6bb8e9efa84710500855f6129ce8f706b16bd690 | 2893f41e9e92f757360fe7d85dc03fd51d497f39 | refs/heads/main | 2023-05-01T12:34:45.403503 | 2021-05-12T16:31:59 | 2021-05-12T16:31:59 | 358,355,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import json
def SendData(robot, measurements, online):
data = {}
for key in measurements:
if key == "zeroTime" or key == "ts":
continue
data[key] = (measurements[key][-1])
if online:
msg = json.dumps(data)
robot["connection"].send(msg)
if not robot["connection"].recv(3) == b"ack":
print("No data ack")
robot["out_file"].write("{},{},{},{}\n".format(
data["light"],
data["time"],
data["flow"],
data["volume"]))
| [
"54357741+EspenEig@users.noreply.github.com"
] | 54357741+EspenEig@users.noreply.github.com |
31c16a62d12f6538275dc374ce02c654b07ba690 | 582b93ca3747f7ec4ce8c00464c26698b0b8b229 | /DevExa/settings.py | 658b53ef61b015873d139dc62dd14ca2e8e29f93 | [] | no_license | BeToOxX/Final | 7615e0e37c4ca8858687f0293b5058dc75d79a9c | f4371207836b4f7cd856c7237ada3cd60a597bce | refs/heads/master | 2023-08-23T18:06:00.585583 | 2021-10-07T06:01:50 | 2021-10-07T06:01:50 | 414,474,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | """
Django settings for DevExa project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment (e.g. decouple's config()) before deploying.
SECRET_KEY = '=g8j_n6d=)gf_b*vn4hlt%!v5#njdwz_x_u80roi@51qcfze52'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True together with ALLOWED_HOSTS=['*'] is a
# development-only configuration; both must be tightened for production.
DEBUG = True

ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.web'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'DevExa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['template'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DevExa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
import dj_database_url
from decouple import config
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"rubengza.98@gmail.com"
] | rubengza.98@gmail.com |
8de31727528745859574b0a71d4d7f4265c46740 | 2718b6f68a717b24cd6238a20d4116b3dea3201b | /BlogTemplate/mysite_env/mysite/apps/blog/views.py | 39b584eea388bcf248d6a6d595bae4840b4bf60b | [] | no_license | tminlun/BlogTemplate | e94654e01e170f27c97c197c898c102518ad13ab | d475587fdd9e111961bbfa56666255d38cfdc056 | refs/heads/master | 2022-12-11T00:51:53.019391 | 2018-12-05T14:54:04 | 2018-12-05T14:54:04 | 138,825,320 | 0 | 0 | null | 2022-12-08T02:25:29 | 2018-06-27T03:30:20 | Python | UTF-8 | Python | false | false | 4,519 | py | from django.shortcuts import render,get_object_or_404
from django.core.paginator import Paginator
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models.aggregates import Count
from read_statistics.utils import read_statistics_once_read
from comment.models import Comment
from .models import Blog,BlogType
#获取博客列表共同的数据,设置参数blog_all_list全部博客,因为每个方法都有不同的获取方法
def get_blog_list_common_data(request, blog_all_list):
    """Build the template context shared by every blog list page.

    blog_all_list is the (already filtered) queryset to paginate; the
    returned dict carries the current page, the page-number strip (with
    '...' gaps), per-type blog counts and per-month archive counts.
    """
    paginator = Paginator(blog_all_list, settings.EACH_PAGE_BLOG_NUMBER)  # EACH_PAGE_BLOG_NUMBER blogs per page
    page_num = request.GET.get('page', 1)  # page number from the GET query string
    page_of_blogs = paginator.get_page(page_num)  # Page object for that number
    current_page_num = page_of_blogs.number  # normalised current page number
    # Page strip: up to two pages either side of the current page, clamped
    # to [1, num_pages] by the max()/min() endpoint guards.
    page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \
        list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))
    # Insert '...' when the strip is not adjacent to the first page
    if page_range[0] - 1 >= 2:
        page_range.insert(0, '...')
    # ... and when the last shown page is >= 2 away from the final page
    if paginator.num_pages - page_range[-1] >= 2:
        page_range.append('...')
    # Always show the first and last page numbers themselves
    if page_range[0] != 1:
        page_range.insert(0, 1)  # prepend page 1 (insert puts it first)
    if page_range[-1] != paginator.num_pages:
        page_range.append(paginator.num_pages)  # append the final page number

    blog_dates = Blog.objects.dates('created_time','month',order="DESC")
    blog_dates_dict = {}
    for blog_date in blog_dates:
        date_count = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month).count()
        blog_dates_dict[blog_date] = date_count

    context = {}
    context['page_of_blogs'] = page_of_blogs  # current Page object
    context['page_range'] = page_range  # page-number strip for the template
    context['blogs'] = page_of_blogs.object_list  # blogs on this page
    # annotate() attaches a blog count to each BlogType; empty types hidden
    context['blog_types']=BlogType.objects.annotate(type_count = Count('blog')).filter(type_count__gt=0)
    # month -> number of blogs published that month (mind: counts, not dates)
    context['blog_dates'] = blog_dates_dict
    return context  # rendered by the caller via render(request, '?.html', context)
def blog_list(request):
    """Render the unfiltered, paginated list of all blogs."""
    context = get_blog_list_common_data(request, Blog.objects.all())
    return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_with_type_pk):
    """Render the blog list filtered to one BlogType (404 on a bad pk)."""
    blog_type = get_object_or_404(BlogType, pk=blog_with_type_pk)
    context = get_blog_list_common_data(request, Blog.objects.filter(blog_type=blog_type))
    context['blog_type'] = blog_type  # the type being listed
    return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
    """Render the blog list for a single year/month archive page."""
    month_blogs = Blog.objects.filter(created_time__year=year, created_time__month=month)
    context = get_blog_list_common_data(request, month_blogs)
    context['blog_with_date'] = "%s年%s月" % (year, month)  # heading label
    return render(request, 'blog/blogs_with_date.html', context)
# Blog detail page
def blog_detail(request, blog_pk):
    context = {}
    blog = get_object_or_404(Blog, pk = blog_pk)
    # One read per browser: the helper returns the cookie key to set and
    # only increments the counter when that cookie is absent.
    read_cookie_key = read_statistics_once_read(request, blog)
    blog_content_type = ContentType.objects.get_for_model(blog)
    comments = Comment.objects.filter(content_type=blog_content_type,object_id=blog.pk)
    context['blog'] = blog
    # Previous blog: created after this one (greater than: __gt=)
    context['previous_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
    # Next blog: created before this one (less than: __lt=)
    context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
    context['user'] = request.user
    context['comments'] = comments
    response=render(request, 'blog/blog_detail.html', context)
    # NOTE(review): 'ture' looks like a typo for 'true'; presumably only the
    # cookie's presence is checked — verify in read_statistics_once_read.
    # Gotcha: remember to pass a value here.
    response.set_cookie(read_cookie_key, 'ture')
    return response
| [
"1272443075@qq.com"
] | 1272443075@qq.com |
18c4b876571211b4d59ba56578c12df35106481c | 5f08d36d8cf92bff8c778eb4fa04e0df4b5768b1 | /Week10/CurveFitting.py | f1719852ab0bba0e27a75c1874c86a835d44f949 | [] | no_license | npilgram/PHYS202-S13 | ae22e5ced93fdedfe757187c8a364a9c3cb359a9 | 8ed9162d820e61aae624f5e646b894e83ce5faca | refs/heads/master | 2021-01-02T23:13:38.342579 | 2013-06-15T02:47:14 | 2013-06-15T02:47:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import numpy as np
def LinearLeastSquaresFit(x, y):
    """Fit y = m*x + b by ordinary least squares.

    Takes numpy arrays of (x, y) samples and returns (m, b, m_err, b_err):
    the slope and intercept of the best-fit line together with their
    one-sigma uncertainties.
    """
    mean_x = np.mean(x)
    mean_y = np.mean(y)
    mean_xx = np.mean(x * x)
    mean_xy = np.mean(x * y)
    var_x = mean_xx - mean_x ** 2  # spread of x about its mean
    m = (mean_xy - mean_x * mean_y) / var_x
    b = (mean_xx * mean_y - mean_x * mean_xy) / var_x
    residuals = y - (m * x + b)
    mean_rr = np.mean(residuals * residuals)
    dof_factor = 1 / (len(x) - 2.)  # two fitted parameters
    m_err = np.sqrt(dof_factor * mean_rr / var_x)
    b_err = np.sqrt(dof_factor * mean_rr * mean_xx / var_x)
    return (m, b, m_err, b_err)
| [
"npilgram@calpoly.edu"
] | npilgram@calpoly.edu |
94885939895e110e0050528f9b92b238256a9c00 | 39efbd67fa02ef628bd86051781145b77b8244d9 | /PLXs/086-01173-00 ok/CSVReader.py | 2965cac991750f4034087c8861c66e11bd242ba1 | [] | no_license | hbenr/ProjectBender | 9df2326df01ec04db93d2311e0107a5ac2706802 | c0432ae0a9ceaf6442f92f59805bdfbdddc2fd14 | refs/heads/master | 2021-05-29T19:36:07.437213 | 2015-09-17T20:02:07 | 2015-09-17T20:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: MAT.TE
#
# Created: 20/08/2015
# Copyright: (c) MAT.TE 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import csv
from UtilitiesHB import *
def cadFetch(BOARD_ORIGIN, CAD_FILENAME):
    """Parse the CAD placement CSV into (fiducials, devices) records.

    Rows before the 'Name' header are skipped; parsing stops at the first
    blank designator after the header.  Designators containing any tag in
    skip_tags (headers, test points, fiducials, connectors, ...) are not
    treated as placeable devices; designators containing 'F' become
    fiducial records instead.
    """
    skip_tags = ('H', 'TP', 'F', 'N', 'P', 'K', 'LED', 'MOV')
    fiducials = []
    devices = []
    in_table = False
    with open(CAD_FILENAME + '.csv', 'rb') as cad_file:
        for row in csv.reader(cad_file):
            designator = row[0]
            if designator == 'Name':
                in_table = True
                continue
            if not in_table:
                continue
            if designator != '' and not any(tag in designator for tag in skip_tags):
                devices.append(['d',
                                unitsConverter(int(row[2]), False, BOARD_ORIGIN, False),
                                unitsConverter(int(row[1]), False, BOARD_ORIGIN, True),
                                designator.lower(), 'n0000', row[3],
                                'partNo', 'f-1', designator.lower(), 'SHAPE'])
            elif 'F' in designator:
                fiducials.append(['f',
                                  unitsConverter(int(row[2]), False, BOARD_ORIGIN, False),
                                  unitsConverter(int(row[1]), False, BOARD_ORIGIN, True)])
            elif designator == '':
                break
    return fiducials, devices
def bomFetch(devices, BOM_FILENAME):
    """Copy part numbers from the BOM CSV onto matching device records.

    For every BOM row after the 'Part No' header, each designator listed in
    column 8 (expanded through deviceEnumerator) is matched against the
    device name in component[3]; matches receive the part number from
    column 4 in component[6].  Returns devices sorted by column 2.
    """
    header_seen = False
    with open(BOM_FILENAME + '.csv', 'rb') as bom_file:
        for row in csv.reader(bom_file):
            # The designator list is expanded for every row, mirroring the
            # original control flow (even before the header is reached).
            row_devices = []
            for token in row[8].split(','):
                row_devices.extend(deviceEnumerator(token))
            if row[0] == 'Part No':
                header_seen = True
                continue
            if header_seen and row[8] != '':
                for name in row_devices:
                    for component in devices:
                        if name.lower() == component[3]:
                            component[6] = row[4]
    return sorted(devices, key=lambda entry: int(entry[2]))
def main():
    # This module is a library; when run directly, pause so the user sees
    # the message.  NOTE(review): raw_input is Python 2 only.
    raw_input("Wrong file! Use plxHelper ")
if __name__ == '__main__':
main() | [
"MAT.TE@CTLMAT0301D.Robertshaw.com"
] | MAT.TE@CTLMAT0301D.Robertshaw.com |
b29d6c67789222e938357e11fa5b9b8b77863402 | 6bbe91ea2ebc098b7b7ea5179761f7852875b3f6 | /pugbot.py | 12aea378d489f64a42a51b0e33ce7a3006854f7e | [] | no_license | dpyro/pugbot | 531c1c83ceac8d8ae937247af2d1221752c35f13 | 1e8f1b30de11f98be9ee53c2128104758f997799 | refs/heads/master | 2021-01-20T13:49:58.008215 | 2010-08-27T19:33:28 | 2010-08-27T19:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,453 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: enc=utf-8
from __future__ import print_function
from sys import stderr
import logging
import re
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, task
from pugdata import *
from pugserver import public_ip
def connectSSL(irc_server, irc_port, app):
    """Connect the bot to an IRC server over SSL/TLS."""
    # Bug fix: `ssl` was never imported at module level, so this function
    # raised NameError when called.  Use Twisted's ssl wrapper locally.
    from twisted.internet import ssl
    f = PugBotFactory(app)
    reactor.connectSSL(irc_server, irc_port, f, ssl.ClientContextFactory())
def connectTCP(irc_server, irc_port, app):
    """Connect the bot to an IRC server over a plain TCP socket."""
    factory = PugBotFactory(app)
    reactor.connectTCP(irc_server, irc_port, factory)
# needed for @command decorator
_commands = {}
class PugBot(irc.IRCClient):
_re_stripper = re.compile("""[\x0f\x02\x1f\x16\x1d\x11] | # formatting
\x03(?:\d{1,2}(?:,\d{1,2})?)? | # mIRC colors
\x04[0-9a-fA-F]{0,6} # rgb colors
""", re.UNICODE | re.VERBOSE)
    @staticmethod
    def _strip_all(str):
        # Remove every mIRC formatting/colour control code matched by
        # _re_stripper, leaving only the plain text.
        return PugBot._re_stripper.sub('', str)
    @staticmethod
    def _has_color(str):
        # True when the string carries any mIRC formatting codes, i.e.
        # stripping them would change the string.
        str_strip = PugBot._strip_all(str)
        return str != str_strip
MSG_INFO = 0x1
MSG_CONFIRM = 0x2
MSG_ERROR = 0x3
def __init__(self, app):
self.app = app
self.nickname = app.irc_nick
self.password = app.irc_pass
self.color = app.irc_color
self.lineRate = .75
self.versionName = 'PugBot'
self.keep_alive = task.LoopingCall(self._ping)
self.nickmodes = {}
self.users = {} # (nick, PugUser)
self.logger = logging.getLogger("PugApp.PugBot")
def _colorize(self, str, type):
color_dict = {
self.MSG_ERROR : '\x02\x035,01',
self.MSG_INFO : '\x02\x030,01',
self.MSG_CONFIRM : '\x02\x033,01'
}
color_reset = '\x0f'
if self.color:
# only automatically color if no (custom) color formatting is already present
str = color_dict.get(type, '') + str + color_reset if not self._has_color(str) else str + color_reset
else:
str = self._strip_all(str)
return str
# overrides
def msg(self, user, message, type=None):
message_stripped = self._strip_all(message)
log_message = u"{0} (msg) ← {1}".format(user, message_stripped)
self.logger.info(log_message) if user != self.app.irc_server else self.logger.debug(log_message)
if type is not None:
message = self._colorize(message, type)
nick = PugBot._get_nick(user)
irc.IRCClient.msg(self, nick, message)
def notice(self, user, message, type=None):
message_stripped = self._strip_all(message)
self.logger.info(u"{0} (notice) ← {1}".format(user, message_stripped))
if type is not None:
message = self._colorize(message, type)
nick = PugBot._get_nick(user)
irc.IRCClient.notice(self, nick, message)
def describe(self, channel, action):
self.logger.info("{0} (action) ← {1}".format(channel, action))
irc.IRCClient.describe(self, channel, action)
def whois(self, nickname, server=None):
self.logger.debug(u"Requested WHOIS {0}".format(nickname))
irc.IRCClient.whois(self, nickname, server)
# callbacks
def signedOn(self):
self.logger.info(u"Signed onto IRC network {0}:{1}".format(self.app.irc_server, self.app.irc_port))
self._nickserv_login()
self.join(self.app.irc_channel)
self.keep_alive.start(100)
def joined(self, channel):
self.app.print_irc("* joined channel {0}".format(channel))
self.logger.info(u"Joined channel {0}".format(channel))
self._who(channel)
self.whois(self.app.irc_nick)
def left(self, channel):
self.app.print_irc("* left channel {0}".format(channel))
self.logger.info(u"Left channel {0}".format(channel))
self.nickmodes.clear()
self.users.clear()
def kickedFrom(self, channel, kicker, message):
self.logger.warning(u"Kicked from {0} by {1} ({2})".format(channel, kicker, message))
self.nickmodes.clear()
self.users.clear()
task.deferLater(reactor, 5.0, self.join, self.app.irc_channel)
def nickChanged(self, nick):
self.logger.warning(u"Nick changed to: {0}".format(nick))
def privmsg(self, user, channel, msg):
msg = self._strip_all(msg)
self.logger.info(u":{0} (msg) → {1}: {2}".format(user, channel, msg))
cmd = msg.split(' ', 1)[0].lower()
nick = PugBot._get_nick(user)
if cmd in _commands:
cmd_f, cmd_access = _commands[cmd]
if cmd_access is None:
cmd_f(self, user, channel, msg)
elif nick not in self.users:
self.whois(nick)
self.notice(user, "Refreshing access list, please try again shortly.", self.MSG_ERROR)
elif self.users[nick].irc_access >= cmd_access:
cmd_f(self, user, channel, msg)
else:
self.notice(user, "You don't have access to this command!", self.MSG_ERROR)
def noticed(self, user, channel, msg):
self.logger.info(u"{0} (notice) → {1}: {2}".format(user, channel, msg))
def action(self, user, channel, data):
self.logger.info(u"{0} (action) → {1}: {2}".format(user, channel, msg))
def _purge_user(self, user, reason):
self.logger.info(u"{0}: {1}".format(user, reason))
nick = PugBot._get_nick(user)
if nick in self.users:
p_user = self.users[nick]
if p_user in self.app.players:
self.app.remove(p_user)
self.logger.debug(u"Removed user {0} from game ({1})".format(nick, reason))
self._list_players(channel)
del self.users[nick]
def userLeft(self, user, channel):
reason = u"left {0}".format(channel)
if channel.lower() == self.app.irc_channel:
self._purge_user(user, reason)
def userQuit(self, user, quitMessage):
reason = u"quit ({0})".format(quitMessage)
self._purge_user(user, reason)
def userKicked(self, kickee, channel, kicker, message):
reason = u"kicked by {0} in {1} ({2})".format(kicker, channel, message)
if channel.lower() == self.app.irc_channel:
self._purge_user(kickee, reason)
def userRenamed(self, oldname, newname):
if oldname in self.users:
p_user = self.users[oldname]
p_user.irc_name = newname
self.db_session.add(p_user)
self.db_session.commit()
self.users[newname] = p_user
del self.users[oldname]
self.logger.info(u"User renamed: {0} → {1}".format(oldname, newname))
def modeChanged(self, user, channel, set, modes, args):
if channel.lower() == self.app.irc_channel:
self._who(channel)
mode_prefix = '+' if set else '-'
for mode, arg in zip(modes, args):
self.logger.debug(u"{0} → {1} mode change: {2}{3} {4}".format(
user, channel, mode_prefix, mode, arg))
def pong(self, user, secs):
self.logger.debug(u"{0} (pong) ← {1}".format(user, secs))
def irc_RPL_WHOREPLY(self, prefix, args):
me, chan, uname, host, server, nick, modes, name = args
log_msg = u"Recieved WHOREPLY: chan: {0}, uname: {1}, host: {2}, server: {3}, nick: {4}, modes: {5}, name: {6}".format(
chan, uname, host, server, nick, modes, name)
self.logger.debug(log_msg)
if chan.lower() == self.app.irc_channel:
access = PugBot._get_access(modes)
self.nickmodes[nick] = access
self.logger.debug(u"Set {0} to access level {1}".format(nick, access))
def irc_RPL_ENDOFWHO(self, prefix, args):
self.logger.debug(u"Recieved WHO list: {0}".format(args))
def irc_RPL_WHOISUSER(self, prefix, args):
self.logger.debug(u"WHOIS list: {0}".format(args))
def irc_RPL_WHOISACCOUNT(self, prefix, args):
me, nick, account, msg = args
self.logger.debug(u"WHOIS account: nick: {0}, account {1}".format(nick, account))
if nick in self.users:
self.users[nick].irc_account = account
else:
p_user = PugUser(nick, account)
self.users[nick] = p_user
def irc_RPL_ENDOFWHOIS(self, prefix, args):
self.logger.debug(u"Recieved WHOIS: {0}".format(args))
    @staticmethod
    def _get_nick(user):
        # A full IRC hostmask is "nick!user@host"; keep only the nick part.
        return user.split('!', 1)[0]
@staticmethod
def _get_access(modes):
mode_dict = {
'@': PugUser.IRC_OP,
'+': PugUser.IRC_VOICED
}
for key, val in mode_dict.iteritems():
if key in modes:
return val
return PugUser.IRC_USER
def _who(self, channel):
msg = 'WHO {0}'.format(channel.lower())
self.logger.debug(u"Requested {0}".format(msg))
self.sendLine(msg)
def _ping(self):
self.ping(self.app.irc_server)
def _nickserv_login(self):
self.msg('NickServ@services.', 'IDENTIFY {0} {1}'.format(self.nickname, self.password))
def _authserv_login(self):
self.msg('AuthServ@services.', 'AUTH {0} {1}'.format(self.nickname, self.password))
def _list_players(self, channel):
players = self.app.players
if len(players) == 0:
self.msg(channel, "No players are currently signed up.", self.MSG_INFO)
else:
player_list = ', '.join((p.irc_nick for p in self.app.players))
suffix = 's' if len(self.app.players) != 1 else ''
self.msg(channel, "{0} player{1}: {2}".format(len(players), suffix, player_list), self.MSG_INFO)
def _teams(self, channel):
team1, team2 = self.app.teams()
team1 = ', '.join((p.irc_nick for p in team1))
team2 = ', '.join((p.irc_nick for p in team2))
self.msg(channel, "10,01BLU Team: {0}".format(team1))
self.msg(channel, "05,01RED Team: {0}".format(team2))
msg_red = "You have been assigned to RED team. Connect as soon as possible to {0}:{1}".format(
self.app.rcon_server, self.app.rcon_port)
msg_blu = "You have been assigned to BLU team. Connect as soon as possible to {0}:{1}".format(
self.app.rcon_server, self.app.rcon_port)
[self.msg(p.irc_nick, msg_red, MSG_INFO) for p in team1]
[self.msg(p.irc_nick, msg_blu, MSG_INFO) for p in team2]
class command(object):
    """Decorator that registers a handler as a chat command.

    Usage: @command('!name', access) or @command(['!a', '!b'], access).
    Registration stores (function, required_access) in the module-level
    _commands table, keyed by the lower-cased trigger.
    """
    def __init__(self, name, access=None):
        self.name = name
        self.access = access

    def __call__(self, f):
        global _commands
        if not isinstance(self.name, str):
            # A sequence of aliases: register each one.
            for name in self.name:
                name = name.lower()
                _commands[name] = (f, self.access)
        else:
            name = self.name.lower()
            _commands[name] = (f, self.access)
        # NOTE(review): exec_cmd becomes the decorated attribute, but the
        # *original* f is what was registered above, so this wrapper never
        # runs through the dispatcher.  It also calls f(args) with a single
        # tuple and references Fore (colorama?) and self.logger, neither of
        # which is in scope here — confirm intent before relying on it.
        def exec_cmd(*args):
            try:
                f(args)
            except Exception as e:
                print(Fore.RED + e, file=stderr)
                self.logger.exception(e)
        return exec_cmd
# commands
@command('!startgame', PugUser.IRC_OP)
def cmd_startgame(self, user, channel, msg):
self.app.startgame()
self.msg(channel, "Game started. Type !add to join the game.", self.MSG_INFO)
@command([ '!add', '!a' ], PugUser.IRC_USER)
def cmd_join(self, user, channel, msg):
nick = PugBot._get_nick(user)
p_user = self.users[nick]
if self.app.game is not None:
if p_user not in self.app.players:
self.app.add(p_user)
self.notice(user, "You successfully added to the game.", self.MSG_CONFIRM)
if len(self.app.players) >= 12:
self._teams(channel)
else:
self._list_players(channel)
else:
self.notice(user, "You have already signed up for the game!", self.MSG_ERROR)
else:
self.notice(user, "There is no active game to sign up for!", self.MSG_ERROR)
@command('!join')
def cmd_add(self, user, channel, msg):
self.notice(user, "Please use !add instead.", self.MSG_ERROR)
@command([ '!remove', '!r' ], PugUser.IRC_USER)
def cmd_remove(self, user, channel, msg):
nick = PugBot._get_nick(user)
p_user = self.users[nick]
if p_user in self.app.players:
self.app.remove(p_user)
self.notice(user, "You successfully removed from the game.", self.MSG_CONFIRM)
self._list_players(channel)
else:
self.notice(user, "You are not in the game!", self.MSG_ERROR)
@command(('!players', '!p'))
def cmd_list(self, user, channel, msg):
if self.app.game is None:
self.msg(channel, "There is no game running currently.", self.MSG_INFO)
else:
self._list_players(channel)
@command('!endgame', PugUser.IRC_OP)
def cmd_endgame(self, user, channel, msg):
if self.app.game is not None:
self.app.endgame()
self.msg(channel, "Game ended.", self.MSG_INFO)
else:
self.notice(user, "There is no game to be ended!", self.MSG_ERROR)
@command('!server')
def cmd_server(self, user, channel, msg):
info = self.app.serverinfo()
self.msg(channel, "connect {0}:{1};".format(self.app.rcon_server, info['port']), self.MSG_INFO)
#TODO: Why does it give key errors when using format()?
self.msg(channel, "%(map)s | %(numplayers)s / %(maxplayers)s | stv: %(specport)s" % (info), self.MSG_INFO)
@command('!mumble')
def cmd_mumble(self, user, channel, msg):
self.msg(channel, ("Mumble is the shiniest new voice server/client used by players to communicate with each other.\n"
"It's not laggy as hell like Ventrilo and has a sweet ingame overlay. Unfortunately, Europeans use it.\n"
"Mumble IP: {0} port: {1}").format(self.app.mumble_server, self.app.mumble_port), self.MSG_INFO)
@command('!version')
def cmd_version(self, user, channel, msg):
self.msg(channel, "PugBot: 3alpha", self.MSG_INFO)
@command('!bear')
def cmd_bear(self, user, channel, msg):
self.describe(channel, "goes 4rawr!", self.MSG_INFO)
@command('!magnets')
def cmd_magnets(self, user, channl, msg):
self.msg(channel, "What am I, a scientist?", self.MSG_INFO)
@command('!rtd')
def cmd_rtd(self, user, channel, msg):
nick = PugBot._get_nick(user)
self.msg(channel, "Don't be a noob, {0}.".format(nick), self.MSG_INFO)
@command('!whattimeisit')
def cmd_whattimeisit(self, user, channel, msg):
nick = PugBot._get_nick(user)
self.msg(channel, "Go back to #tf2.pug.na, {0}.".format(nick))
class PugBotFactory(protocol.ReconnectingClientFactory):
    """Reconnecting client factory that produces PugBot protocol instances."""
    protocol = PugBot

    def __init__(self, app):
        self.app = app
        self.logger = logging.getLogger("PugApp.PugBot")

    def buildProtocol(self, addr):
        """Create a connected PugBot and reset the reconnect back-off."""
        self.resetDelay()
        p = PugBot(self.app)
        p.factory = self
        return p

    def clientConnectionLost(self, connector, reason):
        """An established connection dropped: log and let the base class retry."""
        msg = "connection lost, reconnecting: {0}".format(reason)
        self.app.print_irc(msg)
        self.logger.error(msg)
        protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)

    def clientConnectionFailed(self, connector, reason):
        """A connection attempt never succeeded: log and let the base class retry."""
        msg = "connection failed: {0}".format(reason)
        self.app.print_irc(msg)
        self.logger.error(msg)
        # Bug fix: the original delegated to clientConnectionLost here,
        # bypassing ReconnectingClientFactory's bookkeeping for failed
        # (never-established) connection attempts.
        protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
| [
"darkpyro@gmail.com"
] | darkpyro@gmail.com |
beff39c0b7dcb6bdf841867e852ac3a4d5057438 | 42dc79035b8488b59374a44ee87136d9fd56bdb3 | /30-Day-Challange/Day-7/count_negative_sorted-2.py | 1903e4b1dc070af2101c398bcd4be981714a4312 | [
"Apache-2.0"
] | permissive | EashanKaushik/LeetCode | f8e655b8a52fa01ef5def44b18b2352875bb7ab8 | 8ee2a61cefa42b332b6252fafff4a2772d25aa31 | refs/heads/main | 2023-06-06T17:15:54.218097 | 2021-07-06T11:46:35 | 2021-07-06T11:46:35 | 371,109,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | class Solution:
def countNegatives(self, grid):
    """Count negative numbers in a matrix sorted non-increasingly by row
    and by column (LeetCode 1351).

    Staircase walk from the top-right corner: O(rows + cols) instead of
    scanning every cell.
    """
    rows, cols = len(grid), len(grid[0])
    r, c = 0, cols - 1
    total = 0
    while r < rows and c >= 0:
        if grid[r][c] < 0:
            # Column is sorted, so every cell below this one is negative too.
            total += rows - r
            c -= 1
        else:
            r += 1
    return total
# Ad-hoc smoke test (LeetCode 1351 sample input; expected output: 8).
s = Solution()
print(s.countNegatives([[4,3,2,-1],[3,2,1,-1],[1,1,-1,-2],[-1,-1,-2,-3]]))
| [
"EashanK16@gmail.com"
] | EashanK16@gmail.com |
341fc6379af0b753834833efa91503520488d7fa | a3aaf7bb73471c67d4adc40aee159e60e7fc964b | /face_pic.py | f2573255284ef455f037aef1a2398b61799bfee2 | [] | no_license | muskaankularia/Gaze-tracker | 29360516efbe94a8ef19aeefa8805db7224b15df | dcae52f85d486ce0f8ec1566814be7136c97df38 | refs/heads/master | 2021-08-22T05:53:12.770849 | 2017-11-29T12:09:06 | 2017-11-29T12:09:06 | 112,461,356 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,519 | py | import numpy as np
import cv2
import sys
import os
import shutil
# NOTE(review): 'timm' appears to be a project-local helper module (its
# findEyeCenter is referenced in commented-out code below); it is unused
# at runtime in this script — confirm before removing.
import timm
# Start with a clean ./data directory for the per-frame debug dumps.
if os.path.exists('./data'):
    shutil.rmtree('./data')
dirname = 'data'
os.mkdir(dirname)
# Haar cascades for face/eye/mouth detection (Homebrew OpenCV 3.3 paths).
face_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('//usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_eye.xml')
# mouth_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml')
mouth_cascade = cv2.CascadeClassifier('./haarcascade_mcs_mouth.xml')
# if len(sys.argv) < 2:
#     sys.exit('Wrong Usage')
# image_name = sys.argv[1]
# img = cv2.imread(image_name)
# Default webcam; annotated frames are also written to sample.avi (3 fps).
camera = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('sample.avi',fourcc, 3, (1280,720))
counter = 0
# 3x3 structuring element used by the erode/morphology steps below.
kernel = np.ones((3,3),np.uint8)
# Main capture loop: detect faces, then eyes, then try to localise the
# pupil inside each eye ROI; runs until ESC is pressed.
# (Python 2 script: note the print statements below.)
while 1:
    retval, img = camera.read()
    # print img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # print 'y'
    # Face detection on the grayscale frame (scale 1.3, 5 min-neighbours).
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        # print 'face found'
        cv2.rectangle(img, (x,y), (x+w, y+h), 0, 2)
        # Restrict further detection to the face region (gray + colour views).
        roi_face = gray[y:y+h, x:x+w]
        roi_face_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_face, 1.3, 5)
        for (ex, ey, ew, eh) in eyes:
            counter += 1
            cv2.rectangle(roi_face_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 2)
            # print "eye " + str(ex) + " " + str(ey)
            # roi_eye = roi_face[int(1.2*ey):int(0.8*(ey+eh)), int(1.2*ex):int(0.8*(ex+ew))]
            roi_eye = roi_face[ey:ey+eh, ex:ex+ew]
            center = 0
            # Denoise, then sharpen (unsharp-mask style weighted sum).
            roi_eye = cv2.GaussianBlur(roi_eye,(3,3),0)
            roi_eye = cv2.addWeighted(roi_eye,1.5,roi_eye,-0.5,0)
            # Debug dumps of several edge detectors for visual comparison.
            roi_eye_canny = cv2.Canny(roi_eye,100,200)
            cv2.imwrite('./data/canny' + str(counter) + '.png', roi_eye_canny)
            laplacian = cv2.Laplacian(roi_eye,cv2.CV_64F)
            cv2.imwrite('./data/lapla' + str(counter) + '.png', laplacian)
            # res = cv2.resize(roi_eye,(int(ew/2), int(eh/2)), interpolation = cv2.INTER_AREA)
            # Sobel gradients in x and y, rectified and cast to uint8.
            roi_eyex = cv2.Sobel(roi_eye, cv2.CV_64F, 1, 0, ksize=3)
            roi_eyey = cv2.Sobel(roi_eye, cv2.CV_64F, 0, 1, ksize=3)
            roi_eyex = np.absolute(roi_eyex)
            roi_eyey = np.absolute(roi_eyey)
            roi_eyex = np.uint8(roi_eyex)
            roi_eyey = np.uint8(roi_eyey)
            # sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
            # abs_sobel64f = np.absolute(sobelx64f)
            # sobel_8u = np.uint8(abs_sobel64f)
            cv2.imwrite('./data/zsobely' + str(counter) + '.png', roi_eyey)
            cv2.imwrite('./data/zsobelx' + str(counter) + '.png', roi_eyex)
            ret, tmp = cv2.threshold(roi_eyex, 0, 255, cv2.THRESH_OTSU)
            tmp = cv2.erode(tmp, kernel, iterations=1)
            cv2.imwrite('./data/zsobelxt' + str(counter) + '.png', tmp)
            # Gradient magnitude, normalised to the full 0-255 range.
            mag = np.hypot(roi_eyex, roi_eyey) # magnitude
            mag *= 255.0 / np.max(mag) # normalize (Q&D)
            roi_eye_sobel = mag.astype(np.uint8)
            # roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_OPEN, kernel)
            cv2.imwrite('./data/xy' + str(counter) + '.png', roi_eye_sobel)
            # roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_OPEN, kernel)
            # roi_eye_sobel = cv2.erode(roi_eye_sobel, kernel, iterations = 1)
            # roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_CLOSE, kernel)
            # Otsu binarisation + erosion before the Hough circle transform.
            ret, roi_eye_sobel = cv2.threshold(roi_eye_sobel, 0, 255, cv2.THRESH_OTSU)
            roi_eye_sobel = cv2.erode(roi_eye_sobel, kernel, iterations=1)
            cv2.imwrite('./data/tempthresh' + str(counter) + '.png', roi_eye_sobel)
            roi_eye_color = roi_face_color[ey:ey+eh, ex:ex+ew]
            # center = timm.findEyeCenter(roi_eye_color, (0,0))
            # cv2.circle(roi_eye_color, center, 5, (255, 255, 255), 2)
            # Pupil radius assumed to lie between ew/11 and ew/3 pixels.
            pupils = cv2.HoughCircles(roi_eye_sobel, cv2.HOUGH_GRADIENT, 1, 100, param1 = 100, param2 = 10, minRadius=int(ew/11), maxRadius=int(ew/3))
            if pupils is not None:
                # print 'not none'
                pupils = np.round(pupils[0,:]).astype("int")
                # NOTE(review): this loop's x/y shadow the face-loop (x, y).
                for (x,y,r) in pupils:
                    print str(x) + " " + str(y) + " " + str(r) + " --- " + str(counter) + " " + str(int(ew/11)) + "-" + str(int(ew/3))
                    # cv2.circle(roi_eye_color, (x, y), r, (255, 165, 0), 2)
                    cv2.circle(roi_eye_color, (x, y), 2, (255, 165, 0), 3)
            # cv2.imshow('eye' + str(x), roi_eye_color)
            # print roi_eye_sobel.shape
            # print roi_eye_color.shape
            # Side-by-side debug image: colour eye ROI | binarised edge map.
            comb = np.zeros(shape=(roi_eye_color.shape[0], roi_eye_color.shape[1]*2, roi_eye_color.shape[2]), dtype=np.uint8)
            comb[:roi_eye_color.shape[0], :roi_eye_color.shape[1]] = roi_eye_color
            comb[:roi_eye_sobel.shape[0], roi_eye_sobel.shape[1]:] = roi_eye_sobel[:, :, None]
            # cat = np.concatenate([roi_eye_sobel, roi_eye_color])
            cv2.imwrite('./data/eye' + str(counter) + '.png', comb)
            # cv2.moveWindow('eye' + str(x), 1000, 100)
            # cv2.resizeWindow('eye' + str(x), eh*2, ew*2)
        # mouths = mouth_cascade.detectMultiScale(roi_face, 1.7, 11)
        # for (mx, my, mw, mh) in mouths:
        #     cv2.rectangle(roi_face_color, (mx, my), (mx+mw, my+mh), (0, 0, 0), 2)
        #     roi_mouth = roi_face[my:my+mh, mx:mx+mw]
        #     roi_mouth_color = roi_face_color[my:my+mh, mx:mx+mw]
        #     roi_mouth = cv2.cornerHarris(roi_mouth, 2, 3, 0.04)
        #     roi_mouth = cv2.dilate(roi_mouth, None)
        #     roi_mouth_color[roi_mouth>0.01*roi_mouth.max()]=[0,0,255]
    out.write(img)
    cv2.imshow('test', img)
    # cv2.imshow('bhawsar', gray)
    # cv2.moveWindow('bhawsar', 800,100)
    # ESC (27) exits the loop.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
# Release the webcam and the video writer handles before exiting.
camera.release()
out.release()
# cv2.waitKey(0)
cv2.destroyAllWindows() | [
"noreply@github.com"
] | muskaankularia.noreply@github.com |
ee56c5923d4e412ecef2b0e9a6abc6e9db42e260 | 1f60222696b27d1a0f93282c73f72f6870c0c7d6 | /alpha_transform/AlphaTransformUtility.py | 6c4cbca2cb07bcccddf7a558df7b93567d90c79c | [
"MIT"
] | permissive | dedale-fet/alpha-transform | 7ff3d5859ccf4924170894a9eb030fa8ac4da099 | 41b4fb0b28b908391f9ddf17cdcde8b956d3d064 | refs/heads/master | 2021-01-13T14:28:57.702914 | 2020-08-04T15:40:22 | 2020-08-04T15:40:22 | 72,874,669 | 14 | 7 | null | 2020-07-19T18:47:18 | 2016-11-04T18:26:33 | Python | UTF-8 | Python | false | false | 11,097 | py | r"""
This module contains several utility functions which can be used e.g.
for thresholding the alpha-shearlet coefficients or for using the
alpha-shearlet transform for denoising.
Finally, it also contains the functions :func:`my_ravel` and :func:`my_unravel`
which can be used to convert the alpha-shearlet coefficients into a
1-dimensional vector and back. This is in particular convenient for the
subsampled transform, where this conversion is not entirely trivial, since the
different "coefficient images" have varying dimensions.
"""
import os.path
import math
import numpy as np
import numexpr as ne
import scipy.ndimage
def find_free_file(file_template):
    r"""
    Return the first nonexistent ("free") file name obtained by counting
    upwards with the given template.

    **Required Parameter**

    :param string file_template:
        A string whose ``format()`` method can be called with a single
        integer, e.g. ``'/home/test_{0:0>2d}.txt'``. The candidates
        ``file_template.format(0)``, ``file_template.format(1)``, ... are
        checked for existence in turn.

    **Return value**

    :return:
        ``file_template.format(i)`` for the smallest ``i`` whose
        corresponding file does not yet exist.
    """
    index = 0
    candidate = file_template.format(index)
    while os.path.isfile(candidate):
        index += 1
        candidate = file_template.format(index)
    return candidate
def threshold(coeffs, thresh_value, mode):
    r"""
    Given a set of coefficients, this function performs a thresholding
    procedure, i.e., either soft or hard thresholding.

    **Required parameters**

    :param coeffs:
        The coefficients to be thresholded.
        Either a three-dimensional :class:`numpy.ndarray` or a generator
        producing two dimensional :class:`numpy.ndarray` objects.

    :param float thresh_value:
        The thresholding cutoff :math:`c` for the coefficients, see also
        ``mode`` for more details.

    :param string mode:
        Either ``'hard'`` or ``'soft'``. This parameter determines whether
        the hard thresholding operator

        .. math::
            \Lambda_cx
            =\begin{cases}
            x, & \text{if }|x|\geq c,\\
            0, & \text{if }|x|<c,
            \end{cases}

        or the soft thresholding operator

        .. math::
            \Lambda_cx
            =\begin{cases}
            x\cdot \frac{|x|-c}{|x|}, & \text{if }|x|\geq c,\\
            0, & \text{if }|x|<c
            \end{cases}

        is applied to each entry of the coefficients.

    **Return value**

    :return:
        A generator producing the thresholded coefficients. Each
        thresholded "coefficient image", i.e., each thresholded
        2-dimensional array, is produced in turn.

    :raises ValueError:
        (on first iteration, since this is a generator) if ``mode`` is
        neither ``'hard'`` nor ``'soft'``.
    """
    # numexpr evaluates the expression strings over the *local* variables
    # 'coeff' and 'thresh_value' in compiled vector code; real(abs(.))
    # makes the comparison well-defined for complex-valued coefficients.
    # The commented numpy lines give the equivalent (slower) computation.
    if mode == 'hard':
        for coeff in coeffs:
            ev_string = 'coeff * (real(abs(coeff)) >= thresh_value)'
            yield ne.evaluate(ev_string)
            # yield coeff * (np.abs(coeff) >= thresh_value)
    elif mode == 'soft':
        for coeff in coeffs:
            # |coeff| - c, clamped at 0 (the shrunken magnitudes).
            ev_string = ('(real(abs(coeff)) - thresh_value) * '
                         '(real(abs(coeff)) >= thresh_value)')
            large_values = ne.evaluate(ev_string)
            # large_values = np.maximum(np.abs(coeff) - thresh_value, 0)
            # Rescale coeff by (|coeff|-c)/|coeff|, preserving the phase.
            ev_str_2 = 'coeff * large_values / (large_values + thresh_value)'
            yield ne.evaluate(ev_str_2)
            # yield coeff * large_values / (large_values + thresh_value)
    else:
        raise ValueError("'mode' must be 'hard' or 'soft'")
def scale_gen(trafo):
    r"""
    Yield the scale of each alpha-shearlet associated to ``trafo``.

    The first produced value is -1, the conventional "scale" of the
    low-pass part; every following value is the scale (first entry of the
    index tuple) of the corresponding alpha-shearlet, so that

        for scale, c in zip(scale_gen(trafo), trafo.transform(im)):
            ...

    pairs each coefficient image with its scale.

    :param trafo:
        An object of class :class:`AlphaTransform.AlphaShearletTransform`.
    :return:
        A generator producing integers, starting from -1.
    """
    remaining = iter(trafo.indices)
    next(remaining)  # skip the low-pass entry; it gets scale -1 instead
    yield -1
    for idx in remaining:
        yield idx[0]
def denoise(img, trafo, noise_lvl, multipliers=None):
    r"""
    Denoise *img* by hard-thresholding its alpha-shearlet coefficients.

    The procedure: (1) compute scale-dependent cutoffs
    :math:`c_j = m_j \cdot \lambda / \sqrt{N_1 N_2}` where :math:`m_j`
    is the multiplier for scale *j*, :math:`\lambda` is ``noise_lvl``
    and :math:`N_1 \times N_2` are the image dimensions; (2) take the
    alpha-shearlet transform via ``trafo``; (3) zero out every
    coefficient whose magnitude is below its scale's cutoff; (4) return
    the (pseudo-)inverse transform of the thresholded coefficients.

    :param numpy.ndarray img:
        The "image" (2-dimensional array) to denoise. Its dimensions must
        match those of ``trafo``.
    :param trafo:
        An object of class :class:`AlphaTransform.AlphaShearletTransform`
        used for the (inverse) transform.
    :param float noise_lvl:
        The (presumed) :math:`\ell^2` noise level present in ``img``.
    :param list multipliers:
        Optional per-scale multipliers; ``multipliers[j + 1]`` determines
        the cutoff for scale ``j`` (index 0 is the low-pass part). Must
        have ``trafo.num_scales + 1`` entries. Defaults to
        ``[3] * trafo.num_scales + [4]``.
    :return:
        The denoised image as a 2-dimensional :class:`numpy.ndarray`.
    """
    if multipliers is None:
        multipliers = [3] * trafo.num_scales + [4]
    pixel_count = trafo.width * trafo.height
    cutoffs = [factor * noise_lvl / math.sqrt(pixel_count)
               for factor in multipliers]
    # Both generators are lazy: coefficients are produced, thresholded and
    # consumed one image at a time inside inverse_transform.
    coeff_stream = trafo.transform_generator(img, do_norm=True)
    thresholded = (c * (np.abs(c) >= cutoffs[scale + 1])
                   for (c, scale) in zip(coeff_stream, scale_gen(trafo)))
    return trafo.inverse_transform(thresholded, real=True, do_norm=True)
def image_load(path):
    r"""
    Load a '.npy' or '.png' file and return its content as a
    two-dimensional :class:`numpy.ndarray` of :class:`float` values.

    For '.png' images, the pixel values are normalized to be between 0
    and 1 (instead of between 0 and 255) and color images are converted
    to grey-scale.

    **Required parameter**

    :param string path:
        Path to the image to be loaded, either a '.png' or '.npy' file.

    **Return value**

    :return:
        The loaded image as a two-dimensional :class:`numpy.ndarray`.

    :raises ValueError: if ``path`` has any other extension.
    """
    # os.path.splitext handles dot-less paths correctly (empty extension),
    # unlike the previous rfind('.')-based slicing which produced garbage
    # (rfind returns -1, slicing off just the last character) in that case.
    image_extension = os.path.splitext(path)[1]
    if image_extension == '.npy':
        return np.array(np.load(path), dtype='float64')
    elif image_extension == '.png':
        # NOTE(review): scipy.ndimage.imread is deprecated and was removed
        # in scipy 1.2; newer environments need imageio.imread instead.
        return np.array(scipy.ndimage.imread(path, flatten=True) / 255.0,
                        dtype='float64')
    else:
        raise ValueError("This function can only load .png or .npy files.")
def _print_listlist(listlist):
for front, back, l in zip(['['] + ([' '] * (len(listlist) - 1)),
([''] * (len(listlist) - 1)) + [']'],
listlist):
print(front + str(l) + back)
def my_ravel(coeff):
    r"""
    Flatten alpha-shearlet coefficients into a single 1-dimensional array.

    The subsampled transform yields a list of differently sized 2-D
    arrays (and the fully sampled transform a 3-D array); this function
    concatenates the flattened pieces into one 1-D vector.

    Use :func:`my_unravel` to invert the conversion:
    ``my_unravel(my_trafo, my_ravel(coeff))`` reproduces ``coeff``
    (as a generator) when ``coeff`` came from ``my_trafo.transform(im)``.

    :param list coeff:
        A list (or generator) of two-dimensional numpy arrays.
    :return:
        A one-dimensional :class:`numpy.ndarray`.
    """
    flat_pieces = [piece.ravel() for piece in coeff]
    return np.concatenate(flat_pieces)
def my_unravel(trafo, coeff):
    r"""
    Companion to :func:`my_ravel`: recover the per-shearlet coefficient
    images from a flattened 1-D coefficient vector.

    :param trafo:
        An object of class :class:`AlphaTransform.AlphaShearletTransform`;
        its spectrograms determine the shape of each coefficient image.
    :param numpy.ndarray coeff:
        A one-dimensional array as produced by ``my_ravel``.
    :return:
        A generator producing the "unravelled" 2-D coefficient arrays.
    """
    shapes = [spec.shape for spec in trafo.spectrograms]
    boundaries = np.cumsum([spec.size for spec in trafo.spectrograms])
    pieces = np.split(coeff, boundaries)
    return (piece.reshape(shape)
            for shape, piece in zip(shapes, pieces))
"felix.voigtlaender@gmail.com"
] | felix.voigtlaender@gmail.com |
113b1426d9036aee80c7202882206d1f33646a46 | fa1e90dedb7f9b84cd210420215ff6a9bf7e6f2d | /airmozilla/suggest/forms.py | 605254a63fff168bd1e667a2ed8a5f5f55e9866b | [] | no_license | sara-mansouri/airmozilla | f7bdf6aeafa9a7a299fc69c506e186ba47be7ccb | 8f93162be46044798df1e6d0ce80c8407fc41995 | refs/heads/master | 2021-01-16T18:28:35.569244 | 2014-03-28T02:59:31 | 2014-03-28T02:59:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,080 | py | from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from django.utils.timesince import timesince
from django.utils.safestring import mark_safe
from django.db.models import Q
from slugify import slugify
import requests
from funfactory.urlresolvers import reverse
from airmozilla.base.forms import BaseModelForm
from airmozilla.main.models import (
SuggestedEvent,
Event,
Tag,
Channel,
SuggestedEventComment
)
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.uploads.models import Upload
from . import utils
class StartForm(BaseModelForm):
    # First step of the suggest-event wizard: pick a title and event type.

    # Radio choice deciding which wizard flow follows (upcoming event,
    # pre-recorded upload, or a Popcorn URL).
    event_type = forms.ChoiceField(
        label='',
        choices=[
            ('upcoming', 'Upcoming'),
            ('pre-recorded', 'Pre-recorded'),
            ('popcorn', 'Popcorn')
        ],
        widget=forms.widgets.RadioSelect()
    )

    class Meta:
        model = SuggestedEvent
        fields = ('title',)

    def __init__(self, *args, **kwargs):
        # The requesting user must be passed in as a keyword argument;
        # it is used for the per-user duplicate-title check below.
        self.user = kwargs.pop('user')
        super(StartForm, self).__init__(*args, **kwargs)
        # self.fields['upcoming'].label = ''
        # self.fields['upcoming'].widget = forms.widgets.RadioSelect(
        #     choices=[(True, 'Upcoming'), (False, 'Pre-recorded')]
        # )

    def clean_title(self):
        # Reject titles already used by a real Event, or by another
        # suggested event of the same user (case-insensitive).
        value = self.cleaned_data['title']
        if Event.objects.filter(title__iexact=value):
            raise forms.ValidationError("Event title already used")
        if SuggestedEvent.objects.filter(title__iexact=value, user=self.user):
            raise forms.ValidationError(
                "You already have a suggest event with this title"
            )
        return value
class TitleForm(BaseModelForm):
    # Edit the suggested event's title and (optional) URL slug.

    class Meta:
        model = SuggestedEvent
        fields = ('title', 'slug')

    def clean_slug(self):
        # A user-provided slug must not collide with an existing Event's
        # slug (case-insensitive). An empty slug is allowed here; clean()
        # derives one from the title in that case.
        value = self.cleaned_data['slug']
        if value:
            if Event.objects.filter(slug__iexact=value):
                raise forms.ValidationError('Already taken')
        return value

    def clean_title(self):
        value = self.cleaned_data['title']
        if Event.objects.filter(title__iexact=value):
            raise forms.ValidationError("Event title already used")
        return value

    def clean(self):
        # If no slug was given, slugify the title; then re-check the final
        # slug for exact collisions against existing events.
        cleaned_data = super(TitleForm, self).clean()
        if 'slug' in cleaned_data and 'title' in cleaned_data:
            if not cleaned_data['slug']:
                cleaned_data['slug'] = slugify(cleaned_data['title']).lower()
            if Event.objects.filter(slug=cleaned_data['slug']):
                raise forms.ValidationError('Slug already taken')
        return cleaned_data
class ChooseFileForm(BaseModelForm):
    # Pre-recorded flow: pick one of the user's uploaded files.

    class Meta:
        model = SuggestedEvent
        fields = ('upload',)

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        super(ChooseFileForm, self).__init__(*args, **kwargs)
        # Offer uploads that are unclaimed, or already attached to this
        # very suggested event (so re-editing keeps the current choice).
        this_or_nothing = (
            Q(suggested_event__isnull=True) |
            Q(suggested_event=self.instance)
        )
        uploads = (
            Upload.objects
            .filter(user=self.user)
            .filter(this_or_nothing)
            .order_by('created')
        )
        self.fields['upload'].widget = forms.widgets.RadioSelect(
            choices=[(x.pk, self.describe_upload(x)) for x in uploads]
        )

    @staticmethod
    def describe_upload(upload):
        # Human-friendly radio label: file name, size and relative age.
        # mark_safe is needed because the label embeds HTML markup.
        html = (
            '%s <br><span class="metadata">(%s) uploaded %s ago</span>' % (
                upload.file_name,
                filesizeformat(upload.size),
                timesince(upload.created)
            )
        )
        return mark_safe(html)
class PopcornForm(BaseModelForm):
    """Popcorn flow: collect and validate the Popcorn project URL."""

    class Meta:
        model = SuggestedEvent
        fields = ('popcorn_url',)

    def __init__(self, *args, **kwargs):
        super(PopcornForm, self).__init__(*args, **kwargs)
        self.fields['popcorn_url'].label = 'Popcorn URL'

    def clean_popcorn_url(self):
        """Normalize the URL (default to http://) and verify it responds
        with HTTP 200.

        Robustness fix: the reachability check previously had no timeout
        and let connection errors propagate as server errors; both now
        surface as a form validation error instead.
        """
        url = self.cleaned_data['popcorn_url']
        if '://' not in url:
            url = 'http://' + url
        try:
            response = requests.get(url, timeout=10)
        except requests.RequestException:
            raise forms.ValidationError('URL can not be found')
        if response.status_code != 200:
            raise forms.ValidationError('URL can not be found')
        return url
class DescriptionForm(BaseModelForm):
    # Edit the long and short descriptions of the suggested event.

    class Meta:
        model = SuggestedEvent
        fields = ('description', 'short_description')

    def __init__(self, *args, **kwargs):
        super(DescriptionForm, self).__init__(*args, **kwargs)
        # Help texts contain HTML (<br>, <b>) and are rendered as markup
        # in the template.
        self.fields['description'].help_text = (
            "Write a description of your event that will entice viewers to "
            "watch.<br>"
            "An interesting description improves the chances of your "
            "presentation being picked up by bloggers and other websites."
            "<br>"
            "Please phrase your description in the present tense. "
        )
        self.fields['short_description'].help_text = (
            "This Short Description is used in public feeds and tweets. "
            "<br>If your event is non-public be careful "
            "<b>not to "
            "disclose sensitive information here</b>."
            "<br>If left blank the system will use the first few "
            "words of the description above."
        )
class DetailsForm(BaseModelForm):
    """Edit the logistics of a suggested event: where, when, privacy,
    category, tags, channels and related free-text details.

    For non-upcoming (pre-recorded/popcorn) events the location,
    start-time and remote-presenter fields are removed entirely.
    """

    # Tags are entered as one comma-separated text field and converted
    # to Tag instances in clean_tags().
    tags = forms.CharField(required=False)
    enable_discussion = forms.BooleanField(required=False)

    class Meta:
        model = SuggestedEvent
        fields = (
            'location',
            'start_time',
            'privacy',
            'category',
            'tags',
            'channels',
            'additional_links',
            'remote_presenters',
        )

    def __init__(self, *args, **kwargs):
        super(DetailsForm, self).__init__(*args, **kwargs)
        self.fields['channels'].required = False
        if not self.instance.upcoming:
            # Pre-recorded/popcorn events have no scheduling details.
            del self.fields['location']
            del self.fields['start_time']
            del self.fields['remote_presenters']
        else:
            self.fields['location'].required = True
            self.fields['start_time'].required = True
            self.fields['location'].help_text = (
                "Choose an Air Mozilla origination point. <br>"
                "If the location of your event isn't on the list, "
                "choose Live Remote. <br>"
                "Note that live remote dates and times are UTC."
            )
            self.fields['remote_presenters'].help_text = (
                "If there will be presenters who present remotely, please "
                "enter email addresses, names and locations about these "
                "presenters."
            )
            self.fields['remote_presenters'].widget.attrs['rows'] = 3
        if 'instance' in kwargs:
            event = kwargs['instance']
            if event.pk:
                # Pre-fill the free-text tags input as "tag1,tag2,...".
                # (Idiom fix: the single-use lambda assignment was inlined.)
                self.initial['tags'] = ','.join(
                    map(unicode, event.tags.all())
                )
        self.fields['tags'].help_text = (
            "Enter some keywords to help viewers find the recording of your "
            "event. <br>Press return between keywords"
        )
        self.fields['channels'].help_text = (
            "Should your event appear in one or more particular "
            "Air Mozilla Channels? <br>If in doubt, select Main."
        )
        self.fields['additional_links'].help_text = (
            "If you have links to slides, the presenter's blog, or other "
            "relevant links, list them here and they will appear on "
            "the event page."
        )
        self.fields['additional_links'].widget.attrs['rows'] = 3

    def clean_tags(self):
        # Split the comma-separated input and map each keyword onto an
        # existing or newly created Tag instance.
        tags = self.cleaned_data['tags']
        split_tags = [t.strip() for t in tags.split(',') if t.strip()]
        final_tags = []
        for tag_name in split_tags:
            t, __ = Tag.objects.get_or_create(name=tag_name)
            final_tags.append(t)
        return final_tags

    def clean_channels(self):
        # Fall back to the site's default channel when none was selected.
        channels = self.cleaned_data['channels']
        if not channels:
            return Channel.objects.filter(slug=settings.DEFAULT_CHANNEL_SLUG)
        return channels
class DiscussionForm(BaseModelForm):
    # Configure comment/discussion settings for the suggested event.

    # Comma-separated moderator email addresses, validated in clean_emails().
    emails = forms.CharField(required=False, label="Moderators")

    class Meta:
        model = SuggestedDiscussion
        fields = ('enabled', 'moderate_all')

    def __init__(self, *args, **kwargs):
        super(DiscussionForm, self).__init__(*args, **kwargs)
        event = self.instance.event
        # NOTE(review): "publically" is a typo in this user-facing string;
        # left untouched here since doc-only changes must not alter output.
        self.fields['moderate_all'].help_text = (
            'That every comment has to be approved before being shown '
            'publically. '
        )
        # The moderators input autocompletes via an AJAX endpoint.
        self.fields['emails'].widget.attrs.update({
            'data-autocomplete-url': reverse('suggest:autocomplete_emails')
        })
        if event.privacy != Event.PRIVACY_COMPANY:
            # Non-company-private events always require full moderation,
            # so the checkbox is forced on (disabled in the UI).
            self.fields['moderate_all'].widget.attrs.update(
                {'disabled': 'disabled'}
            )
            self.fields['moderate_all'].help_text += (
                '<br>If the event is not MoCo private you have to have '
                'full moderation on '
                'all the time.'
            )

    def clean_emails(self):
        # Split, normalize (lowercase, strip) and de-duplicate the
        # comma-separated addresses, then validate each one.
        value = self.cleaned_data['emails']
        emails = list(set([
            x.lower().strip() for x in value.split(',') if x.strip()
        ]))
        for email in emails:
            if not utils.is_valid_email(email):
                raise forms.ValidationError(
                    '%s is not a valid email address' % (email,)
                )
        return emails
class PlaceholderForm(BaseModelForm):
    # Upload the placeholder image shown before/instead of the video.

    class Meta:
        model = SuggestedEvent
        fields = ('placeholder_img',)

    def __init__(self, *args, **kwargs):
        super(PlaceholderForm, self).__init__(*args, **kwargs)
        self.fields['placeholder_img'].help_text = (
            "We need a placeholder image for your event. <br>"
            "A recent head-shot of the speaker is preferred. <br>"
            "Placeholder images should be 200 x 200 px or larger."
        )
#class ParticipantsForm(BaseModelForm):
#
# participants = forms.CharField(required=False)
#
# class Meta:
# model = SuggestedEvent
# fields = ('participants',)
#
# def clean_participants(self):
# participants = self.cleaned_data['participants']
# split_participants = [p.strip() for p in participants.split(',')
# if p.strip()]
# final_participants = []
# for participant_name in split_participants:
# p = Participant.objects.get(name=participant_name)
# final_participants.append(p)
# return final_participants
#
class SuggestedEventCommentForm(BaseModelForm):
    # Minimal form for posting a comment on a suggested event.

    class Meta:
        model = SuggestedEventComment
        fields = ('comment',)
"mail@peterbe.com"
] | mail@peterbe.com |
c10ea4aaf707b2472d05f5082eeeb2cded2d7235 | e69aa8050bced4e625928e3e18e14e892ba860dc | /Partial_Permutations.py | 5886fbe97a1612b6882017146363daf371f01eae | [
"MIT"
] | permissive | Esprit-Nobel/ROSALIND | e65cf85f1fbe0660cda547926d91be3f109edce9 | ec964f6d9cc5d97339106c89df865fb105251928 | refs/heads/master | 2021-01-12T18:16:19.768656 | 2016-11-02T14:06:50 | 2016-11-02T14:06:50 | 71,357,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 18:18:43 2016
@author: yannick
"""
import sys
import math
# ROSALIND "Partial Permutations": the dataset file (first CLI argument)
# holds a single line "n k".
with open(sys.argv[1], "r") as fichier_lu:
    CONTENU = fichier_lu.readlines()
NUM = CONTENU[0].strip("\n\r\t ").split()
# P(n, k) = n! / (n-k)!, reported modulo 1,000,000. The division is exact
# (integer quotient of factorials), so taking the modulus afterwards is
# correct. Python 2 script: note 'long' and the print statement below.
TOT = ( math.factorial( long(NUM[0]) ) / \
       math.factorial( long(NUM[0])-long(NUM[1]) ) ) \
       % 1000000
print TOT
| [
"esprit.nobel@orange.fr"
] | esprit.nobel@orange.fr |
07a5041034955e430ae5f1723511345940c3a5b6 | 1f4e6b4045df3a0313880da83e69f10d44c8bab4 | /4 Data types 3listpy.py | 15a8f1cd8c2ab2129144c106420c4f510286c828 | [] | no_license | AhmedAliGhanem/PythonForNetowrk-Cisco | daee76b17cc271b56516d559a8fb4184656a15b0 | f71b2325db69cb5eb9c6a9fe0c6f04a217468875 | refs/heads/master | 2022-01-12T09:26:25.338980 | 2019-06-22T16:18:23 | 2019-06-22T16:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | #List
# Tutorial: Python lists — creation, duplicates, mixed types.
years = [2000, 2001, 2002]
print(years)
Repeatable = [2000, 2001, 2000]
print(Repeatable)
mix = [2000, "yasser", 2002]
print(mix)
# Concatenation, repetition and membership tests.
x = ["A", "B", "C"]
y = ["D", "E"]
z = x + y
print(z)
z = x * 3
print(z)
z = "A" in y
print(z)
# Indexing into a list of interface speed options.
fastethernet_speed=['auto', '10', '100']
print(fastethernet_speed)
print(fastethernet_speed[0])
# Building a port list with append, then sorting it in place.
portList = []
portList.append(21)
portList.append(80)
portList.append(443)
portList.append(25)
print(portList)
portList.sort()
print(portList)
# index() returns the position; remove() deletes by value.
pos = portList.index(80)
print ("[+] There are "+str(pos)+" ports to scan before 80.")
portList.remove(443)
print(portList)
# Splitting strings into lists with str.split().
test = 'CCIE CCNP CCNA and CCNT'
print(test.split())
fastethernet_duplex = 'auto half full'
fastethernet_duplex_list = fastethernet_duplex.split()
print(fastethernet_duplex_list)
# Element assignment, deletion by index, and membership test.
fastethernet_duplex_list[0] = 'Auto'
fastethernet_duplex_list[1] = 'Half'
fastethernet_duplex_list[2] = 'Full'
print(fastethernet_duplex_list)
print(fastethernet_duplex_list[0])
del fastethernet_duplex_list[0]
print(fastethernet_duplex_list)
print('Auto' in fastethernet_duplex_list)
"noreply@github.com"
] | AhmedAliGhanem.noreply@github.com |
bc48ed3a69d6249ded7e941d7d465964d67fa3dc | 4d946b12fa07bb4375b687e74bbc785d35c5175b | /DIO-intro-python.py | 1e8c1c1014ebc5344d98c14d8f69b35db3e45730 | [] | no_license | Thalitachargel/100daysofCode | c4994bdc48870fc6b702387fe9ec004148ac73b0 | 0333a3f0358d1309368a4f93fec6759e307d7dba | refs/heads/main | 2023-07-05T02:14:00.105793 | 2021-08-18T22:28:21 | 2021-08-18T22:28:21 | 363,527,384 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,648 | py | #!/usr/bin/env python
# coding: utf-8
# # Introdução a programação em Python 🐱💻
# ## Digital Inovation One
# ## Modulo 6
# 🐍 Organizando conjuntos e subconjuntos de elementos em Python
#
# In[3]:
# In[3]:
# What a set is (a 'conjunto'): an unordered collection of unique items.
conjunto = {1, 2, 3, 4}
print(type(conjunto))
# In[4]:
# Sets do not allow duplicates — repeated values collapse to one.
conjunto_Duplo = {1, 2, 3, 4, 4, 2}
print(conjunto_Duplo)
# In[5]:
# Adding an element to the set.
conjunto.add(5)
conjunto
# In[7]:
# Removing an element (discard does not raise if it is missing).
conjunto.discard(2)
conjunto
# ### Set operations
# In[11]:
# Union of two sets.
conj1 = {1, 2, 3, 4, 5}
conj2 = {5, 6, 7, 8}
print( f'conjunto 1 ={ conj1} e conjunto 1 = {conj2}')
conj_uniao = conj1.union(conj2)
conj_uniao
# In[12]:
# Intersection of the two sets.
conj_interseccao = conj1.intersection(conj2)
conj_interseccao
# In[16]:
# Difference (elements of one set not present in the other).
conj_diferencaA = conj1.difference(conj2)
conj_diferencaB = conj2.difference(conj1)
print(f"conj1 ≠ conj2 = {conj_diferencaA} e conj2 ≠ conj1 = {conj_diferencaB}")
# In[20]:
# Symmetric difference: all elements NOT shared between the two sets.
conj_dif_simetrico = conj1.symmetric_difference(conj2)
conj_dif_simetrico
# ### Membership / containment
# In[33]:
# issubset — whether one set is contained in another.
conjA = {1, 2, 3}
conjB = {1, 2, 3, 4, 5}
conj_subset = conjA.issubset(conjB)
conj_subset2 = conjB.issubset(conjA)
conj_subset2
if conj_subset == True:
    print("Conjunto A é subset do Conjunto B")
else:
    print("Conjunto B é subset do Conjunto A")
if conj_subset2 == True:
    print("Conjunto A é subset do Conjunto B")
else:
    print("Conjunto B é subset do Conjunto A")
# In[36]:
# issuperset — whether one set contains the other entirely.
conj_superset = conjA.issuperset(conjB)
conj_superset1 = conjB.issuperset(conjA)
if conj_superset == True:
    print("Conjunto A é superconjunto do Conjunto B")
else:
    print("Conjunto B é superconjunto do Conjunto A")
if conj_superset1 == True:
    print("Conjunto A é superconjunto do Conjunto B")
else:
    print("Conjunto B é superconjunto do Conjunto A")
# In[46]:
# Converting a list to a set (drops the duplicate entry).
lista = ['cachorro', 'gato', 'gato', 'elefante']
conj_animais = set(lista)
print(conj_animais, type(lista), type(conj_animais))
# In[51]:
# Converting back to a list:
#lista_animais = list(conj_animais)
#print(lista_animais, type(lista_animais))
# ## Módulo 7 - Construindo Métodos, Funções e Classes em Python
# In[56]:
# condicional IF, else
a = int(input("Primeiro Valor: "))
b = int(input("Segundo valor: "))
if a > b:
print(f'O primeiro valor, {a}, é maior que o segundo valor, {b}.')
else:
print(f'O segundo valor, {b}, é maior que o primeiro valor, {a}.')
# In[58]:
# E Elif
a = int(input("Primeiro Valor: "))
b = int(input("Segundo valor: "))
c = int(input("Terceiro Valor: "))
if a > b and a > c:
print(f'O maior numero é o primerio, {a}.')
elif b > a and b > c:
print(f'O maior numero é o segundo, {b}.')
else:
print(f'O maior numero é o terceiro, {c}.')
# In[62]:
# Exercício
# saber se o numero digitado é par
n = int(input("Digite um número:"))
if n == 0:
print("Digite um número diferente de zero!")
elif n % 2 == 0:
print(f'O número {n} é par.')
else:
print(f'O número {n} é impar')
# In[63]:
# função é tudo aquilo que retorna valor
# Método é Definição e não retorna valor
def soma(a, b):
    """Return the sum of *a* and *b*."""
    # Because it returns a value, this is a function (not just a procedure).
    total = a + b
    return total
print(soma(1, 2))
print(soma(3, 4))
# In[64]:
def subtracao(a, b):
    """Return the difference *a* - *b*."""
    resultado = a - b
    return resultado
print(subtracao(10, 2))
# In[76]:
# Classe
def multiplicacao(a, b):
    """Return the product of *a* and *b*."""
    produto = a * b
    return produto
def divisao(a, b):
    """Return the quotient *a* / *b* (true division)."""
    quociente = a / b
    return quociente
print(multiplicacao(10, 2))
print(divisao(50, 5))
# Transformando em classe
class Calculadora:
    """Two-operand calculator whose operands are fixed at construction.

    The operands are exposed as the public attributes ``a`` and ``b``.
    """

    def __init__(self, num1, num2):
        # Keep both operands on the instance for the arithmetic methods.
        self.a = num1
        self.b = num2

    def soma(self):
        """Return ``a + b``."""
        return self.a + self.b

    def subtracao(self):
        """Return ``a - b``."""
        return self.a - self.b

    def multiplicacao(self):
        """Return ``a * b``."""
        return self.a * self.b

    def divisao(self):
        """Return ``a / b`` (true division)."""
        return self.a / self.b
calculadora = Calculadora(10, 2)
print(calculadora.a, calculadora.b)
print(calculadora.soma())
print(calculadora.subtracao())
print(calculadora.multiplicacao())
print(calculadora.divisao())
# In[79]:
# Calculadora 2
class Calculadora2:
    """Stateless calculator: the operands are supplied on every call."""

    def __init__(self):
        # Nothing to initialise; kept for symmetry with Calculadora.
        pass

    def soma(self, a, b):
        """Return ``a + b``."""
        return a + b

    def subtracao(self, a, b):
        """Return ``a - b``."""
        return a - b

    def multiplicacao(self, a, b):
        """Return ``a * b``."""
        return a * b

    def divisao(self, a, b):
        """Return ``a / b`` (true division)."""
        return a / b
calculadora2 = Calculadora2()
print(calculadora2.soma(10,2))
print(calculadora2.subtracao(5,3))
print(calculadora2.multiplicacao(100,2))
print(calculadora2.divisao(10,5))
# In[95]:
# criar uma televisão usando Class
class Televisao:
    """Toy television: a power toggle plus a channel counter.

    Attributes
    ----------
    ligada : bool
        True while the TV is switched on (starts off).
    canal : int
        Current channel number (starts at 5).
    """

    def __init__(self):
        self.ligada = False
        self.canal = 5

    def power(self):
        """Toggle the power state on/off."""
        self.ligada = not self.ligada

    def aumenta_canal(self):
        """Step one channel up; warn instead if the TV is off."""
        if not self.ligada:
            print("A tv está desligada")
        else:
            self.canal += 1

    def diminui_canal(self):
        """Step one channel down; warn instead if the TV is off."""
        if not self.ligada:
            print("A tv está desligada")
        else:
            self.canal -= 1
televisao = Televisao()
print(f'A televisão está ligada: {televisao.ligada}')
televisao.power()
print(f'A televisão está ligada: {televisao.ligada}')
televisao.power()
print(f'A televisão está ligada: {televisao.ligada}')
print(f'Canal {televisao.canal}')
televisao.aumenta_canal()
televisao.power()
televisao.aumenta_canal()
televisao.aumenta_canal()
print(f'Canal {televisao.canal}')
televisao.diminui_canal()
print(f'Canal {televisao.canal}')
# ### Módulo 8 - Lidando com módulos, importação de classes, métodos e lambdas
# In[100]:
#modulo - são os arquivos py
#import ClasseTelevisao #O exercicio proposto só funciona no PY;.
# In[108]:
def contador_de_letras(lista_palavras):
    """Return a list with the letter count of each word in *lista_palavras*."""
    return [len(palavra) for palavra in lista_palavras]
if __name__ == '__main__':
lista = ['cachorro', 'gato']
print(contador_de_letras(lista))
list1=['cachorro', 'gato', 'elefante']
total_de_letras_lista = contador_de_letras(list1)
print(f'Total letras da lista {list1} é {total_de_letras_lista}')
# In[107]:
list1=['cachorro', 'gato', 'elefante']
total_de_letras_lista = contador_de_letras(list1)
print(total_de_letras_lista)
# In[110]:
#Função anonima
# convertendo o contador em uma função anonima
lista_animais = ['cachorro', 'gato', 'elefante']
#contador_letras = lambda lista # paramentro : [Len (x) for x in lista] #devolução
#passe o for de x pela lista, e retorne o len de x em forma de lista
contador_letras = lambda lista : [len(x) for x in lista]
contador_letras(lista_animais)
# In[115]:
soma2 = lambda a, b: a + b
soma2(2, 3)
# In[127]:
#criando um dicionario de lambdas
calculadora3 ={ 'soma': lambda a, b: a + b,
'subtracao': lambda a, b : a - b,
'multiplicacao': lambda a, b: a * b,
'divisao': lambda a, b : a / b}
type(calculadora3)
# In[128]:
cal3 = calculadora3['soma']
# In[129]:
cal3(2,3)
# ### Modulo 9 - Gere, copie, mova e escreva
# ### Módulo 10 Aprenda a utilizar data e hora
#
# In[183]:
#Importanto a biblioteca
from datetime import date, time, datetime, timedelta
# In[135]:
data_atual = date.today()
data_atual
# In[137]:
#Formatando data atual
data_atual.strftime('%d/%m/%y')
# In[138]:
data_atual.strftime('%d/%m/%Y')
# In[139]:
data_atual.strftime('%d * %m * %y')
# In[140]:
data_atual.strftime('%d ~%m~%y')
# In[143]:
data_atual_str = data_atual.strftime('%A/%B/%Y')
data_atual_str
# In[145]:
type(data_atual) #datetime.date
type (data_atual_str) #string
# In[186]:
# time
def trabalhando_com_date():
    """Demonstrate datetime.date: print today's date formatted, plus the day number."""
    hoje = date.today()
    # %A = weekday name, %B = month name, %Y = 4-digit year
    formatado = hoje.strftime('%A %B %Y')
    print(formatado, hoje.day)
def trabalhando_com_time():
    """Demonstrate datetime.time: build a fixed time and print it as HH:MM:SS."""
    momento = time(hour=15, minute=18, second=30)
    print(momento.strftime('%H:%M:%S'))
def trabalhando_com_datetime():
    """Demonstrate datetime.datetime: formatting, weekday lookup, parsing and arithmetic."""
    data_atual = datetime.now()
    dia = data_atual.strftime('%d %m %y')
    hora = data_atual.strftime('%H, %M %S')
    completa = data_atual.strftime('%c')  # locale's full date/time representation
    print(data_atual, dia, hora, completa)
    print(data_atual.weekday())  # weekday(): 0 = Monday ... 6 = Sunday
    # Portuguese weekday names, indexed by datetime.weekday()
    tupla = ('Segunda', 'Terça', 'Quarta',
    'Quinta', 'Sexta', 'Sábado', 'Domingo')
    print(tupla[data_atual.weekday()])
    # Build a specific datetime: 2008-05-25 20:15:30 (microsecond=20)
    data_criada = datetime(2008, 5, 25, 20, 15, 30, 20)
    print(data_criada)
    print(data_criada.strftime('%c'))
    data_str = '21/03/1985 12:20:22'
    # Parse the string back into a datetime using an explicit format
    data_con = datetime.strptime(data_str, '%d/%m/%Y %H:%M:%S')
    print(data_str)
    # Date/time subtraction: step back 365 days and 2 hours via timedelta
    nova_data = data_con - timedelta(days = 365, hours = 2)
    print(nova_data.strftime('%d - %m - %Y'))
if __name__ == '__main__':
trabalhando_com_date()
trabalhando_com_time()
trabalhando_com_datetime()
# ### Módulo 11 Gerenciando e criando excessões
# In[187]:
# forçando um erro
divisao = 10 / 0
# In[189]:
try:
divisao = 10 / 0
except ZeroDivisionError:
print('Não é possivel dividir por zero')
# In[191]:
# forçar erro
lista = [1, 10]
numero = lista[3]
# In[198]:
try:
lista = [1, 2]
numero = lista[3]
except IndexError:
print("Erro ao acessar indice inesistente")
except:
print('Erro desconhecido')
# In[204]:
try:
x = alma
print(alma)
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
# In[211]:
#else
arquivo = open('teste.txt', 'w')
try:
texto = arquivo.read()
print('fechar arquivo')
arquivo.close()
except ZeroDivisionError:
print("não é possivel dividr por zero")
except ArithmeticError:
print("Erro de op aritmetica")
except IndexError:
print("Erro ao acessar indice inesistente")
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
else:
print('Executa quando não ocorre exceção')
# In[213]:
arquivo = open('teste.txt', 'w')
try:
texto = arquivo.read()
print('fechar arquivo')
except ZeroDivisionError:
print("não é possivel dividr por zero")
except ArithmeticError:
print("Erro de op aritmetica")
except IndexError:
print("Erro ao acessar indice inesistente")
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
else:
print('Executa quando não ocorre exceção')
arquivo.close()
# In[223]:
#Exercicio
while True:
try:
nota = int(input("Digite uma nota entre 0 e 10: "))
print(nota)
if x > 10:
break
except ValueError:
print("Valor inválido. Deve-se digitar apenas Numeros.")
# In[222]:
#criando classe de excessão
class Error(Exception):
    """Base class for this script's custom exceptions."""
    pass
class InputError(Error):
    """Raised when user input is outside the accepted range.

    Note: BaseException.__new__ already stores *message* in ``args``, so
    ``str(ex)`` prints the message even though super().__init__ is not called.
    """
    def __init__(self, message):
        self.message = message
# In[ ]:
while True:
try:
nota = int(input("Digite uma nota entre 0 e 10: "))
print(nota)
if nota > 10:
raise InputError('O valor não pode ser maior que 10')
elif nota < 0:
raise InputError('O valor não pode ser negativo')
break
except ValueError:
print("Valor inválido. Deve-se digitar apenas Numeros.")
except InputError as ex:
print(ex)
# ### Modulo 12 e final: Instalando pacotes e request
# In[230]:
pip list
# In[235]:
#bibliotecas ou pacotes
get_ipython().system('pip install requests')
# In[236]:
pip freeze
# In[240]:
import requests
# In[250]:
#testando o requests
response = requests.get('https://viacep.com.br/ws/70165900/json/')
print(response.status_code) # sucesso = 200
print(response.text)
print(response.json()) #em formato de dicionário
print(type(response.text))
# In[252]:
dado_cep = response.json()
print(dado_cep['logradouro'])
print(dado_cep['complemento'])
# In[256]:
def pokemon(nome_pokemon):
    """Fetch the PokeAPI record for *nome_pokemon* and return it as a dict."""
    resposta = requests.get(f'https://pokeapi.co/api/v2/pokemon/{nome_pokemon}')
    return resposta.json()
pokemon('ditto')
# In[257]:
#request de sites comuns
def retorna_response(url):
    """Perform an HTTP GET on *url* and return the decoded response body.

    Bug fix: ``requests.Response`` has no ``txt`` attribute -- the decoded
    body is ``response.text``. The original ``return response.txt`` raised
    AttributeError on every call.
    """
    response = requests.get(url)
    return response.text
# In[262]:
print(retorna_response('https://recruit.navercorp.com/global/recruitMain'))
# In[ ]:
| [
"noreply@github.com"
] | Thalitachargel.noreply@github.com |
ce9628165675f68be35472bfb365504846ce6053 | 9f078f64c86af7425d32c5e498b0af4458543a8b | /bin/owl | 084c37efcdf13305ac24c9f84df33c20cb7572e6 | [] | no_license | lucassmagal/Owl | 7252ba86f7b4a1deea1be10d1f1a715476585712 | 40272bdc03415d6073ea62da39399e5a2ace1344 | refs/heads/master | 2021-01-01T20:17:36.396245 | 2013-05-12T14:43:09 | 2013-05-12T14:43:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from owl.cli import run
run(sys.argv[1:])
| [
"magal.lucas@gmail.com"
] | magal.lucas@gmail.com | |
d1c81821046b2be4b5a8b0c06ec25dac7c3a6841 | cc33d6045e08be160dcae36dc9e9e24d190878d8 | /visualization/visualizer2d.py | 84f60383c873dd67276d11a3c1b07c0329de4ffb | [
"Apache-2.0"
] | permissive | StanfordVL/visualization | 66ca20a2d7d435b1fc950dfbed4e205488418e3a | 06a179c550a608a548f3cad70f06dd0c8610fa66 | refs/heads/master | 2021-06-23T01:54:16.414594 | 2017-08-30T01:05:43 | 2017-08-30T01:05:43 | 105,830,187 | 0 | 0 | null | 2017-10-04T23:35:12 | 2017-10-04T23:35:12 | null | UTF-8 | Python | false | false | 5,636 | py | """
Common 2D visualizations using pyplot
Author: Jeff Mahler
"""
import numpy as np
import IPython
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from autolab_core import Box, Contour
from perception import BinaryImage, ColorImage, DepthImage, GrayscaleImage, RgbdImage, GdImage, SegmentationImage
class Visualizer2D:
    """Static convenience facade over ``matplotlib.pyplot`` for common 2D
    visualizations, plus helpers for perception image types, boxes and
    contours. All methods operate on pyplot's current figure/axes."""
    @staticmethod
    def figure(size=(8,8), *args, **kwargs):
        """ Creates a figure.
        Parameters
        ----------
        size : 2-tuple
            size of the view window in inches
        args : list
            positional args passed through to pyplot's figure
        kwargs : list
            keyword args passed through to pyplot's figure
        Returns
        -------
        pyplot figure
            the current figure
        """
        return plt.figure(figsize=size, *args, **kwargs)
    @staticmethod
    def show(*args, **kwargs):
        """ Show the current figure """
        plt.show(*args, **kwargs)
    @staticmethod
    def clf(*args, **kwargs):
        """ Clear the current figure """
        plt.clf(*args, **kwargs)
    @staticmethod
    def xlim(*args, **kwargs):
        """ Set the x limits of the current figure """
        plt.xlim(*args, **kwargs)
    @staticmethod
    def ylim(*args, **kwargs):
        """ Set the y limits of the current figure """
        plt.ylim(*args, **kwargs)
    @staticmethod
    def savefig(*args, **kwargs):
        """ Save the current figure """
        plt.savefig(*args, **kwargs)
    @staticmethod
    def colorbar(*args, **kwargs):
        """ Adds a colorbar to the current figure """
        plt.colorbar(*args, **kwargs)
    @staticmethod
    def subplot(*args, **kwargs):
        """ Creates a subplot in the current figure """
        plt.subplot(*args, **kwargs)
    @staticmethod
    def title(*args, **kwargs):
        """ Creates a title in the current figure """
        plt.title(*args, **kwargs)
    @staticmethod
    def xlabel(*args, **kwargs):
        """ Creates an x axis label in the current figure """
        plt.xlabel(*args, **kwargs)
    @staticmethod
    def ylabel(*args, **kwargs):
        """ Creates a y axis label in the current figure """
        plt.ylabel(*args, **kwargs)
    @staticmethod
    def legend(*args, **kwargs):
        """ Creates a legend for the current figure """
        plt.legend(*args, **kwargs)
    @staticmethod
    def scatter(*args, **kwargs):
        """ Scatters points on the current figure """
        plt.scatter(*args, **kwargs)
    @staticmethod
    def plot(*args, **kwargs):
        """ Plots lines on the current figure """
        plt.plot(*args, **kwargs)
    @staticmethod
    def imshow(image, **kwargs):
        """ Displays an image, choosing a colormap by image type:
        gray for binary/grayscale, gray_r for depth, none for color.
        Unknown image types are silently not drawn; axes are hidden
        in every case.
        Parameters
        ----------
        image : :obj:`perception.Image`
            image to display
        """
        if isinstance(image, BinaryImage) or isinstance(image, GrayscaleImage):
            plt.imshow(image.data, cmap=plt.cm.gray, **kwargs)
        elif isinstance(image, ColorImage) or isinstance(image, SegmentationImage):
            plt.imshow(image.data, **kwargs)
        elif isinstance(image, DepthImage):
            plt.imshow(image.data, cmap=plt.cm.gray_r, **kwargs)
        elif isinstance(image, RgbdImage):
            # default to showing color only, for now...
            plt.imshow(image.color.data, **kwargs)
        elif isinstance(image, GdImage):
            # default to showing gray only, for now...
            plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
        plt.axis('off')
    @staticmethod
    def box(b, line_width=2, color='g', style='-'):
        """ Draws a box on the current plot as four line segments.
        Parameters
        ----------
        b : :obj:`autolab_core.Box`
            box to draw
        line_width : int
            width of lines on side of box
        color : :obj:`str`
            color of box
        style : :obj:`str`
            style of lines to draw
        Raises
        ------
        ValueError
            if *b* is not an autolab_core.Box
        """
        if not isinstance(b, Box):
            raise ValueError('Input must be of type Box')
        # get min pixels
        # NOTE(review): i takes index [1] and j index [0] of min_pt/max_pt --
        # assumes Box stores points as (row, col) while plotting is (x, y);
        # confirm against autolab_core.Box.
        min_i = b.min_pt[1]
        min_j = b.min_pt[0]
        max_i = b.max_pt[1]
        max_j = b.max_pt[0]
        top_left = np.array([min_i, min_j])
        top_right = np.array([max_i, min_j])
        bottom_left = np.array([min_i, max_j])
        bottom_right = np.array([max_i, max_j])
        # create lines: each is a 2x2 array of the segment's two endpoints
        left = np.c_[top_left, bottom_left].T
        right = np.c_[top_right, bottom_right].T
        top = np.c_[top_left, top_right].T
        bottom = np.c_[bottom_left, bottom_right].T
        # plot lines (column 0 = x coords, column 1 = y coords)
        plt.plot(left[:,0], left[:,1], linewidth=line_width, color=color, linestyle=style)
        plt.plot(right[:,0], right[:,1], linewidth=line_width, color=color, linestyle=style)
        plt.plot(top[:,0], top[:,1], linewidth=line_width, color=color, linestyle=style)
        plt.plot(bottom[:,0], bottom[:,1], linewidth=line_width, color=color, linestyle=style)
    @staticmethod
    def contour(c, subsample=1, size=10, color='g'):
        """ Draws a contour on the current plot by scattering points.
        Parameters
        ----------
        c : :obj:`autolab_core.Contour`
            contour to draw
        subsample : int
            subsample rate for boundary pixels (every Nth pixel is drawn)
        size : int
            size of scattered points
        color : :obj:`str`
            color of box
        Raises
        ------
        ValueError
            if *c* is not an autolab_core.Contour
        """
        if not isinstance(c, Contour):
            raise ValueError('Input must be of type Contour')
        # slice the index sequence to take every `subsample`-th boundary pixel;
        # boundary_pixels are (row, col), so column 1 is x and column 0 is y
        for i in range(c.num_pixels)[0::subsample]:
            plt.scatter(c.boundary_pixels[i,1], c.boundary_pixels[i,0], s=size, c=color)
| [
"jmahler@berkeley.edu"
] | jmahler@berkeley.edu |
b391859b94fd32af4b40bd699c1b6acde8391faf | edb884e3f639261f36bbb8f444e2200bb879a9a2 | /diagfi_compare_singlemonth.py | baa02ec94263fd6a9e083e4dbd41e800d3960a62 | [] | no_license | draelsaid/MGCM-python | 7df36a783829fadb1d89ec9e54f92470d54c0493 | 9ee1491f009bed5f092c21a9235d61e9612f32f0 | refs/heads/master | 2020-06-21T22:41:09.556055 | 2017-05-31T12:22:45 | 2017-05-31T12:22:45 | 74,768,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,806 | py | # Compares NetCDF data from the Mars GCM for Full Mars Year by combining monthly output of diagfi.nc files
# Adam El-Said 08/2016
import matplotlib as mpl
#mpl.use('Agg') # removes need for X-Server (graphics in linux). For qsub only.
import numpy as np
import pylab as py
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mars_time import MarsTime
from scipy.io import *
from matplotlib import cm,ticker
from plt_timeseries import *
from matplotlib.ticker import FormatStrFormatter
from MidPointNorm import *
# Prints EVERYTHING inside a variable without holding back (intended for diagnostic)
np.set_printoptions(threshold=np.inf)
# Abbreviate sol_ls conversion function
sol_Ls=MarsTime().sol_ls
# Moving average
def moving_average(a, n=3):
    """Return the n-point running mean of *a* as a float ndarray.

    The result has length ``len(a) - n + 1``; element k is the mean of
    ``a[k:k+n]``.
    """
    cumulative = np.cumsum(a, dtype=float)
    # Differencing the cumulative sums leaves each window's sum in place.
    cumulative[n:] -= cumulative[:-n]
    window_sums = cumulative[n - 1:]
    return window_sums / n
# Initialise dictionaries - due to data size
Ls_m = {}
psa, psb = {}, {}
presa, presb = {}, {}
tempa, tempb = {}, {}
tsurfa, tsurfb = {}, {}
ua, ub = {}, {}
va, vb = {}, {}
dustqa, dustqb = {}, {}
dustNa, dustNb = {}, {}
rhoa, rhob = {}, {}
fluxsurflwa, fluxsurflwb = {}, {}
fluxsurfswa, fluxsurfswb = {}, {}
fluxtoplwa, fluxtoplwb = {}, {}
fluxtopswa, fluxtopswb = {}, {}
taua, taub = {}, {}
rdusta, rdustb = {}, {}
lw_htrta, lw_htrtb = {}, {}
sw_htrta, sw_htrtb = {}, {}
dqsseda, dqssedb = {}, {}
dqsdeva, dqsdevb = {}, {}
# Grab topography from surface.nc or mola32.nc file
ml = netcdf.netcdf_file('/padata/mars/users/aes442/mgcm_data/surface.nc','r')
mola = {}
mola[0] = ml.variables['latitude'][:]
mola[1] = ml.variables['longitude'][:]
mola[2] = ml.variables['zMOL'][:]
# Import data from Luca's TES dust files for comparison
a = netcdf.netcdf_file('/padata/mars/users/aes442/mgcm_data/dust_MY28.nc','r')
d_lat_s = a.variables['latitude'][:]
d_lon_s = a.variables['longitude'][:]
d_t = a.variables['Time'][:]
d_d = a.variables['dustop'][:]
d_lat = np.linspace(-90,90,d_lat_s.shape[0])
d_lon = np.linspace(-180,180,d_lon_s.shape[0])
# Number of months in comparison (always add 1 because of Python indexing)
Months = 2 # No. of months
amth = 1 # Actual month
# This loop assigns the data in both directories to variables here. This is done for each month. The result is a dictionary of dictionaries. One dictionary containing a dictionary for every month.
for i in xrange(1,Months):
mgcm = "MGCM_v5-1"
rundira = "a_ds8"
rundirb = "a_ref4"
month = ("m%s" % (amth)) # CHANGE
filename = "diagfi.nc"
a = netcdf.netcdf_file("/padata/alpha/users/aes442/RUNS/R-%s/%s/%s/%s" % (mgcm,rundira,month,filename),'r')
b = netcdf.netcdf_file("/padata/alpha/users/aes442/RUNS/R-%s/%s/%s/%s" % (mgcm,rundirb,month,filename),'r')
lat = a.variables['lat'][:]
lon = a.variables['lon'][:]
sigma = a.variables['sigma'][:]
t_m = a.variables['time'][:]
Ls_m[i] = a.variables['Ls'][:]
psa[i] = a.variables['ps'][:]
presa[i] = a.variables['pressure'][:]
tempa[i] = a.variables['temp'][:]
tsurfa[i] = a.variables['tsurf'][:]
ua[i] = a.variables['u'][:]
va[i] = a.variables['v'][:]
dustqa[i] = a.variables['dustq'][:]
dustNa[i] = a.variables['dustN'][:]
rhoa[i] = a.variables['rho'][:]
fluxsurflwa[i] = a.variables['fluxsurf_lw'][:]
fluxsurfswa[i] = a.variables['fluxsurf_sw'][:]
fluxtoplwa[i] = a.variables['fluxtop_lw'][:]
fluxtopswa[i] = a.variables['fluxtop_sw'][:]
taua[i] = a.variables['taudustvis'][:]
rdusta[i] = a.variables['reffdust'][:]
lw_htrta[i] = a.variables['lw_htrt'][:]
sw_htrta[i] = a.variables['sw_htrt'][:]
dqsseda[i] = a.variables['dqssed'][:]
dqsdeva[i] = a.variables['dqsdev'][:]
psb[i] = b.variables['ps'][:]
presb[i] = b.variables['pressure'][:]
tempb[i] = b.variables['temp'][:]
tsurfb[i] = b.variables['tsurf'][:]
ub[i] = b.variables['u'][:]
vb[i] = b.variables['v'][:]
dustqb[i] = b.variables['dustq'][:]
dustNb[i] = b.variables['dustN'][:]
rhob[i] = b.variables['rho'][:]
fluxsurflwb[i] = b.variables['fluxsurf_lw'][:]
fluxsurfswb[i] = b.variables['fluxsurf_sw'][:]
fluxtoplwb[i] = b.variables['fluxtop_lw'][:]
fluxtopswb[i] = b.variables['fluxtop_sw'][:]
taub[i] = b.variables['taudustvis'][:]
rdustb[i] = b.variables['reffdust'][:]
lw_htrtb[i] = b.variables['lw_htrt'][:]
sw_htrtb[i] = b.variables['sw_htrt'][:]
dqssedb[i] = b.variables['dqssed'][:]
dqsdevb[i] = b.variables['dqsdev'][:]
# Calculate approximate HEIGHT from sigma (km)
alt = np.zeros((sigma.shape[0]))
for i in xrange(len(sigma)):
alt[i] = -10.8*np.log(sigma[i])
print "Latitude: %i || Longitude: %i || Model levels: %i => Alt Min:%.3f | Alt Max:%.3f | Alt half: %.3f " % (lat.shape[0],lon.shape[0],sigma.shape[0],alt[0],alt[-1],alt[18])
alt_half=18 # 47.8km
# Get time dimension length
n = 0
for i in xrange(1,len(psa)+1,1): # len(psa) gives the number of months
n = n + len(dustqa[i]) # len(dustqa[i]) gives the number of time steps in each month.
print ("Total time steps: %i" % (n))
## Ls vector
Ls_s = (Months-1)*30 # Number of solar longitudes for time vector for comparison
Ls = np.zeros((n))
# Method 2 grabs Ls's from model (has bugs, but can be ironed out)
p=0
for i in xrange(1,len(Ls_m)+1,1):
gg = Ls_m[i]
for j in xrange(gg.shape[0]):
Ls[p] = gg[j]
p = p + 1
Ls = np.roll(Ls,5)
Ls[-1] = np.ceil(Ls[-2])
Ls[:6] = np.linspace(np.floor(Ls[5]),Ls[5],6)
print Ls[:8], Ls[-8:]
## Create all other variables, with altitude dimension removed
ps_a, ps_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
temp_a, temp_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
tsurf_a, tsurf_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
u_a, u_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
v_a, v_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dustq_a, dustq_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dustN_a, dustN_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
rho_a, rho_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
fslwa, fslwb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
fsswa, fsswb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
ftlwa, ftlwb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
ftswa, ftswb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
tau_a, tau_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
rdust_a, rdust_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
lw_htrt_a, lw_htrt_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
sw_htrt_a, sw_htrt_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
pres_a, pres_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dqssed_a, dqssed_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dqsdev_a, dqsdev_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
# 3D Vars
ps_a, ps_b = psa[1][:,:,:], psb[1][:,:,:]
fslwa, fslwb = fluxsurflwa[1][:,:,:], fluxsurflwb[1][:,:,:]
fsswa, fsswb = fluxsurfswa[1][:,:,:], fluxsurfswb[1][:,:,:]
ftlwa, ftlwb = fluxtoplwa[1][:,:,:], fluxtoplwb[1][:,:,:]
ftswa, ftswb = fluxtopswa[1][:,:,:], fluxtopswb[1][:,:,:]
tau_a, tau_b = taua[1][:,:,:], taub[1][:,:,:]
tsurf_a, tsurf_b = tsurfa[1][:,:,:], tsurfb[1][:,:,:]
dqssed_a, dqssed_b = dqsseda[1][:,:,:], dqssedb[1][:,:,:]
dqsdev_a, dqsdev_b = dqsdeva[1][:,:,:], dqsdevb[1][:,:,:]
# 4D Vars
temp_a, temp_b = tempa[1][:,1,:,:], tempb[1][:,1,:,:]
u_a, u_b = ua[1][:,1,:,:], ub[1][:,1,:,:]
v_a, v_b = va[1][:,1,:,:], vb[1][:,1,:,:]
dustq_a, dustq_b = dustqa[1][:,1,:,:], dustqb[1][:,1,:,:]
dustN_a, dustN_b = dustNa[1][:,1,:,:], dustNb[1][:,1,:,:]
rho_a, rho_b = rhoa[1][:,1,:,:], rhob[1][:,1,:,:]
rdust_a, rdust_b = rdusta[1][:,1,:,:], rdustb[1][:,1,:,:]
lw_htrt_a, lw_htrt_b = lw_htrta[1][:,1,:,:], lw_htrtb[1][:,1,:,:]
sw_htrt_a, sw_htrt_b = sw_htrta[1][:,1,:,:], sw_htrtb[1][:,1,:,:]
pres_a, pres_b = presa[1][:,1,:,:], presb[1][:,1,:,:]
# Longitudal averaging
# Variables without longitude
temp_aa, temp_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
tsurf_aa, tsurf_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
u_aa, u_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
dustq_aa, dustq_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
dustN_aa, dustN_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
rho_aa, rho_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
rdust_aa, rdust_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
lw_htrt_aa, lw_htrt_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
sw_htrt_aa, sw_htrt_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
pres_aa, pres_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
# 4D Vars
temp_aa, temp_bb = np.sum(tempa[1],axis=3)/tempa[1].shape[3], np.sum(tempb[1],axis=3)/tempb[1].shape[3]
u_aa, u_bb = np.sum(ua[1],axis=3)/ua[1].shape[3], np.sum(ub[1],axis=3)/ub[1].shape[3]
dustq_aa, dustq_bb = np.sum(dustqa[1],axis=3)/dustqa[1].shape[3], np.sum(dustqb[1],axis=3)/dustqb[1].shape[3]
dustN_aa, dustN_bb = np.sum(dustNa[1],axis=3)/dustNa[1].shape[3], np.sum(dustNb[1],axis=3)/dustNb[1].shape[3]
rho_aa, rho_bb = np.sum(rhoa[1],axis=3)/rhoa[1].shape[3], np.sum(rhob[1],axis=3)/rhob[1].shape[3]
rdust_aa, rdust_bb = np.sum(rdusta[1],axis=3)/rdusta[1].shape[3], np.sum(rdustb[1],axis=3)/rdustb[1].shape[3]
lw_htrt_aa, lw_htrt_bb = np.sum(lw_htrta[1],axis=3)/lw_htrta[1].shape[3], np.sum(lw_htrtb[1],axis=3)/lw_htrtb[1].shape[3]
sw_htrt_aa, sw_htrt_bb = np.sum(sw_htrta[1],axis=3)/sw_htrta[1].shape[3], np.sum(sw_htrtb[1],axis=3)/sw_htrtb[1].shape[3]
pres_aa, pres_bb = np.sum(presa[1],axis=3)/presa[1].shape[3], np.sum(presb[1],axis=3)/presb[1].shape[3]
# Calculate differences
dustq_diff = dustq_a - dustq_b
dustN_diff = dustN_a - dustN_b
temp_diff = temp_a - temp_b
tsurf_diff = tsurf_a - tsurf_b
ps_diff = ps_a - ps_b
rho_diff = rho_a - rho_b
u_diff = u_a - u_b
v_diff = v_a - v_b
rdust_diff = rdust_a - rdust_b
lw_htrt_diff = lw_htrt_a - lw_htrt_b
sw_htrt_diff = sw_htrt_a - sw_htrt_b
pres_diff = pres_a - pres_b
dqssed_diff = dqssed_a - dqssed_b
dqsdev_diff = dqsdev_a - dqsdev_b
fslw_diff = fslwa - fslwb
fssw_diff = fsswa - fsswb
ftlw_diff = ftlwa - ftlwb
ftsw_diff = ftswa - ftswb
t_d = temp_aa - temp_bb
pres_d = pres_aa - pres_bb
ts_d = tsurf_aa - tsurf_bb
dq_d = dustq_aa - dustq_bb
dN_d = dustN_aa - dustN_bb
rho_d = rho_aa - rho_bb
u_d = u_aa - u_bb
rdust_d = rdust_aa - rdust_bb
lw_htrt_d = lw_htrt_aa - lw_htrt_bb
sw_htrt_d = sw_htrt_aa - sw_htrt_bb
# Zonal averaging (time,lat)
temp_avg = np.sum(temp_a,axis=2)/temp_a.shape[2] - np.sum(temp_b,axis=2)/temp_b.shape[2]
tsurf_avg = np.sum(tsurf_a,axis=2)/tsurf_a.shape[2] - np.sum(tsurf_b,axis=2)/tsurf_b.shape[2]
ps_avg = np.sum(ps_a,axis=2)/ps_a.shape[2] - np.sum(ps_b,axis=2)/ps_b.shape[2]
pres_avg = np.sum(pres_a,axis=2)/pres_a.shape[2] - np.sum(pres_b,axis=2)/pres_b.shape[2]
u_avg = np.sum(u_a,axis=2)/u_a.shape[2] - np.sum(u_b,axis=2)/u_b.shape[2]
rho_avg = np.sum(rho_a,axis=2)/rho_a.shape[2] - np.sum(rho_b,axis=2)/rho_b.shape[2]
fssw_avg = np.sum(fsswa,axis=2)/fsswa.shape[2] - np.sum(fsswb,axis=2)/fsswb.shape[2]
fslw_avg = np.sum(fslwa,axis=2)/fslwa.shape[2] - np.sum(fslwb,axis=2)/fslwb.shape[2]
ftsw_avg = np.sum(ftswa,axis=2)/ftswa.shape[2] - np.sum(ftswb,axis=2)/ftswb.shape[2]
ftlw_avg = np.sum(ftlwa,axis=2)/ftlwa.shape[2] - np.sum(ftlwb,axis=2)/ftlwb.shape[2]
tau_a_avg = np.sum(tau_a,axis=2)/tau_a.shape[2]
tau_b_avg = np.sum(tau_b,axis=2)/tau_b.shape[2]
rdust_avg = np.sum(rdust_a,axis=2)/rdust_a.shape[2] - np.sum(rdust_b,axis=2)/rdust_b.shape[2]
lw_htrt_avg = np.sum(lw_htrt_a,axis=2)/lw_htrt_a.shape[2] - np.sum(lw_htrt_b,axis=2)/lw_htrt_b.shape[2]
sw_htrt_avg = np.sum(sw_htrt_a,axis=2)/sw_htrt_a.shape[2] - np.sum(sw_htrt_b,axis=2)/sw_htrt_b.shape[2]
temp_avg_ = np.sum(temp_b,axis=2)/temp_b.shape[2]
pres_avg_ = np.sum(pres_b,axis=2)/pres_b.shape[2]
tsurf_avg_ = np.sum(tsurf_b,axis=2)/tsurf_b.shape[2]
ps_avg_ = np.sum(ps_b,axis=2)/ps_b.shape[2]
u_avg_ = np.sum(u_b,axis=2)/u_b.shape[2]
rho_avg_ = np.sum(rho_b,axis=2)/rho_b.shape[2]
fssw_avg_ = np.sum(fsswb,axis=2)/fsswb.shape[2]
fslw_avg_ = np.sum(fslwb,axis=2)/fslwb.shape[2]
ftsw_avg_ = np.sum(ftswb,axis=2)/ftswb.shape[2]
ftlw_avg_ = np.sum(ftlwb,axis=2)/ftlwb.shape[2]
# from 35N to 55N Lat
#tmp_ = np.sum(np.sum(temp_avg_[:,7:11],axis=0)/n,axis=0)/4
#tmps_ = np.sum(np.sum(tsurf_avg_[:,7:11],axis=0)/n,axis=0)/4
#ps_ = np.sum(np.sum(ps_avg_[:,7:11],axis=0)/n,axis=0)/4
#pres_ = np.sum(np.sum(pres_avg_[:,7:11],axis=0)/n,axis=0)/4
#rho_ = np.sum(np.sum(rho_avg_[:,7:11],axis=0)/n,axis=0)/4
#u_ = np.sum(np.sum(np.absolute(u_avg_[:,7:11]),axis=0)/n,axis=0)/4
#fslw_ = np.sum(np.sum(fslw_avg_[:,7:11],axis=0)/n,axis=0)/4
#fssw_ = np.sum(np.sum(fssw_avg_[:,7:11],axis=0)/n,axis=0)/4
#ftlw_ = np.sum(np.sum(ftlw_avg_[:,7:11],axis=0)/n,axis=0)/4
#ftsw_ = np.sum(np.sum(ftsw_avg_[:,7:11],axis=0)/n,axis=0)/4
#tmp_1 = np.sum(np.sum(temp_avg[:,7:11],axis=0)/n,axis=0)/4
#tmps_1 = np.sum(np.sum(tsurf_avg[:,7:11],axis=0)/n,axis=0)/4
#ps_1 = np.sum(np.sum(ps_avg[:,7:11],axis=0)/n,axis=0)/4
#pres_1 = np.sum(np.sum(pres_avg[:,7:11],axis=0)/n,axis=0)/4
#rho_1 = np.sum(np.sum(rho_avg[:,7:11],axis=0)/n,axis=0)/4
#u_1 = np.sum(np.sum(u_avg[:,7:11],axis=0)/n,axis=0)/4
#fslw_1 = np.sum(np.sum(fslw_avg[:,7:11],axis=0)/n,axis=0)/4
#fssw_1 = np.sum(np.sum(fssw_avg[:,7:11],axis=0)/n,axis=0)/4
#ftlw_1 = np.sum(np.sum(ftlw_avg[:,7:11],axis=0)/n,axis=0)/4
#ftsw_1 = np.sum(np.sum(ftsw_avg[:,7:11],axis=0)/n,axis=0)/4
#print "AVERAGES: tmp: %.2f || surf tmp: %.2f || press: %.2f || surf press: %.2f || dens: %.2f || zon wind: #%.2f || fluxes (inLW: %.2f, outLW: %.2f, inSW: %.2f, outSW: %.2f). " % (tmp_, tmps_, pres_, ps_, rho_, u_, #fslw_, ftlw_, fssw_, ftsw_)
#print tmp_1/tmp_, tmps_1/tmps_, pres_1/pres_, ps_1/ps_, rho_1/rho_, u_1/u_, fslw_1/fslw_, fssw_1/fssw_, ftlw_1/ftlw_, ftsw_1/ftsw_
# Time moving-point average of zonal average
nn=2 # Number of points to average over
t_avg = Ls[:-(nn-1)]
temp_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
pres_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
tsurf_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ps_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
u_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
rho_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
fssw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
fslw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ftsw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ftlw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
rdust_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
lw_htrt_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
sw_htrt_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
for i in xrange(0,lat.shape[0]):
temp_avg_t[:,i] = moving_average(temp_avg[:,i],n=nn)
pres_avg_t[:,i] = moving_average(pres_avg[:,i],n=nn)
tsurf_avg_t[:,i] = moving_average(tsurf_avg[:,i],n=nn)
ps_avg_t[:,i] = moving_average(ps_avg[:,i],n=nn)
u_avg_t[:,i] = moving_average(u_avg[:,i],n=nn)
rho_avg_t[:,i] = moving_average(rho_avg[:,i],n=nn)
fssw_avg_t[:,i] = moving_average(fssw_avg[:,i],n=nn)
fslw_avg_t[:,i] = moving_average(fslw_avg[:,i],n=nn)
ftsw_avg_t[:,i] = moving_average(ftsw_avg[:,i],n=nn)
ftlw_avg_t[:,i] = moving_average(ftlw_avg[:,i],n=nn)
rdust_avg_t[:,i] = moving_average(rdust_avg[:,i],n=nn)
lw_htrt_avg_t[:,i] = moving_average(lw_htrt_avg[:,i],n=nn)
sw_htrt_avg_t[:,i] = moving_average(sw_htrt_avg[:,i],n=nn)
############ TIME AVERAGE of differences ###################
nnn=nn
t_av = Ls[:-(nnn-1)]
td_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
pres_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
tds_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
dqd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
dNd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
rhod_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
ud_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
rd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
lwhr_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
swhr_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
for j in xrange(0,lat.shape[0],1):
for i in xrange(0,sigma.shape[0],1):
td_avg[:,i,j] = moving_average(t_d[:,i,j],n=nnn)
pres_avg[:,i,j] = moving_average(pres_d[:,i,j],n=nnn)
tds_avg[:,i,j] = moving_average(ts_d[:,i,j],n=nnn)
dqd_avg[:,i,j] = moving_average(dq_d[:,i,j],n=nnn)
dNd_avg[:,i,j] = moving_average(dN_d[:,i,j],n=nnn)
rhod_avg[:,i,j] = moving_average(rho_d[:,i,j],n=nnn)
ud_avg[:,i,j] = moving_average(u_d[:,i,j],n=nnn)
rd_avg[:,i,j] = moving_average(rdust_d[:,i,j],n=nnn)
lwhr_avg[:,i,j] = moving_average(lw_htrt_d[:,i,j],n=nnn)
swhr_avg[:,i,j] = moving_average(sw_htrt_d[:,i,j],n=nnn)
# Save destination
fpath = "/home/physastro/aes442/results/Dustruns/m%i/" % (amth)
## Plot settings (MUST CHANGE FROM MONTH TO MONTH)
######################################################################################
# Which Ls do you want to focus on?
Ls_ee= 4.
Ls_e = 5.5
l_1 = np.where(Ls - Ls_ee > 0.001)[0][0]
l_2 = np.where(Ls - Ls_e > 0.001)[0][0]
Ls = Ls[l_1:l_2]
n = l_2 - l_1
## Dust storm insertion points (Ls - tstep_start - centre [lat,lon])
# m1 = 3.95 - 96 - [45, -135]
# m26 = 45.66 - 408 - [45, -90]
# m30 = 153.95 - 84 - [ 0, 0]
# m33 = 244.28 - 84 - [-2, -6]
# m34 = 273.52 - 60 - [-45, 90]
c = np.matrix('4. 45.') # Dust storm mid-points [Ls Lat]
#########################################################################################
######## TES dust files
# Zonal averaging
tau_d_z = d_d.sum(axis=2)/d_d.shape[2]
# Time averaging
nnnn=2
tau_d_avg=np.zeros((tau_d_z.shape[0]-(nnnn-1),tau_d_z.shape[1]))
for i in xrange(0,d_lat_s.shape[0]):
tau_d_avg[:,i] = moving_average(tau_d_z[:,i],nnnn)
# first and last sols
sol_a = int(np.round(669*(Ls_ee/360.)))
sol_s = int(np.round(669*(Ls_e/360.)))
tau_d_avg = tau_d_avg[sol_a:sol_s,:]
d_Ls_avg = np.linspace(Ls_ee,Ls_e,tau_d_avg.shape[0])
#########
## PLOTS
# Common settings (ticks)
t_t = np.linspace(Ls_ee,Ls_e,n)
t_tau = np.linspace(Ls_ee,Ls_e,n)
lat_t = np.linspace(90,-90,lat.shape[0])
lon_t = np.linspace(-180,180,lon.shape[0])
# Solar longitude
i_mj=0.2
i_mn=0.05
major_ticksx = np.arange(Ls_ee, Ls_e+i_mj, i_mj)
minor_ticksx = np.arange(Ls_ee, Ls_e, i_mn)
# Latitude
major_ticksy = np.arange(-90, 91, 30)
minor_ticksy = np.arange(-90, 91, 10)
## tau_ref, tau_ds, tau_tes PLOT
tau_ds = np.matrix.transpose(tau_a_avg)
tau_ds = tau_ds[:,l_1:l_2]
tau_ref = np.matrix.transpose(tau_b_avg)
tau_ref = tau_ref[:,l_1:l_2]
tau_TES = np.matrix.transpose(tau_d_avg)
f, axarr = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_tau
y = lat_t
xx = d_Ls_avg
yy = np.linspace(-90,90,d_lat_s.shape[0])
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Dust optical depth / SI'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, tau_ds, cmap='gist_rainbow_r', vmin=np.min((np.min(tau_ds),np.min(tau_ref),np.min(tau_TES))), vmax=np.max((np.max(tau_ds),np.max(tau_ref),np.max(tau_TES))))
axarr[0].axis('tight')
axarr[0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0].set_xticks(major_ticksx)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('(a) Dust storm run', fontsize=14)
axarr[0].tick_params(axis='both', labelsize=11, pad=10)
ax2 = axarr[1].pcolormesh(x, y, tau_ref, cmap='gist_rainbow_r', vmin=np.min((np.min(tau_ds),np.min(tau_ref),np.min(tau_TES))), vmax=np.max((np.max(tau_ds),np.max(tau_ref),np.max(tau_TES))))
axarr[1].set_title('(b) Reference run', fontsize=14)
# Colorbar creation and placement
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax = f.add_axes([0.85, 0.665, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax2 = f.add_axes([0.85, 0.38, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax2, cax=cbar_ax2, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax3 = f.add_axes([0.85, 0.095, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax3, cax=cbar_ax3, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
plt.savefig("%sCDOD_latvsLs_dsrunvsrefrun.png" % (fpath), bbox_inches='tight')
## TEMP/WIND/TOPG map
# DATA
day = 1
hr = 96 # this is actually the tstep (t=96 is storm start)
lvl = 0
# variable[day][hour, elevation, lat, lon]
ut = ua[day][hr,lvl,:,:] - ub[day][hr,lvl,:,:]
vt = va[day][hr,lvl,:,:] - vb[day][hr,lvl,:,:]
#data = tempa[day][hr,lvl,:,:] - tempb[day][hr,lvl,:,:]
data = tsurfa[day][hr,:,:] - tsurfb[day][hr,:,:]
data2= presa[day][hr,:,:] - presb[day][hr,:,:]
# Longitude
major_ticksx = np.arange(np.floor(lon_t[0]), np.ceil(lon_t[-1]), 30)
minor_ticksx = np.arange(np.floor(lon_t[0]), np.ceil(lon_t[-1]), 10)
# Latitude
major_ticksy = np.arange(np.floor(lat_t[-1]), np.ceil(lat_t[0]), 30)
minor_ticksy = np.arange(np.floor(lat_t[-1]), np.ceil(lat_t[0]), 10)
## PLOT temperature/winds/topography
f, axarr = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10,10), dpi=100)
x = lon_t
y = lat_t
xlabel = 'Longitude / degrees'
ylabel = 'Latitude / degrees'
cblabel= 'Temperature difference / K'
plt.xlabel(xlabel, fontsize=14, labelpad=10)
plt.ylabel(ylabel, fontsize=14, labelpad=10)
# Main plot
ax = axarr.pcolormesh(x, y, data, cmap='RdBu_r', norm=MidPointNorm(midpoint=0.))
# Secondary plot
ax2 = axarr.quiver(x, y, ut, vt, scale=2**2, units='y', width=0.1)
aq = axarr.quiverkey(ax2, 0.815, 0.9, 1, r'$1 \frac{m}{s}$', labelpos='E', coordinates='figure')
# Topography
lvls = [-5,0,5,10,15]
ax3 = axarr.contour(mola[1], mola[0], mola[2], lvls, colors='k')
# Ticks
axarr.set_xticks(major_ticksx)
axarr.set_xticks(minor_ticksx, minor=True)
axarr.set_yticks(major_ticksy)
axarr.set_yticks(minor_ticksy, minor=True)
axarr.tick_params(axis='both', labelsize=12, pad=10)
axarr.axis('tight')
# Colour bar
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_place, v_place, h_size, v_size]
cb = f.colorbar(ax, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cblabel), fontsize=16) # colorbar label
plt.axis('tight')
plt.savefig("%stemp_uvwind_mola_latvslon.png" % (fpath), bbox_inches='tight')
plt.close('all')
## Temperature PLOT
temp_t = tsurf_avg_t.T
temp_t = temp_t[:,l_1:l_2]
fig = plt.figure(figsize=(10,10), dpi=100)
ax = fig.add_subplot(1,1,1)
plt.pcolormesh(t_t,lat_t,temp_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
plt.xlabel('Solar longitude / degrees', fontsize=14, labelpad=10)
plt.ylabel('Latitude / degrees', fontsize=14, labelpad=10)
# Extra Markers
ax.plot(c[0,0],c[0,1],'o',color='y',markersize=10)
# Ticks
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
ax.tick_params(axis='both', labelsize=12, pad=10)
# Colour bar
cb = plt.colorbar(format='%.2f', extend='both')
cb.set_label('Temperature difference / K')
tick_locator = ticker.MaxNLocator(nbins=16)
cb.locator = tick_locator
plt.axis('tight')
plt.savefig("%sSurfTempDiff_LatvsTime_FY_uavg_tavg.png" % (fpath), bbox_inches='tight')
## Surface pressure and Atmospheric density at surface PLOT
ps_t = np.matrix.transpose(pres_avg_t)
ps_t = ps_t[:,l_1:l_2]
rho_t = np.matrix.transpose(rho_avg_t)
rho_t = rho_t[:,l_1:l_2]
f, axarr = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_t
y = lat_t
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Atmospheric pressure difference / Pa'
cb_label2 = 'Atmospheric density difference / kg / $m^3$'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, ps_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0].axis('tight')
axarr[0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0].set_xticks(major_ticksx)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('(a)', fontsize=18)
axarr[0].tick_params(axis='both', labelsize=14)
ax2 = axarr[1].pcolormesh(x, y, rho_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1].set_title('(b)', fontsize=18)
# Colorbar creation and placement
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.54, 0.04, 0.36]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=14) # colorbar label
cbar_ax2 = f.add_axes([0.85, 0.1, 0.04, 0.36]) # [h_placement, v_placement, h_size, v_size]
cb2 = f.colorbar(ax2, cax=cbar_ax2, format='%.1e', extend='both') # double-edged colorbar
cb2.set_label('%s' % (cb_label2), fontsize=14) # colorbar label
plt.savefig("%sPresDensDiff_LatvsLs_zonavg_tavg.png" % (fpath), bbox_inches='tight')
# Zonal wind PLOT
u_t = np.matrix.transpose(u_avg_t)
u_t = u_t[:,l_1:l_2]
fig = plt.figure( figsize=(10,10), dpi=100)
ax = fig.add_subplot(1,1,1)
plt.pcolormesh(t_t,lat_t,u_t,norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
plt.xlabel('Solar longitude / degrees',fontsize=16)
plt.ylabel('Latitude / degrees',fontsize=16)
ax.plot(c[0,0],c[0,1],'o',color='y',markersize=10)
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
cb = plt.colorbar(format='%.1f', extend='both')
cb.set_label('Zonal wind velocity difference / m / s')
tick_locator = ticker.MaxNLocator(nbins=7)
cb.locator = tick_locator
cb.update_ticks()
plt.axis('tight')
plt.savefig("%sZonalWindDiff_LatvsTime_FY_uavg_tavg.png" % (fpath), bbox_inches='tight')
# ALL FLUXES on one plot
fslw_t = np.matrix.transpose(fslw_avg_t[l_1:l_2,:]) # Incoming (surf) long wave (IR) radiation
ftlw_t = np.matrix.transpose(ftlw_avg_t[l_1:l_2,:]) # Outgoing (top) long wave (IR) radiation
fssw_t = np.matrix.transpose(fssw_avg_t[l_1:l_2,:]) # Incoming (surf) short wave (VL) radiation
ftsw_t = np.matrix.transpose(ftsw_avg_t[l_1:l_2,:]) # Outgoing (top) short wave (VL) radiation
f, axarr = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_t
y = lat_t
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Radiative flux difference / W / $m^2$'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0,0].pcolormesh(x, y, fslw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0,0].axis('tight')
axarr[0,0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0,0].set_xticks(major_ticksx)
axarr[0,0].set_xticks(minor_ticksx, minor=True)
axarr[0,0].set_yticks(major_ticksy)
axarr[0,0].set_yticks(minor_ticksy, minor=True)
axarr[0,0].set_title('Incident flux at surface (LW) (a)', fontsize=10)
axarr[0,0].tick_params(axis='both', labelsize=10)
dv1 = make_axes_locatable(axarr[0,0])
cax1 = dv1.append_axes("right",size="5%",pad=0.05)
cb = f.colorbar(ax1,cax=cax1, format='%.1f', extend='both')
cb.set_label('%s' % (cb_label), fontsize=10)
ax2 = axarr[0,1].pcolormesh(x, y, ftlw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0,1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0,1].set_title('Outgoing flux at top (LW) (b)', fontsize=10)
axarr[0,1].tick_params(axis='both', labelsize=10)
dv2 = make_axes_locatable(axarr[0,1])
cax2 = dv2.append_axes("right",size="5%",pad=0.05)
cb2 = f.colorbar(ax2,cax=cax2, format='%.1f', extend='both')
cb2.set_label('%s' % (cb_label), fontsize=10)
ax3 = axarr[1,0].pcolormesh(x, y, fssw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1,0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1,0].set_title('Incident flux at surface (SW) (c)', fontsize=10)
axarr[1,0].tick_params(axis='both', labelsize=10)
dv3 = make_axes_locatable(axarr[1,0])
cax3 = dv3.append_axes("right",size="5%",pad=0.05)
cb3 = f.colorbar(ax3,cax=cax3, format='%.1f', extend='both')
cb3.set_label('%s' % (cb_label), fontsize=10)
ax4 = axarr[1,1].pcolormesh(x, y, ftsw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1,1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1,1].set_title('Outgoing flux at top (SW) (d)', fontsize=10)
axarr[1,1].tick_params(axis='both', labelsize=10)
dv4 = make_axes_locatable(axarr[1,1])
cax4 = dv4.append_axes("right",size="5%",pad=0.05)
cb4 = f.colorbar(ax4,cax=cax4, format='%.1f', extend='both')
cb4.set_label('%s' % (cb_label), fontsize=10)
# Colorbar creation and placement
#f.subplots_adjust(right=0.8)
#cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax3, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=14) # colorbar label
plt.savefig("%sfluxes_latvsLs_zonavg_tavg.png" % (fpath), bbox_inches='tight')
### Short-term Temperature and Heating rates at exact location vs Altitude (put in particle size or mmr)
# lat = 87.49999, 82.49999, 77.5, 72.5, 67.5, 62.5, 57.5, 52.5, 47.5, 42.5,
# 37.5, 32.5, 27.5, 22.5, 17.5, 12.5, 7.500001, 2.500001, -2.500001,
# -7.500003, -12.5, -17.5, -22.5, -27.5, -32.5, -37.5, -42.5, -47.5, -52.5,
# -57.5, -62.5, -67.5, -72.5, -77.5, -82.49999, -87.49999 ;
# lon = -180, -175, -170, -165, -160, -155, -150, -145, -140, -135, -130,
# -125, -120, -115, -110, -105, -100, -95, -90, -84.99999, -80, -75, -70,
# -65, -60, -55, -50, -45, -40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10,
# 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 84.99999, 90, 95,
# 100, 105, 110, 115, 120, 125, 130, 135, 140, 145, 150, 155, 160, 165,
# 170, 175 ;
## Dust storm insertion points (Ls - tstep_start - [lat,lon])
# m26 = 45.66 - 408 - [45, -90]
# m30 = 153.95 - 84 - [ 0, 0]
# m33 = 244.28 - 84 - [-2, -6]
# m34 = 273.52 - 60 - [-45, 90]
### Plot explaination
# Storm starts at tstep=96, which is midnight of sol 8 relative to (0,0). However the storm is at 135W (midpoint).
# So 360/24 = 15deg for each hour, meaning local time at 135W is 135/15=9hrs behind (0,0) local time, so at dust storm time insertion it is 15:00 locally.
# We want the plot to start 1 day before the storm, which will be at tstep=84, since each tstep accounts for 2 hours.
# From tstep=84 we push forward 10 hours for approximate midnight plot start
### In reality the plot starts at 01:00 the night before the storm, local time 135W.
f, axarr = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
tl1, tl2 =89, 125
al=15
latl, lonl=6, 9
d1 = tempa[1][tl1:tl2,:al,latl,lonl]
d2 = tempb[1][tl1:tl2,:al,latl,lonl]
d3 = (88800/24.)*(sw_htrta[1][tl1:tl2,:al,latl,lonl] + lw_htrta[1][tl1:tl2,:al,latl,lonl]) # NET heat rate (SW cooling, LW heating) changed from K/s to K/hr
d4 = rdusta[1][tl1:tl2,:al,latl,lonl]
d5 = dustqa[1][tl1:tl2,:al,latl,lonl]
data = np.matrix.transpose(d2)
data2 = np.matrix.transpose(d1)
data3 = np.matrix.transpose(d3)
data4 = np.matrix.transpose(d4)
data5 = np.matrix.transpose(d5)
y = alt[:al]
x = np.linspace(0,72,data.shape[1])
xlabel = 'Local time / hr'
ylabel = 'Altitude above surface / km'
cb_label = 'Temperature / K'
cb_label2 = 'Net heating rate / K / hr'
major_ticksx = np.arange(0,np.max(x)+1,6)
minor_ticksx = np.arange(0,np.max(x),2)
major_ticksy = np.arange(0,np.max(y)+1,10)
minor_ticksy = np.arange(0,np.max(y),2)
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=16, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=16, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, data, cmap='jet')
axarr[0].axis('tight')
axarr[0].set_xticks(major_ticksx)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('Reference run (a)', fontsize=10)
axarr[0].tick_params(axis='both', labelsize=14)
ax2 = axarr[1].pcolormesh(x, y, data2, cmap='jet')
axarr[1].set_title('Dust storm run (b)', fontsize=10)
axarr[1].tick_params(axis='both', labelsize=14)
axarr[1].add_patch(mpl.patches.Rectangle((14, 0), 24, 9, facecolor="none", linestyle='dashed'))
ax3 = axarr[2].pcolormesh(x, y, data3, cmap='RdBu_r', vmax=10, vmin=-10)
axarr[2].set_title('Dust storm run (c)', fontsize=10)
axarr[2].tick_params(axis='both', labelsize=14)
axarr[2].add_patch(mpl.patches.Rectangle((14, 0), 24, 9, facecolor="none", linestyle='dashed'))
lvl = np.array([10**-6,10**-5,1*10**-4,10**-3]) # Contour levels
ax = axarr[2].contour(x,y,data5,lvl,colors='k',linewidth=3,locator=ticker.LogLocator())
plt.clabel(ax, fontsize=9, inline=1,fmt='%2.0e')
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.38, 0.02, 0.52]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.0f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
f.subplots_adjust(right=0.8)
cbar_ax2 = f.add_axes([0.85, 0.095, 0.02, 0.235]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax3, cax=cbar_ax2, format='%.0f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label2), fontsize=16) # colorbar label
#locs,labels = py.xticks()
#py.xticks(locs, map(lambda x: "%02d" % x, locs*1e9))
plt.savefig("%sheating.png" % (fpath), bbox_inches='tight')
plt.close('all')
### Time series plots
# settings
s_l = [-2.05, -6.12, 242.7] # landing site marking on plot (actually for 244.7, Ls is messed up)
ticky_latlon = [60,10,30,10] # tick settings [xmajor,xminor,ymajor,yminor] ticks
ticky_latalt = [60,10,20,10]
int_Ls = int(np.ceil(Ls.shape[0]/(12*Months)))
# Dust particle size contours
rd_ds1 = {}
rd_ds1[0] = alt[:alt_half]
rd_ds1[1] = lat_t
rd_ds1[2] = rd_avg[:,:alt_half,:]
# dust mmr average difference contours
dqd_ds = {}
dqd_ds[0] = alt[:alt_half]
dqd_ds[1] = lat_t
dqd_ds[2] = dqd_avg[:,:alt_half,:]
wind = {}
wind[0] = u_diff
wind[1] = v_diff
## Dust storm 1 Time series dustq (mmr) (time, lat, lon)
plt_timeseries(dustq_diff[l_1:,:,:], lon_t, lat_t, Ls_m[1][l_1:], 4,4, ticky_latlon, 'Longitude / degrees', 'Latitude / degrees', 'Ls: ', 'Dust MMR difference / kg / kg', 3, '%sDustqdiff_latlon_tseries_ds1.png' % (fpath), mola)
# Altitude/longitude cross-sections through a fixed latitude index for dust
# storm 1 (ds1): temperature and dust-MMR differences (storm run - reference).
alt_t = alt # Height of 20.9km
latll = 26  # latitude index of the slice -- TODO confirm which latitude this maps to
dustq_diff_altlon = dustqa[1][l_1:,:,latll,:] - dustqb[1][l_1:,:,latll,:]
temp_diff_altlon = tempa[1][l_1:,:,latll,:] - tempb[1][l_1:,:,latll,:]
plt_timeseries(temp_diff_altlon, lon_t, alt_t, Ls, 4,4, ticky_latalt, 'Longitude / degrees', 'Altitude / km', 'Ls: ', 'Temperature difference / K', int_Ls, '%stemp_altlon_tseries_ds1.png' % (fpath))
# Fix: removed a stray bare `a` expression statement that sat between the two
# plot calls -- a no-op if `a` happened to be defined earlier, a NameError
# otherwise; either way it was clearly accidental.
plt_timeseries(dustq_diff_altlon, lon_t, alt_t, Ls_m[1][l_1:], 4, 4, ticky_latalt, 'Longitude / degrees', 'Altitude / km', 'Ls: ', 'Dust MMR difference / kg / kg', 3, '%sdustq_altlon_tseries_ds1.png' % (fpath))
plt.close('all')
## IMPACT CALCULATIONS
## Dust storm insertion points (Ls - tstep_start - [lat,lon])
### DS1 m1 = 3.95 - (96-120, 2 sol) - [45, -135] (ORIGINAL DS)
llat1, llat2 = 22.5, 67.5
llon1, llon2 = -155., -115.
lalt1, lalt2 = 0., 8.
ts1, ts2 = 120, 132
### DS2 m26 = 45.66 - 408 - [45, -90]
#llat1, llat2 = 22.5, 67.5
#llon1, llon2 = -110., -70.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 420, 432
### DS3 m30 = 153.95 - 84 - [ 0, 0]
#llat1, llat2 = -22.5, 22.5
#llon1, llon2 = -20., 20.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 96, 108
### DS4 m33 = 244.28 - 84 - [-2, -6] (EXOMARS)
#llat1, llat2 = -22.5, 22.5
#llon1, llon2 = -20., 20.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 96, 108
### DS5 m34 = 273.52 - 60 - [-45, 90]
#llat1, llat2 = -67.5, -22.5
#llon1, llon2 = 70., 110.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 72, 84
# Convert the storm-box edges (degrees latitude/longitude, km altitude) into
# integer index bounds for slicing the model arrays.
# NOTE(review): lat appears to be ordered north->south (see the lat listing
# above), hence llat2 (north edge) producing the *lower* index -- confirm.
lat_1, lat_2 = np.where(lat - llat2 >= 0.001)[0][-1]+1, np.where(lat - llat1 >= 0.001)[0][-1]+1
lon_1, lon_2 = np.where(lon - llon1 >= 0.001)[0][0]-1, np.where(lon - llon2 >= 0.001)[0][0]-1
# Fix: alt_1 was the float literal 0., but it is used below as a slice bound
# (data_a[j, alt_1:alt_2, ...]); NumPy requires integer indices, so use 0.
alt_1, alt_2 = 0, np.where(alt - lalt2 >= 0.001)[0][0]
# Impact of the dust-storm run ("a" variables) relative to the reference run
# ("b" variables), per model field, over the storm window [ts1, ts2] and the
# lat/lon/alt box indexed above.
# NOTE(review): dustqa, tempa, ... are assumed to be per-output-file
# collections of arrays indexed [time, (alt,) lat, lon], defined earlier in
# the file -- confirm upstream.
# NOTE(review): the loop variable `n` clobbers the earlier `n = l_2 - l_1`;
# nothing in the visible code below relies on the old value, but beware.
re_err, avg_t = {}, {}
re, avg = {}, {}
day = 1
var_da = [dustqa[1], dustNa[1], tempa[1], tsurfa[1], presa[1], psa[1], ua[1], va[1], rhoa[1], fluxsurflwa[1], fluxsurfswa[1], fluxtoplwa[1], fluxtopswa[1]]
var_db = [dustqb[1], dustNb[1], tempb[1], tsurfb[1], presb[1], psb[1], ub[1], vb[1], rhob[1], fluxsurflwb[1], fluxsurfswb[1], fluxtoplwb[1], fluxtopswb[1]]
# One row per variable, one column per timestep in the window.
re[day] = np.zeros([len(var_da), (ts2-ts1)+1])
avg[day] = np.zeros([len(var_da), (ts2-ts1)+1])
re_err[day] = np.zeros(len(var_da))
avg_t[day] = np.zeros(len(var_da))
for n in xrange(0, len(var_da)):
    data_a = var_da[n]
    data_b = var_db[n]
    # 4-D fields carry an altitude axis; 3-D fields are surface-only.
    if len(data_a.shape)==4:
        m=0
        for j in xrange(ts1, ts2+1):
            # Storm-minus-reference difference and reference values in the box.
            aa = data_a[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten() - data_b[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten()
            a_ref = data_b[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten()
            avg[day][n,m] = sum(a_ref)/a_ref.shape[0]
            # Relative error: L2 norm of the difference over L2 norm of the reference.
            re[day][n,m] = np.linalg.norm(aa) / np.linalg.norm(a_ref)
            m=m+1
    else:
        m=0
        for j in xrange(ts1, ts2+1):
            aa = data_a[j,lat_1:lat_2,lon_1:lon_2].flatten() - data_b[j,lat_1:lat_2,lon_1:lon_2].flatten()
            a_ref = data_b[j,lat_1:lat_2,lon_1:lon_2].flatten()
            avg[day][n,m] = sum(a_ref)/a_ref.shape[0]
            re[day][n,m] = np.linalg.norm(aa) / np.linalg.norm(a_ref)
            m=m+1
    # 0/0 steps (zero reference norm with zero difference) produce NaN; zero them.
    re[day][(np.isnan(re[day])==True)] = 0.
    # Time-mean of the per-step relative errors and box averages.
    re_err[day][n] = sum(re[day][n,:]) / re[day][n,:].shape[0]
    avg_t[day][n] = sum(avg[day][n,:]) / avg[day][n,:].shape[0]
np.savetxt("%srelative_errors_t.txt" % (fpath), re[1], fmt='%.2e')
np.savetxt("%srelative_errors.txt" % (fpath), re_err[1], fmt='%.2e')
np.savetxt("%saverages.txt" % (fpath), avg[1], fmt='%.2e')
np.savetxt("%saverages_t.txt" % (fpath), avg_t[1], fmt='%.2e')
| [
"adam.el-said@open.ac.uk"
] | adam.el-said@open.ac.uk |
4ceac2bc4f9946d9f2573cc41ecd8880bc8d7375 | d200aee203cb0d384213747d5fd3934c80c9728a | /Python/First Exercise.py | 5632cb72b27aebec64b0d76abd7f701832a9d8de | [] | no_license | Dmiller2599/BFOR206 | 1ab1b43e38423926080f15f2b7d50c44612906f3 | a8cae7cc8fe60f63175e3abc6ed4b4f7a68ac247 | refs/heads/main | 2023-03-20T19:17:18.063724 | 2021-03-21T03:29:53 | 2021-03-21T03:29:53 | 335,765,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a block comment.
I can type whatever I desire here.
This script will demonstarte basic variables,
if statements, and for loops with Python.
"""
# this is a normal comment.
"""
The comment below defines a chunck that spyder can
use to separate parts of code into small blocks.
This makes it easy to run just a small part of your code.
The syntax is
#%%
If you want to name a chunk
#%% chunk name
"""
#%% define vars
# To run a single line press, f9
my_str = "this is a string"
print(my_str)
my_num = 123.456789
my_int = 123
print(my_num, my_int)
# to run entire chunk
# Ctrl + Enter (Cmd + Enter on Mac)
# to run the entire chunk and go to the next chunk
# Shift + Enter
#%% if statements
a = 0
b = 1
print("the value of a is:", a)
if a > b:
# Everything indented is part of the if statement
print("a is greater than b. Wow!")
elif a < b:
print("a is less than b. Weak!")
else:
print("a and b are the same, eh?")
print("Done with if statements.")
#%% for loops
for i in range(10):
print("the number i is", i)
#%% nested statements
for i in range(5, 10):
print("i is ", i)
# indents are important!
for j in range(3):
print("j is ", j)
print("done with nested loops")
#%% lab
"""
Fix this code below to complete the lab
"""
my_list = ['Hello', 'BFOR', 206, None, 'Bye!']
for item in my_list:
if item is None:
print("Found item with value of None!")
else:
print("The item is", item)
| [
"david.miller2599@gmail.com"
] | david.miller2599@gmail.com |
99aaac368b5a91c9ccdea2dd36758013b517c21f | 03db4adc692a989a0dbc90d1175bdabaaa5341b3 | /Lab-9-hhu14.py | 2af4c479900e134d179c8572e04c55d197cc93ab | [] | no_license | MeloHu/CS128 | e70d18668238533cd0e8b42588e5d7b0e88c8766 | e2595aea14f150efce25816b1215f35ef319b751 | refs/heads/master | 2021-05-05T10:06:35.993342 | 2017-09-19T04:24:44 | 2017-09-19T04:24:44 | 104,021,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py |
# coding: utf-8
# In[ ]:
## Honglie Hu,hhu14@earlham.edu,Saturday&8:00 at 66.5486,-18.0233,Lab9-M&M
# In[1]:
## Part A
def main():
    """Part A: read whitespace-separated numbers from a user-named file and
    print count, total, smallest, largest, mode and average."""
    filename = input("file_name:")
    # 'with' guarantees the handle is closed even if a token fails to parse.
    with open(filename, "r") as infile:
        tokens = infile.read().split()
    count = len(tokens)
    print("Count:", count)
    if count == 0:
        # Guard: every statistic below is undefined for an empty file
        # (min()/max() would raise, and the average would divide by zero).
        print("No numbers found in file:", filename)
        return
    numbers = [float(tok) for tok in tokens]
    ## to get "mode" -- Counter.most_common breaks ties by first occurrence,
    ## matching the original first-value-with-max-count behaviour, in O(n)
    ## rather than the original O(n^2) count-per-element scan.
    from collections import Counter
    mode = Counter(numbers).most_common(1)[0][0]
    total = sum(numbers)
    print("Total:", total)
    print("Smallest:", min(numbers))
    print("Largest:", max(numbers))
    print("Mode:", mode)
    print("Average:", total/count)
main()
# In[2]:
## Part B
def main():
    """Part B: read whitespace-separated numbers from a user-named file and
    print count, total, smallest, largest, mode, median and average."""
    filename = input("file_name:")
    # 'with' guarantees the handle is closed even if a token fails to parse.
    with open(filename, "r") as infile:
        tokens = infile.read().split()
    count = len(tokens)
    print("Count:", count)
    if count == 0:
        # Guard: every statistic below is undefined for an empty file.
        print("No numbers found in file:", filename)
        return
    numbers = [float(tok) for tok in tokens]
    ## to get "mode" -- ties resolve to the first value encountered, same as
    ## the original index-of-max scan, but in O(n) instead of O(n^2).
    from collections import Counter
    mode = Counter(numbers).most_common(1)[0][0]
    total = sum(numbers)
    print("Total:", total)
    print("Smallest:", min(numbers))
    print("Largest:", max(numbers))
    print("Mode:", mode)
    ## to get "median" -- fix: the original always took element count//2,
    ## which is wrong for an even count; the conventional median averages
    ## the two middle values of the sorted data.
    numbers.sort()
    mid = count // 2
    if count % 2:
        median = numbers[mid]
    else:
        median = (numbers[mid - 1] + numbers[mid]) / 2
    print("Median:", median)
    print("Average:", total/count)
main()
# In[ ]:
| [
"noreply@github.com"
] | MeloHu.noreply@github.com |
791732e7600f99bd887cc9a3ffddbe99e5bf9c14 | fe7feafddd2b03814046039411afbaafd1cdcd43 | /b7/b7.py | 7b38e13986d44c3e30b1ada5028f64862afafad0 | [] | no_license | vamsikrishna2421/Scripting-Languages | d87e668eb8d0d9002cd32bed301a7795d4477908 | 89fc94ff6d0e77a4e75bbee3a127ac0b3e16cb51 | refs/heads/master | 2020-06-25T20:14:41.082900 | 2017-12-26T21:09:37 | 2017-12-26T21:09:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import pandas as pd
# NOTE(review): Python 2 script (print statements, raw_input). Loads a CSV
# (the column names suggest the Titanic dataset -- confirm), prints summary
# information, drops a few columns, fills missing Cabin values, and plots Age.
# `pd` is imported on the first line of the file, above.
import matplotlib.pyplot as plt
# Ask the user which CSV file to load.
fname=raw_input("Enter filename: ")
df=pd.read_csv(fname)
print "Headers of the Dataset are :-"
#print df.columns.values
# head(0) yields an empty frame, which displays just the column headers.
print df.head(0)
print "**Dataset Description**"
# df.info() prints directly and returns None, so this line also prints "None".
print df.info()
print df.describe()
# Remove columns not needed for this analysis.
df=df.drop(["Pclass","PassengerId","Parch","Name"],axis=1)
print "\n\nAfter Dropping Unwanted Columns:-\n",df.head(0)
print "\nWith Empty Column Values:-\n",df
# Replace missing cabin entries with the placeholder "CC55".
df['Cabin']=df["Cabin"].fillna("CC55")
print "\n\nWithout Empty Column Values:-\n",df
print "\n\nNumber of Entries: ",len(df)
print "\nNumber of Columns: ",len(df.columns)
print "\n\nAttributes and Their Datatypes:-\n"
df.info()
print "\nMinimum Age: ",df['Age'].min()
print "\nMaximum Age: ",df['Age'].max()
print "\nMean Age: ",round(df['Age'].mean(),2)
# Histogram of passenger ages.
gp=df['Age'].plot.hist()
gp.set_ylabel("Number of People")
gp.set_xlabel("Age")
gp.set_title("Age vs No. of People")
plt.show()
# Line plot of Age against row index (axis labels are swapped relative to
# what is actually plotted -- kept as in the original).
dgp=df.Age.plot()
dgp.set_ylabel("Age")
dgp.set_xlabel("Number of People")
dgp.set_title("Age vs No. of People")
plt.show()
| [
"noreply@github.com"
] | vamsikrishna2421.noreply@github.com |
26edd3c2bede88520759eec09c94b180a897f414 | 58579a68a94967abb1b8d74cab06f687f4a79a7b | /PythonStyle/code_sample_test.py | fee48cead8af0cc166df1e5fd80a4ff6024f53bc | [] | no_license | imdsoho/python | 3c65cb9b19c2b2299d5bca67ebcde7d7f2f8bec2 | eed1ef3e835f2ab77e4e197a689a9595bd93243e | refs/heads/master | 2020-04-16T06:38:49.089727 | 2020-02-05T09:11:44 | 2020-02-05T09:11:44 | 165,355,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | class cust_slice(object):
"""
slice(stop)
slice(start, stop[, step])
Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).
"""
    # Auto-generated stub copied from the builtin slice type. The docstring
    # promises a (start, stop, stride) tuple, but the body is `pass`, so a
    # call actually returns None. The `len` parameter shadows the builtin
    # len() (kept to mirror the extracted signature).
    def indices(self, len): # real signature unknown; restored from __doc__
        """
        S.indices(len) -> (start, stop, stride)
        Assuming a sequence of length len, calculate the start and stop
        indices, and the stride length of the extended slice described by
        S. Out of bounds indices are clipped in a manner consistent with the
        handling of normal slices.
        """
        pass
    # Auto-generated placeholder stubs: each body is just `pass`, so every
    # comparison returns None rather than a bool, and __getattribute__ would
    # make every attribute lookup return None if an instance ever existed
    # (see __new__ below, which returns None so none ever does).
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    # Trace-print constructor. In practice it never runs: __new__ below
    # returns None (not a cust_slice instance), and Python only invokes
    # __init__ when __new__ returns an instance of the class.
    def __init__(self, stop): # real signature unknown; restored from __doc__
        print("__init__")
        pass
    # More auto-generated comparison stubs; bodies are `pass` (return None).
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    # Trace version of object construction: prints the positional arguments
    # (the class itself plus the constructor args) and the keyword arguments.
    # The body falls off the end without `return`, so it yields None -- no
    # instance is ever created, and __init__ above is therefore skipped.
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        print("__new__")
        print(args)
        print(kwargs)
        pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
start = property(lambda self: 0)
""":type: int"""
step = property(lambda self: 0)
""":type: int"""
stop = property(lambda self: 0)
""":type: int"""
__hash__ = None
sl = cust_slice(1)
print(sl) | [
"imdsoho@gmail.com"
] | imdsoho@gmail.com |
1e65771ae50c232198a15764de8fa56266b68719 | 646c30e512e7ead3de33974e856f1a0ef242fec8 | /Educational Codeforces Round 73/C. Perfect Team.py | b972a3b41cc05690f80a2878cd9c667420188aad | [] | no_license | Hybrid-Skills/Codeforces | 2194f6d78186b8f6af5389ae6eccdd45c724ee23 | 68f281ba3d14ee039aa89238f3545bf06b90bc74 | refs/heads/master | 2020-06-03T02:38:24.502731 | 2019-11-16T16:30:25 | 2019-11-16T16:30:25 | 191,399,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | for _ in range(int(input())):
c, m, x = map(int, input().split())
maxi = (c + m + x)//3
if c >= m:
if m <= maxi:
print(m)
elif m > maxi:
print(maxi)
else:
if c <= maxi:
print(c)
else:
print(maxi) | [
"ankurgoyal616@gmail.com"
] | ankurgoyal616@gmail.com |
9cff0f2a316ca7bb8e9daefe0127a1a8ef5609ea | 90177443dddd57dc7a8ad2cfb0758b0abb2a10f2 | /currency_converter.py | d786177fbbec6974fdfcfcaa260732194b7bc02b | [] | no_license | Tengtiantian/data-analytics | 18181d6d17d4077f503f505500f50b1fdb6efe44 | ac63bde80a4355c2d0911f60fd8f55683ae026bf | refs/heads/main | 2023-07-10T15:53:32.334067 | 2021-08-17T09:19:25 | 2021-08-17T09:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | """
Author: Liang Bin
Purpose: currency (exchange-rate) conversion
Version: 1.0
Date: 01/08/2017
"""
# Fixed exchange rate: RMB per 1 USD.
USD_VS_RMB = 6.77
# Read the RMB amount from the user (prompt is intentionally in Chinese:
# "Please enter the RMB (CNY) amount:").
rmb_str_value = input('请输入人民币(CNY)金额:')
# Convert the string to a number.
# NOTE(review): eval() on raw user input executes arbitrary expressions —
# float(rmb_str_value) would be the safe equivalent; flagging, not changing.
rmb_value = eval(rmb_str_value)
# Convert RMB to USD at the fixed rate.
usd_value = rmb_value / USD_VS_RMB
# Print the result ("The USD amount is:").
print('美元(USD)金额是:', usd_value)
| [
"wangsiyuan_id@126.com"
] | wangsiyuan_id@126.com |
eeb85c0763b4b58838c030ceccd1de9ec42a82e6 | 5cea11c9373d997430b523227ce81b61972ad1e3 | /tests/test_client_events.py | bd3bc8ac4bf3a96cd62673408ee09427626646ff | [
"BSD-3-Clause"
] | permissive | tinylambda/grpclib | fcc0d4f5723fe36359ceb9655764e9a37c87ebc1 | 948e32a29a4ad82ebbfdbb681f7a797f6233bff3 | refs/heads/master | 2023-07-15T16:19:59.776603 | 2021-08-25T19:56:10 | 2021-08-25T19:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | import pytest
from multidict import MultiDict
from google.rpc.error_details_pb2 import ResourceInfo
from grpclib.const import Status
from grpclib.events import listen, SendRequest, SendMessage, RecvMessage
from grpclib.events import RecvInitialMetadata, RecvTrailingMetadata
from grpclib.testing import ChannelFor
from grpclib._compat import nullcontext
from grpclib.exceptions import GRPCError
from dummy_pb2 import DummyRequest, DummyReply
from dummy_grpc import DummyServiceStub, DummyServiceBase
class DummyService(DummyServiceBase):
    """In-process gRPC service used as a fixture by the event tests.

    Only UnaryUnary is implemented; the streaming handlers raise
    UNIMPLEMENTED.  When ``fail`` is True, UnaryUnary still replies but
    finishes the RPC with a NOT_FOUND status so error/trailing-metadata
    events can be observed on the client side.
    """
    def __init__(self, fail=False):
        # When True, UnaryUnary ends the call with an error status.
        self.fail = fail
    async def UnaryUnary(self, stream):
        await stream.recv_message()
        await stream.send_initial_metadata(metadata={'initial': 'true'})
        await stream.send_message(DummyReply(value='pong'))
        if self.fail:
            # Finish with an error status plus structured details so the
            # RecvTrailingMetadata event carries every field under test.
            await stream.send_trailing_metadata(
                status=Status.NOT_FOUND,
                status_message="Everything is not OK",
                status_details=[ResourceInfo()],
                metadata={'trailing': 'true'},
            )
        else:
            await stream.send_trailing_metadata(metadata={'trailing': 'true'})
    async def UnaryStream(self, stream):
        raise GRPCError(Status.UNIMPLEMENTED)
    async def StreamUnary(self, stream):
        raise GRPCError(Status.UNIMPLEMENTED)
    async def StreamStream(self, stream):
        raise GRPCError(Status.UNIMPLEMENTED)
async def _test(event_type, *, fail=False):
    """Run one UnaryUnary RPC while listening for ``event_type``.

    Returns the single captured event.  With ``fail=True`` the service
    finishes the RPC with NOT_FOUND, so the client call is expected to
    raise GRPCError.
    """
    service = DummyService(fail)
    events = []
    async def callback(event_):
        events.append(event_)
    async with ChannelFor([service]) as channel:
        listen(channel, event_type, callback)
        stub = DummyServiceStub(channel)
        # On the failure path the GRPCError is expected; nullcontext keeps
        # the success path symmetrical.
        ctx = pytest.raises(GRPCError) if fail else nullcontext()
        with ctx:
            reply = await stub.UnaryUnary(DummyRequest(value='ping'),
                                          timeout=1,
                                          metadata={'request': 'true'})
            assert reply == DummyReply(value='pong')
    # Exactly one event of the requested type must have fired.
    event, = events
    return event
@pytest.mark.asyncio
async def test_send_request():
    # SendRequest fires before the request is sent and exposes the outgoing
    # metadata, method name, deadline and content type.
    event = await _test(SendRequest)
    assert event.metadata == MultiDict({'request': 'true'})
    assert event.method_name == '/dummy.DummyService/UnaryUnary'
    assert event.deadline.time_remaining() > 0
    assert event.content_type == 'application/grpc'
@pytest.mark.asyncio
async def test_send_message():
    # SendMessage carries the exact request message being sent.
    event = await _test(SendMessage)
    assert event.message == DummyRequest(value='ping')
@pytest.mark.asyncio
async def test_recv_message():
    # RecvMessage carries the reply message received from the service.
    event = await _test(RecvMessage)
    assert event.message == DummyReply(value='pong')
@pytest.mark.asyncio
async def test_recv_initial_metadata():
    # RecvInitialMetadata exposes the metadata the service sent up front.
    event = await _test(RecvInitialMetadata)
    assert event.metadata == MultiDict({'initial': 'true'})
@pytest.mark.asyncio
async def test_recv_trailing_metadata():
    # Failure path: trailing metadata carries the error status, its message
    # and the structured status details the service attached.
    event = await _test(RecvTrailingMetadata, fail=True)
    assert event.metadata == MultiDict({'trailing': 'true'})
    assert event.status is Status.NOT_FOUND
    assert event.status_message == "Everything is not OK"
    assert isinstance(event.status_details[0], ResourceInfo)
| [
"vladimir@magamedov.com"
] | vladimir@magamedov.com |
c2a6d24f20bb1c2478b4feea8182623aca53bac4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14413.py | 5e67c83692878ae8becbb59fe8019e05781959d1 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # changing type of a entry in dictionary throws error
d = {'today': datetime.today()}
d['today'] = d['today'].strftime(<your format>)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
504975487133379c5ad90d41e592ecc7584e58ac | f69af7fb96d29edc5d7bd7424acfa9078ba5047d | /models/networks.py | 95fbfab2f1a59fdeb3945ea0396ea30a6b8e80dc | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | TrendingTechnology/PeeledHuman | ece77dcf3eef81990be720fd4a8e72db2cb5f6d9 | ab7bff2c03b22774ecea4bc4ec3ae214da654dd5 | refs/heads/master | 2023-05-08T09:53:23.413879 | 2021-05-21T12:22:20 | 2021-05-21T12:22:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,561 | py | import torch
import torch.nn as nn
from torch.nn import init
import functools
import copy
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """No-op module: returns its input unchanged (used when norm == 'none')."""
    def forward(self, x):
        return x
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer factory.

    Parameters:
        norm_type (str) -- the name of the normalization layer:
                           batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running
    statistics (mean/stddev).  For InstanceNorm, we do not use learnable
    affine parameters and do not track running statistics.

    Returns a callable taking the number of features and producing a layer.
    Raises NotImplementedError for an unknown ``norm_type``.
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    elif norm_type == 'none':
        # Named function instead of an assigned lambda (PEP 8 E731); the
        # argument is the (ignored) feature count, matching how the partials
        # above are invoked.
        def norm_layer(num_features):
            return Identity()
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
                              subclass of BaseOptions.  opt.lr_policy is the
                              name of the learning rate policy:
                              linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter>
    epochs and linearly decay the rate to zero over the next
    <opt.niter_decay> epochs.  For other schedulers (step, plateau, cosine),
    we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises NotImplementedError for an unknown policy name.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # Bug fix: the original *returned* the exception instance instead of
        # raising it (callers silently received a NotImplementedError
        # object), and the %-formatting was never applied.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    def init_func(m):  # define the initialization function
        # Dispatch on the layer's class name: Conv*/Linear layers get the
        # chosen weight init; BatchNorm2d layers always get N(1, gain)
        # weights and zero bias (their weight is a per-channel scale).
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)  # apply the initialization function <init_func> to every submodule
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
    Return an initialized network.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
        # NOTE(review): `convert_model` is not defined or imported anywhere
        # in this module's visible imports, so this line raises NameError on
        # the GPU path; presumably it converts BatchNorm layers to a synced
        # variant (e.g. sync_batchnorm.convert_model) — confirm the import.
        net = convert_model(net)
        net.cuda()
    init_weights(net, init_type, init_gain=init_gain)
    return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Build and initialize a ResNet-based generator.

    Parameters:
        input_nc (int)     -- number of channels in input images
        output_nc (int)    -- number of channels in output images
        ngf (int)          -- number of filters in the last conv layer
        netG (str)         -- architecture name: resnet_9blocks | resnet_6blocks | resnet_18blocks
        norm (str)         -- normalization layer type: batch | instance | none
        use_dropout (bool) -- whether to use dropout layers
        init_type (str)    -- weight-initialization method name
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal
        gpu_ids (int list) -- GPUs to run on, e.g. [0, 1, 2]

    Returns the generator, initialized by <init_net>; it uses ReLU for
    non-linearity.  Raises NotImplementedError for an unrecognized name.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Each supported architecture differs only in its ResNet block count.
    depth_for = {'resnet_9blocks': 9, 'resnet_6blocks': 6, 'resnet_18blocks': 18}
    if netG not in depth_for:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                          use_dropout=use_dropout, n_blocks=depth_for[netG])
    return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a discriminator
    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
    Returns a discriminator
    Our current implementation provides three types of discriminators:
        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
        It can classify whether 70x70 overlapping patches are real or fake.
        Such a patch-level discriminator architecture has fewer parameters
        than a full-image discriminator and can work on arbitrarily-sized images
        in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
        It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    if netD == 'basic':  # default PatchGAN classifier
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif netD == 'n_layers':  # more options
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif netD == 'pixel':  # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
    """Define different GAN objectives.
    The GANLoss class abstracts away the need to create the target label tensor
    that has the same size as the input.
    """
    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """ Initialize the GANLoss class.
        Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (float) - - label for a real image
            target_fake_label (float) - - label of a fake image
        Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # Buffers move with the module across devices but are not trained.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            # WGAN-GP uses the raw critic mean, not a fixed-target loss.
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.
        Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
            target_is_real (bool) - - if the ground truth label is for real images or fake images
        Returns:
            A label tensor filled with ground truth label, and with the size of the input
        """
        if target_is_real:
            target_tensor = self.real_label
        else:
            target_tensor = self.fake_label
        return target_tensor.expand_as(prediction)
    def __call__(self, prediction, target_is_real):
        """Calculate loss given Discriminator's output and ground truth labels.
        Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
            target_is_real (bool) - - if the ground truth label is for real images or fake images
        Returns:
            the calculated loss.
        """
        if self.gan_mode in ['lsgan', 'vanilla']:
            target_tensor = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target_tensor)
        elif self.gan_mode == 'wgangp':
            # Wasserstein critic: maximize score on real, minimize on fake.
            if target_is_real:
                loss = -prediction.mean()
            else:
                loss = prediction.mean()
        return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
    Arguments:
        netD (network)           -- discriminator network
        real_data (tensor array) -- real images
        fake_data (tensor array) -- generated images from the generator
        device (str)             -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)               -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)         -- the constant used in formula (||gradient||_2 - constant)^2
        lambda_gp (float)        -- weight for this loss
    Returns the (gradient penalty loss, gradients) pair, or (0.0, None) when
    lambda_gp is zero.
    """
    if lambda_gp > 0.0:
        if type == 'real':   # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            # Per-sample random interpolation coefficient, broadcast to the
            # full tensor shape before mixing real and fake.
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten per sample
        # Small eps keeps the norm differentiable at zero gradient.
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
class ResnetGenerator(nn.Module):
    """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)

    This variant has ONE shared encoder + ResNet trunk and TWO decoder
    heads: a depth head (Sigmoid output in [0, 1], `output_nc` channels)
    and an RGB head (Tanh output in [-1, 1], 3*(output_nc-1) channels).
    NOTE(review): presumably `output_nc` is the number of peeled depth maps
    and the RGB head emits three channels for each peeled layer except the
    first — confirm against the training code.
    """
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # InstanceNorm has no affine shift by default, so conv bias is kept.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]
        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        # Two separate upsampling decoders branch off the shared trunk.
        model_depth = []
        model_rgb = []
        for i in range(n_downsampling):  # add upsampling layers (one set per head)
            mult = 2 ** (n_downsampling - i)
            model_depth += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                               kernel_size=3, stride=2,
                                               padding=1, output_padding=1,
                                               bias=use_bias),
                            norm_layer(int(ngf * mult / 2)),
                            nn.ReLU(True)]
            model_rgb += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        # Depth head: output_nc channels squashed to [0, 1].
        model_depth += [nn.ReflectionPad2d(3)]
        model_depth += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model_depth += [nn.Sigmoid()]
        # RGB head: 3 channels per (output_nc - 1) layers, squashed to [-1, 1].
        model_rgb += [nn.ReflectionPad2d(3)]
        model_rgb += [nn.Conv2d(ngf, 3*(output_nc-1), kernel_size=7, padding=0)]
        # model_rgb += [nn.Conv2d(ngf, 3*output_nc, kernel_size=7, padding=0)]
        model_rgb += [nn.Tanh()]
        self.model = nn.Sequential(*model)
        self.model_depth = nn.Sequential(*model_depth)
        self.model_rgb = nn.Sequential(*model_rgb)
    def forward(self, input):
        """Run the shared trunk once, then both heads; returns (rgb, depth)."""
        downsample = self.model(input)
        return self.model_rgb(downsample), self.model_depth(downsample)
class ResnetBlock(nn.Module):
    """Define a Resnet block"""
    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block
        A resnet block is a conv block with skip connections
        We construct a conv block with build_conv_block function,
        and implement skip connections in <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct a convolutional block.
        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not
        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        # First conv: explicit padding module for reflect/replicate, or
        # built-in zero padding (p=1) for 'zero'.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        # Second conv: same padding scheme; no ReLU after the final norm so
        # the residual addition happens on pre-activation values.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
        return nn.Sequential(*conv_block)
    def forward(self, x):
        """Forward function (with skip connections)"""
        out = x + self.conv_block(x)  # add skip connections
        return out
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 4
        padw = 1
        # First layer has no normalization, per the pix2pix recipe.
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters (capped at 8x)
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        # One extra stride-1 conv before the prediction layer.
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # All convolutions are 1x1, so each output pixel depends only on the
        # corresponding input pixel (per-pixel real/fake classification).
        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
        self.net = nn.Sequential(*self.net)
    def forward(self, input):
        """Standard forward."""
        return self.net(input)
| [
"rohanchacko007@gmail.com"
] | rohanchacko007@gmail.com |
0239f50106a25909870229a9b49d17ca773a5c68 | d125a7467b815ea3027567b0a6976c8ad730beb9 | /src/itsmservice/itsmservice/conf/product.py | 82ef0f132d7f11c4ceea13a01500a1df5a454f0f | [] | no_license | sunyaxiong/itsmservice | 06a1cb38b7314695613e2432f2e1d56c86aad815 | e50fccae9ae536ac520337ec79b1d1c985e49aa4 | refs/heads/master | 2022-12-12T11:14:03.838601 | 2018-10-31T06:17:25 | 2018-10-31T06:17:25 | 137,029,391 | 0 | 0 | null | 2022-12-08T00:58:47 | 2018-06-12T06:50:01 | JavaScript | UTF-8 | Python | false | false | 1,765 | py | # cas conf
# CAS single-sign-on endpoints.
SUCC_REDIRECT_URL = "itsm.ecscloud.com"
CAS_SERVER_URL = "http://cas.ecscloud.com/cas/"
CMP_URL = "http://cmp.ecscloud.com"
# CAS_REDIRECT_URL = "http://www.baidu.com"
# Django database connections.
# NOTE(review): database, API and mail credentials are committed in source —
# move them to environment variables or a secrets store and rotate the
# exposed values.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'itsm',
        'USER': 'root',
        'PASSWORD': 'Itsm@vstecs.com',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'OPTIONS': {
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
            'charset': 'utf8mb4',
        },
    },
    'cas_db': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'cas',
        'USER': 'root',
        'PASSWORD': 'Db@vstecs.com',
        "HOST": "172.31.31.255",
        "PORT": "3306",
    },
}
# use multi-database in django: route each app to its database.
DATABASE_ROUTERS = ['itsmservice.database_router.DatabaseAppsRouter']
DATABASE_APPS_MAPPING = {
    # example:
    # 'app_name':'database_name',
    'cas_sync': 'cas_db',
}
# fit2cloud API configuration (CMP / CMDB endpoints and signing parameters).
INTERNET_HOST = "cmp.ecscloud.com"
CLOUD_HOST = "172.16.13.155"
CMDB_HOST = "172.16.13.155"
access_key = "My00ZjRkMzVkZA=="
cloud_secret_key = "228e1f50-3b39-4213-a8d8-17e8bf2aeb1e"
CMDB_CONF = {
    "access_key": access_key,
    "version": "v1",
    "signature_method": "HmacSHA256",
    "signature_version": "v1"
}
CLOUD_CONF = {
    "access_key": access_key,
    "version": "v1",
    "signature_method": "HmacSHA256",
    "signature_version": "v1",
    "user": "sunyaxiong@vstecs.com",
}
# Alias kept for callers that import `secret_key` directly.
secret_key = cloud_secret_key
# cloud_secret_key = '228e1f50-3b39-4213-a8d8-17e8bf2aeb1e'
# Mail (SMTP) settings.
# NOTE(review): port 25 with EMAIL_USE_TLS=True is unusual (STARTTLS is
# typically 587) — confirm against the provider's documentation.
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'sunyaxiongnn@163.com'
EMAIL_HOST_PASSWORD = 'Sun880519'
EMAIL_SUBJECT_PREFIX = u'[vstecs.com]'
EMAIL_USE_TLS = True
"sunyaxiong"
] | sunyaxiong |
d9b3198dc97ae3d100c2537f1a374cc313ba3383 | 0e22ce0913d3f0f7a7404a3add796533df10ffd2 | /code_exam_student_grade.py | db18cc99d9d2ae1c6cf6534b6d0b6f0c95625ef4 | [] | no_license | Bhuvan1696/PythoN | 714e98717d277c81d8a6d0a83873af0ff6f45df3 | 685eddd9cb7132867519f9dff71ed3a55502cca6 | refs/heads/master | 2020-12-09T18:05:57.724508 | 2020-02-22T17:51:06 | 2020-02-22T17:51:06 | 233,379,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | """ Student Mark Statement """
def grade(percentage, eng, sci, mat):
    """Print the grade for the given overall percentage and subject marks.

    A student passes only when every subject clears its minimum
    (English >= 25, Science >= 35, Maths >= 35); the grade then depends on
    the overall percentage: > 90 -> "Grade A", 75 < p <= 90 -> "Grade B",
    otherwise "Average".  Failing any minimum prints "Fail..!".
    NOTE(review): English's pass mark (25) differs from the other subjects
    (35) — confirm this is intentional.
    """
    if eng >= 25 and sci >= 35 and mat >= 35:
        if percentage > 90:
            print("Grade A")
        elif 75 < percentage <= 90:
            # Bug fix: the original printed the typo "Grafe B".
            print("Grade B")
        else:
            print("Average")
    else:
        print("Fail..!")
def total_marks(eng, theory, practical, mat):
    """Validate the marks, print the overall percentage, and grade it.

    English is out of 75; Science is theory (out of 75) plus practical
    (out of 25); Maths is out of 100.  Out-of-range marks are rejected.
    NOTE(review): the percentage is computed as total/3, i.e. an average of
    the three subject totals even though English's maximum is 75 rather
    than 100 — confirm the intended formula.
    """
    if (eng <= 75 and theory <= 75 and practical <= 25 and mat <= 100):
        tot_sci = theory + practical
        total = eng + tot_sci + mat
        percent = total/3
        print ("Over all percentage :", percent)
        grade(percent, eng, tot_sci, mat)
    else:
        print(" Out of Marks..")
def get_marks():
    """Prompt for the four mark values and return them as ints.

    Returns (english, science_theory, science_practical, maths).
    """
    eng = int(input("Enter English out of 75 :"))
    sci_thoery = int(input("Enter Science_Thoery out of 75 :"))
    sci_practical = int(input("Enter Science_Pracical out of 25 :"))
    mat = int(input("Enter Maths out of 100 :"))
    return eng, sci_thoery, sci_practical, mat
def main():
    """Entry point: collect the marks, then print percentage and grade."""
    english, thoery, practical, maths = get_marks()
    total_marks(english, thoery, practical, maths)
#Main starts from here
main()
| [
"noreply@github.com"
] | Bhuvan1696.noreply@github.com |
ee39967cfee84345c3f981e0d983d21bfa8dc82f | dbe86e522bf7c0fa58531e13bed3dd97051e1b79 | /cognitoLogin.py | 4ab85e5d4802764ca7ab75cbd00e13fa51ba772e | [] | no_license | Asteriw/CMPT473-AWSApp | e214281bbae59f9319efe423f55745e0a10dddb1 | 9d30543439913259a5e88fdf5b8913d3cac5acb4 | refs/heads/master | 2023-04-25T05:39:35.352531 | 2021-04-09T05:57:31 | 2021-04-09T05:57:31 | 369,430,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py | import boto3
import botocore.exceptions
import hmac
import hashlib
import base64
import json
USER_POOL_ID = 'us-east-1_b6HnaK2eM'
CLIENT_ID = '4tnka15q9dfg2si6rd9d44mncc'
CLIENT_SECRET = '1i3e81c7nqnjqkl9dcnd48ebmn629ieivs3umi37ib3lv9907n8r'
def get_secret_hash(username):
msg = username + CLIENT_ID
dig = hmac.new(str(CLIENT_SECRET).encode('utf-8'),
msg = str(msg).encode('utf-8'), digestmod=hashlib.sha256).digest()
d2 = base64.b64encode(dig).decode()
return d2
def initiate_auth(client, username, password):
    """Run ADMIN_NO_SRP_AUTH for *username* and return ``(response, error)``.

    Returns the boto3 response dict and ``None`` on success, or ``None``
    and a human-readable error message on failure.
    """
    secret_hash = get_secret_hash(username)
    try:
        resp = client.admin_initiate_auth(
            UserPoolId=USER_POOL_ID,
            ClientId=CLIENT_ID,
            AuthFlow='ADMIN_NO_SRP_AUTH',
            AuthParameters={
                'USERNAME': username,
                'SECRET_HASH': secret_hash,
                'PASSWORD': password,
            },
            ClientMetadata={
                'name': username,
                'password': password,
            })
    except client.exceptions.NotAuthorizedException:
        return None, "The username or password is incorrect"
    except client.exceptions.UserNotConfirmedException:
        # Bug fix: the original returned `resp` here, but `resp` is never
        # bound when admin_initiate_auth raises, so this branch crashed with
        # UnboundLocalError.  Report the condition as an error instead.
        return None, "User is not confirmed"
    except Exception as e:
        return None, e.__str__()
    return resp, None
def lambda_handler(event, context):
client = boto3.client('cognito-idp')
for field in ["username", "password"]:
if event.get(field) is None:
return {"error": True,
"success": False,
"message": f"{field} is required",
"data": None}
resp, msg = initiate_auth(client, event.get("username"), event.get("password"))
print(resp)
print(msg)
if msg != None:
return {'message': msg,
"error": True, "success": False, "data": None}
if resp.get("AuthenticationResult"):
return {'message': "success",
"error": False,
"success": True,
"data": {
"id_token": resp["AuthenticationResult"]["IdToken"],
"refresh_token": resp["AuthenticationResult"]["RefreshToken"],
"access_token": resp["AuthenticationResult"]["AccessToken"],
"expires_in": resp["AuthenticationResult"]["ExpiresIn"],
"token_type": resp["AuthenticationResult"]["TokenType"]
}}
else: #this code block is relevant only when MFA is enabled
return {"error": True,
"success": False,
"data": None, "message": None} | [
"zhiqi_qiao@sfu.ca"
] | zhiqi_qiao@sfu.ca |
479f083fa79fc3fdc8c1cf6c85a8c0a00641158c | a62b70e3eed1bee2b2214f1f78be131d9485f370 | /codes/app.py | 3794515aeaaa27242abe484bc055afe523041a2a | [] | no_license | nileshvarshney/restful_database | 53a12a68f40d142021c30d155d9b67bc3fab99aa | 2a16b74c2ab99804158d9eeb23fec0ada33292aa | refs/heads/master | 2020-12-03T07:50:35.017918 | 2020-01-03T07:30:52 | 2020-01-03T07:30:52 | 231,248,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import RegisterUser
from resources.item import ItemList, Items
app = Flask(__name__)
api = Api(app)
app.secret_key='sanjose'
jwt = JWT(app, authentication_handler=authenticate, identity_handler=identity) # /auth
api.add_resource(Items,'/item/<string:name>')
api.add_resource(ItemList,'/items')
api.add_resource(RegisterUser,'/registerUser')
app.run(port=5000,debug=True) | [
"nilvarshney@stubhub.com"
] | nilvarshney@stubhub.com |
31dd5fd0705bfebccf299f10eb6ba594038b885d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /5ejvPTQeiioTTA9xZ_0.py | 9b5d0b04aa8e5dca2af5037100305f74b9f4c108 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
Create a function that checks if the argument is an integer or a string.
Return `"int"` if it's an integer and `"str"` if it's a string.
### Examples
int_or_string(8) ➞ "int"
int_or_string("Hello") ➞ "str"
int_or_string(9843532) ➞ "int"
### Notes
Input will either be an integer or a string.
"""
def int_or_string(var):
return var.__class__.__name__
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7b288b67b9fa3473f2fb3c72085b6de7ea893109 | 6cecdc007a3aafe0c0d0160053811a1197aca519 | /apps/receiver/management/commands/generate_submissions.py | ae672ba20a318c1fc46d7ecce22a17363b20c062 | [] | no_license | commtrack/temp-aquatest | 91d678c927cc4b2dce6f709afe7faf2768b58157 | 3b10d179552b1e9d6a0e4ad5e91a92a05dba19c7 | refs/heads/master | 2016-08-04T18:06:47.582196 | 2010-09-29T13:20:13 | 2010-09-29T13:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """ This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import bz2
import sys
import urllib2
import httplib
import cStringIO
from urlparse import urlparse
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from django_rest_interface import util as rest_util
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='all', \
default=False, help='Generate all files'),
make_option('-?','--debug', action='store_true', dest='debug', \
default=False, help='Generate some files'),
make_option('-d','--download', action='store_true', dest='download', \
default=False, help='Download files.'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
all = options.get('all', False)
debug = options.get('debug', False)
download = options.get('download', False)
generate_submissions(remote_url, username, password, not all, debug, download)
def __del__(self):
pass
def generate_submissions(remote_url, username, password, latest=True, debug=False, download=False, to='submissions.tar'):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = rest_util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
url = 'http://%s/api/submissions/' % remote_url
if latest:
MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum', debug)
response = rest_util.request(url, username, password, MD5_buffer)
print "Generated latest remote submissions"
else:
response = urllib2.urlopen(url)
print "Generated all remote submissions archive"
if download:
fout = open(to, 'w+b')
fout.write(response.read())
fout.close()
print "Submissions downloaded to %s" % to
else:
# Check for status messages
# (i think tar payloads always begin 'BZ'...)
response = response.read(255)
if response[:2] != "BZ":
print response
return response
| [
"allen.machary@gmail.com"
] | allen.machary@gmail.com |
5a5a5583911ddb9db5402f6b3d6030070b115f57 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/292.nim-游戏.py | 174da887a6b080c9b99b41e140bf445662a9f611 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | #
# @lc app=leetcode.cn id=292 lang=python3
#
# [292] Nim 游戏
#
# https://leetcode-cn.com/problems/nim-game/description/
#
# algorithms
# Easy (69.45%)
# Likes: 326
# Dislikes: 0
# Total Accepted: 50K
# Total Submissions: 71.9K
# Testcase Example: '4'
#
# 你和你的朋友,两个人一起玩 Nim 游戏:桌子上有一堆石头,每次你们轮流拿掉 1 - 3 块石头。 拿掉最后一块石头的人就是获胜者。你作为先手。
#
# 你们是聪明人,每一步都是最优解。 编写一个函数,来判断你是否可以在给定石头数量的情况下赢得游戏。
#
# 示例:
#
# 输入: 4
# 输出: false
# 解释: 如果堆中有 4 块石头,那么你永远不会赢得比赛;
# 因为无论你拿走 1 块、2 块 还是 3 块石头,最后一块石头总是会被你的朋友拿走。
#
#
#
# @lc code=start
class Solution:
def canWinNim(self, n: int) -> bool:
return False if n % 4 == 0 else True
# @lc code=end
| [
"chrismwang@tencent.com"
] | chrismwang@tencent.com |
d4a33c08e35fe6ddedc4fee59d98a62a0b60cb31 | 1493997bb11718d3c18c6632b6dd010535f742f5 | /particles/particles_point_sprites.py | 34a1996efb93c10ffe497379fab53cf8acfd7ca9 | [] | no_license | kovrov/scrap | cd0cf2c98a62d5af6e4206a2cab7bb8e4560b168 | b0f38d95dd4acd89c832188265dece4d91383bbb | refs/heads/master | 2021-01-20T12:21:34.742007 | 2010-01-12T19:53:23 | 2010-01-12T19:53:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,964 | py | from pyglet.gl import gl_info
assert gl_info.have_extension("GL_ARB_point_sprite"), "ARB_point_sprite not available"
from pyglet.gl import *
import random
# see:
# http://www.opengl.org/registry/specs/ARB/point_sprite.txt
# http://www.opengl.org/registry/specs/ARB/point_parameters.txt
g_slowdown = 2.0
# Query for the max point size supported by the hardware
g_maxSize = c_float(0.0)
glGetFloatv(GL_POINT_SIZE_MAX_ARB, pointer(g_maxSize))
# Clamp size to 100.0f or the sprites could get a little too big on some of the
# newer graphic cards. My ATI card at home supports a max point size of 1024.0!
if (g_maxSize.value > 100.0): g_maxSize.value = 100.0
def draw_task(texture_id):
particles = [{
'life': 1.0,
'fade': random.uniform(0.1, 0.004),
#'r': 1.0, 'g': 1.0, 'b': 1.0,
'r': 0.32, 'g': 0.32, 'b': 0.32,
'x': 0.0, 'y': 0.0, 'z': 0.0,
'xi': float(random.randint(-250, 250)),
'yi': float(random.randint(-250, 250)),
'zi': float(random.randint(-250, 250)),
'xg': 0.0, 'yg': -0.8, 'zg': 0.0,
} for i in xrange(1000)]
glDisable(GL_DEPTH_TEST) # TODO: see if this integrates well with rest of render...
glEnable(GL_POINT_SPRITE_ARB) # affects global state
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
# This is how will our point sprite's size will be modified by
# distance from the viewer
glPointParameterfvARB(GL_POINT_DISTANCE_ATTENUATION_ARB, (c_float*3)(1.0, 0.0, 0.01))
glPointSize(g_maxSize)
# The alpha of a point is calculated to allow the fading of points instead
# of shrinking them past a defined threshold size. The threshold is defined
# by GL_POINT_FADE_THRESHOLD_SIZE_ARB and is not clamped to the minimum and
# maximum point sizes.
# glPointParameterfARB(GL_POINT_FADE_THRESHOLD_SIZE_ARB, 60.0)
# glPointParameterfARB(GL_POINT_SIZE_MIN_ARB, 1.0)
# glPointParameterfARB(GL_POINT_SIZE_MAX_ARB, g_maxSize)
# Specify point sprite texture coordinate replacement mode for each
# texture unit (see ARB_point_sprite specs)
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE) # per-texture unit
while True:
yield
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glBindTexture(GL_TEXTURE_2D, texture_id)
glBegin(GL_POINTS)
for p in particles:
# draw
glColor4f(p['r'], p['g'], p['b'], p['life'])
glVertex3f(p['x'], p['y'], p['z'])
# update
p['life'] -= p['fade']
p['x'] += p['xi'] / (g_slowdown * 1000)
p['y'] += p['yi'] / (g_slowdown * 1000)
p['z'] += p['zi'] / (g_slowdown * 1000)
p['xi'] += p['xg']
p['yi'] += p['yg']
p['zi'] += p['zg']
if p['life'] < 0.0:
p['life'] = 1.0
p['fade'] = random.uniform(0.1, 0.004)
p['x'] = 0.0; p['y'] = 0.0; p['z'] = 0.0
p['xi'] = random.uniform(-32.0, 32.0)
p['yi'] = random.uniform(-32.0, 32.0)
p['zi'] = random.uniform(-32.0, 32.0)
glEnd()
glEnable(GL_BLEND)
glDisable(GL_POINT_SPRITE_ARB)
| [
"kovrov@gmail.com"
] | kovrov@gmail.com |
56b0c048589ed3ef8f13303160de9e5ae6e672df | 9701287f1cc7734d31c898708581b15a41916e36 | /backend/app/crud/inventory.py | 5dc66359f2914fdfc5f4b7ef88503d0e6e4b1ba9 | [] | no_license | cedric0306/fastERP | a40c6d3bd3f07f65d2d8c1a440d8930f20bf4aee | d87824e945b01bc1969c9e2fdea3f243f6240a2f | refs/heads/main | 2023-07-27T07:44:58.711211 | 2021-08-27T15:14:08 | 2021-08-27T15:14:08 | 564,331,443 | 1 | 0 | null | 2022-11-10T13:44:17 | 2022-11-10T13:44:16 | null | UTF-8 | Python | false | false | 1,833 | py | from sqlalchemy.orm import Session
from fastapi import Depends
from ..models import Inventory as InventoryModel, User
from ..schemas.inventory import InventoryCreate, InventoryDelete, InventoryUpdate
from ..dependencies import get_current_user
# CASH
def get_inventory(db: Session, inventory_id: int):
return db.query(InventoryModel).filter(InventoryModel.id == inventory_id).first()
def get_inventoryes(db: Session, skip: int = 0, limit: int = 100):
return db.query(InventoryModel).offset(skip).limit(limit).all()
def create_inventory(db: Session, inventory: InventoryCreate, current_user: User):
db_inventory = InventoryModel(date=inventory.date,
description=inventory.description,
created_on=inventory.created_on,
user_id=current_user.id,
status=inventory.status)
db.add(db_inventory)
db.commit()
return db_inventory
def update_inventory(db: Session, inventory: InventoryUpdate, current_user: User):
inventory_data = db.query(InventoryModel).filter(
InventoryModel.id == inventory.id).first()
inventory_data.date = inventory.date
inventory_data.description = inventory.description
inventory_data.user_id = current_user.id,
inventory_data.status = inventory.status
db.commit()
db.refresh(inventory_data)
return inventory_data
def delete_inventory(db: Session, inventory: InventoryDelete):
inventory_data = db.query(InventoryModel).filter(
InventoryModel.id == inventory.id).first()
if inventory_data is None:
return None
else:
db.delete(inventory_data)
db.commit()
return inventory_data
| [
"wasuaje@shorecg.com"
] | wasuaje@shorecg.com |
282e63fed4ef69cb10987c6e83a4b406b3ef4bf6 | f0316e656767cf505b32c83eef4df13bb9f6b60c | /Kattis/cups.py | 4789de27f116b754ca591f250e6577a087e0b6a9 | [] | no_license | AkshdeepSharma/Classroom | 70ec46b35fab5fc4a9d2eac430659d7dafba93da | 4e55799466c101c736de6c7e07d716ff147deb83 | refs/heads/master | 2022-06-13T18:14:03.236503 | 2022-05-17T20:16:28 | 2022-05-17T20:16:28 | 94,828,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | N = int(input())
cups = []
for i in range(N):
a, b = input().split()
try:
cups.append([int(b), a])
except:
cups.append([int(a) // 2, b])
cups = sorted(cups, key=lambda x: x[0])
for k in range(len(cups)):
print(cups[k][1])
| [
"akshdeep.s@live.com"
] | akshdeep.s@live.com |
22f4ffa79f304c929e6c0680c0a2228d0e15dd2b | dbf2d3f8eb11d04123894e398446b56ca791c9f6 | /examples/02.py | c9847666ba51a1574e379280d847d651e7982b21 | [] | no_license | podhmo/nendo | ed8d9a62ab23f7409a8ce519f28deff7d3642942 | 841ec7a990019596c769a2f581a1190aeb8cbd56 | refs/heads/master | 2021-01-22T17:47:58.964323 | 2015-06-28T11:37:38 | 2015-06-28T11:37:38 | 37,828,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
"""
-- select explicitly
SELECT open_emp_id, product_cd
FROM account
ORDER BY open_emp_id, product_cd;
"""
from nendo import Query, make_record, render
from nendo.value import List
Account = make_record("account", "account_id product_cd open_date avail_balance open_emp_id")
query = (Query()
.from_(Account)
.order_by(List([Account.open_emp_id, Account.product_cd]).desc())
.select(Account.open_emp_id, Account.product_cd))
print(render(query))
| [
"podhmo+altair@beproud.jp"
] | podhmo+altair@beproud.jp |
e5ae86739f26139d2a56b19277ea7832e21d41bd | f74dd098c3e665d8f605af5ebe7e2874ac31dd2f | /aiogithubapi/namespaces/user.py | 1d1bd8b8cab4928b70f10f1d9836568e6cc2db64 | [
"MIT"
] | permissive | ludeeus/aiogithubapi | ce87382698827939aaa127b378b9a11998f13c06 | 90f3fc98e5096300269763c9a5857481b2dec4d2 | refs/heads/main | 2023-08-20T19:30:05.309844 | 2023-08-14T20:24:21 | 2023-08-14T20:24:21 | 198,505,021 | 21 | 20 | MIT | 2023-09-11T06:12:10 | 2019-07-23T20:39:53 | Python | UTF-8 | Python | false | false | 2,993 | py | """
Methods for the authenticated user namespace
https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
"""
from __future__ import annotations
from typing import Any, Dict
from ..const import GitHubRequestKwarg
from ..models.organization import GitHubOrganizationMinimalModel
from ..models.repository import GitHubRepositoryModel
from ..models.response import GitHubResponseModel
from ..models.user import GitHubAuthenticatedUserModel
from .base import BaseNamespace
from .projects import GitHubUserProjectsNamespace
class GitHubUserNamespace(BaseNamespace):
"""Methods for the user namespace"""
def __post_init__(self) -> None:
self._projects = GitHubUserProjectsNamespace(self._client)
@property
def projects(self) -> GitHubUserProjectsNamespace:
"""Property to access the users projects namespace"""
return self._projects
async def get(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[GitHubAuthenticatedUserModel]:
"""
Get the authenticated user
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user",
**kwargs,
)
response.data = GitHubAuthenticatedUserModel(response.data)
return response
async def starred(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
Get the authenticated user starred repositories
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/starred",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def repos(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
Get the repositories for the authenticated user
https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/repos",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def orgs(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubOrganizationMinimalModel]]:
"""
List public organization memberships for the specified user.
https://docs.github.com/en/rest/reference/orgs#list-organizations-for-the-authenticated-user
"""
response = await self._client.async_call_api(endpoint="/user/orgs", **kwargs)
response.data = [GitHubOrganizationMinimalModel(data) for data in response.data or []]
return response
| [
"noreply@github.com"
] | ludeeus.noreply@github.com |
adf625842636ccc75d545aa5f1e107a48d4ec5cb | f8d753a822047a68e417ba58d17f754789e2af93 | /migrations/versions/ad4acce05428_.py | 182a439446f9e08a2fd3cbe43752625d82ba13eb | [] | no_license | daronjp/travel_blog | 113eba826ccabcc18c51fc169e3b2ae359365b77 | 2a016ec840ebb468112a79c52605404d2ac1aa72 | refs/heads/master | 2023-05-10T19:39:34.713528 | 2022-09-13T03:47:57 | 2022-09-13T03:47:57 | 211,604,162 | 0 | 0 | null | 2023-05-01T22:49:07 | 2019-09-29T04:39:32 | Python | UTF-8 | Python | false | false | 798 | py | """empty message
Revision ID: ad4acce05428
Revises: 07c5566941d1
Create Date: 2019-09-30 14:09:13.551393
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad4acce05428'
down_revision = '07c5566941d1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_name', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
| [
"daronjp@gmail.com"
] | daronjp@gmail.com |
d4eb0b65a8e727748c9d78004d51c636bf799cf0 | 6aea393423a0f840c5d28e903726c1fc82dd0544 | /System_class.py | 4b9ee2d8eebe3fa193ecc2a20d2e3af9fc762a77 | [] | no_license | markrsteiner/markosim_reloaded | 7ea4e9ed6d3403a2e560e055f89ab359c69519be | 96ce8d534c9e59feb79ed1e80a52ef55e88a7749 | refs/heads/master | 2020-04-10T18:09:24.717592 | 2019-02-28T16:28:55 | 2019-02-28T16:28:55 | 161,195,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,260 | py | import math
import numpy as np
import format__output
class System:
def __init__(self, user_input_file: dict, simulation_instance) -> None: # should take simulator instance or something
self.input_bus_voltage = float(user_input_file['Vcc [V]'])
self.input_ic_peak = float(user_input_file['Io [Apk]'])
self.input_mod_depth = float(user_input_file['Mod. Depth'])
self.input_output_freq = float(user_input_file['fo [Hz]'])
self.input_t_sink = float(user_input_file['Ts [\u00B0C]'])
self.input_modulation_type = simulation_instance.get__modulation_type()
self.input_freq_carrier = float(user_input_file['fc [kHz]'])
self.is_three_level = simulation_instance.get__three_level_flag()
if self.is_three_level:
self.input_bus_voltage /= 2
self.input_rg_on_inside = float(user_input_file['Inside rg on [\u03A9]'])
self.input_rg_off_inside = float(user_input_file['Inside rg off [\u03A9]'])
self.input_rg_on_outside = float(user_input_file['Outside rg on [\u03A9]'])
self.input_rg_off_outside = float(user_input_file['Outside rg off [\u03A9]'])
else:
self.input_rg_on = float(user_input_file['rg on [\u03A9]'])
self.input_rg_off = float(user_input_file['rg off [\u03A9]'])
self.rg_output_flag = True
self.input_power_factor = float(user_input_file['PF [cos(\u03D5)]'])
self.step_size = simulation_instance.get__step_size()
self.time_division = 1 / self.input_output_freq / 360.0 * self.step_size
self.switches_per_degree = self.input_freq_carrier * self.time_division
self.power_factor_phase_shift = math.acos(float(user_input_file['PF [cos(\u03D5)]']))
self.output_current = []
self.system_output_view = {}
self.cycle_angle__degree = None
self.system_output_voltage = np.arange(0)
self.duty_cycle__p = []
self.duty_cycle__n = []
self.calculate__system_output()
def calculate__system_output(self):
self.cycle_angle__degree = np.array([val * math.pi / 180 * self.step_size for val in range(int(360 / self.step_size))]) #todo there is probably a smarter way to do this with numpy arange
if self.input_modulation_type == "Sinusoidal":
self.calculate__sinusoidal_output()
if self.input_modulation_type == "SVPWM": # add later maybe
self.calculate__svpwm_output()
if self.input_modulation_type == 'Two Phase I':
self.calculate__two_phase1_output()
if self.is_three_level:
self.duty_cycle__p = np.clip(self.system_output_voltage / self.input_bus_voltage, 0, 1)
self.duty_cycle__n = np.clip(-self.system_output_voltage / self.input_bus_voltage, 0, 1)
else:
self.duty_cycle__p = np.clip(np.divide(self.system_output_voltage, self.input_bus_voltage), 0, 1)
self.duty_cycle__n = 1 - self.duty_cycle__p
def create__output_view(self, inside_module, outside_module=None, diode_module=None):
is_three_level = outside_module is not None and diode_module is not None
if is_three_level:
self.system_output_view = format__output.build__output_view_dict(self, inside_module, outside_module, diode_module)
self.system_output_view.update({'Modulation': self.input_modulation_type})
if not self.rg_output_flag:
self.system_output_view.update({
'Outside rg on [\u03A9]': "STOCK",
'Outside rg off [\u03A9]': "STOCK",
'Inside rg on [\u03A9]': "STOCK",
'Inside rg off [\u03A9]': "STOCK"
})
else:
self.system_output_view = format__output.build__output_view_dict(self, inside_module)
self.system_output_view.update({'Modulation': self.input_modulation_type})
if self.rg_output_flag:
self.system_output_view.update({'rg on [\u03A9]': self.input_rg_on, 'rg off [\u03A9]': self.input_rg_off})
else:
self.system_output_view.update({'rg on [\u03A9]': "STOCK", 'rg off [\u03A9]': "STOCK"})
def calculate__sinusoidal_output(self):
if self.is_three_level:
self.system_output_voltage = self.input_bus_voltage * self.input_mod_depth * np.sin(self.cycle_angle__degree)
else:
self.system_output_voltage = self.input_bus_voltage * (1 + self.input_mod_depth * np.sin(self.cycle_angle__degree)) / 2
self.output_current = self.input_ic_peak * np.sin(self.cycle_angle__degree - self.power_factor_phase_shift)
def calculate__svpwm_output(self):
sector = np.floor(self.cycle_angle__degree * 3 / math.pi)
duty_cycle = np.array([self.svpwm_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift)
def svpwm_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.cos(degree - math.pi / 6) + (1.0 - modified_input_mod_depth * math.cos(degree - math.pi / 6)) / 2.0,
1: modified_input_mod_depth * math.sin(2 * math.pi / 3 - degree) + (1.0 - modified_input_mod_depth * math.cos(degree - math.pi / 2)) / 2.0,
2: (1.0 - modified_input_mod_depth * math.cos(degree - 5 * math.pi / 6)) / 2.0,
3: (1.0 - modified_input_mod_depth * math.cos(degree - 7 * math.pi / 6)) / 2.0,
4: modified_input_mod_depth * math.sin(degree - 4 * math.pi / 3) + (1.0 - modified_input_mod_depth * math.cos(degree - 3 * math.pi / 2)) / 2.0,
5: modified_input_mod_depth * math.cos(degree - 11 * math.pi / 6) + (1.0 - modified_input_mod_depth * math.cos(degree - 11 * math.pi / 6)) / 2.0
}
return duty_cycle_results[sector]
def calculate__two_phase1_output(self):
sector = np.floor(self.cycle_angle__degree * 3 / math.pi)
duty_cycle = np.array([self.two_phase1_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift)
def two_phase1_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.sin(degree + math.pi / 6),
1: 1.0,
2: -modified_input_mod_depth * math.sin(degree - 7 * math.pi / 6),
3: 1.0 + modified_input_mod_depth * math.sin(degree + math.pi / 6),
4: 0.0,
5: 1.0 - modified_input_mod_depth * math.sin(degree - 7 * math.pi / 6)
}
return duty_cycle_results[sector]
def calculate__two_phase2_output(self):
sector = np.floor(self.cycle_angle__degree * 1.5 * math.pi)
duty_cycle = np.array([self.two_phase2_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift - math.pi / 6)
def two_phase2_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.sin(degree),
1: modified_input_mod_depth * math.sin(degree - math.pi / 3),
2: 0
}
return duty_cycle_results[sector]
# Getters and setters
#
# def set__step_size(self, step_size):
# self.step_size = step_size
# self.time_division = 1 / self.input_output_freq / 360.0 * self.step_size
# self.switches_per_degree = self.input_freq_carrier * self.time_division
#
# def set__three_level(self, is_three_level):
# self.is_three_level = is_three_level
# self.input_bus_voltage /= 2
#
# def set__modulation(self, input__modulation_type):
# self.input_modulation_type = input__modulation_type
def set__input_current(self, input_current):
self.input_ic_peak = input_current
def set__rg_flag(self, flag):
self.rg_output_flag = flag
def get__input_current(self):
return self.input_ic_peak
def get__input_bus_voltage(self):
return self.input_bus_voltage
def get__switches_per_degree(self):
return self.switches_per_degree
def get__input_output_freq(self):
return self.input_output_freq
def get__input_mod_depth(self):
return self.input_mod_depth
def get__input_freq_carrier(self):
return self.input_freq_carrier
def get__input_power_factor(self):
return self.input_power_factor
def get__duty_cycle__p(self):
return self.duty_cycle__p
def get__duty_cycle__n(self):
return self.duty_cycle__n
def get__step_size(self):
return self.step_size
def get__time_division(self):
return self.time_division
def get__input_t_sink(self):
return self.input_t_sink
def get__system_output_current(self):
return self.output_current
def get__system_output_voltage(self):
return self.system_output_voltage
def get__system_output_view(self):
return self.system_output_view
def get__input_rg_on(self):
return self.input_rg_on
def get__input_rg_off(self):
return self.input_rg_off
def get__input_rg_on_inside(self):
return self.input_rg_on_inside
def get__input_rg_off_inside(self):
return self.input_rg_off_inside
def get__input_rg_on_outside(self):
return self.input_rg_on_outside
def get__input_rg_off_outside(self):
return self.input_rg_off_outside
def get__three_level(self):
return self.is_three_level
| [
"calarmy1"
] | calarmy1 |
6dadb8446146a85cfb8ae39894b3b97d9a46708d | 24108066b4b5b6ecd02c7fb499d970eab1877380 | /codeforces/queueatschool.py | b500a8dde8579bfc2827cd50ca3f5685bf164946 | [] | no_license | vishu1994/Datastructures-And-Algorithms | 93fc7e1d1f5fac775b6c50cb8cafd1a4f3060544 | 35bfc28edd8ebf1c1724be41402b1befd478aed4 | refs/heads/master | 2020-03-30T19:26:36.413735 | 2019-07-27T10:31:47 | 2019-07-27T10:31:47 | 151,542,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | nt=list(map(int,input().split()))
n=nt[0]
t=nt[1]
myqueue=list(input())
for i in range(t):
j=0
while j<len(myqueue)-1:
if myqueue[j]=="B" and myqueue[j+1]=="G":
myqueue[j],myqueue[j+1]="G","B"
j=j+2
else:
j=j+1
for i in myqueue:
print(i,end="")
| [
"vishalpandey801@gmail.com"
] | vishalpandey801@gmail.com |
4f7ae60a8596d2b441a4ff0da86b405f6c80aba6 | ad5d38fce4785037c108186f17eb1c64380355ef | /sddsd/google-cloud-sdk.staging/lib/googlecloudsdk/calliope/arg_parsers.py | 106bfe82ce32e1f5504ba759ff9f2da633c36cc4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | saranraju90/multik8s | 75864b605a139ddb7947ed4de4ae8466bdd49acb | 428576dedef7bb9cd6516e2c1ab2714581e1137c | refs/heads/master | 2023-03-03T21:56:14.383571 | 2021-02-20T14:56:42 | 2021-02-20T14:56:42 | 339,665,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,962 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument pasers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict())
parser.add_argument(
'--delay',
default='5s',
type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s')
parser.add_argument(
'--disk-size',
default='10GB',
type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB')
res = parser.parse_args(
'--names --metadata x=y,a=b,c=d --delay 1s --disk-size 10gb'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import collections
import copy
import re
from dateutil import tz
from googlecloudsdk.calliope import parser_errors
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
from six.moves import zip # pylint: disable=redefined-builtin
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
"""Exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
"""Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
"""Raised when there is a problem with user input.
argparse.ArgumentError takes both the action and a message as constructor
parameters.
"""
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<suffix>[-/a-zA-Z]+))? # Optional scale and type abbr.
$ # End of input marker.
"""
_RANGE_PATTERN = r'^(?P<start>[0-9]+)(-(?P<end>[0-9]+))?$'
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'': 1,
'K': 1 << 10,
'M': 1 << 20,
'G': 1 << 30,
'T': 1 << 40,
'P': 1 << 50,
'Ki': 1 << 10,
'Mi': 1 << 20,
'Gi': 1 << 30,
'Ti': 1 << 40,
'Pi': 1 << 50,
}
def GetMultiCompleter(individual_completer):
  """Wraps a completer so it works on comma separated lists.

  Args:
    individual_completer: A function that completes an individual element.

  Returns:
    A function that completes only the last element of the list, keeping
    everything before the final comma untouched.
  """
  def MultiCompleter(prefix, parsed_args, **kwargs):
    # Split off the fragment after the last comma; the head (including the
    # comma) is re-attached to every completion.
    head, sep, fragment = prefix.rpartition(',')
    kept = head + sep
    completions = individual_completer(fragment, parsed_args, **kwargs)
    return [kept + completion for completion in completions]
  return MultiCompleter
def _DeleteTypeAbbr(suffix, type_abbr='B'):
"""Returns suffix with trailing type abbreviation deleted."""
if not suffix:
return suffix
s = suffix.upper()
i = len(s)
for c in reversed(type_abbr.upper()):
if not i:
break
if s[i - 1] == c:
i -= 1
return suffix[:i]
def GetBinarySizePerUnit(suffix, type_abbr='B'):
  """Looks up the number of bytes represented by one unit of suffix.

  Args:
    suffix: str, A case insensitive unit suffix string with optional type
      abbreviation.
    type_abbr: str, The optional case insensitive type abbreviation following
      the suffix.

  Returns:
    The binary size per unit for a unit+type_abbr suffix, or None if the
    unit is unknown.
  """
  return _BINARY_SIZE_SCALES.get(_DeleteTypeAbbr(suffix.upper(), type_abbr))
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None,
                 strict_case=True, type_abbr='B',
                 suggested_binary_size_scales=None):
  """A helper that returns a function that can parse values with units.

  Casing for all units matters only when strict_case is True.

  Args:
    scales: {str: int}, A dictionary mapping units to their magnitudes in
      relation to the lowest magnitude unit in the dict.
    default_unit: str, The default unit to use if the user's input is
      missing unit.
    lower_bound: str, An inclusive lower bound (parsed with the same rules).
    upper_bound: str, An inclusive upper bound (parsed with the same rules).
    strict_case: bool, whether to be strict on case-checking
    type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
      bits/sec.
    suggested_binary_size_scales: list, A list of strings with units that will
      be recommended to user.

  Returns:
    A function that can parse values.
  """
  def UnitsByMagnitude(suggested_binary_size_scales=None):
    """Returns a list of the units in scales sorted by magnitude."""
    scale_items = sorted(six.iteritems(scales),
                         key=lambda value: (value[1], value[0]))
    if suggested_binary_size_scales is None:
      return [key + type_abbr for key, _ in scale_items]
    # Only show the units the caller wants advertised.
    return [key + type_abbr for key, _ in scale_items
            if key + type_abbr in suggested_binary_size_scales]
  def Parse(value):
    """Parses value that can contain a unit and type abbreviation."""
    match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
    if not match:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'given value must be of the form INTEGER[UNIT] where units '
          'can be one of {0}'
          .format(', '.join(UnitsByMagnitude(suggested_binary_size_scales))),
          user_input=value))
    amount = int(match.group('amount'))
    suffix = match.group('suffix') or ''
    # Strip the type abbreviation ('B', 'b/s', ...) to get the bare unit.
    unit = _DeleteTypeAbbr(suffix, type_abbr)
    if strict_case:
      unit_case = unit
      default_unit_case = _DeleteTypeAbbr(default_unit, type_abbr)
      scales_case = scales
    else:
      # Normalize everything to upper case for case-insensitive matching.
      unit_case = unit.upper()
      default_unit_case = _DeleteTypeAbbr(default_unit.upper(), type_abbr)
      scales_case = dict([(k.upper(), v) for k, v in scales.items()])
    if not unit and unit == suffix:
      # No unit and no type abbreviation at all: apply the default unit.
      return amount * scales_case[default_unit_case]
    elif unit_case in scales_case:
      return amount * scales_case[unit_case]
    else:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
          user_input=unit))
  # Bounds are parsed once here, with the same syntax as user values.
  if lower_bound is None:
    parsed_lower_bound = None
  else:
    parsed_lower_bound = Parse(lower_bound)
  if upper_bound is None:
    parsed_upper_bound = None
  else:
    parsed_upper_bound = Parse(upper_bound)
  def ParseWithBoundsChecking(value):
    """Same as Parse except bound checking is performed."""
    if value is None:
      return None
    else:
      parsed_value = Parse(value)
      if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
        raise ArgumentTypeError(_GenerateErrorMessage(
            'value must be greater than or equal to {0}'.format(lower_bound),
            user_input=value))
      elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
        raise ArgumentTypeError(_GenerateErrorMessage(
            'value must be less than or equal to {0}'.format(upper_bound),
            user_input=value))
      else:
        return parsed_value
  return ParseWithBoundsChecking
def RegexpValidator(pattern, description):
  """Returns a function that validates a string against a regular expression.

  For example:

  >>> alphanumeric_type = RegexpValidator(
  ...   r'[a-zA-Z0-9]+',
  ...   'must contain one or more alphanumeric characters')
  >>> parser.add_argument('--foo', type=alphanumeric_type)
  >>> parser.parse_args(['--foo', '?'])
  >>> # SystemExit raised and the error "error: argument foo: Bad value [?]:
  >>> # must contain one or more alphanumeric characters" is displayed

  Args:
    pattern: str, the pattern to compile into a regular expression to check
    description: an error message to show if the argument doesn't match

  Returns:
    function: str -> str, usable as an argparse type
  """
  # Anchor the pattern so the WHOLE value must match, not just a prefix.
  anchored_pattern = pattern + '$'
  def Parse(value):
    if re.match(anchored_pattern, value):
      return value
    raise ArgumentTypeError('Bad value [{0}]: {1}'.format(value, description))
  return Parse
def CustomFunctionValidator(fn, description, parser=None):
  """Returns a function that validates the input by running it through fn.

  For example:

  >>> def isEven(val):
  ...   return val % 2 == 0
  >>> even_number_parser = arg_parsers.CustomFunctionValidator(
  ...   isEven, 'This is not even!', parser=arg_parsers.BoundedInt(0))
  >>> parser.add_argument('--foo', type=even_number_parser)
  >>> parser.parse_args(['--foo', '3'])
  >>> # SystemExit raised and the error "error: argument foo: Bad value [3]:
  >>> # This is not even!" is displayed

  Args:
    fn: str -> boolean
    description: an error message to show if boolean function returns False
    parser: an arg_parser that is applied to to value before validation. The
      value is also returned by this parser.

  Returns:
    function: str -> str, usable as an argparse type
  """
  def Parse(value):
    """Validates and returns a custom object from an argument string value."""
    parse_succeeded = True
    try:
      candidate = parser(value) if parser else value
    except ArgumentTypeError:
      # A pre-parse failure falls through to the generic error below;
      # any other exception from fn or parser propagates unchanged.
      parse_succeeded = False
    if parse_succeeded and fn(candidate):
      return candidate
    safe_value = console_attr.SafeText(value)
    raise ArgumentTypeError(
        'Bad value [{0}]: {1}'.format(safe_value, description))
  return Parse
def Duration(default_unit='s',
             lower_bound='0',
             upper_bound=None,
             parsed_unit='s'):
  """Returns a function that can parse time durations.

  See times.ParseDuration() for details. If the unit is omitted, seconds is
  assumed. The parsed unit is assumed to be seconds, but can be specified as
  ms or us.
  For example:

    parser = Duration()
    assert parser('10s') == 10
    parser = Duration(parsed_unit='ms')
    assert parser('10s') == 10000
    parser = Duration(parsed_unit='us')
    assert parser('10s') == 10000000

  Args:
    default_unit: str, The default duration unit.
    lower_bound: str, An inclusive lower bound for values.
    upper_bound: str, An inclusive upper bound for values.
    parsed_unit: str, The unit that the result should be returned as. Can be
      's', 'ms', or 'us'.

  Raises:
    ArgumentTypeError: If either the lower_bound or upper_bound
      cannot be parsed. The returned function will also raise this
      error if it cannot parse its input. This exception is also
      raised if the returned function receives an out-of-bounds
      input.

  Returns:
    A function that accepts a single time duration as input to be
      parsed.
  """
  def Parse(value):
    """Parses a duration from value and returns integer of the parsed_unit."""
    if parsed_unit == 'ms':
      multiplier = 1000
    elif parsed_unit == 'us':
      multiplier = 1000000
    elif parsed_unit == 's':
      multiplier = 1
    else:
      raise ArgumentTypeError(
          _GenerateErrorMessage('parsed_unit must be one of s, ms, us.'))
    try:
      duration = times.ParseDuration(value, default_suffix=default_unit)
      return int(duration.total_seconds * multiplier)
    except times.Error as e:
      message = six.text_type(e).rstrip('.')
      # BUG FIX: user_input was previously passed as a keyword argument to
      # str.format() (which silently ignores unused kwargs), so the offending
      # input never appeared in the message. It belongs to
      # _GenerateErrorMessage instead.
      raise ArgumentTypeError(_GenerateErrorMessage(
          'Failed to parse duration: {0}'.format(message), user_input=value))
  # Bounds are parsed once, with the same syntax as user values.
  parsed_lower_bound = Parse(lower_bound)
  if upper_bound is None:
    parsed_upper_bound = None
  else:
    parsed_upper_bound = Parse(upper_bound)
  def ParseWithBoundsChecking(value):
    """Same as Parse except bound checking is performed."""
    if value is None:
      return None
    parsed_value = Parse(value)
    if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'value must be greater than or equal to {0}'.format(lower_bound),
          user_input=value))
    if parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'value must be less than or equal to {0}'.format(upper_bound),
          user_input=value))
    return parsed_value
  return ParseWithBoundsChecking
def BinarySize(lower_bound=None, upper_bound=None,
               suggested_binary_size_scales=None, default_unit='G',
               type_abbr='B'):
  """Returns a function that can parse binary sizes.

  Binary sizes are defined as base-2 values representing number of
  bytes.

  Input to the parsing function must be a string of the form:

    INTEGER[UNIT]

  The integer must be non-negative. Valid units are "B", "KB", "MB",
  "GB", "TB", "PB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
  omitted then default_unit is assumed. Unit matching is case
  insensitive.

  The result is parsed in bytes. For example:

    parser = BinarySize()
    assert parser('10GB') == 10737418240

  Args:
    lower_bound: str, An inclusive lower bound for values.
    upper_bound: str, An inclusive upper bound for values.
    suggested_binary_size_scales: list, A list of strings with units that will
      be recommended to user.
    default_unit: str, unit used when user did not specify unit.
    type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
      bits/sec.

  Raises:
    ArgumentTypeError: If either the lower_bound or upper_bound
      cannot be parsed. The returned function will also raise this
      error if it cannot parse its input. This exception is also
      raised if the returned function receives an out-of-bounds
      input.

  Returns:
    A function that accepts a single binary size as input to be
      parsed.
  """
  return _ValueParser(
      _BINARY_SIZE_SCALES, default_unit=default_unit, lower_bound=lower_bound,
      upper_bound=upper_bound, strict_case=False, type_abbr=type_abbr,
      suggested_binary_size_scales=suggested_binary_size_scales)
_KV_PAIR_DELIMITER = '='
class Range(object):
  """An inclusive range [start, end] of non-negative integer values."""
  def __init__(self, start, end):
    self.start = start
    self.end = end
  @staticmethod
  def Parse(string_value):
    """Creates Range object out of given string value."""
    match = re.match(_RANGE_PATTERN, string_value)
    if match is None:
      raise ArgumentTypeError('Expected a non-negative integer value or a '
                              'range of such values instead of "{0}"'
                              .format(string_value))
    low = int(match.group('start'))
    high_text = match.group('end')
    # A bare integer ("5") denotes the single-element range 5-5.
    high = low if high_text is None else int(high_text)
    if high < low:
      raise ArgumentTypeError('Expected range start {0} smaller or equal to '
                              'range end {1} in "{2}"'.format(
                                  low, high, string_value))
    return Range(low, high)
  def Combine(self, other):
    """Combines two overlapping or adjacent ranges, raises otherwise."""
    touches = self.end + 1 >= other.start and self.start <= other.end + 1
    if not touches:
      raise Error('Cannot combine non-overlapping or non-adjacent ranges '
                  '{0} and {1}'.format(self, other))
    return Range(min(self.start, other.start), max(self.end, other.end))
  def __eq__(self, other):
    return (isinstance(other, Range)
            and self.start == other.start
            and self.end == other.end)
  def __lt__(self, other):
    # Order by start first, then by end.
    if self.start != other.start:
      return self.start < other.start
    return self.end < other.end
  def __str__(self):
    if self.start == self.end:
      return six.text_type(self.start)
    return '{0}-{1}'.format(self.start, self.end)
class HostPort(object):
  """A holder for a parsed host (address) and port pair."""
  IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
  # includes hostnames
  IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
  def __init__(self, host, port):
    self.host = host
    self.port = port
  @staticmethod
  def Parse(s, ipv6_enabled=False):
    """Parse the given string into a HostPort object.

    This can be used as an argparse type.

    Args:
      s: str, The string to parse. If ipv6_enabled and host is an IPv6
        address, it should be placed in square brackets: e.g.
          [2001:db8:0:0:0:ff00:42:8329]
        or
          [2001:db8:0:0:0:ff00:42:8329]:8080
      ipv6_enabled: boolean, If True then accept IPv6 addresses.

    Raises:
      ArgumentTypeError: If the string is not valid.

    Returns:
      HostPort, The parsed object.
    """
    if not s:
      return HostPort(None, None)
    # IPv4/hostname syntax is always tried first; the bracketed IPv6 form
    # only as a fallback when enabled.
    match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
    if not match and ipv6_enabled:
      match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
      if not match:
        raise ArgumentTypeError(_GenerateErrorMessage(
            'Failed to parse host and port. Expected format \n\n'
            '  IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
            'or\n\n'
            '  [IPv6_ADDRESS]:PORT\n\n'
            '(where :PORT is optional).',
            user_input=s))
    elif not match:
      raise ArgumentTypeError(_GenerateErrorMessage(
          'Failed to parse host and port. Expected format \n\n'
          '  IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
          '(where :PORT is optional).',
          user_input=s))
    return HostPort(match.group('address'), match.group('port'))
class Day(object):
  """A class for parsing a datetime object for a specific day."""
  @staticmethod
  def Parse(s):
    """Parses a YYYY-MM-DD string into a datetime.date.

    Args:
      s: str, The date string; falsy input yields None.

    Returns:
      datetime.date, or None if s is empty.

    Raises:
      ArgumentTypeError: If s cannot be parsed as a date.
    """
    if not s:
      return None
    try:
      return times.ParseDateTime(s, '%Y-%m-%d').date()
    except times.Error as e:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'Failed to parse date: {0}'.format(six.text_type(e)),
              user_input=s))
class Datetime(object):
  """A class for parsing a datetime object."""
  @staticmethod
  def Parse(s):
    """Parses a string value into a Datetime object in local timezone.

    Args:
      s: str, The date/time string; falsy input yields None.

    Returns:
      datetime.datetime, or None if s is empty.

    Raises:
      ArgumentTypeError: If s cannot be parsed as a date/time.
    """
    if not s:
      return None
    try:
      return times.ParseDateTime(s)
    except times.Error as e:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'Failed to parse date/time: {0}'.format(six.text_type(e)),
              user_input=s))
  @staticmethod
  def ParseUtcTime(s):
    """Parses a string representing a time in UTC into a Datetime object.

    Args:
      s: str, The time string; falsy input yields None.

    Returns:
      datetime.datetime with UTC tzinfo, or None if s is empty.

    Raises:
      ArgumentTypeError: If s cannot be parsed as a time.
    """
    if not s:
      return None
    try:
      return times.ParseDateTime(s, tzinfo=tz.tzutc())
    except times.Error as e:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'Failed to parse UTC time: {0}'.format(six.text_type(e)),
              user_input=s))
class DayOfWeek(object):
  """A class for parsing a day of the week."""
  DAYS = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
  @staticmethod
  def Parse(s):
    """Validates and normalizes a string as a day of the week.

    Args:
      s: str, A day name such as 'monday' or 'Mon'; case insensitive.

    Returns:
      The normalized three-letter abbreviation (e.g. 'MON'), or None for
      empty input.

    Raises:
      ArgumentTypeError: If s does not name a day of the week.
    """
    if not s:
      return None
    # Upper-case first, then keep only the first three letters.
    normalized = s.upper()[:3]
    if normalized in DayOfWeek.DAYS:
      return normalized
    raise ArgumentTypeError(
        _GenerateErrorMessage(
            'Failed to parse day of week. Value should be one of {0}'.format(
                ', '.join(DayOfWeek.DAYS)),
            user_input=s))
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
"""Returns a function that can parse given type within some bound.
Args:
type_builder: A callable for building the requested type from the value
string.
type_description: str, Description of the requested type (for verbose
messages).
lower_bound: of type compatible with type_builder,
The value must be >= lower_bound.
upper_bound: of type compatible with type_builder,
The value must be <= upper_bound.
unlimited: bool, If True then a value of 'unlimited' means no limit.
Returns:
A function that can parse given type within some bound.
"""
def Parse(value):
"""Parses value as a type constructed by type_builder.
Args:
value: str, Value to be converted to the requested type.
Raises:
ArgumentTypeError: If the provided value is out of bounds or unparsable.
Returns:
Value converted to the requested type.
"""
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
  """Returns an argparse type function for bounded ints; see _BoundedType."""
  return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
  """Returns an argparse type function for bounded floats; see _BoundedType."""
  return _BoundedType(float, 'a floating point number', *args, **kwargs)
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
class ArgType(object):
  """Base class for the argparse type callables defined in this module."""
class ArgBoolean(ArgType):
  """Interpret an argument value as a bool.

  Accepts a configurable set of truthy and falsey spellings; matching is
  case-insensitive unless case_sensitive is set.
  """
  def __init__(
      self, truthy_strings=None, falsey_strings=None, case_sensitive=False):
    self._case_sensitive = case_sensitive
    # Fall back to the default spellings for any falsy argument.
    self._truthy_strings = truthy_strings or ['true', 'yes']
    self._falsey_strings = falsey_strings or ['false', 'no']
  def __call__(self, arg_value):
    candidate = arg_value if self._case_sensitive else arg_value.lower()
    if candidate in self._truthy_strings:
      return True
    if candidate in self._falsey_strings:
      return False
    raise ArgumentTypeError(
        'Invalid flag value [{0}], expected one of [{1}]'.format(
            arg_value,
            ', '.join(self._truthy_strings + self._falsey_strings)
        )
    )
class ArgList(ArgType):
  """Interpret an argument value as a list.

  Intended to be used as the type= for a flag argument. Splits the string on
  commas or another delimiter and returns a list.

  By default, splits on commas:
      'a,b,c' -> ['a', 'b', 'c']
  There is an available syntax for using an alternate delimiter:
      '^:^a,b:c' -> ['a,b', 'c']
      '^::^a:b::c' -> ['a:b', 'c']
      '^,^^a^,b,c' -> ['^a^', ',b', 'c']
  """
  DEFAULT_DELIM_CHAR = ','
  ALT_DELIM_CHAR = '^'
  def __init__(self,
               element_type=None,
               min_length=0,
               max_length=None,
               choices=None,
               custom_delim_char=None,
               visible_choices=None):
    """Initialize an ArgList.

    Args:
      element_type: (str)->str, A function to apply to each of the list items.
      min_length: int, The minimum size of the list.
      max_length: int, The maximum size of the list.
      choices: [element_type], a list of valid possibilities for elements. If
        None, then no constraints are imposed.
      custom_delim_char: char, A customized delimiter character.
      visible_choices: [element_type], a list of valid possibilities for
        elements to be shown to the user. If None, defaults to choices.

    Returns:
      (str)->[str], A function to parse the list of values in the argument.

    Raises:
      ArgumentTypeError: If the list is malformed.
    """
    self.element_type = element_type
    self.choices = choices
    self.visible_choices = (
        visible_choices if visible_choices is not None else choices)
    if self.visible_choices:
      # Wrap element_type so every element is also validated against choices.
      def ChoiceType(raw_value):
        if element_type:
          typed_value = element_type(raw_value)
        else:
          typed_value = raw_value
        if typed_value not in choices:
          raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
              value=typed_value,
              choices=', '.join(
                  [six.text_type(choice) for choice in self.visible_choices])))
        return typed_value
      self.element_type = ChoiceType
    self.min_length = min_length
    self.max_length = max_length
    self.custom_delim_char = custom_delim_char
  def __call__(self, arg_value):  # pylint:disable=missing-docstring
    if isinstance(arg_value, list):
      # Already a list (e.g. from a flags file): use as-is.
      arg_list = arg_value
    elif not isinstance(arg_value, six.string_types):
      raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
          type(arg_value).__name__, arg_value))
    else:
      delim = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
      # Leading '^DELIM^' selects an alternate delimiter for this value.
      if (arg_value.startswith(self.ALT_DELIM_CHAR) and
          self.ALT_DELIM_CHAR in arg_value[1:]):
        delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
        if not delim:
          raise ArgumentTypeError(
              'Invalid delimeter. Please see `gcloud topic flags-file` or '
              '`gcloud topic escaping` for information on providing list or '
              'dictionary flag values with special characters.')
      arg_list = _TokenizeQuotedList(arg_value, delim=delim)
    # TODO(b/35944028): These exceptions won't present well to the user.
    if len(arg_list) < self.min_length:
      raise ArgumentTypeError('not enough args')
    if self.max_length is not None and len(arg_list) > self.max_length:
      raise ArgumentTypeError('too many args')
    if self.element_type:
      arg_list = [self.element_type(arg) for arg in arg_list]
    return arg_list
  _MAX_METAVAR_LENGTH = 30  # arbitrary, but this is pretty long
  def GetUsageMsg(self, is_custom_metavar, metavar):
    """Get a specially-formatted metavar for the ArgList to use in help.

    An example is worth 1,000 words:

    >>> ArgList().GetUsageMsg(False, 'FOO')
    '[FOO,...]'
    >>> ArgList(min_length=1).GetUsageMsg(False, 'FOO')
    'FOO,[FOO,...]'
    >>> ArgList(max_length=2).GetUsageMsg(False, 'FOO')
    'FOO,[FOO]'
    >>> ArgList(max_length=3).GetUsageMsg(False, 'FOO')  # One, two, many...
    'FOO,[FOO,...]'
    >>> ArgList(min_length=2, max_length=2).GetUsageMsg(False, 'FOO')
    'FOO,FOO'
    >>> ArgList().GetUsageMsg(False, 'REALLY_VERY_QUITE_LONG_METAVAR')
    'REALLY_VERY_QUITE_LONG_METAVAR,[...]'

    Args:
      is_custom_metavar: unused in GetUsageMsg
      metavar: string, the base metavar to turn into an ArgList metavar

    Returns:
      string, the ArgList usage metavar
    """
    del is_custom_metavar  # Unused in GetUsageMsg
    delim_char = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
    # Required occurrences first, then optional ones.
    required = delim_char.join([metavar] * self.min_length)
    if self.max_length:
      num_optional = self.max_length - self.min_length
    else:
      num_optional = None
    # Use the "1, 2, many" approach to counting
    if num_optional == 0:
      optional = ''
    elif num_optional == 1:
      optional = '[{}]'.format(metavar)
    elif num_optional == 2:
      optional = '[{0}{1}[{0}]]'.format(metavar, delim_char)
    else:
      optional = '[{}{}...]'.format(metavar, delim_char)
    msg = delim_char.join([x for x in [required, optional] if x])
    if len(msg) < self._MAX_METAVAR_LENGTH:
      return msg
    # With long metavars, only put it in once.
    if self.min_length == 0:
      return '[{}{}...]'.format(metavar, delim_char)
    if self.min_length == 1:
      return '{}{}[...]'.format(metavar, delim_char)
    else:
      return '{0}{1}...{1}[...]'.format(metavar, delim_char)
class ArgDict(ArgList):
  """Interpret an argument value as a dict.

  Intended to be used as the type= for a flag argument. Splits the string on
  commas to get a list, and then splits the items on equals to get a set of
  key-value pairs to get a dict.
  """
  def __init__(self, key_type=None, value_type=None, spec=None, min_length=0,
               max_length=None, allow_key_only=False, required_keys=None,
               operators=None):
    """Initialize an ArgDict.

    Args:
      key_type: (str)->str, A function to apply to each of the dict keys.
      value_type: (str)->str, A function to apply to each of the dict values.
      spec: {str: (str)->str}, A mapping of expected keys to functions.
        The functions are applied to the values. If None, an arbitrary
        set of keys will be accepted. If not None, it is an error for the
        user to supply a key that is not in the spec. If the function
        specified is None, then accept a key only without '=value'.
      min_length: int, The minimum number of keys in the dict.
      max_length: int, The maximum number of keys in the dict.
      allow_key_only: bool, Allow empty values.
      required_keys: [str], Required keys in the dict.
      operators: operator_char -> value_type, Define multiple single character
        operators, each with its own value_type converter. Use
        value_type==None for no conversion. The default value is
        {'=': value_type}

    Returns:
      (str)->{str:str}, A function to parse the dict in the argument.

    Raises:
      ArgumentTypeError: If the list is malformed.
      ValueError: If both value_type and spec are provided.
    """
    super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
    if spec and value_type:
      raise ValueError('cannot have both spec and sub_type')
    self.key_type = key_type
    self.spec = spec
    self.allow_key_only = allow_key_only
    self.required_keys = required_keys or []
    if not operators:
      operators = {'=': value_type}
    for op in operators.keys():
      if len(op) != 1:
        raise ArgumentTypeError(
            'Operator [{}] must be one character.'.format(op))
    # Build a KEY<op>VALUE matcher covering all configured operator chars.
    # re.DOTALL lets values span newlines.
    ops = ''.join(six.iterkeys(operators))
    key_op_value_pattern = '([^{ops}]+)([{ops}]?)(.*)'.format(
        ops=re.escape(ops))
    self.key_op_value = re.compile(key_op_value_pattern, re.DOTALL)
    self.operators = operators
  def _ApplySpec(self, key, value):
    # Validates the key against the spec and converts the value with the
    # spec's per-key function (None meaning the key takes no value).
    if key in self.spec:
      if self.spec[key] is None:
        if value:
          raise ArgumentTypeError('Key [{0}] does not take a value'.format(key))
        return None
      return self.spec[key](value)
    else:
      raise ArgumentTypeError(
          _GenerateErrorMessage(
              'valid keys are [{0}]'.format(
                  ', '.join(sorted(self.spec.keys()))),
              user_input=key))
  def _ValidateKeyValue(self, key, value, op='='):
    """Converts and validates <key,value> and returns (key,value)."""
    # A missing operator (or missing value) is only legal with allow_key_only.
    if (not op or value is None) and not self.allow_key_only:
      raise ArgumentTypeError(
          'Bad syntax for dict arg: [{0}]. Please see '
          '`gcloud topic flags-file` or `gcloud topic escaping` for '
          'information on providing list or dictionary flag values with '
          'special characters.'.format(key))
    if self.key_type:
      try:
        key = self.key_type(key)
      except ValueError:
        raise ArgumentTypeError('Invalid key [{0}]'.format(key))
    # Each operator may carry its own value converter.
    convert_value = self.operators.get(op, None)
    if convert_value:
      try:
        value = convert_value(value)
      except ValueError:
        raise ArgumentTypeError('Invalid value [{0}]'.format(value))
    if self.spec:
      value = self._ApplySpec(key, value)
    return key, value
  def __call__(self, arg_value):  # pylint:disable=missing-docstring
    if isinstance(arg_value, dict):
      # Already a dict (e.g. from a flags file): validate each pair.
      raw_dict = arg_value
      arg_dict = collections.OrderedDict()
      for key, value in six.iteritems(raw_dict):
        key, value = self._ValidateKeyValue(key, value)
        arg_dict[key] = value
    elif not isinstance(arg_value, six.string_types):
      raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
          type(arg_value).__name__, arg_value))
    else:
      # Tokenize via ArgList, then split each token into KEY<op>VALUE.
      arg_list = super(ArgDict, self).__call__(arg_value)
      arg_dict = collections.OrderedDict()
      for arg in arg_list:
        match = self.key_op_value.match(arg)
        # TODO(b/35944028): These exceptions won't present well to the user.
        if not match:
          raise ArgumentTypeError('Invalid flag value [{0}]'.format(arg))
        key, op, value = match.group(1), match.group(2), match.group(3)
        key, value = self._ValidateKeyValue(key, value, op=op)
        arg_dict[key] = value
    for required_key in self.required_keys:
      if required_key not in arg_dict:
        raise ArgumentTypeError(
            'Key [{0}] required in dict arg but not provided'.format(
                required_key))
    return arg_dict
  def GetUsageMsg(self, is_custom_metavar, metavar):
    """Returns a metavar listing the spec keys, or the ArgList default."""
    # If we're not using a spec to limit the key values or if metavar
    # has been overridden, then use the normal ArgList formatting
    if not self.spec or is_custom_metavar:
      return super(ArgDict, self).GetUsageMsg(is_custom_metavar, metavar)
    msg_list = []
    spec_list = sorted(six.iteritems(self.spec))
    # First put the spec keys with no value followed by those that expect a
    # value
    for spec_key, spec_function in spec_list:
      if spec_function is None:
        if not self.allow_key_only:
          raise ArgumentTypeError(
              'Key [{0}] specified in spec without a function but '
              'allow_key_only is set to False'.format(spec_key))
        msg_list.append(spec_key)
    for spec_key, spec_function in spec_list:
      if spec_function is not None:
        msg_list.append('{0}={1}'.format(spec_key, spec_key.upper()))
    msg = '[' + '],['.join(msg_list) + ']'
    return msg
class UpdateAction(argparse.Action):
  r"""Create a single dict value from delimited or repeated flags.

  This class is intended to be a more flexible version of
  argparse._AppendAction.

  For example, with the following flag definition:

      parser.add_argument(
        '--inputs',
        type=arg_parsers.ArgDict(),
        action='append')

  a caller can specify on the command line flags such as:

    --inputs k1=v1,k2=v2

  and the result will be a list of one dict:

    [{ 'k1': 'v1', 'k2': 'v2' }]

  Specifying two separate command line flags such as:

    --inputs k1=v1 \
    --inputs k2=v2

  will produce a list of dicts:

    [{ 'k1': 'v1'}, { 'k2': 'v2' }]

  The UpdateAction class allows for both of the above user inputs to result
  in the same: a single dictionary:

    { 'k1': 'v1', 'k2': 'v2' }

  This gives end-users a lot more flexibility in constructing their command
  lines, especially when scripting calls.

  Note that this class will raise an exception if a key value is specified
  more than once. To allow for a key value to be specified multiple times,
  use UpdateActionWithAppend.
  """
  def OnDuplicateKeyRaiseError(self, key, existing_value=None, new_value=None):
    # Default duplicate-key handler: always raises. Note it is referenced
    # below as an unbound function default for onduplicatekey_handler, so
    # callers invoke it as handler(self, key, ...).
    if existing_value is None:
      user_input = None
    else:
      user_input = ', '.join([existing_value, new_value])
    raise argparse.ArgumentError(self, _GenerateErrorMessage(
        '"{0}" cannot be specified multiple times'.format(key),
        user_input=user_input))
  def __init__(self,
               option_strings,
               dest,
               nargs=None,
               const=None,
               default=None,
               type=None,  # pylint:disable=redefined-builtin
               choices=None,
               required=False,
               help=None,  # pylint:disable=redefined-builtin
               metavar=None,
               onduplicatekey_handler=OnDuplicateKeyRaiseError):
    if nargs == 0:
      raise ValueError('nargs for append actions must be > 0; if arg '
                       'strings are not supplying the value to append, '
                       'the append const action may be more appropriate')
    if const is not None and nargs != argparse.OPTIONAL:
      raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
    # Keep the original choices (possibly a dict); argparse itself only
    # gets the sorted keys for membership checking.
    self.choices = choices
    if isinstance(choices, dict):
      choices = sorted(choices.keys())
    super(UpdateAction, self).__init__(
        option_strings=option_strings,
        dest=dest,
        nargs=nargs,
        const=const,
        default=default,
        type=type,
        choices=choices,
        required=required,
        help=help,
        metavar=metavar)
    self.onduplicatekey_handler = onduplicatekey_handler
  def _EnsureValue(self, namespace, name, value):
    # Initializes namespace.<name> to value if it is unset, then returns it.
    if getattr(namespace, name, None) is None:
      setattr(namespace, name, value)
    return getattr(namespace, name)
  # pylint: disable=protected-access
  def __call__(self, parser, namespace, values, option_string=None):
    if isinstance(values, dict):
      # Get the existing arg value (if any)
      items = copy.copy(self._EnsureValue(
          namespace, self.dest, collections.OrderedDict()))
      # Merge the new key/value pair(s) in
      for k, v in six.iteritems(values):
        if k in items:
          v = self.onduplicatekey_handler(self, k, items[k], v)
        items[k] = v
    else:
      # Get the existing arg value (if any)
      items = copy.copy(self._EnsureValue(namespace, self.dest, []))
      # Merge the new key/value pair(s) in
      for k in values:
        if k in items:
          self.onduplicatekey_handler(self, k)
        else:
          items.append(k)
    # Saved the merged dictionary
    setattr(namespace, self.dest, items)
class UpdateActionWithAppend(UpdateAction):
    """Create a single dict value from delimited or repeated flags.

    A variant of UpdateAction that accumulates, rather than rejects, values
    for a repeated key. For example:

      --inputs k1=v1a --inputs k1=v1b --inputs k2=v2

    results in:

      { 'k1': ['v1a', 'v1b'], 'k2': 'v2' }
    """

    def OnDuplicateKeyAppend(self, key, existing_value=None, new_value=None):
        """Combine a duplicate key's values instead of raising an error."""
        if existing_value is None:
            # List-style flag (keys only): just keep the repeated key.
            return key
        if isinstance(existing_value, list):
            # Already accumulating — extend the list.
            return existing_value + [new_value]
        # First duplicate: promote the single value to a two-element list.
        return [existing_value, new_value]

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,  # pylint:disable=redefined-builtin
                 choices=None,
                 required=False,
                 help=None,  # pylint:disable=redefined-builtin
                 metavar=None,
                 onduplicatekey_handler=OnDuplicateKeyAppend):
        # Identical to UpdateAction.__init__ except for the default handler.
        super(UpdateActionWithAppend, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar,
            onduplicatekey_handler=onduplicatekey_handler)
class RemainderAction(argparse._StoreAction):  # pylint: disable=protected-access
    """An action with a couple of helpers to better handle --.

    argparse on its own does not properly handle -- implementation args.
    argparse.REMAINDER greedily steals valid flags before a --, and nargs='*'
    will bind to [] and not parse args after --. This Action represents
    arguments to be passed through to a subcommand after --.

    Primarily, this Action provides two utility parsers to help a modified
    ArgumentParser parse -- properly.

    There is one additional property kwarg:
      example: A usage statement used to construct nice additional help.
    """

    def __init__(self, *args, **kwargs):
        # Fail fast if this action is attached with the wrong nargs.
        if kwargs['nargs'] is not argparse.REMAINDER:
            raise ValueError(
                'The RemainderAction should only be used when '
                'nargs=argparse.REMAINDER.')

        # Create detailed help.
        self.explanation = (
            "The '--' argument must be specified between gcloud specific args on "
            'the left and {metavar} on the right.'
        ).format(metavar=kwargs['metavar'])
        if 'help' in kwargs:
            kwargs['help'] += '\n+\n' + self.explanation
            # NOTE(review): 'example' appears to be consumed only when 'help' is
            # also given (nesting reconstructed from mangled whitespace) — confirm.
            if 'example' in kwargs:
                kwargs['help'] += ' Example:\n\n' + kwargs['example']
                # 'example' is not a real argparse kwarg; remove before super().
                del kwargs['example']
        super(RemainderAction, self).__init__(*args, **kwargs)

    def _SplitOnDash(self, args):
        """Split args at the first '--'; the '--' itself is dropped."""
        split_index = args.index('--')
        # Remove -- before passing through
        return args[:split_index], args[split_index + 1:]

    def ParseKnownArgs(self, args, namespace):
        """Binds all args after -- to the namespace."""
        # Not [], so that we can distinguish between empty remainder args and
        # absent remainder args.
        remainder_args = None
        if '--' in args:
            args, remainder_args = self._SplitOnDash(args)
        self(None, namespace, remainder_args)
        return namespace, args

    def ParseRemainingArgs(self, remaining_args, namespace, original_args):
        """Parses the unrecognized args from the end of the remaining_args.

        This method identifies all unrecognized arguments after the last
        argument recognized by a parser (but before --). It then either logs a
        warning and binds them to the namespace or raises an error, depending
        on strictness.

        Args:
          remaining_args: A list of arguments that the parsers did not recognize.
          namespace: The Namespace to bind to.
          original_args: The full list of arguments given to the top parser.

        Raises:
          ArgumentError: If there were remaining arguments after the last
            recognized argument and this action is strict.

        Returns:
          A tuple of the updated namespace and unrecognized arguments (before
          the last recognized argument).
        """
        # Only parse consecutive unknown args from the end of the original args.
        # Strip out everything after '--'
        if '--' in original_args:
            original_args, _ = self._SplitOnDash(original_args)
        # Find common suffix between remaining_args and original_args
        split_index = 0
        for i, (arg1, arg2) in enumerate(
                zip(reversed(remaining_args), reversed(original_args))):
            if arg1 != arg2:
                # First mismatch (from the end) bounds the pass-through suffix.
                split_index = len(remaining_args) - i
                break
        pass_through_args = remaining_args[split_index:]
        remaining_args = remaining_args[:split_index]

        if pass_through_args:
            msg = ('unrecognized args: {args}\n' + self.explanation).format(
                args=' '.join(pass_through_args))
            raise parser_errors.UnrecognizedArgumentsError(msg)
        self(None, namespace, pass_through_args)
        return namespace, remaining_args
class StoreOnceAction(argparse.Action):
    """Store an argument's value and reject any repeated occurrence.

    With a flag defined as:

      parser.add_argument(
          '--inputs',
          type=arg_parsers.ArgDict(),
          action=StoreOnceAction)

    a single `--inputs k1=v1,k2=v2` is stored as-is, but specifying the flag a
    second time raises an ArgumentError. Use UpdateActionWithAppend when a key
    may legitimately be given multiple times.
    """

    def OnSecondArgumentRaiseError(self):
        """Raise an ArgumentError for a repeated flag."""
        raise argparse.ArgumentError(self, _GenerateErrorMessage(
            '"{0}" argument cannot be specified multiple times'.format(self.dest)))

    def __init__(self, *args, **kwargs):
        # Tracks whether this flag has already been seen on the command line.
        self.dest_is_populated = False
        super(StoreOnceAction, self).__init__(*args, **kwargs)

    # pylint: disable=protected-access
    def __call__(self, parser, namespace, values, option_string=None):
        """Bind values to the namespace, erroring on a second occurrence."""
        if self.dest_is_populated:
            self.OnSecondArgumentRaiseError()
        setattr(namespace, self.dest, values)
        self.dest_is_populated = True
class _HandleNoArgAction(argparse.Action):
    """This class should not be used directly, use HandleNoArgAction instead."""

    def __init__(self, none_arg, deprecation_message, **kwargs):
        super(_HandleNoArgAction, self).__init__(**kwargs)
        # Destination flag to set to True when the flag is given bare,
        # and the warning to emit in that case.
        self.none_arg = none_arg
        self.deprecation_message = deprecation_message

    def __call__(self, parser, namespace, value, option_string=None):
        """Bind value; a bare flag (value None) warns and sets the none_arg."""
        if value is None:
            log.warning(self.deprecation_message)
            if self.none_arg:
                setattr(namespace, self.none_arg, True)
        setattr(namespace, self.dest, value)
def HandleNoArgAction(none_arg, deprecation_message):
    """Creates an argparse.Action that warns when called with no arguments.

    Used to gracefully deprecate a flag that takes an optional value
    (nargs='?'). When the flag is given with no value, the action
    log.warning()s the deprecation_message and sets `none_arg` to True — so
    `--foo` (bare) behaves exactly like `--no-foo`.

    Args:
      none_arg: a boolean argument to write to. For --no-foo use "no_foo".
      deprecation_message: msg to tell user to stop using with no arguments.

    Returns:
      An argparse action (a factory argparse can instantiate).
    """
    def _MakeAction(**kwargs):
        # argparse calls this with the standard Action kwargs.
        return _HandleNoArgAction(none_arg, deprecation_message, **kwargs)

    return _MakeAction
class FileContents(object):
    """Creates an argparse type that reads the contents of a file or stdin.

    This is similar to argparse.FileType, but unlike FileType it does not leave
    a dangling file handle open. The argument stored in the argparse Namespace
    is the file's contents.

    Attributes:
      binary: bool, If True, the contents of the file will be returned as bytes.

    Returns:
      A function that accepts a filename, or "-" representing that stdin should
      be used as input.
    """

    def __init__(self, binary=False):
        # Whether __call__ returns bytes (True) or text (False).
        self.binary = binary

    def __call__(self, name):
        """Return the contents of the file with the specified name.

        If name is "-", stdin is read until EOF. Otherwise, the named file is
        read.

        Args:
          name: str, The file name, or '-' to indicate stdin.

        Returns:
          The contents of the file.

        Raises:
          ArgumentTypeError: If the file cannot be read or is too large.
        """
        try:
            return console_io.ReadFromFileOrStdin(name, binary=self.binary)
        except files.Error as e:
            # Re-raise I/O failures as argparse-friendly type errors.
            raise ArgumentTypeError(e)
class YAMLFileContents(object):
    """Creates an argparse type that reads the contents of a YAML or JSON file.

    This is similar to argparse.FileType, but unlike FileType it does not leave
    a dangling file handle open. The argument stored in the argparse Namespace
    is the file's contents parsed as a YAML object.

    Attributes:
      validator: function, Function that will validate the provided input
        file contents.

    Returns:
      A function that accepts a filename that should be parsed as a YAML
      or JSON file.
    """

    def __init__(self, validator=None):
        # Optional callable applied to the parsed data; must be callable.
        if validator and not callable(validator):
            raise ArgumentTypeError('Validator must be callable')
        self.validator = validator

    def _AssertJsonLike(self, yaml_data):
        # Only accept top-level mappings or sequences (JSON-like structures).
        if not (yaml.dict_like(yaml_data) or yaml.list_like(yaml_data)):
            raise ArgumentTypeError('Invalid YAML/JSON Data [{}]'.format(yaml_data))

    def _LoadSingleYamlDocument(self, name):
        """Returns the yaml data for a file or from stdin for a single document.

        YAML allows multiple documents in a single file by using `---` as a
        separator between documents. See https://yaml.org/spec/1.1/#id857577.
        However, some YAML-generating tools generate a single document followed
        by this separator before ending the file.

        This method supports the case of a single document in a file that
        contains superfluous document separators, but still throws if multiple
        documents are actually found.

        Args:
          name: str, The file path to the file or "-" to read from stdin.

        Returns:
          The contents of the file parsed as a YAML data object.
        """
        if name == '-':
            stdin = console_io.ReadStdin()  # Save to potentially reuse below
            yaml_data = yaml.load_all(stdin)
        else:
            yaml_data = yaml.load_all_path(name)
        yaml_data = [d for d in yaml_data if d is not None]  # Remove empty docs

        # Return the single document if only 1 is found.
        if len(yaml_data) == 1:
            return yaml_data[0]

        # Multiple (or 0) documents found. Try to parse again with single-document
        # loader so its error is propagated rather than creating our own.
        if name == '-':
            return yaml.load(stdin)
        else:
            return yaml.load_path(name)

    def __call__(self, name):
        """Load YAML data from file path (name) or stdin.

        If name is "-", stdin is read until EOF. Otherwise, the named file is
        read. If self.validator is set, call it on the yaml data once it is
        loaded.

        Args:
          name: str, The file path to the file.

        Returns:
          The contents of the file parsed as a YAML data object.

        Raises:
          ArgumentTypeError: If the file cannot be read or is not a JSON/YAML
            like object.
          ValueError: If file content fails validation.
        """
        try:
            yaml_data = self._LoadSingleYamlDocument(name)
            self._AssertJsonLike(yaml_data)
            if self.validator:
                if not self.validator(yaml_data):
                    raise ValueError('Invalid YAML/JSON content [{}]'.format(yaml_data))
            return yaml_data
        except (yaml.YAMLParseError, yaml.FileLoadError) as e:
            raise ArgumentTypeError(e)
class StoreTrueFalseAction(argparse._StoreTrueAction):  # pylint: disable=protected-access
    """Argparse action that acts as a combination of store_true and store_false.

    Calliope already gives any bool-type arguments the standard and `--no-`
    variants. In most cases we only want to document the option that does
    something---if we have `default=False`, we don't want to show `--no-foo`,
    since it won't do anything.

    But in some cases we *do* want to show both variants: one example is when
    `--foo` means "enable," `--no-foo` means "disable," and neither means "do
    nothing." The obvious way to represent this is `default=None`; however, (1)
    the default value of `default` is already None, so most boolean actions
    would have this setting by default (not what we want), and (2) we still
    want an option to have this True/False/None behavior *without* the flag
    documentation. To get around this, we have an opt-in version of the same
    thing that documents both the flag and its inverse.
    """

    def __init__(self, *args, **kwargs):
        # Force default=None so the tri-state (True/False/unset) is preserved;
        # callers cannot override the default for this action.
        super(StoreTrueFalseAction, self).__init__(*args, default=None, **kwargs)
def StoreFilePathAndContentsAction(binary=False):
    """Returns Action that stores both file content and file path.

    Args:
      binary: boolean, whether or not this is a binary file.

    Returns:
      An argparse action (a class closing over `binary`).
    """

    class Action(argparse.Action):
        """Stores file contents under the flag's DEST and the path under DEST_path."""

        def __call__(self, parser, namespace, value, option_string=None):
            """Read the file (or stdin for '-') and bind contents + path."""
            try:
                content = console_io.ReadFromFileOrStdin(value, binary=binary)
            except files.Error as e:
                raise ArgumentTypeError(e)
            setattr(namespace, self.dest, content)
            # The original path is kept alongside the contents.
            setattr(namespace, '{}_path'.format(self.dest), value)

    return Action
| [
"saranraju90@gmail.com"
] | saranraju90@gmail.com |
25758d4020776fdb429b99cd383fb2251ca42ea7 | cbbc0c95e367932e962f8d9e6175a5150d0c6570 | /coursera/algorithmic_toolbox/Greedy Algorithms/Maximum Salary/maximum_salary.py | 2ebcd8633f3620ee15e42cef0dfecc8f14bbe780 | [] | no_license | chobostar/education_and_training | 1369ab98f28b93651bb861a40c1fa0603973519e | fcec324a1b92916401ba8de5c61f6d7b1ee69c68 | refs/heads/master | 2023-08-04T04:36:16.217908 | 2023-07-22T08:09:54 | 2023-07-22T08:09:54 | 216,988,123 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # python3
from itertools import permutations
import random
def if_greater(x: str, y: str) -> bool:
    """Return True if digit-string x should precede y in the largest number.

    x comes first exactly when the concatenation x + y is lexicographically
    greater than y + x (for digit strings of equal total length this matches
    numeric comparison). Fixes the original signature, which annotated only
    `y`, misleadingly suggesting `x` had a different type.
    """
    return (x + y) > (y + x)
def sort(items):
    """Return items ordered so their concatenation forms the largest number.

    Replaces the hand-rolled random-pivot quicksort with the standard-library
    sort and a pairwise comparator: a precedes b iff a + b > b + a. Unlike the
    original, ties (a + b == b + a) keep a deterministic, stable order.

    Args:
      items: list of digit strings.

    Returns:
      A new list, sorted descending under the concatenation comparator.
    """
    from functools import cmp_to_key

    def compare(a, b):
        # Negative return -> a before b (i.e. a "greater" for our ordering).
        if a + b > b + a:
            return -1
        if a + b < b + a:
            return 1
        return 0

    return sorted(items, key=cmp_to_key(compare))
def largest_number_naive(numbers):
    """Brute force: try every ordering of the digits and keep the biggest.

    Exponential in len(numbers); kept only as a reference implementation for
    stress-testing the greedy solution.
    """
    digit_strings = [str(number) for number in numbers]
    best = 0
    for ordering in permutations(digit_strings):
        best = max(best, int("".join(ordering)))
    return best
def largest_number(numbers):
    """Greedy solution: order digit strings with sort() and concatenate."""
    ordered = sort([str(number) for number in numbers])
    return int("".join(ordered))
if __name__ == '__main__':
    # Grader input format: first line is the count n, second line holds the
    # n space-separated numbers.
    n = int(input())
    input_numbers = input().split()
    assert len(input_numbers) == n
    print(largest_number(input_numbers))
| [
"yakutskkirill@mail.ru"
] | yakutskkirill@mail.ru |
d556f5c5b3363e7fc2bbc713413256455f6f53d3 | 7b6e3c5e6b963c749da9f946275661ae0e67dbd2 | /src/model/test/yolo_v2_test.py | f12f453a1da9ecff535acc2209d498da9c687322 | [] | no_license | WeiZongqi/yolo-tensorflow | c8237295b41beb61943207d8511c80a0f33507f2 | 53eaa2ad779918ced2ded2834e09abf2e0ed7202 | refs/heads/master | 2021-01-25T14:26:58.371334 | 2017-12-28T08:18:59 | 2017-12-28T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,688 | py | # -*- coding: utf8 -*-
# author: ronniecao
from __future__ import print_function
import sys
import os
import time
import numpy
import matplotlib.pyplot as plt
import tensorflow as tf
from src.data.image import ImageProcessor
from src.model.yolo_v2 import TinyYolo
class TinyYoloTestor:
    """Manual sanity checks for TinyYolo's IOU and loss tensor computations.

    These are exploratory tests: they build tiny hand-crafted predictions and
    labels on a small grid, run the TF1 graph, and print intermediate values
    so each loss term can be verified by hand (no assertions are made).
    """

    def test_calculate_loss(self):
        """Build a 1x2x2 grid (2 boxes/cell, up to 3 objects/cell, 5 classes)
        and evaluate the yolo-v2 style loss terms, printing the class term.

        Each predicted box is matched to the ground-truth object it overlaps
        most (via calculate_iou_tf), then coordinate, object-confidence,
        no-object and classification losses are formed from that matching.
        """
        # Problem dimensions; calculate_iou_tf reads these off self.
        self.batch_size = 1
        self.cell_size = 2
        self.n_boxes = 2
        self.max_objects = 3
        self.n_classes = 5

        # Predicted box coordinates: (batch, cell_y, cell_x, box, [x, y, w, h]).
        coord_pred = numpy.zeros((1, 2, 2, 2, 4))
        coord_pred[0,0,0,0,:] = [0.4, 0.4, 0.1, 0.1]
        coord_pred[0,0,0,1,:] = [0.1, 0.1, 0.1, 0.1]
        coord_pred[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
        coord_pred[0,0,1,1,:] = [0.7, 0.2, 0.1, 0.1]
        coord_pred[0,1,0,0,:] = [0.3, 0.8, 0.1, 0.1]
        coord_pred[0,1,0,1,:] = [0.25, 0.75, 0.1, 0.1]
        coord_pred[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]
        coord_pred[0,1,1,1,:] = [0.7, 0.8, 0.1, 0.1]

        # Predicted objectness confidence per box.
        conf_pred = numpy.zeros((1, 2, 2, 2, 1))
        conf_pred[0,0,0,0,0] = 1.0
        conf_pred[0,0,0,1,0] = 1.0
        conf_pred[0,0,1,0,0] = 1.0
        conf_pred[0,0,1,1,0] = 0.2
        conf_pred[0,1,0,0,0] = 0.1
        conf_pred[0,1,0,1,0] = 0.9
        conf_pred[0,1,1,0,0] = 1.0

        # Predicted class probabilities per box.
        class_pred = numpy.zeros((1, 2, 2, 2, 5))
        class_pred[0,0,0,0,0] = 0.9
        class_pred[0,0,0,0,1] = 0.1
        class_pred[0,0,0,1,1] = 1.0
        class_pred[0,0,1,0,4] = 0.8
        class_pred[0,0,1,0,3] = 0.1
        class_pred[0,0,1,0,2] = 0.1
        class_pred[0,1,0,1,2] = 1.0
        class_pred[0,1,1,0,3] = 0.8
        class_pred[0,1,1,0,0] = 0.05
        class_pred[0,1,1,0,1] = 0.05
        class_pred[0,1,1,0,2] = 0.05
        class_pred[0,1,1,0,4] = 0.05

        # Ground-truth boxes: (batch, cell_y, cell_x, object, [x, y, w, h]).
        coord_true = numpy.zeros((1, 2, 2, 3, 4))
        coord_true[0,0,0,0,:] = [0.1, 0.1, 0.1, 0.1]
        coord_true[0,0,0,1,:] = [0.4, 0.4, 0.1, 0.1]
        coord_true[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
        coord_true[0,1,0,0,:] = [0.25, 0.75, 0.1, 0.1]
        coord_true[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]

        # One-hot ground-truth classes per object.
        class_true = numpy.zeros((1, 2, 2, 3, 5))
        class_true[0,0,0,0,1] = 1.0
        class_true[0,0,0,1,0] = 1.0
        class_true[0,0,1,0,4] = 1.0
        class_true[0,1,0,0,2] = 1.0
        class_true[0,1,1,0,3] = 1.0

        # 1 where a ground-truth object slot is actually occupied.
        object_mask = numpy.zeros((1, 2, 2, 3))
        object_mask[0,0,0,0] = 1
        object_mask[0,0,0,1] = 1
        object_mask[0,0,1,0] = 1
        object_mask[0,1,0,0] = 1
        object_mask[0,1,1,0] = 1

        # Graph inputs mirroring the arrays above.
        coord_true_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 3, 4], name='coord_true_tf')
        coord_pred_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 2, 4], name='coord_pred_tf')
        conf_pred_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 2, 1], name='conf_pred_tf')
        class_true_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 3, 5], name='class_true_tf')
        class_pred_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 2, 5], name='class_pred_tf')
        object_mask_tf = tf.placeholder(
            dtype=tf.float32, shape=[1, 2, 2, 3], name='object_mask_tf')

        # Broadcast predictions against every ground-truth object so IOU can
        # be computed for every (box, object) pair within a cell.
        coord_pred_iter = tf.tile(
            tf.reshape(coord_pred_tf, shape=[
                self.batch_size, self.cell_size, self.cell_size, self.n_boxes, 1, 4]),
            [1, 1, 1, 1, self.max_objects, 1])
        coord_true_iter = tf.reshape(coord_true_tf, shape=[
            self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 4])
        coord_true_iter = tf.tile(coord_true_iter, [1, 1, 1, self.n_boxes, 1, 1])

        # IOU for each (box, object) pair, then a mask selecting, per object,
        # the box with the highest IOU (restricted to occupied object slots).
        iou_tensor = self.calculate_iou_tf(coord_pred_iter, coord_true_iter)
        iou_tensor_max = tf.reduce_max(iou_tensor, 3, keep_dims=True)
        iou_tensor_mask = tf.cast(
            (iou_tensor >= iou_tensor_max), dtype=tf.float32) * tf.reshape(
                object_mask_tf, shape=(
                    self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 1))
        # Per-box indicator: 1 if the box is responsible for some object.
        iou_tensor_pred_mask = tf.reduce_sum(iou_tensor_mask, axis=4)

        # Coordinate loss over responsible boxes, normalized by object count.
        coord_label = tf.reduce_max(iou_tensor_mask * coord_true_iter, axis=4)
        coord_loss = tf.nn.l2_loss((coord_pred_tf - coord_label) * iou_tensor_pred_mask) / (
            tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
        # Mean best-IOU over responsible boxes (diagnostic value, not a loss).
        iou_value = tf.reduce_sum(
            tf.reduce_max(iou_tensor, axis=4) * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))

        # Objectness loss: responsible boxes should predict confidence 1.
        conf_label = tf.reduce_max(iou_tensor_mask * tf.ones(shape=(
            self.batch_size, self.cell_size, self.cell_size,
            self.n_boxes, self.max_objects, 1)), axis=4)
        object_loss = tf.nn.l2_loss(
            (conf_pred_tf - conf_label) * iou_tensor_pred_mask) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
        object_value = tf.reduce_sum(
            conf_pred_tf * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))

        # No-object loss over the complementary (non-responsible) boxes.
        inv_iou_tensor_pred_mask = tf.ones(shape=(
            self.batch_size, self.cell_size, self.cell_size,
            self.n_boxes, 1)) - iou_tensor_pred_mask
        noobject_loss = tf.nn.l2_loss(
            (conf_pred_tf - conf_label) * inv_iou_tensor_pred_mask) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
        noobject_value = tf.reduce_sum(
            conf_pred_tf * inv_iou_tensor_pred_mask, axis=[0,1,2,3]) / (
                tf.reduce_sum(inv_iou_tensor_pred_mask, axis=[0,1,2,3]))

        # Classification loss over responsible boxes against the matched
        # object's one-hot class vector.
        class_true_iter = tf.reshape(class_true_tf, shape=[
            self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, self.n_classes])
        class_true_iter = tf.tile(class_true_iter, [1, 1, 1, self.n_boxes, 1, 1])
        class_label = tf.reduce_max(iou_tensor_mask * class_true_iter, axis=4)
        class_loss = tf.nn.l2_loss(
            (class_pred_tf - class_label) * iou_tensor_pred_mask) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
        class_value = tf.reduce_sum(
            class_pred_tf * class_label * iou_tensor_pred_mask, axis=[0,1,2,3,4]) / (
                tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))

        # Only class_value is fetched/printed; the loss tensors above are
        # built for manual inspection by changing the fetch list.
        sess = tf.Session()
        [output] = sess.run(
            fetches=[class_value],
            feed_dict={coord_true_tf: coord_true, coord_pred_tf: coord_pred,
                       conf_pred_tf: conf_pred,
                       class_true_tf: class_true, class_pred_tf: class_pred,
                       object_mask_tf: object_mask})
        print(output)

    def calculate_iou_tf(self, box_pred, box_true):
        """Elementwise IOU between predicted and true boxes.

        Both inputs are (batch, cell, cell, n_boxes, max_objects, 4) tensors in
        center format [x, y, w, h]; returns IOU with a trailing singleton axis:
        (batch, cell, cell, n_boxes, max_objects, 1).
        """
        # Convert center format to corner format [xmin, ymin, xmax, ymax].
        box1 = tf.stack([
            box_pred[:,:,:,:,:,0] - box_pred[:,:,:,:,:,2] / 2.0,
            box_pred[:,:,:,:,:,1] - box_pred[:,:,:,:,:,3] / 2.0,
            box_pred[:,:,:,:,:,0] + box_pred[:,:,:,:,:,2] / 2.0,
            box_pred[:,:,:,:,:,1] + box_pred[:,:,:,:,:,3] / 2.0])
        box1 = tf.transpose(box1, perm=[1, 2, 3, 4, 5, 0])
        box2 = tf.stack([
            box_true[:,:,:,:,:,0] - box_true[:,:,:,:,:,2] / 2.0,
            box_true[:,:,:,:,:,1] - box_true[:,:,:,:,:,3] / 2.0,
            box_true[:,:,:,:,:,0] + box_true[:,:,:,:,:,2] / 2.0,
            box_true[:,:,:,:,:,1] + box_true[:,:,:,:,:,3] / 2.0])
        box2 = tf.transpose(box2, perm=[1, 2, 3, 4, 5, 0])

        # Intersection rectangle; the mask zeroes out non-overlapping pairs,
        # where width/height of the "intersection" would be negative.
        left_top = tf.maximum(box1[:,:,:,:,:,0:2], box2[:,:,:,:,:,0:2])
        right_bottom = tf.minimum(box1[:,:,:,:,:,2:4], box2[:,:,:,:,:,2:4])
        intersection = right_bottom - left_top
        inter_area = intersection[:,:,:,:,:,0] * intersection[:,:,:,:,:,1]
        mask = tf.cast(intersection[:,:,:,:,:,0] > 0, tf.float32) * \
            tf.cast(intersection[:,:,:,:,:,1] > 0, tf.float32)
        inter_area = inter_area * mask
        box1_area = (box1[:,:,:,:,:,2]-box1[:,:,:,:,:,0]) * (box1[:,:,:,:,:,3]-box1[:,:,:,:,:,1])
        box2_area = (box2[:,:,:,:,:,2]-box2[:,:,:,:,:,0]) * (box2[:,:,:,:,:,3]-box2[:,:,:,:,:,1])
        # Union = area1 + area2 - intersection; epsilon avoids division by 0.
        iou = inter_area / (box1_area + box2_area - inter_area + 1e-6)
        return tf.reshape(iou, shape=[
            self.batch_size, self.cell_size, self.cell_size, self.n_boxes, self.max_objects, 1])

    def test_get_box_pred(self):
        """Run TinyYolo.get_box_pred on a 3x3 grid and visualize the boxes.

        NOTE(review): cv2 is used below but never imported in this module's
        visible import block — this test will raise NameError unless cv2 is
        imported; confirm the intended import.
        """
        # Five label slots, three real objects: [center_x, center_y, w, h, prob].
        label = [[0, 0, 0, 0, 0]] * 5
        label[0] = [0.5, 0.15, 0.8, 0.2, 1]
        label[1] = [0.5, 0.7, 0.1, 0.2, 1]
        label[2] = [0.5, 0.9, 0.6, 0.1, 1]

        # Raw network outputs: (cell_y, cell_x, box, [x, y, w, h, conf]).
        pred = numpy.zeros(shape=(3,3,6,5))
        pred[0,1,4,:] = [-1.6, -1.73, 0.09, -0.09, 1.0]
        # pred[1,0,4,:] = [0.0, 0.0, 0.0, 0.0, 1.0]

        # Processed label tensors are built but not used further here —
        # presumably kept for interactive inspection; verify before removing.
        image_processor = ImageProcessor(
            'Z:', image_size=96, max_objects_per_image=5, cell_size=3, n_classes=1)
        class_label, class_mask, box_label, object_num = \
            image_processor.process_label(label)

        tiny_yolo = TinyYolo(
            n_channel=3, n_classes=1, image_size=96, max_objects_per_image=5,
            box_per_cell=6, object_scala=10, nobject_scala=5,
            coord_scala=10, class_scala=1, batch_size=1)
        box_pred = tf.placeholder(
            dtype=tf.float32, shape=[3, 3, 6, 4], name='box_pred')
        # box_truth is declared but never fed or used below.
        box_truth = tf.placeholder(
            dtype=tf.float32, shape=[3, 3, 1, 4], name='box_truth')
        iou_matrix = tiny_yolo.get_box_pred(box_pred)
        sess = tf.Session()
        [output] = sess.run(
            fetches=[iou_matrix],
            feed_dict={box_pred: pred[:,:,:,0:4]})
        sess.close()
        print(output, output.shape)

        # Draw the 3x3 cell grid on a blank 256x256 canvas.
        image = numpy.zeros(shape=(256, 256, 3), dtype='uint8') + 255
        cv2.line(image, (0, int(256/3.0)), (256, int(256/3.0)), (100, 149, 237), 1)
        cv2.line(image, (0, int(256*2.0/3.0)), (256, int(256*2.0/3.0)), (100, 149, 237), 1)
        cv2.line(image, (int(256/3.0), 0), (int(256/3.0), 256), (100, 149, 237), 1)
        cv2.line(image, (int(256*2.0/3.0), 0), (int(256*2.0/3.0), 256), (100, 149, 237), 1)
        for center_x, center_y, w, h, prob in label:
            if prob != 1.0:
                continue
            # Draw the ground-truth center point.
            cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (255, 99, 71), 0)
            # Draw the ground-truth box.
            xmin = int((center_x - w / 2.0) * 256)
            xmax = int((center_x + w / 2.0) * 256)
            ymin = int((center_y - h / 2.0) * 256)
            ymax = int((center_y + h / 2.0) * 256)
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 99, 71), 0)
        # NOTE(review): only the first 2 of the 6 predicted boxes per cell are
        # drawn (range(2) vs box_per_cell=6) — confirm this is intentional.
        for x in range(3):
            for y in range(3):
                for n in range(2):
                    [center_x, center_y, w, h, prob] = pred[x, y, n, :]
                    # Draw the predicted center point.
                    cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (238, 130, 238), 0)
                    # Draw the predicted box.
                    xmin = int((center_x - w / 2.0) * 256)
                    xmax = int((center_x + w / 2.0) * 256)
                    ymin = int((center_y - h / 2.0) * 256)
                    ymax = int((center_y + h / 2.0) * 256)
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (238, 130, 238), 0)
        plt.imshow(image)
        plt.show()
"caocao7066@outlook.com"
] | caocao7066@outlook.com |
449a5b4d464ce12c138b35ee87635fe1817540fc | 13d3a44447f6a7d8b0d61c2fb445fa6aa76c2f95 | /stackdio/core/viewsets.py | 3708da69f32348e2a5e6effb26d7be236dfe77f5 | [
"Apache-2.0"
] | permissive | stackdio/stackdio | 6ba4ad6c2ef10a323cbd955e6d6d5bd7917c17c2 | 84be621705031d147e104369399b872d5093ef64 | refs/heads/master | 2021-04-09T16:36:38.220557 | 2018-08-13T18:25:29 | 2018-08-13T18:25:29 | 17,679,603 | 9 | 11 | Apache-2.0 | 2020-03-19T17:21:45 | 2014-03-12T19:02:06 | Python | UTF-8 | Python | false | false | 13,461 | py | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
    """Return the members of `perms` that are also in `available_perms`.

    Order (and any duplicates) from `perms` are preserved. Replaces the
    manual loop-and-append with the equivalent list comprehension.
    """
    return [perm for perm in perms if perm in available_perms]
class UserSlugRelatedField(SlugRelatedField):
    """Username-based related field that can lazily materialize LDAP users.

    If the username is not found in the local queryset and LDAP support is
    enabled, the LDAP backend is asked to populate the user, after which the
    lookup is retried once.
    """

    def to_internal_value(self, data):
        try:
            return super(UserSlugRelatedField, self).to_internal_value(data)
        except ValidationError:
            if settings.LDAP_ENABLED:
                if LDAPBackend is None:
                    # LDAP enabled but the optional dependency is missing.
                    raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
                                                  'installed. Please install django_auth_ldap')

                # Grab the ldap user and try again
                user = LDAPBackend().populate_user(data)
                if user is not None:
                    return super(UserSlugRelatedField, self).to_internal_value(data)

            # Nothing worked, just re-raise the exception
            raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
    """
    Base viewset for creating permissions endpoints.

    Subclasses configure `user_or_group` ('user' or 'group') and
    `model_or_object` ('model' or 'object'); the serializer class, URL field,
    and queryset shape are derived from those two switches.
    """
    user_or_group = None
    model_or_object = None
    # Usernames / group names may contain word chars plus . @ + -
    lookup_value_regex = r'[\w.@+-]+'
    parent_lookup_field = 'pk'
    parent_lookup_url_kwarg = None

    def get_model_name(self):
        raise NotImplementedError('`get_model_name()` must be implemented.')

    def get_app_label(self):
        raise NotImplementedError('`get_app_label()` must be implemented.')

    def get_serializer_class(self):
        """Build a serializer class on the fly for this user/group x model/object combo."""
        user_or_group = self.get_user_or_group()
        model_or_object = self.get_model_or_object()
        model_name = self.get_model_name()
        app_label = self.get_app_label()

        super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
                                             serializers.StackdioObjectPermissionsSerializer)

        default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)

        # Kwargs for the hyperlinked URL field pointing at the detail route.
        url_field_kwargs = {
            'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
                app_label,
                model_name,
                model_or_object,
                user_or_group
            ),
            'permission_lookup_field': self.lookup_field,
            'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
            'lookup_field': self.parent_lookup_field,
            'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
        }

        url_field_cls = self.switch_model_object(
            fields.HyperlinkedModelPermissionsField,
            fields.HyperlinkedObjectPermissionsField,
        )

        # Create a class
        class StackdioUserPermissionsSerializer(super_cls):
            user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
            url = url_field_cls(**url_field_kwargs)
            permissions = ListField()

            class Meta(super_cls.Meta):
                update_lookup_field = 'user'

        class StackdioGroupPermissionsSerializer(super_cls):
            group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
            url = url_field_cls(**url_field_kwargs)
            permissions = ListField()

            class Meta(super_cls.Meta):
                update_lookup_field = 'group'

        return self.switch_user_group(StackdioUserPermissionsSerializer,
                                      StackdioGroupPermissionsSerializer)

    def get_user_or_group(self):
        assert self.user_or_group in ('user', 'group'), (
            "'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
            % self.__class__.__name__
        )
        return self.user_or_group

    def switch_user_group(self, if_user, if_group):
        """Two-way dispatch on whether this viewset manages users or groups."""
        return {
            'user': if_user,
            'group': if_group,
        }.get(self.get_user_or_group())

    def get_model_or_object(self):
        assert self.model_or_object in ('model', 'object'), (
            "'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
            % self.__class__.__name__
        )
        return self.model_or_object

    def switch_model_object(self, if_model, if_object):
        """Two-way dispatch on model-level vs object-level permissions."""
        return {
            'model': if_model,
            'object': if_object,
        }.get(self.get_model_or_object())

    def _transform_perm(self, model_name):
        """Return a function that strips the trailing '_<model_name>' off a codename."""
        def do_tranform(item):
            # pylint: disable=unused-variable
            perm, sep, empty = item.partition('_' + model_name)
            return perm

        return do_tranform

    def get_object(self):
        """Resolve a single user/group permissions entry from the URL kwarg.

        The "queryset" here is a plain list of dicts, so the lookup is a
        linear scan matching the username / group name from the URL.
        """
        queryset = self.get_queryset()

        url_kwarg = self.lookup_url_kwarg or self.lookup_field

        name_attr = self.switch_user_group('username', 'name')

        for obj in queryset:
            auth_obj = obj[self.get_user_or_group()]
            if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
                return obj

        raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
    """Permissions endpoints for model-level (class-wide, not per-instance) permissions."""
    model_cls = None
    model_or_object = 'model'
    permission_classes = (StackdioPermissionsModelPermissions,)

    def get_model_cls(self):
        """Return the model class whose permissions this viewset manages."""
        assert self.model_cls, (
            "'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
            % self.__class__.__name__
        )
        return self.model_cls

    def get_model_name(self):
        return self.get_model_cls()._meta.model_name

    def get_app_label(self):
        ret = self.get_model_cls()._meta.app_label
        if ret == 'auth':
            # one-off thing, since users/groups are in the `users` app, not `auth`
            return 'users'
        return ret

    def get_model_permissions(self):
        # Prefer permissions declared on the model class; fall back to the viewset.
        return getattr(self.get_model_cls(),
                       'model_permissions',
                       getattr(self, 'model_permissions', ()))

    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        ret = []
        for permission_cls in self.permission_classes:
            permission = permission_cls()

            # Inject our model_cls into the permission
            if isinstance(permission, StackdioPermissionsModelPermissions) \
                    and permission.model_cls is None:
                permission.model_cls = self.model_cls

            ret.append(permission)

        return ret

    def get_queryset(self):  # pylint: disable=method-hidden
        """Build a list of {user/group, permissions} dicts for the model."""
        model_cls = self.get_model_cls()
        model_name = model_cls._meta.model_name
        model_perms = self.get_model_permissions()

        # Grab the perms for either the users or groups
        perm_map_func = self.switch_user_group(
            lambda: get_users_with_model_perms(model_cls, attach_perms=True,
                                               with_group_users=False),
            lambda: get_groups_with_model_perms(model_cls, attach_perms=True),
        )

        # Do this as a function so we don't fetch both the user AND group permissions on each
        # request
        perm_map = perm_map_func()

        ret = []
        sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
        for auth_obj, perms in sorted_perms:
            # Strip the '_<model_name>' suffix and drop unknown permissions.
            new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
            ret.append({
                self.get_user_or_group(): auth_obj,
                'permissions': _filter_perms(model_perms, new_perms),
            })
        return ret

    def list(self, request, *args, **kwargs):
        response = super(StackdioModelPermissionsViewSet, self).list(request, *args, **kwargs)
        # add available permissions to the response
        response.data['available_permissions'] = sorted(self.get_model_permissions())
        return response

    def perform_create(self, serializer):
        # The serializer needs the model class to grant model-level perms.
        serializer.save(model_cls=self.get_model_cls())

    def perform_update(self, serializer):
        serializer.save(model_cls=self.get_model_cls())

    def perform_destroy(self, instance):
        # Deleting a permissions entry revokes each listed permission from
        # the associated user/group.
        model_cls = self.get_model_cls()
        app_label = model_cls._meta.app_label
        model_name = model_cls._meta.model_name

        for perm in instance['permissions']:
            remove_perm('%s.%s_%s' % (app_label, perm, model_name),
                        instance[self.get_user_or_group()])
class StackdioModelUserPermissionsViewSet(StackdioModelPermissionsViewSet):
    """Model-level permissions endpoints keyed by user (looked up by username)."""
    user_or_group = 'user'
    lookup_field = 'username'
    lookup_url_kwarg = 'username'
class StackdioModelGroupPermissionsViewSet(StackdioModelPermissionsViewSet):
    """Model-level permissions endpoints keyed by group (looked up by name)."""
    user_or_group = 'group'
    lookup_field = 'name'
    lookup_url_kwarg = 'groupname'
class StackdioObjectPermissionsViewSet(StackdioBasePermissionsViewSet):
    """
    Viewset for creating object-level permissions endpoints.  Subclasses must
    implement `get_permissioned_object()` to supply the instance whose
    permissions are being managed.
    """
    model_or_object = 'object'
    def get_permissioned_object(self):
        # Subclass responsibility: return the object whose perms are managed.
        raise NotImplementedError('`get_permissioned_object()` must be implemented.')
    def get_model_name(self):
        # Lowercase model name of the permissioned object.
        return self.get_permissioned_object()._meta.model_name
    def get_app_label(self):
        """Return the app label for the object, mapping ``auth`` to ``users``."""
        ret = self.get_permissioned_object()._meta.app_label
        if ret == 'auth':
            # one-off thing, since users/groups are in the `users` app, not `auth`
            return 'users'
        return ret
    def get_object_permissions(self):
        # Prefer the object's own `object_permissions`; fall back to the view's.
        return getattr(self.get_permissioned_object(),
                       'object_permissions',
                       getattr(self, 'object_permissions', ()))
    def get_queryset(self):  # pylint: disable=method-hidden
        """
        Build a list of ``{<user-or-group>: obj, 'permissions': [...]}`` dicts
        for every user or group holding object-level permissions on the
        permissioned object.
        """
        obj = self.get_permissioned_object()
        model_name = obj._meta.model_name
        object_perms = self.get_object_permissions()
        # Grab the perms for either the users or groups
        perm_map_func = self.switch_user_group(
            lambda: get_users_with_perms(obj, attach_perms=True,
                                         with_superusers=False, with_group_users=False),
            lambda: get_groups_with_perms(obj, attach_perms=True),
        )
        # Deferred call so only the user OR the group perms are fetched per request.
        perm_map = perm_map_func()
        ret = []
        # Sort by the lookup field (username / group name) for stable ordering.
        sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
        for auth_obj, perms in sorted_perms:
            # Normalize each raw permission codename (see _transform_perm).
            new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
            ret.append({
                self.get_user_or_group(): auth_obj,
                'permissions': _filter_perms(object_perms, new_perms),
            })
        return ret
    def list(self, request, *args, **kwargs):
        """List permission assignments, attaching the set of available perms."""
        response = super(StackdioObjectPermissionsViewSet, self).list(request, *args, **kwargs)
        # add available permissions to the response
        response.data['available_permissions'] = sorted(self.get_object_permissions())
        return response
    def perform_create(self, serializer):
        # Inject the permissioned object so the serializer can assign perms on it.
        serializer.save(object=self.get_permissioned_object())
    def perform_update(self, serializer):
        serializer.save(object=self.get_permissioned_object())
    def perform_destroy(self, instance):
        """Remove every object permission currently held by the user or group."""
        obj = self.get_permissioned_object()
        app_label = obj._meta.app_label
        model_name = obj._meta.model_name
        for perm in instance['permissions']:
            remove_perm('%s.%s_%s' % (app_label, perm, model_name),
                        instance[self.get_user_or_group()],
                        obj)
# pylint: disable=abstract-method
class StackdioObjectUserPermissionsViewSet(StackdioObjectPermissionsViewSet):
    """Object-level permissions endpoints keyed by user (looked up by username)."""
    user_or_group = 'user'
    lookup_field = 'username'
    lookup_url_kwarg = 'username'
class StackdioObjectGroupPermissionsViewSet(StackdioObjectPermissionsViewSet):
    """Object-level permissions endpoints keyed by group (looked up by name)."""
    user_or_group = 'group'
    lookup_field = 'name'
    lookup_url_kwarg = 'groupname'
| [
"clark.perkins@digitalreasoning.com"
] | clark.perkins@digitalreasoning.com |
fabcbe720fa7f9586321ad3d1884bd8c89a35a95 | fa701904e59a94510a5c4fa3e1e64a8fe4135fd6 | /mysite/mysite/settings.py | 887b16718fbb0754f36a99c8116988166e0bd302 | [] | no_license | pr0mila/django_practice | 6c4c29987d94d7d838fe6f5378862f266203d97f | 64ab8181f053d158ed1c5d47d459e8771cc4681a | refs/heads/master | 2020-08-18T09:27:01.351518 | 2019-10-18T21:04:18 | 2019-10-18T21:04:18 | 215,774,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load it
# from the environment before deploying anywhere public.
SECRET_KEY = '&x^9hen^vm3#thtq8(ijj3ld=yj^=l%)hy4tp7e4kt!v8=9-^7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
"me@promila.info"
] | me@promila.info |
26435cad8b4f6e3701b33caaa53babbe68372fcd | 122db49900adae3d25bf6a17db54699086593f94 | /klimplot_fetch.py | 7bbb62ddb847248ee839f7ceb18b007e5ac29816 | [] | no_license | geoenvo/klimplot | 77a63296ad85b5e1e2a2fa391ab3904d289860ea | 0c70b350c5dca155211f2e4089b7765f34ef7101 | refs/heads/master | 2021-01-10T11:19:06.675854 | 2016-02-16T03:41:42 | 2016-02-16T03:41:42 | 51,801,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | # Klimplot
# Fetch Script
# (c) 2015 Mohammad Fadli
# Geo Enviro Omega, PT
# KlimPlot is used to visualize and publish climate data as Map Services.
# This script is used to fetch the source data (original comment was truncated).
# HOWTO
# Run in the background: python klimplot.py &
# Put it in cron, or run it periodically as a service
# The resulting folder structure will adjust accordingly
# NOTE(review): this script uses Python 2 print statements -- it requires Python 2.
import os.path
import wget
import subprocess
# define variables
# Data source (PIKAM server via http)
spath="http://202.90.199.147/ec_prob/results_mat/"
#sfolder="2015.01.01/"
#sfile="control.2015.02_ver_2015.01.01.csv"
#surl=spath+sfolder+sfile
#cpath=os.getcwd()+"/"
# name of the data directory
datadir="data/"
# path where the fetched data folder will be stored
datapath="/home/klimplot/"+datadir
#datapath=cpath+datadir
#filepath=datapath+sfile
# Make sure the data folder exists before fetching into it
if not os.path.exists(datapath):
    os.makedirs(datapath)
else:
    print "\n Directory already exist."
"""
#Check File
if not os.path.exists(filepath):
    #Get File
    wget.download(surl,datapath)
else:
    print "\n File already exist. Download Aborted."
"""
# NOTE(review): shell=True with string concatenation is fine for the fixed spath
# above, but would be a shell-injection risk if spath/datapath ever came from
# untrusted input -- consider a list argv with shell=False.
subprocess.call("wget -r -np -nc --cut-dirs=2 -A '*.csv' --ignore-case -nH -P "+datapath+" "+spath, shell=True)
"""
-r recursive download folder
-np no parents directory, tidak mendownload isi dari parent directory
-nH would download all files to the directory d in the current directory
-P you will save to specific directory
-nc, --no-clobber: skip downloads that would download to existing files.
--cut-dirs tidak melihat struktur direktori yang ada di sub folder sebelumnya.
-A '*.csv' hanya download csv file
"""
print "\n Alhamdulillah."
"mf4dl1@gmail.com"
] | mf4dl1@gmail.com |
282cc009f6f0e3ea9db0caeb7c910203e582bc4d | 13df9ce30c3b6999f38ccf46ea9a85f3fa9f44a9 | /reports/forms.py | 75e02c96731b1abb0eb6f31b575fa2e133c1e6a8 | [] | no_license | PavelM87/tf_idf_app | 37b5598f7f46af308f24733a145b8308610a0797 | 6b36e516b2daefd89b52765f1244857d16a4efcd | refs/heads/master | 2023-07-26T16:03:33.165133 | 2021-09-13T16:04:20 | 2021-09-13T16:04:20 | 405,765,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django import forms
from .models import File
class FileForm(forms.ModelForm):
    """Upload form exposing only the ``file`` field of the File model."""
    class Meta:
        model = File
        # trailing comma makes this a one-element tuple
        fields = 'file',
"mospv87@gmail.com"
] | mospv87@gmail.com |
33e88efe4e627f6559fbe2ae3e666d6cd80bb96a | 25db8b32ecda47a22a8a1ae4551e2378e4d576cf | /rest/serializers.py | 48d3eeebe7c5ac11184608c3cbe7c1f91bd0730c | [] | no_license | viperfx/ng-forum | 4754ca69d699ad466e836b28bda68d9d84e0cd34 | 5a55692122b91876104c209a73bab05f7318c3ff | refs/heads/master | 2021-01-01T19:42:18.879573 | 2013-11-08T15:27:32 | 2013-11-08T15:27:32 | 13,254,263 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest.models import Forum, Thread, Post
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
class ForumSerializer(serializers.ModelSerializer):
class Meta:
model = Forum
depth=1
fields = ('id','title', 'threads',)
class ThreadSerializer(serializers.ModelSerializer):
class Meta:
model = Thread
depth=2
fields = ('id','title', 'forum', 'body', 'creator','created', 'posts')
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('thread', 'body', 'creator')
| [
"tharshan09@gmail.com"
] | tharshan09@gmail.com |
88c38efa8ff0a8056b6fc309011e034888426fa0 | 26acc7e23024098661a42da37e2cb4ed56c21b44 | /dgp/genera/load/loader.py | daf5ca8acee012f9dd328fd48ef0fb2baf85a38a | [
"MIT"
] | permissive | dataspot/dgp | 80536c0e296570c109511de3dae6e0297bb8b0fd | e86d604c8af5534985f9b788ba809facbc325152 | refs/heads/master | 2023-03-16T05:15:38.362702 | 2023-03-09T07:07:28 | 2023-03-09T07:07:28 | 169,378,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | import os
import json
import requests
from hashlib import md5
from dataflows import Flow, load, dump_to_path
from dataflows.base.schema_validator import ignore
from ...core import BaseDataGenusProcessor, Required, Validator, ConfigurableDGP
from .analyzers import FileFormatDGP, StructureDGP
from ...config.consts import CONFIG_URL, CONFIG_PUBLISH_ALLOWED, RESOURCE_NAME
from ...config.log import logger
class LoaderDGP(BaseDataGenusProcessor):
    """
    DGP step that loads the source data described by the configuration.
    Publishing runs stream the full source directly; non-publishing (preview)
    runs are limited to 5000 rows and cached on disk under `.cache/`.
    """
    # Fail fast when the configuration does not provide a source URL/path.
    PRE_CHECKS = Validator(
        Required(CONFIG_URL, 'Source data URL or path')
    )
    def init(self):
        # Sub-analyzers that detect the file format and the table structure.
        self.steps = self.init_classes([
            FileFormatDGP,
            StructureDGP,
        ])
    def hash_key(self, *args):
        """Return a stable md5 hex digest of the JSON-serializable arguments."""
        data = json.dumps(args, sort_keys=True, ensure_ascii=False)
        return md5(data.encode('utf8')).hexdigest()
    def flow(self):
        """
        Build the dataflows Flow that loads the source data.  Returns None when
        earlier validation produced errors.
        """
        if len(self.errors) == 0:
            config = self.config._unflatten()
            source = config['source']
            # Cache key covers everything that affects the loaded data.
            ref_hash = self.hash_key(source, config['structure'], config.get('publish'))
            cache_path = os.path.join('.cache', ref_hash)
            datapackage_path = os.path.join(cache_path, 'datapackage.json')
            structure_params = self.context._structure_params()
            http_session = self.context.http_session()
            loader = load(source.pop('path'), validate=False,
                          name=RESOURCE_NAME,
                          **source, **structure_params,
                          http_session=http_session,
                          http_timeout=120,
                          infer_strategy=load.INFER_PYTHON_TYPES,
                          cast_strategy=load.CAST_DO_NOTHING,
                          limit_rows=(
                              None
                              if self.config.get(CONFIG_PUBLISH_ALLOWED)
                              else 5000
                          ))
            if self.config.get(CONFIG_PUBLISH_ALLOWED):
                return Flow(
                    loader,
                )
            else:
                # Preview mode: materialize the row-limited source once, then
                # serve subsequent runs from the on-disk datapackage cache.
                if not os.path.exists(datapackage_path):
                    logger.info('Caching source data into %s', cache_path)
                    Flow(
                        loader,
                        dump_to_path(cache_path, validator_options=dict(on_error=ignore)),
                        # printer(),
                    ).process()
                logger.info('Using cached source data from %s', cache_path)
                return Flow(
                    load(datapackage_path, resources=RESOURCE_NAME),
                )
class PostLoaderDGP(ConfigurableDGP):
    """Runs the configurable 'loading' processors after the source is loaded."""
    def init(self):
        super().init('loading', per_taxonomy=False)
        # reset cached flows; presumably rebuilt on demand -- confirm in ConfigurableDGP
        self._flows = None
class PreLoaderDGP(ConfigurableDGP):
    """Runs the configurable 'preloading' processors before the source is loaded."""
    def init(self):
        super().init('preloading', per_taxonomy=False)
        # reset cached flows; presumably rebuilt on demand -- confirm in ConfigurableDGP
        self._flows = None
| [
"adam.kariv@gmail.com"
] | adam.kariv@gmail.com |
0176c8e8f9f456d2c8194d846412d68db7679af2 | e9552e0e7960a8b04ec2f3e4889d51ffb1e5318c | /td/client.py | 5732f13556e75132f0fc37d1b75f711f90dcd8fc | [] | no_license | webclinic017/ethan_trade_bot | 32d76270f18f339c7a116b83128a4954669711f6 | 9b78e216be38dc9dd709d5e0bcc936ea8886b751 | refs/heads/main | 2023-06-14T19:45:38.614865 | 2021-07-03T07:10:34 | 2021-07-03T07:10:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,843 | py | import os
import time
import json
import datetime
import requests
import urllib.parse
import dateutil.parser
from td.stream import TDStreamerClient
class TDClient():
'''
TD Ameritrade API Client Class.
Implements OAuth 2.0 Authorization Code Grant workflow, handles configuration
and state management, adds token for authenticated calls, and performs request
to the TD Ameritrade API.
'''
    def __init__(self, **kwargs):
        '''
        Initializes the session with default values and any user-provided overrides.
        The following arguments MUST be specified at runtime or else initalization
        will fail.
        NAME: consumer_id
        DESC: The Consumer ID assigned to you during the App registration. This can
              be found at the app registration portal.
        NAME: account_number
        DESC: This is the account number for your main TD Ameritrade Account.
        NAME: account_password
        DESC: This is the account password for your main TD Ameritrade Account.
        NAME: redirect_uri
        DESC: This is the redirect URL that you specified when you created your
              TD Ameritrade Application.
        RAISES: KeyError when an unknown keyword argument is passed.
        '''
        # define the configuration settings.
        # NOTE(review): the 'authenticaiton_url' key name is misspelled; it is kept
        # as-is because callers may reference it by that exact spelling.
        self.config = {'consumer_id': None,
                       'account_number': None,
                       'account_password': None,
                       'redirect_uri': None,
                       'resource':'https://api.tdameritrade.com',
                       'api_version':'/v1',
                       'cache_state': True,
                       'authenticaiton_url':'https://auth.tdameritrade.com',
                       'auth_endpoint':'https://auth.tdameritrade.com' + '/auth?',
                       'token_endpoint':'https://api.tdameritrade.com' + '/v1' + '/oauth2/token',
                       'refresh_enabled': True}
        # This serves as a mechanism to validate input parameters for the different endpoint arguments.
        self.endpoint_arguments = {
            'search_instruments':{'projection': ['symbol-search', 'symbol-regex', 'desc-search', 'desc-regex', 'fundamental']},
            'get_market_hours':{'markets':['EQUITY', 'OPTION', 'FUTURE', 'BOND', 'FOREX']},
            'get_movers':{'market':['$DJI', '$COMPX', '$SPX.X'],
                          'direction':['up','down'],
                          'change':['value','percent']},
            'get_user_principals':{'fields':['streamerSubscriptionKeys', 'streamerConnectionInfo', 'preferences', 'surrogateIds']}
        }
        # loop through the key word arguments.
        for key in kwargs:
            # there may be a chance an unknown argument was pass through. Print a warning if this is the case.
            if key not in self.config:
                print("WARNING: The argument, {} is an unkown argument.".format(key))
                raise KeyError('Invalid Argument Name.')
        # update the configuration settings so they now contain the passed through value.
        self.config.update(kwargs.items())
        # call the state_manager method and update the state to init (initalized)
        self.state_manager('init')
        # define a new attribute called 'authstate' and initalize it to '' (Blank). This will be used by our login function.
        self.authstate = False
        # Initalize the client with no streaming session.
        self.streaming_session = None
def __repr__(self):
'''
Defines the string representation of our TD Ameritrade Class instance.
RTYPE: String
'''
# grab the logged in state.
if self.state['loggedin']:
logged_in_state = 'True'
else:
logged_in_state = 'False'
# define the string representation
str_representation = '<TDAmeritrade Client (logged_in = {}, authorized = {})>'.format(logged_in_state, self.authstate)
return str_representation
def headers(self, mode = None):
'''
Returns a dictionary of default HTTP headers for calls to TD Ameritrade API,
in the headers we defined the Authorization and access token.
NAME: mode
DESC: Defines the content-type for the headers dictionary.
TYPE: String
'''
# grab the access token
token = self.state['access_token']
# create the headers dictionary
headers = {'Authorization' : f'Bearer {token}'}
if mode == 'application/json':
headers['Content-type'] = 'application/json'
return headers
def api_endpoint(self, url):
'''
Convert relative endpoint (e.g., 'quotes') to full API endpoint.
NAME: url
DESC: The URL that needs conversion to a full endpoint URL.
TYPE: String
RTYPE: String
'''
# if they pass through a valid url then, just use that.
if urllib.parse.urlparse(url).scheme in ['http', 'https']:
return url
# otherwise build the URL
return urllib.parse.urljoin( self.config['resource'] + self.config['api_version'] + "/", url.lstrip('/'))
    def state_manager(self, action):
        '''
        Manages the self.state dictionary. Initalize State will set
        the properties to their default value. Save will save the
        current state if 'cache_state' is set to TRUE.
        NAME: action
        DESC: action argument must of one of the following:
                'init' -- Initalize State.
                'save' -- Save the current state.
        TYPE: String
        '''
        # define the initalized state, these are the default values.
        initialized_state = {'access_token': None,
                             'refresh_token': None,
                             'access_token_expires_at': 0,
                             'refresh_token_expires_at':0,
                             'authorization_url': None,
                             'redirect_code': None,
                             'token_scope': '',
                             'loggedin': False}
        # Grab the current directory of the client file, that way we can store the JSON file in the same folder.
        # dir_path = os.path.expanduser("~")
        # filename = 'TDAmeritradeState.json'
        dir_path = os.path.dirname(os.path.realpath(__file__))
        filename = 'TDAmeritradeState.json'
        file_path = os.path.join(dir_path, filename)
        # if the state is initalized
        if action == 'init':
            self.state = initialized_state
            # if they allowed for caching and the file exist, load the file.
            if self.config['cache_state'] and os.path.isfile(file_path):
                with open(file_path, 'r') as fileHandle:
                    self.state.update(json.load(fileHandle))
            # if they didnt allow for caching delete the file.
            elif not self.config['cache_state'] and os.path.isfile(os.path.join(dir_path, filename)):
                os.remove(file_path)
        # if they want to save it and have allowed for caching then load the file.
        elif action == 'save' and self.config['cache_state']:
            with open(file_path, 'w') as fileHandle:
                # build JSON string using dictionary comprehension, keeping only
                # the keys that belong in the persisted state.
                json_string = {key:self.state[key] for key in initialized_state}
                json.dump(json_string, fileHandle)
    def login(self):
        '''
        Ask the user to authenticate themselves via the TD Ameritrade Authentication Portal. This will
        create a URL, display it for the User to go to and request that they paste the final URL into
        command window.
        Once the user is authenticated the API key is valid for 90 days, so refresh tokens may be used
        from this point, up to the 90 days.
        NOTE: interactive -- prompts on stdin unless silent SSO succeeds.
        '''
        # if caching is enabled then attempt silent authentication.
        if self.config['cache_state']:
            # if it was successful, the user is authenticated.
            if self.silent_sso():
                # update the authentication state
                self.authstate = 'Authenticated'
                return True
        # update the authentication state
        self.authstate = 'Authenticated'
        # prepare the payload to login
        data = {'response_type': 'code',
                'redirect_uri': self.config['redirect_uri'],
                'client_id': self.config['consumer_id'] + '@AMER.OAUTHAP'}
        # url encode the data.
        params = urllib.parse.urlencode(data)
        # build the full URL for the authentication endpoint.
        url = self.config['auth_endpoint'] + params
        # set the newly created 'authorization_url' key to the newly created url
        self.state['authorization_url'] = url
        # aks the user to go to the URL provided, they will be prompted to authenticate themsevles.
        print('Please go to URL provided authorize your account: {}'.format(self.state['authorization_url']))
        # ask the user to take the final URL after authentication and paste here so we can parse.
        my_response = input('Paste the full URL redirect here: ')
        # store the redirect URL
        self.state['redirect_code'] = my_response
        # this will complete the final part of the authentication process.
        self.grab_access_token()
    def logout(self):
        '''
        Clears the current TD Ameritrade Connection state, forcing a fresh
        access token or refresh token on the next API use.
        '''
        # change state to initalized so they will have to either get a
        # new access token or refresh token next time they use the API
        self.state_manager('init')
    def grab_access_token(self):
        '''
        Access token handler for AuthCode Workflow. This takes the
        authorization code parsed from the auth endpoint to call the
        token endpoint and obtain an access token.
        '''
        # Parse the URL
        url_dict = urllib.parse.parse_qs(self.state['redirect_code'])
        # Convert the values to a list.
        url_values = list(url_dict.values())
        # Grab the Code, which is stored in a list.
        # NOTE(review): this assumes the auth code is the first query parameter
        # in the pasted redirect URL -- verify against the redirect format.
        url_code = url_values[0][0]
        # define the parameters of our access token post.
        data = {'grant_type':'authorization_code',
                'client_id':self.config['consumer_id'],
                'access_type':'offline',
                'code':url_code,
                'redirect_uri':self.config['redirect_uri']}
        # post the data to the token endpoint and store the response.
        token_response = requests.post(url = self.config['token_endpoint'], data = data, verify = True)
        # call the save_token method to save the access token.
        self.token_save(token_response)
        # update the state if the request was successful.
        if token_response and token_response.ok:
            self.state_manager('save')
def silent_sso(self):
'''
Attempt a silent authentication, by checking whether current access token
is valid and/or attempting to refresh it. Returns True if we have successfully
stored a valid access token.
RTYPE: Boolean
'''
# if the current access token is not expired then we are still authenticated.
if self.token_seconds(token_type = 'access_token') > 0:
return True
# if the refresh token is expired then you have to do a full login.
elif self.token_seconds(token_type = 'refresh_token') <= 0:
return False
# if the current access token is expired then try and refresh access token.
elif self.state['refresh_token'] and self.token_refresh():
return True
# More than likely a first time login, so can't do silent authenticaiton.
return False
    def token_refresh(self):
        '''
        Refreshes the current access token using the stored refresh token.
        Returns True on success; prints a diagnostic and returns False on any
        of the handled HTTP error statuses.
        RTYPE: Boolean
        '''
        # build the parameters of our request
        data = {'client_id': self.config['consumer_id'] + '@AMER.OAUTHAP',
                'grant_type':'refresh_token',
                'access_type':'offline',
                'refresh_token': self.state['refresh_token']}
        # make a post request to the token endpoint
        response = requests.post(self.config['token_endpoint'], data=data, verify=True)
        # if there was an error go through the full authentication
        if response.status_code == 401:
            print('The Credentials you passed through are invalid.')
            return False
        elif response.status_code == 400:
            print('Validation was unsuccessful.')
            return False
        elif response.status_code == 500:
            print('The TD Server is experiencing an error, please try again later.')
            return False
        elif response.status_code == 403:
            print("You don't have access to this resource, cannot authenticate.")
            return False
        elif response.status_code == 503:
            print("The TD Server can't respond, please try again later.")
            return False
        else:
            # save the token and the state, since we now have a new access token that has a new expiration date.
            self.token_save(response)
            self.state_manager('save')
            return True
    def token_save(self, response):
        '''
        Parses an access token from the response of a POST request and saves it
        in the state dictionary for future use. Additionally, it will store the
        expiration time and the refresh token.  Logs out (resets state) and
        returns False when the response carries no access token.
        NAME: response
        DESC: A response object recieved from the `token_refresh` or `grab_access_token`
              methods.
        TYPE: requests.Response
        RTYPE: Boolean
        '''
        # parse the data.
        json_data = response.json()
        # make sure there is an access token before proceeding.
        if 'access_token' not in json_data:
            self.logout()
            return False
        # save the access token and refresh token
        self.state['access_token'] = json_data['access_token']
        self.state['refresh_token'] = json_data['refresh_token']
        # and the logged in status
        self.state['loggedin'] = True
        # store token expiration time (absolute epoch seconds)
        self.state['access_token_expires_at'] = time.time() + int(json_data['expires_in'])
        self.state['refresh_token_expires_at'] = time.time() + int(json_data['refresh_token_expires_in'])
        return True
def token_seconds(self, token_type = 'access_token'):
'''
Return the number of seconds until the current access token or refresh token
will expire. The default value is access token because this is the most commonly used
token during requests.
NAME: token_type
DESC: The type of token you would like to determine lifespan for. Possible values are:
'access_token'
'refresh_token'
TYPE: String
RTYPE: Boolean
'''
# if needed check the access token.
if token_type == 'access_token':
# if the time to expiration is less than or equal to 0, return 0.
if not self.state['access_token'] or time.time() >= self.state['access_token_expires_at']:
return 0
# else return the number of seconds until expiration.
token_exp = int(self.state['access_token_expires_at'] - time.time())
# if needed check the refresh token.
elif token_type == 'refresh_token':
# if the time to expiration is less than or equal to 0, return 0.
if not self.state['refresh_token'] or time.time() >= self.state['refresh_token_expires_at']:
return 0
# else return the number of seconds until expiration.
token_exp = int(self.state['refresh_token_expires_at'] - time.time())
return token_exp
def token_validation(self, nseconds = 5):
'''
Verify the current access token is valid for at least N seconds, and
if not then attempt to refresh it. Can be used to assure a valid token
before making a call to the TD Ameritrade API.
PARA: nseconds
TYPE: integer
DESC: The minimum number of seconds the token has to be valid for before
attempting to get a refresh token.
'''
if self.token_seconds(token_type = 'access_token') < nseconds and self.config['refresh_enabled']:
self.token_refresh()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ALL ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
    def validate_arguments(self, endpoint = None, parameter_name = None, parameter_argument = None):
        '''
        This will validate an argument for the specified endpoint and raise an error if the argument
        is not valid. Can take both a list of arguments or a single argument.
        NAME: endpoint
        DESC: This is the endpoint name, and should line up exactly with the TD Ameritrade Client library.
        TYPE: String
        NAME: parameter_name
        DESC: An endpoint can have a parameter that needs to be passed through, this represents the name of
              that parameter.
        TYPE: String
        NAME: parameter_argument
        DESC: The arguments being validated for the particular parameter name. This can either be a single value
              or a list of values.
        TYPE: List<Strings> OR String
        RTYPE: True when every argument is valid; raises ValueError otherwise.
        EXAMPLES:
        WITH NO LIST:
        ------------------------------------------------------------
        api_endpoint = 'search_instruments'
        para_name = 'projection'
        para_args = 'fundamental'
        self.validate_arguments(endpoint = api_endpoint,
                                parameter_name = para_name,
                                parameter_argument = para_args)
        WITH LIST:
        ------------------------------------------------------------
        api_endpoint = 'get_market_hours'
        para_name = 'markets'
        para_args = ['FOREX', 'EQUITY']
        self.validate_arguments(endpoint = api_endpoint,
                                parameter_name = para_name,
                                parameter_argument = para_args)
        '''
        # grab the possible parameters for the endpoint.
        parameters_dictionary = self.endpoint_arguments[endpoint]
        # grab the parameter arguments, for the specified parameter name.
        parameter_possible_arguments = parameters_dictionary[parameter_name]
        # if it's a list then see if it matches any of the possible values.
        if type(parameter_argument) is list:
            # build the validation result list (True entries mark invalid args).
            validation_result = [argument not in parameter_possible_arguments for argument in parameter_argument]
            # if any of the results are FALSE then raise an error.
            if any(validation_result):
                print('\nThe value you passed through is not valid, please choose one of the following valid values: {} \n'.format(' ,'.join(parameter_possible_arguments)))
                raise ValueError('Invalid Value.')
            elif not any(validation_result):
                return True
        # if the argument isn't in the list of possible values, raise an error.
        elif parameter_argument not in parameter_possible_arguments:
            print('\nThe value you passed through is not valid, please choose one of the following valid values: {} \n'.upper().format(' ,'.join(parameter_possible_arguments)))
            raise ValueError('Invalid Value.')
        elif parameter_argument in parameter_possible_arguments:
            return True
    def prepare_arguments_list(self, parameter_list = None):
        '''
        Some endpoints can take multiple values for a parameter, this
        method takes that list and creates a valid string that can be
        used in an API request. The list can have either one index or
        multiple indexes.
        NAME: parameter_list
        DESC: A list of paramater values assigned to an argument.
        TYPE: List
        RTYPE: String -- the comma-joined values when a list is passed;
               otherwise None (non-list input falls through unhandled).
        EXAMPLE:
        SessionObject.prepare_arguments_list(parameter_list = ['MSFT', 'SQ'])
        '''
        # validate it's a list.
        if type(parameter_list) is list:
            # specify the delimeter and join the list.
            delimeter = ','
            parameter_list = delimeter.join(parameter_list)
            return parameter_list
    def get_quotes(self, instruments = None):
        '''
        Serves as the mechanism to make a request to the Get Quote and Get Quotes Endpoint.
        If one item is provided a Get Quote request will be made and if more than one item
        is provided then a Get Quotes request will be made.
        Documentation Link: https://developer.tdameritrade.com/quotes/apis
        NAME: instruments
        DESC: A list of different financial instruments.
        TYPE: List
        RTYPE: Dictionary -- the parsed JSON response from the API.
        EXAMPLES:
        SessionObject.get_quotes(instruments = ['MSFT'])
        SessionObject.get_quotes(instruments = ['MSFT','SQ'])
        '''
        # first make sure that the token is still valid.
        self.token_validation()
        # grab the original headers we have stored.
        merged_headers = self.headers()
        # because we have a list argument, prep it for the request (comma-joined).
        instruments = self.prepare_arguments_list(parameter_list = instruments)
        # build the params dictionary
        data = {'apikey':self.config['consumer_id'],
                'symbol':instruments}
        # define the endpoint
        endpoint = '/marketdata/quotes'
        # build the url
        url = self.api_endpoint(endpoint)
        # return the response of the get request.
        return requests.get(url = url, headers = merged_headers, params=data, verify = True).json()
    def get_price_history(self, symbol = None, periodType = None, period = None, startDate = None, endDate = None,
                          frequencyType = None, frequency = None, needExtendedHoursData = None):
        '''
        Gets historical candle data for a symbol from the Get Price History endpoint.
        Either a period OR a startDate/endDate pair must be supplied — never both.
        Documentation Link: https://developer.tdameritrade.com/price-history/apis
        NAME: symbol
        DESC: The ticker symbol to request data for.
        TYPE: String
        NAME: periodType
        DESC: The type of period to show. Valid values are day, month, year, or ytd (year to date). Default is day.
        TYPE: String
        NAME: period
        DESC: The number of periods to show.
        TYPE: Integer
        NAME: startDate
        DESC: Start date as milliseconds since epoch.
        TYPE: Integer
        NAME: endDate
        DESC: End date as milliseconds since epoch.
        TYPE: Integer
        NAME: frequencyType
        DESC: The type of frequency with which a new candle is formed.
        TYPE: String
        NAME: frequency
        DESC: The number of the frequencyType to be included in each candle.
        TYPE: Integer
        NAME: needExtendedHoursData
        DESC: True to return extended hours data, false for regular market hours only. Default is true
        TYPE: Boolean
        '''
        # Validator function for get_price_history: raises ValueError on any
        # invalid combination, otherwise returns the payload with None values removed.
        def validate(data):
            # Valid `period` values keyed by periodType.
            valid_periods = {
                'day': [1, 2, 3, 4, 5, 10],
                'month': [1, 2, 3, 6],
                'year': [1, 2, 3, 5, 10, 15, 20],
                'ytd': [1],
            }
            # Valid `frequencyType` values keyed by periodType.
            valid_frequency_types = {
                'day': ['minute'],
                'month': ['daily', 'weekly'],
                'year': ['daily', 'weekly', 'monthly'],
                'ytd': ['daily', 'weekly'],
            }
            # Valid `frequency` values keyed by frequencyType.
            valid_frequencies = {
                'minute': [1, 5, 10, 15, 30],
                'daily': [1],
                'weekly': [1],
                'monthly': [1]
            }
            # Enforce mutual exclusivity: either both dates and no period,
            # or a period and no dates. Anything else falls to the error branch.
            if (data['startDate'] and data['endDate'] and not data['period']) or (not data['startDate'] and not data['endDate'] and data['period']):
                # Validate periodType against the known set.
                # NOTE(review): a None periodType fails here even in date-range mode — confirm intended.
                if data['periodType'] not in valid_periods.keys():
                    print('Period Type: {} is not valid. Valid values are {}'.format(data['periodType'], valid_periods.keys()))
                    raise ValueError('Invalid Value')
                # Validate period against the periods allowed for this periodType
                # (skipped when period is None, i.e. date-range mode).
                if data['period'] and data['period'] not in valid_periods[data['periodType']]:
                    print('Period: {} is not valid. Valid values are {}'.format(data['period'], valid_periods[data['periodType']]))
                    raise ValueError('Invalid Value')
                # Validate that frequencyType is a known frequency type at all.
                if data['frequencyType'] not in valid_frequencies.keys():
                    print('frequencyType: {} is not valid. Valid values are {}'.format(data['frequencyType'], valid_frequencies.keys()))
                    raise ValueError('Invalid Value')
                # Validate that frequencyType is compatible with the chosen periodType.
                if data['frequencyType'] not in valid_frequency_types[data['periodType']]:
                    print('frequencyType: {} is not valid. Valid values for period: {} are {}'.format(data['frequencyType'], data['periodType'], valid_frequency_types[data['periodType']]))
                    raise ValueError('Invalid Value')
                # Validate frequency against the frequencies allowed for this frequencyType.
                if data['frequency'] not in valid_frequencies[data['frequencyType']]:
                    print('frequency: {} is not valid. Valid values are {}'.format(data['frequency'], valid_frequencies[data['frequencyType']]))
                    raise ValueError('Invalid Value')
                # TODO Validate startDate and endDate
                # Recompute payload dictionary and remove any None values
                return({k: v for k, v in data.items() if v is not None})
            else:
                print('Either startDate/endDate or period must be provided exclusively.')
                raise ValueError('Invalid Value')
        # first make sure that the token is still valid.
        self.token_validation()
        # grab the original headers we have stored.
        merged_headers = self.headers()
        # build the params dictionary
        data = {'apikey':self.config['consumer_id'],
                'period':period,
                'periodType':periodType,
                'startDate':startDate,
                'endDate':endDate,
                'frequency':frequency,
                'frequencyType':frequencyType,
                'needExtendedHoursData':needExtendedHoursData}
        # define the endpoint
        endpoint = '/marketdata/{}/pricehistory'.format(symbol)
        # validate the data (raises ValueError if the combination is invalid).
        data = validate(data)
        # build the url
        url = self.api_endpoint(endpoint)
        # return the response of the get request.
        return requests.get(url = url, headers = merged_headers, params=data, verify = True).json()
def search_instruments(self, symbol = None, projection = 'symbol-search'):
'''
Search or retrieve instrument data, including fundamental data.
Documentation Link: https://developer.tdameritrade.com/instruments/apis/get/instruments
NAME: symbol
DESC: The symbol of the financial instrument you would like to search.
TYPE: string
NAME: projection
DESC: The type of request, default is "symbol-search". The type of request include the following:
1. symbol-search
Retrieve instrument data of a specific symbol or cusip
2. symbol-regex
Retrieve instrument data for all symbols matching regex.
Example: symbol=XYZ.* will return all symbols beginning with XYZ
3. desc-search
Retrieve instrument data for instruments whose description contains
the word supplied. Example: symbol=FakeCompany will return all
instruments with FakeCompany in the description
4. desc-regex
Search description with full regex support. Example: symbol=XYZ.[A-C]
returns all instruments whose descriptions contain a word beginning
with XYZ followed by a character A through C
5. fundamental
Returns fundamental data for a single instrument specified by exact symbol.
TYPE: string
EXAMPLES:
SessionObject.search_instrument(symbol = 'XYZ', projection = 'symbol-search')
SessionObject.search_instrument(symbol = 'XYZ.*', projection = 'symbol-regex')
SessionObject.search_instrument(symbol = 'FakeCompany', projection = 'desc-search')
SessionObject.search_instrument(symbol = 'XYZ.[A-C]', projection = 'desc-regex')
SessionObject.search_instrument(symbol = 'XYZ.[A-C]', projection = 'fundamental')
'''
# first make sure that the token is still valid.
self.token_validation()
# validate argument
self.validate_arguments(endpoint = 'search_instruments', parameter_name = 'projection', parameter_argument = projection)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'symbol':symbol,
'projection':projection}
# define the endpoint
endpoint = '/instruments'
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_instruments(self, cusip = None):
'''
Get an instrument by CUSIP (Committee on Uniform Securities Identification Procedures) code.
Documentation Link: https://developer.tdameritrade.com/instruments/apis/get/instruments/%7Bcusip%7D
NAME: cusip
DESC: The CUSIP code of a given financial instrument.
TYPE: string
EXAMPLES:
SessionObject.get_instruments(cusip = 'SomeCUSIPNumber')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id']}
# define the endpoint
endpoint = '/instruments'
# build the url
url = self.api_endpoint(endpoint) + "/" + cusip
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_market_hours(self, markets = None, date = None):
'''
Serves as the mechanism to make a request to the "Get Hours for Multiple Markets" and
"Get Hours for Single Markets" Endpoint. If one market is provided a "Get Hours for Single Markets"
request will be made and if more than one item is provided then a "Get Hours for Multiple Markets"
request will be made.
Documentation Link: https://developer.tdameritrade.com/market-hours/apis
NAME: markets
DESC: The markets for which you're requesting market hours, comma-separated.
Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
TYPE: List<Strings>
NAME: date
DESC: The date you wish to recieve market hours for. Valid ISO-8601 formats
are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz
TYPE: String
EXAMPLES:
SessionObject.get_market_hours(markets = ['EQUITY'], date = '2019-10-19')
SessionObject.get_market_hours(markets = ['EQUITY','FOREX'], date = '2019-10-19')
'''
# first make sure that the token is still valid.
self.token_validation()
# validate argument
self.validate_arguments(endpoint = 'get_market_hours', parameter_name = 'markets', parameter_argument = markets)
# because we have a list argument, prep it for the request.
markets = self.prepare_arguments_list(parameter_list = markets)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'markets':markets,
'date':date}
# define the endpoint
endpoint = '/marketdata/hours'
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_movers(self, market = None, direction = None, change = None):
'''
Top 10 (up or down) movers by value or percent for a particular market.
Documentation Link: https://developer.tdameritrade.com/movers/apis/get/marketdata
NAME: market
DESC: The index symbol to get movers for. Can be $DJI, $COMPX, or $SPX.X.
TYPE: String
NAME: direction
DESC: To return movers with the specified directions of up or down. Valid values
are up or down
TYPE: String
NAME: change
DESC: To return movers with the specified change types of percent or value Valid
values are percent or value.
TYPE: String
EXAMPLES:
SessionObject.get_movers(market = '$DJI', direction = 'up', change = 'value')
SessionObject.get_movers(market = '$COMPX', direction = 'down', change = 'percent')
'''
# grabs a dictionary representation of our arguments and their inputs.
local_args = locals()
# we don't need the 'self' key
del local_args['self']
# first make sure that the token is still valid.
self.token_validation()
# validate arguments, before making request.
for key, value in local_args.items():
self.validate_arguments(endpoint = 'get_movers', parameter_name = key, parameter_argument = value)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'direction':direction,
'change':change}
# define the endpoint
endpoint = '/marketdata/{}/movers'.format(market)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_options_chain(self, option_chain = None, args_dictionary = None):
'''
Get option chain for an optionable Symbol using one of two methods. Either,
use the OptionChain object which is a built-in object that allows for easy creation of the
POST request. Otherwise, can pass through a dictionary of all the arguments needed.
Documentation Link: https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains
NAME: option_chain
DESC: Represents a single OptionChainObject.
TYPE: TDAmeritrade.OptionChainObject
EXAMPLE:
from td.option_chain import OptionChain
option_chain_1 = OptionChain(args)
SessionObject.get_options_chain( option_chain = option_chain_1)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/marketdata/chains'
# build the url
url = self.api_endpoint(endpoint)
# Grab the items needed for the request.
if option_chain is not None:
# this request requires an API key, so let's add that.
option_chain.add_chain_key(key_name = 'apikey', key_value = self.config['consumer_id'])
# take the JSON representation of the string
data = option_chain._get_query_parameters()
else:
# otherwise take the args dictionary.
data = args_dictionary
# return the response of the get request.
return requests.get(url = url, headers = merged_headers, params = data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ACCOUNTS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_accounts(self, account = 'all', fields = None):
'''
Serves as the mechanism to make a request to the "Get Accounts" and "Get Account" Endpoint.
If one account is provided a "Get Account" request will be made and if more than one account
is provided then a "Get Accounts" request will be made.
Documentation Link: https://developer.tdameritrade.com/account-access/apis
NAME: account
DESC: The account number you wish to recieve data on. Default value is 'all'
which will return all accounts of the user.
TYPE: String
NAME: fields
DESC: Balances displayed by default, additional fields can be added here by
adding positions or orders.
TYPE: List<String>
EXAMPLES:
SessionObject.get_accounts(account = 'all', fields = ['orders'])
SessionObject.get_accounts(account = 'MyAccountNumber', fields = ['orders','positions'])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
fields = self.prepare_arguments_list(parameter_list = fields)
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'fields':fields}
# if all use '/accounts' else pass through the account number.
if account == 'all':
endpoint = '/accounts'
else:
endpoint = '/accounts/{}'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE TRANSACTIONS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_transactions(self, account = None, transaction_type = None, symbol = None,
start_date = None, end_date = None, transaction_id = None):
'''
Serves as the mechanism to make a request to the "Get Transactions" and "Get Transaction" Endpoint.
If one `transaction_id` is provided a "Get Transaction" request will be made and if it is not provided
then a "Get Transactions" request will be made.
Documentation Link: https://developer.tdameritrade.com/transaction-history/apis
NAME: account
DESC: The account number you wish to recieve transactions for.
TYPE: String
NAME: transaction_type
DESC: The type of transaction. Only transactions with the specified type will be returned. Valid
values are the following: ALL, TRADE, BUY_ONLY, SELL_ONLY, CASH_IN_OR_CASH_OUT, CHECKING,
DIVIDEND, INTEREST, OTHER, ADVISOR_FEES
TYPE: String
NAME: symbol
DESC: The symbol in the specified transaction. Only transactions with the specified
symbol will be returned.
TYPE: String
NAME: start_date
DESC: Only transactions after the Start Date will be returned. Note: The maximum date range is
one year. Valid ISO-8601 formats are: yyyy-MM-dd.
TYPE: String
NAME: end_date
DESC: Only transactions before the End Date will be returned. Note: The maximum date range is
one year. Valid ISO-8601 formats are: yyyy-MM-dd.
TYPE: String
NAME: transaction_id
DESC: The transaction ID you wish to search. If this is specifed a "Get Transaction" request is
made. Should only be used if you wish to return one transaction.
TYPE: String
EXAMPLES:
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'ALL', start_date = '2019-01-31', end_date = '2019-04-28')
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'ALL', start_date = '2019-01-31')
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'TRADE')
SessionObject.get_transactions(transaction_id = 'MyTransactionID')
'''
# first make sure that the token is still valid.
self.token_validation()
# default to a "Get Transaction" Request if anything else is passed through along with the transaction_id.
if transaction_id != None:
account = None
transaction_type = None,
start_date = None,
end_date = None
# if the request type they made isn't valid print an error and return nothing.
else:
if transaction_type not in ['ALL', 'TRADE', 'BUY_ONLY', 'SELL_ONLY', 'CASH_IN_OR_CASH_OUT', 'CHECKING','DIVIDEND', 'INTEREST', 'OTHER', 'ADVISOR_FEES']:
print('The type of transaction type you specified is not valid.')
return False
# grab the original headers we have stored.
merged_headers = self.headers()
# if transaction_id is not none, it means we need to make a request to the get_transaction endpoint.
if transaction_id:
# define the endpoint
endpoint = '/accounts/{}/transactions/{}'.format(account, transaction_id)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, verify = True).json()
# if it isn't then we need to make a request to the get_transactions endpoint.
else:
# build the params dictionary
data = {'type':transaction_type,
'symbol':symbol,
'startDate':start_date,
'endDate':end_date}
# define the endpoint
endpoint = '/accounts/{}/transactions'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE USER INFOS & PREFERENCES ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_preferences(self, account = None):
'''
Get's User Preferences for a specific account.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/accounts/%7BaccountId%7D/preferences-0
NAME: account
DESC: The account number you wish to recieve preference data for.
TYPE: String
EXAMPLES:
SessionObject.get_preferences(account = 'MyAccountNumber')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/preferences'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, verify = True).json()
def get_streamer_subscription_keys(self, accounts = None):
'''
SubscriptionKey for provided accounts or default accounts.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/userprincipals/streamersubscriptionkeys-0
NAME: account
DESC: A list of account numbers you wish to recieve a streamer key for.
TYPE: List<String>
EXAMPLES:
SessionObject.get_streamer_subscription_keys(account = ['MyAccountNumber'])
SessionObject.get_streamer_subscription_keys(account = ['MyAccountNumber1', 'MyAccountNumber2'])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
accounts = self.prepare_arguments_list(parameter_list = accounts)
# define the endpoint
endpoint = '/userprincipals/streamersubscriptionkeys'
# build the params dictionary
data = {'accountIds':accounts}
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params = data, verify = True).json()
def get_user_principals(self, fields = None):
'''
Returns User Principal details.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0
NAME: fields
DESC: A comma separated String which allows one to specify additional fields to return. None of
these fields are returned by default. Possible values in this String can be:
1. streamerSubscriptionKeys
2. streamerConnectionInfo
3. preferences
4. surrogateIds
TYPE: List<String>
EXAMPLES:
SessionObject.get_user_principals(fields = ['preferences'])
SessionObject.get_user_principals(fields = ['preferences', 'streamerConnectionInfo'])
'''
# first make sure that the token is still valid.
self.token_validation()
# validate arguments
self.validate_arguments(endpoint = 'get_user_principals', parameter_name = 'fields', parameter_argument = fields)
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
fields = self.prepare_arguments_list(parameter_list = fields)
# define the endpoint
endpoint = '/userprincipals'
# build the params dictionary
data = {'fields':fields}
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params = data, verify = True).json()
def update_preferences(self, account = None, dataPayload = None):
'''
Update preferences for a specific account. Please note that the directOptionsRouting and
directEquityRouting values cannot be modified via this operation.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/put/accounts/%7BaccountId%7D/preferences-0
NAME: account
DESC: The account number you wish to update preferences for.
TYPE: String
NAME: dataPayload
DESC: A dictionary that provides all the keys you wish to update. It must contain the following keys to be valid.
1. expressTrading
2. directOptionsRouting
3. directEquityRouting
4. defaultEquityOrderLegInstruction
5. defaultEquityOrderType
6. defaultEquityOrderPriceLinkType
7. defaultEquityOrderDuration
8. defaultEquityOrderMarketSession
9. defaultEquityQuantity
10. mutualFundTaxLotMethod
11. optionTaxLotMethod
12. equityTaxLotMethod
13. defaultAdvancedToolLaunch
14. authTokenTimeout
TYPE: dictionary
EXAMPLES:
SessionObject.update_preferences(account = 'MyAccountNumer', dataPayload = <Dictionary>)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the endpoint
endpoint = '/accounts/{}/preferences'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
response = requests.put(url = url, headers = merged_headers, data = json.dumps(dataPayload), verify = True)
if response.status_code == 204:
return "Data successfully updated."
else:
return response.content
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE WATCHLISTS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def create_watchlist(self, account = None, name = None, watchlistItems = None):
'''
Create watchlist for specific account. This method does not verify that the symbol or asset type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/post/accounts/%7BaccountId%7D/watchlists-0
NAME: account
DESC: The account number you wish to create the watchlist for.
TYPE: String
NAME: name
DESC: The name you want to give your watchlist.
TYPE: String
NAME: watchlistItems
DESC: A list of WatchListItems object.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.create_watchlist(account = 'MyAccountNumber',
name = 'MyWatchlistName',
watchlistItems = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the endpoint
endpoint = '/accounts/{}/watchlists'.format(account)
# define the payload
payload = {"name": name, "watchlistItems": watchlistItems}
# build the url
url = self.api_endpoint(endpoint)
# make the request
response = requests.post(url = url, headers = merged_headers, data = json.dumps(payload) , verify = True)
if response.status_code == 201:
return "Watchlist {} was successfully created.".format(name)
else:
return response.content
def get_watchlist_accounts(self, account = 'all'):
'''
Serves as the mechanism to make a request to the "Get Watchlist for Single Account" and
"Get Watchlist for Multiple Accounts" Endpoint. If one account is provided a
"Get Watchlist for Single Account" request will be made and if 'all' is provided then a
"Get Watchlist for Multiple Accounts" request will be made.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis
NAME: account
DESC: The account number you wish to pull watchlists from. Default value is 'all'
TYPE: String
EXAMPLES:
SessionObject.get_watchlist_accounts(account = 'all')
SessionObject.get_watchlist_accounts(account = 'MyAccount1')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
if account == 'all':
endpoint = '/accounts/watchlists'
else:
endpoint = '/accounts/{}/watchlists'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, verify = True).json()
def get_watchlist(self, account = None, watchlist_id = None):
'''
Returns a specific watchlist for a specific account.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/get/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number you wish to pull watchlists from.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to return.
TYPE: String
EXAMPLES:
SessionObject.get_watchlist(account = 'MyAccount1', watchlist_id = 'MyWatchlistId')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, verify = True).json()
def delete_watchlist(self, account = None, watchlist_id = None):
'''
Deletes a specific watchlist for a specific account.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/delete/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number you wish to delete the watchlist from.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to delete.
TYPE: String
EXAMPLES:
SessionObject.delete_watchlist(account = 'MyAccount1', watchlist_id = 'MyWatchlistId')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.delete(url = url, headers = merged_headers, verify = True).status_code
def update_watchlist(self, account = None, watchlist_id = None, name = None, watchlistItems = None):
'''
Partially update watchlist for a specific account: change watchlist name, add to the beginning/end of a
watchlist, update or delete items in a watchlist. This method does not verify that the symbol or asset
type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/patch/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number that contains the watchlist you wish to update.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to update.
TYPE: String
NAME: watchlistItems
DESC: A list of the original watchlist items you wish to update and their modified keys.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.update_watchlist(account = 'MyAccountNumber',
watchlist_id = 'WatchListID',
watchlistItems = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the payload
payload = {"name": name, "watchlistItems": watchlistItems}
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.patch(url = url, headers = merged_headers, data = json.dumps(payload), verify = True).status_code
def replace_watchlist(self, account = None, watchlist_id_new = None, watchlist_id_old = None, name_new = None, watchlistItems_new = None):
'''
STILL BUILDING
Replace watchlist for a specific account. This method does not verify that the symbol or asset type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/put/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number that contains the watchlist you wish to replace.
TYPE: String
NAME: watchlist_id_new
DESC: The ID of the watchlist you wish to replace with the old one.
TYPE: String
NAME: watchlist_id_old
DESC: The ID of the watchlist you wish to replace.
TYPE: String
NAME: name_new
DESC: The name of the new watchlist.
TYPE: String
NAME: watchlistItems_New
DESC: The new watchlist items you wish to add to the watchlist.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.replace_watchlist(account = 'MyAccountNumber',
watchlist_id_new = 'WatchListIDNew',
watchlist_id_old = 'WatchListIDOld',
name_new = 'MyNewName',
watchlistItems_new = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the payload
payload = {"name": name_new, "watchlistId": watchlist_id_new, "watchlistItems": watchlistItems_new}
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id_old)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.put(url = url, headers = merged_headers, data = json.dumps(payload), verify = True).status_code
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ORDERS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_orders_path(self, account = None, max_results = None, from_entered_time = None, to_entered_time = None, status = None):
        '''
        Returns the orders placed under a specific account.

        Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/accounts/%7BaccountId%7D/orders-0

        NAME: account
        DESC: The account number whose orders should be queried.
        TYPE: String

        NAME: max_results
        DESC: The maximum number of orders to retrieve.
        TYPE: integer

        NAME: from_entered_time
        DESC: Exclude orders entered before this time. Valid ISO-8601 formats are
              yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. The date must be within 60 days
              of today's date, and 'to_entered_time' must also be set.
        TYPE: String

        NAME: to_entered_time
        DESC: Exclude orders entered after this time. Valid ISO-8601 formats are
              yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. 'from_entered_time' must also be set.
        TYPE: String

        NAME: status
        DESC: Only return orders with this status. Possible values:
              AWAITING_PARENT_ORDER, AWAITING_CONDITION, AWAITING_MANUAL_REVIEW,
              ACCEPTED, AWAITING_UR_NOT, PENDING_ACTIVATION, QUEUED, WORKING,
              REJECTED, PENDING_CANCEL, CANCELED, PENDING_REPLACE, REPLACED,
              FILLED, EXPIRED.
        TYPE: String

        EXAMPLES:

        SessionObject.get_orders_path(account = 'MyAccountID', max_results = 6, from_entered_time = '2019-10-01', to_entered_time = '2019-10-10', status = 'FILLED')
        SessionObject.get_orders_path(account = 'MyAccountID', max_results = 6, status = 'EXPIRED')
        SessionObject.get_orders_path(account = 'MyAccountID', status = 'REJECTED')
        SessionObject.get_orders_path(account = 'MyAccountID')
        '''

        # Refresh the access token if it has expired.
        self.token_validation()

        # Standard authentication headers for the API call.
        query_headers = self.headers()

        # Query-string filters understood by the orders endpoint; None values
        # are dropped automatically by requests.
        query_params = {"maxResults": max_results,
                        "fromEnteredTime": from_entered_time,
                        "toEnteredTime": to_entered_time,
                        "status": status}

        # Resolve the account-scoped orders resource into a full URL.
        url = self.api_endpoint('/accounts/{}/orders'.format(account))

        # Issue the request and hand back the decoded JSON payload.
        return requests.get(url = url, headers = query_headers, params = query_params, verify = True).json()
def get_orders_query(self, account = None, max_results = None, from_entered_time = None, to_entered_time = None, status = None):
        '''
        Returns orders for a specific account or, when no account ID is
        specified, orders for all linked accounts.

        Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/orders-0

        NAME: account
        DESC: The account number to query for orders; if None, all linked accounts are queried.
        TYPE: String

        NAME: max_results
        DESC: The maximum number of orders to retrieve.
        TYPE: integer

        NAME: from_entered_time
        DESC: Exclude orders entered before this time. Valid ISO-8601 formats are
              yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. The date must be within 60 days
              of today's date, and 'to_entered_time' must also be set.
        TYPE: String

        NAME: to_entered_time
        DESC: Exclude orders entered after this time. Valid ISO-8601 formats are
              yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. 'from_entered_time' must also be set.
        TYPE: String

        NAME: status
        DESC: Only return orders with this status. Possible values:
              AWAITING_PARENT_ORDER, AWAITING_CONDITION, AWAITING_MANUAL_REVIEW,
              ACCEPTED, AWAITING_UR_NOT, PENDING_ACTIVATION, QUEUED, WORKING,
              REJECTED, PENDING_CANCEL, CANCELED, PENDING_REPLACE, REPLACED,
              FILLED, EXPIRED.
        TYPE: String

        EXAMPLES:

        SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, from_entered_time = '2019-10-01', to_entered_time = '2019-10-10', status = 'FILLED')
        SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, status = 'EXPIRED')
        SessionObject.get_orders_query(account = 'MyAccountID', status = 'REJECTED')
        SessionObject.get_orders_query(account = None)
        '''

        # Refresh the access token if it has expired.
        self.token_validation()

        # Standard authentication headers for the API call.
        query_headers = self.headers()

        # Query-string filters; unlike the path-based variant the account is
        # passed as a filter, so it may legitimately be None.
        query_params = {"accountId": account,
                        "maxResults": max_results,
                        "fromEnteredTime": from_entered_time,
                        "toEnteredTime": to_entered_time,
                        "status": status}

        # Resolve the cross-account orders resource into a full URL.
        url = self.api_endpoint('/orders')

        # Issue the request and hand back the decoded JSON payload.
        return requests.get(url = url, headers = query_headers, params = query_params, verify = True).json()
def get_order(self, account = None, order_id = None):
        '''
        Retrieves a single order for a specific account.

        Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/orders-0

        NAME: account
        DESC: The account number that you want to query the order for.
        TYPE: String

        NAME: order_id
        DESC: The order id.
        TYPE: integer

        EXAMPLES:

        SessionObject.get_order(account = 'MyAccountID', order_id = 'MyOrderID')
        '''

        # Refresh the access token if it has expired.
        self.token_validation()

        # Standard authentication headers for the API call.
        auth_headers = self.headers()

        # NOTE(review): this path has no leading slash, unlike the list
        # endpoints above — presumably api_endpoint tolerates both forms; confirm.
        url = self.api_endpoint('accounts/{}/orders/{}'.format(account, order_id))

        # Issue the request and hand back the decoded JSON payload.
        return requests.get(url = url, headers = auth_headers, verify = True).json()
def cancel_order(self, account = None, order_id = None):
        '''
        Cancels a specific order for a specific account.

        Documentation Link: https://developer.tdameritrade.com/account-access/apis/delete/accounts/%7BaccountId%7D/orders/%7BorderId%7D-0

        NAME: account
        DESC: The account number that you want to cancel the order for.
        TYPE: String

        NAME: order_id
        DESC: The order id.
        TYPE: integer

        EXAMPLES:

        SessionObject.cancel_order(account = 'MyAccountID', order_id = 'MyOrderID')
        '''

        # Refresh the access token if it has expired.
        self.token_validation()

        # Standard authentication headers for the API call.
        auth_headers = self.headers()

        # Resolve the specific order resource into a full URL.
        url = self.api_endpoint('accounts/{}/orders/{}'.format(account, order_id))

        # Issue the DELETE and decode the response body.
        # NOTE(review): a successful cancel may return an empty body, in which
        # case .json() would raise — confirm against the live API.
        return requests.delete(url = url, headers = auth_headers, verify = True).json()
def place_order(self, account = None, order = None):
        '''
        Places an order for a specific account.

        Documentation Link: https://developer.tdameritrade.com/account-access/apis/post/accounts/%7BaccountId%7D/orders-0

        NAME: account
        DESC: The account number that you want to place the order for.
        TYPE: String

        NAME: order
        DESC: Either a JSON-serializable dictionary or a TDOrder object that
              contains the info needed for the order placement.
        TYPE: String | Order

        EXAMPLES:

        SessionObject.place_order(account = 'MyAccountID', order = {'orderKey':'OrderValue'})
        SessionObject.place_order(account = 'MyAccountID', order = <Order>)
        '''

        # Refresh the access token if it has expired.
        self.token_validation()

        # Order placement is a JSON POST, so request JSON content headers.
        json_headers = self.headers(mode = 'application/json')

        # Resolve the account-scoped orders resource into a full URL.
        url = self.api_endpoint('accounts/{}/orders'.format(account))

        # Submit the order payload as JSON.
        order_response = requests.post(url = url, headers = json_headers, data = json.dumps(order), verify = True)

        # 201 Created signals a successful placement; anything else carries an
        # error payload worth surfacing to the caller.
        if order_response.status_code == 201:
            return "Order was successfully placed."
        return order_response.json()
def _create_token_timestamp(self, token_timestamp = None):
        '''
        Converts the token timestamp returned by the get_user_principals
        endpoint into milliseconds since the Unix epoch, which is the format
        the streaming API expects.

        NAME: token_timestamp
        DESC: The timestamp string returned from the get_user_principals endpoint.
        TYPE: String

        RTYPE: integer (milliseconds since 1970-01-01 UTC)
        '''
        # Parse the timestamp string, discarding any timezone information.
        parsed_stamp = dateutil.parser.parse(token_timestamp, ignoretz = True)

        # Milliseconds elapsed between the Unix epoch and the parsed stamp.
        unix_epoch = datetime.datetime.utcfromtimestamp(0)
        return int((parsed_stamp - unix_epoch).total_seconds() * 1000.0)
def message_key(self, account_id = None):
        '''
        Requests a MessageKey from the legacy apps endpoint.

        NAME: account_id
        DESC: The account number to request a message key for.
              NOTE(review): currently unused by the body — confirm whether it
              was meant to be interpolated into the request URL.
        TYPE: String

        NOTE(review): this method looks unfinished — the URL template below is
        formatted with no arguments and the response is only printed, never
        returned.
        '''
        # first make sure that the token is still valid.
        self.token_validation()
        # grab the original headers we have stored.
        merged_headers = self.headers()
        # define the endpoint
        endpoint = 'MessageKey'
        # NOTE(review): '{}'.format() with no arguments raises IndexError at
        # runtime because the template contains a '{}' placeholder — this call
        # cannot currently succeed as written.
        url = r'https://apis.tdameritrade.com/apps/100/MessageKey?source={}'.format()
        # build the url
        # url = self.api_endpoint(endpoint)
        # print(url)
        # make the request
        response = requests.get(url = url, headers = merged_headers, verify = True)
        print(response.url)
def create_streaming_session(self):
        '''
        Creates a new streaming session that can be used to stream different data sources.

        RTYPE: TDStream Object
        '''
        # Fetch the streamer subscription key.
        # NOTE(review): the key is fetched but never used below — confirm
        # whether TDStreamerClient needs it.
        subscription_key = self.get_streamer_subscription_keys()['keys'][0]['key']

        # Pull the streamer connection details for the current user.
        principals = self.get_user_principals(fields = ['streamerConnectionInfo'])
        streamer_info = principals['streamerInfo']
        primary_account = principals['accounts'][0]

        # The streamer expects the token timestamp as epoch milliseconds.
        stamp_ms = self._create_token_timestamp(token_timestamp = streamer_info['tokenTimestamp'])

        # Credentials dictionary used to authenticate against the socket.
        credentials = {"userid": primary_account['accountId'],
                       "token": streamer_info['token'],
                       "company": primary_account['company'],
                       "segment": primary_account['segment'],
                       "cddomain": primary_account['accountCdDomainId'],
                       "usergroup": streamer_info['userGroup'],
                       "accesslevel": streamer_info['accessLevel'],
                       "authorized": "Y",
                       "timestamp": stamp_ms,
                       "appid": streamer_info['appId'],
                       "acl": streamer_info['acl']}

        # Hand everything to the streamer client wrapper and return it.
        return TDStreamerClient(websocket_url = streamer_info['streamerSocketUrl'],
                                user_principal_data = principals,
                                credentials = credentials)
| [
"lovetrading09@yahoo.com"
] | lovetrading09@yahoo.com |
7dbe960a1c9a8e1e356ad75deab3f1df4abc7aac | 4a0c3f5f697ab694067f5fc59486707440593856 | /python/20_Slots.py | b95795f1ef5714b1713849c16c00279e4745897a | [] | no_license | shweb360/Python | 4cfe5e1e12d0bad02217ccd1bded7815a1c192e9 | dd589674ed22ebd835efb21954ed0a96430002f8 | refs/heads/master | 2021-06-24T17:04:22.378895 | 2019-12-01T06:06:51 | 2019-12-01T06:06:51 | 102,442,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #1.1创建一个类:
# 1.1 Create a class:
class Student(object):
    pass
# 1.2 Bind an attribute to an instance:
s=Student()
s.name="Wushuang"
print(s.name);
# 1.3 Bind a method to an instance
def set_age(self,age):
    self.age=age
from types import MethodType
# Bind the function to the instance as a method
s.set_age=MethodType(set_age,s)
# Call the instance method
s.set_age(25)
print(s.age)
# 2.0 Using __slots__
# But what if we want to restrict which attributes an instance can have?
# For example, only allow 'name' and 'age' attributes on Student instances.
# To achieve that restriction, Python allows defining a special
# __slots__ variable on the class, which limits the attributes
# that instances of the class may have:
class Student2(object):
    __slots__=('name','age')
s2=Student2()
s2.name="Michael"
s2.age=24
print(s2)
"785132826@qq.com"
] | 785132826@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.