| id (int64) | prompt (string) | docstring (string, nullable) |
|---|---|---|
161,269 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
cached_keys = {}
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
if hasattr(password, 'encode'):
password = password.encode('utf-8')
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
def encrypt_key(password, method):
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
return key | null |
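A minimal, self-contained sketch of the count-1 MD5 derivation implemented above (a hypothetical re-implementation for illustration, not part of the dataset row):

```python
import hashlib

def derive_key_iv(password: bytes, key_len: int, iv_len: int):
    # round i hashes digest(i-1) + password; round 0 hashes the bare password
    m, d = [], b''
    while len(b''.join(m)) < key_len + iv_len:
        d = hashlib.md5(d + password).digest()
        m.append(d)
    ms = b''.join(m)
    return ms[:key_len], ms[key_len:key_len + iv_len]

key, iv = derive_key_iv(b'password', 32, 16)  # aes-256-cfb key/IV sizes
assert len(key) == 32 and len(iv) == 16
```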
161,270 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def encrypt_iv_len(method):
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
return iv_len | null |
161,271 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
try:
return os.urandom(length)
except NotImplementedError as e:
return openssl.rand_bytes(length)
def encrypt_new_iv(method):
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
return random_string(iv_len) | null |
161,272 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def encrypt_all_iv(key, method, op, data, ref_iv):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if op:
iv = ref_iv[0]
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
ref_iv[0] = iv
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result) | null |
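The ref_iv convention above is easy to miss: on encrypt the caller supplies the IV in ref_iv[0] and it is prepended to the ciphertext; on decrypt the IV is stripped from the head of the data and reported back through ref_iv[0]. A toy sketch of that framing with a stand-in XOR "cipher" (hypothetical, for illustration only):

```python
def toy_encrypt_all_iv(key, op, data, ref_iv, iv_len=4):
    if op:  # encrypt: caller supplies the IV, which rides at the front
        iv = ref_iv[0]
        return iv + bytes(b ^ key ^ iv[0] for b in data)
    iv, data = data[:iv_len], data[iv_len:]  # decrypt: IV is the prefix
    ref_iv[0] = iv
    return bytes(b ^ key ^ iv[0] for b in data)

ref = [b'\x01\x02\x03\x04']
ct = toy_encrypt_all_iv(0x5A, 1, b'hello', ref)
out_ref = [None]
assert toy_encrypt_all_iv(0x5A, 0, ct, out_ref) == b'hello'
assert out_ref[0] == ref[0]  # decrypt reports the IV it consumed
```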
161,273 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
# relies on method_supported, random_string and EVP_BytesToKey from the rows above
class Encryptor(object):
def __init__(self, key, method, iv = None):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.iv_buf = b''
self.cipher_key = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
if iv is None or len(iv) != self._method_info[1]:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
self.cipher = self.get_cipher(key, method, 1, iv)
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
self.cipher_key = key
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is not None: #optimize
return self.decipher.update(buf)
decipher_iv_len = self._method_info[1]
if len(self.iv_buf) <= decipher_iv_len:
self.iv_buf += buf
if len(self.iv_buf) > decipher_iv_len:
decipher_iv = self.iv_buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = self.iv_buf[decipher_iv_len:]
del self.iv_buf
return self.decipher.update(buf)
else:
return b''
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2 | null |
161,274 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
# relies on method_supported, random_string and EVP_BytesToKey from earlier rows
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2 | null |
161,275 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
_ord = ord
def compat_ord(s):
if type(s) == int:
return s
return _ord(s) | null |
161,276 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
_chr = chr
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d]) | null |
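Quick sanity checks for the two shims above (assuming compat_ord and compat_chr are both in scope):

```python
data = b'\x17\x03'
assert compat_ord(data[0]) == 0x17  # Py3 indexing yields an int, Py2 a str; both work
assert compat_chr(0x17) == b'\x17'  # always a single byte, never a Py3 str
```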
161,277 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def int32(x):
if x > 0xFFFFFFFF or x < 0:
x &= 0xFFFFFFFF
if x > 0x7FFFFFFF:
x = int(0x100000000 - x)
if x < 0x80000000:
return -x
else:
return -2147483648
return x | null |
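A few spot checks showing the C-style 32-bit wraparound that int32 emulates (assuming the int32 above is in scope):

```python
assert int32(0x7FFFFFFF) == 2147483647     # max positive value is unchanged
assert int32(0x80000000) == -2147483648    # wraps to INT_MIN
assert int32(0xFFFFFFFF) == -1             # all-ones is -1 in two's complement
assert int32((5 - 10) & 0xFFFFFFFF) == -5  # recovers a signed difference
```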
161,278 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = ['%02X' % ord(x) for x in v4addr]
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
        return b''.join(struct.pack('>H', i) for i in dbyts)  # bytes-safe on Py2 and Py3
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False | null |
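Typical usage of is_ip (a sketch; it assumes to_str from shadowsocks.common is in scope, since the inet_pton fallback above calls it):

```python
import socket

assert is_ip('8.8.8.8') == socket.AF_INET
assert is_ip('2404:6800:4005:805::1011') == socket.AF_INET6
assert is_ip('www.google.com') is False  # not an address of either family
```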
161,279 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def match_regex(regex, text):
    # keep findall semantics: True if the pattern matches anywhere in text
    return bool(re.compile(regex).findall(text)) | null |
161,280 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def inet_ntop(family, ipstr):
    ...  # body elided in this row; see the full definition in row 161,282
def inet_pton(family, addr):
    ...  # body elided in this row; see the full definition in row 161,282
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop | null |
161,281 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
ord = compat_ord
def pre_parse_header(data):
if not data:
return None
datatype = ord(data[0])
if datatype == 0x80:
if len(data) <= 2:
return None
rand_data_size = ord(data[1])
if rand_data_size + 2 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 2:]
elif datatype == 0x81:
data = data[1:]
elif datatype == 0x82:
if len(data) <= 3:
return None
rand_data_size = struct.unpack('>H', data[1:3])[0]
if rand_data_size + 3 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 3:]
elif datatype == 0x88 or (~datatype & 0xff) == 0x88:
if len(data) <= 7 + 7:
return None
data_size = struct.unpack('>H', data[1:3])[0]
ogn_data = data
data = data[:data_size]
crc = binascii.crc32(data) & 0xffffffff
if crc != 0xffffffff:
            logging.warn('incorrect CRC32, maybe wrong password or '
'encryption method')
return None
start_pos = 3 + ord(data[3])
data = data[start_pos:-4]
if data_size < len(ogn_data):
data += ogn_data[data_size:]
return data | null |
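A hypothetical demo of the 0x80 framing handled above (assuming pre_parse_header and the compat ord above are in scope): one type byte, one byte giving the random-padding length, that much padding, then the real payload:

```python
import struct

payload = b'\x03\x0ewww.google.com\x00\x50'  # an ordinary address header
padding = b'\xaa' * 5
packet = b'\x80' + struct.pack('B', len(padding)) + padding + payload
assert pre_parse_header(packet) == payload   # padding is stripped off
```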
161,282 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = ['%02X' % ord(x) for x in v4addr]
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
        return b''.join(struct.pack('>H', i) for i in dbyts)  # bytes-safe on Py2 and Py3
else:
raise RuntimeError("What family?")
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6 | null |
161,283 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
ADDRTYPE_IPV4 = 1
ADDRTYPE_HOST = 3
ADDRTYPE_IPV6 = 4
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
connecttype = (addrtype & 0x8) and 1 or 0
addrtype &= ~0x8
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return connecttype, addrtype, to_bytes(dest_addr), dest_port, header_length
def test_parse_header():
    # parse_header returns a 5-tuple: (connecttype, addrtype, addr, port, header_length)
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (0, ADDRTYPE_HOST, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (0, ADDRTYPE_IPV4, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (0, ADDRTYPE_IPV6, b'2404:6800:4005:805::1011', 80, 19) | null |
161,284 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
    return b'\x03' + struct.pack('B', len(address)) + address  # length byte must be bytes, not a Py3 str
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com' | null |
161,285 | from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
from shadowsocks import lru_cache
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self.addrs_str = addrs
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You didn't specify a CIDR routing prefix size for %s, "
                         "implicitly treated as %s/%d" % (addr, addr,
                                                          addr_len - prefix_size))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
    def __cmp__(self, other):  # Python 2 only; Python 3 uses __eq__/__ne__ below
        return cmp(self.addrs_str, other.addrs_str)
def __eq__(self, other):
return self.addrs_str == other.addrs_str
def __ne__(self, other):
return self.addrs_str != other.addrs_str
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
    assert '::ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network | null |
161,286 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import time
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
The provided code snippet includes the necessary dependencies for implementing the `errno_from_exception` function. Write a Python function `def errno_from_exception(e)` to solve the following problem:
Provides the errno from an Exception object. There are cases where the errno attribute is not set, so we pull the errno out of the args; but if someone instantiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno.
Here is the function:
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    There are cases where the errno attribute is not set, so we pull
    the errno out of the args; but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
        return None | Provides the errno from an Exception object. There are cases where the errno attribute is not set, so we pull the errno out of the args; but if someone instantiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno. |
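Spot checks for the three cases the docstring describes (hypothetical demo, assuming errno_from_exception above is in scope):

```python
import errno

e1 = OSError(errno.ECONNRESET, 'reset')  # errno attribute is set
assert errno_from_exception(e1) == errno.ECONNRESET
e2 = Exception(errno.EAGAIN)             # no errno attribute; falls back to args[0]
assert errno_from_exception(e2) == errno.EAGAIN
e3 = Exception()                         # no args at all; safely returns None
assert errno_from_exception(e3) is None
```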
161,287 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import time
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number)) | null |
161,288 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
class tls_ticket_auth(plain.plain):
def __init__(self, method):
self.method = method
self.handshake_status = 0
self.send_buffer = b''
self.recv_buffer = b''
self.client_id = b''
self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
self.tls_version = b'\x03\x03'
self.overhead = 5
def init_data(self):
return obfs_auth_data()
def get_overhead(self, direction): # direction: true for c->s false for s->c
return self.overhead
def sni(self, url):
url = common.to_bytes(url)
data = b"\x00" + struct.pack('>H', len(url)) + url
data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
return data
def pack_auth_data(self, client_id):
utc_time = int(time.time()) & 0xFFFFFFFF
data = struct.pack('>I', utc_time) + os.urandom(18)
data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
return data
def client_encode(self, buf):
if self.handshake_status == -1:
return buf
if self.handshake_status == 8:
ret = b''
while len(buf) > 2048:
size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
buf = buf[size:]
if len(buf) > 0:
ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
return ret
if len(buf) > 0:
self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
if self.handshake_status == 0:
self.handshake_status = 1
data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
ext = binascii.unhexlify(b"ff01000100")
host = self.server_info.obfs_param or self.server_info.host
if host and host[-1] in string.digits:
host = ''
hosts = host.split(',')
host = random.choice(hosts)
ext += self.sni(host)
ext += b"\x00\x17\x00\x00"
if host not in self.server_info.data.ticket_buf:
self.server_info.data.ticket_buf[host] = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 17 + 8) * 16)
ext += b"\x00\x23" + struct.pack('>H', len(self.server_info.data.ticket_buf[host])) + self.server_info.data.ticket_buf[host]
ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
ext += binascii.unhexlify(b"000500050100000000")
ext += binascii.unhexlify(b"00120000")
ext += binascii.unhexlify(b"75500000")
ext += binascii.unhexlify(b"000b00020100")
ext += binascii.unhexlify(b"000a0006000400170018")
data += struct.pack('>H', len(ext)) + ext
data = b"\x01\x00" + struct.pack('>H', len(data)) + data
data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
return data
elif self.handshake_status == 1 and len(buf) == 0:
data = b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished
data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10]
ret = data + self.send_buffer
self.send_buffer = b''
self.handshake_status = 8
return ret
return b''
def client_decode(self, buf):
if self.handshake_status == -1:
return (buf, False)
if self.handshake_status == 8:
ret = b''
self.recv_buffer += buf
while len(self.recv_buffer) > 5:
if ord(self.recv_buffer[0]) != 0x17:
logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('client_decode appdata error')
size = struct.unpack('>H', self.recv_buffer[3:5])[0]
if len(self.recv_buffer) < size + 5:
break
buf = self.recv_buffer[5:size+5]
ret += buf
self.recv_buffer = self.recv_buffer[size+5:]
return (ret, False)
if len(buf) < 11 + 32 + 1 + 32:
raise Exception('client_decode data error')
verify = buf[11:33]
if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
raise Exception('client_decode data error')
if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]:
raise Exception('client_decode data error')
return (b'', True)
def server_encode(self, buf):
if self.handshake_status == -1:
return buf
if (self.handshake_status & 8) == 8:
ret = b''
while len(buf) > 2048:
size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
buf = buf[size:]
if len(buf) > 0:
ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
return ret
self.handshake_status |= 8
data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100")
data = b"\x02\x00" + struct.pack('>H', len(data)) + data #server hello
data = b"\x16" + self.tls_version + struct.pack('>H', len(data)) + data
if random.randint(0, 8) < 1:
ticket = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 164) * 2 + 64)
ticket = struct.pack('>H', len(ticket) + 4) + b"\x04\x00" + struct.pack('>H', len(ticket)) + ticket
data += b"\x16" + self.tls_version + ticket #New session ticket
data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
finish_len = random.choice([32, 40])
data += b"\x16" + self.tls_version + struct.pack('>H', finish_len) + os.urandom(finish_len - 10) #Finished
data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10]
if buf:
data += self.server_encode(buf)
return data
def decode_error_return(self, buf):
self.handshake_status = -1
if self.overhead > 0:
self.server_info.overhead -= self.overhead
self.overhead = 0
if self.method in ['tls1.2_ticket_auth', 'tls1.2_ticket_fastauth']:
return (b'E'*2048, False, False)
return (buf, True, False)
def server_decode(self, buf):
if self.handshake_status == -1:
return (buf, True, False)
if (self.handshake_status & 4) == 4:
ret = b''
self.recv_buffer += buf
while len(self.recv_buffer) > 5:
if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
raise Exception('server_decode appdata error')
size = struct.unpack('>H', self.recv_buffer[3:5])[0]
if len(self.recv_buffer) < size + 5:
break
ret += self.recv_buffer[5:size+5]
self.recv_buffer = self.recv_buffer[size+5:]
return (ret, True, False)
if (self.handshake_status & 1) == 1:
self.recv_buffer += buf
buf = self.recv_buffer
verify = buf
if len(buf) < 11:
raise Exception('server_decode data error')
if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): #ChangeCipherSpec
raise Exception('server_decode data error')
buf = buf[6:]
if not match_begin(buf, b"\x16" + self.tls_version + b"\x00"): #Finished
raise Exception('server_decode data error')
verify_len = struct.unpack('>H', buf[3:5])[0] + 1 # 11 - 10
if len(verify) < verify_len + 10:
return (b'', False, False)
if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]:
raise Exception('server_decode data error')
self.recv_buffer = verify[verify_len + 10:]
status = self.handshake_status
self.handshake_status |= 4
ret = self.server_decode(b'')
            return ret
#raise Exception("handshake data = %s" % (binascii.hexlify(buf)))
self.recv_buffer += buf
buf = self.recv_buffer
ogn_buf = buf
if len(buf) < 3:
return (b'', False, False)
if not match_begin(buf, b'\x16\x03\x01'):
return self.decode_error_return(ogn_buf)
buf = buf[3:]
header_len = struct.unpack('>H', buf[:2])[0]
if header_len > len(buf) - 2:
return (b'', False, False)
self.recv_buffer = self.recv_buffer[header_len + 5:]
self.handshake_status = 1
buf = buf[2:header_len + 2]
if not match_begin(buf, b'\x01\x00'): #client hello
logging.info("tls_auth not client hello message")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
logging.info("tls_auth wrong message size")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
if not match_begin(buf, self.tls_version):
logging.info("tls_auth wrong tls version")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
verifyid = buf[:32]
buf = buf[32:]
sessionid_len = ord(buf[0])
if sessionid_len < 32:
logging.info("tls_auth wrong sessionid_len")
return self.decode_error_return(ogn_buf)
sessionid = buf[1:sessionid_len + 1]
buf = buf[sessionid_len+1:]
self.client_id = sessionid
sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], hashlib.sha1).digest()[:10]
utc_time = struct.unpack('>I', verifyid[:4])[0]
time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
if self.server_info.obfs_param:
try:
self.max_time_dif = int(self.server_info.obfs_param)
except:
pass
if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
logging.info("tls_auth wrong time")
return self.decode_error_return(ogn_buf)
if sha1 != verifyid[22:]:
logging.info("tls_auth wrong sha1")
return self.decode_error_return(ogn_buf)
if self.server_info.data.client_data.get(verifyid[:22]):
logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
return self.decode_error_return(ogn_buf)
self.server_info.data.client_data.sweep()
self.server_info.data.client_data[verifyid[:22]] = sessionid
if len(self.recv_buffer) >= 11:
ret = self.server_decode(b'')
return (ret[0], True, True)
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (b'', False, True)
def create_tls_ticket_auth_obfs(method):
return tls_ticket_auth(method) | null |
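The application-data framing used throughout client_encode/server_encode above is plain TLS record framing: a 0x17 type byte, the 2-byte version, a 2-byte big-endian length, then the payload. A standalone sketch:

```python
import struct

def frame_appdata(buf, tls_version=b'\x03\x03'):
    # TLS ApplicationData record: type | version | length | payload
    return b'\x17' + tls_version + struct.pack('>H', len(buf)) + buf

assert frame_appdata(b'hello') == b'\x17\x03\x03\x00\x05hello'
```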
161,289 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False | null |
161,290 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import bisect
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class auth_chain_a(auth_base):
def __init__(self, method):
super(auth_chain_a, self).__init__(method)
self.hashfunc = hashlib.md5
self.recv_buf = b''
self.unit_len = 2800
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
self.salt = b"auth_chain_a"
self.no_compatible_method = 'auth_chain_a'
self.pack_id = 1
self.recv_id = 1
self.user_id = None
self.user_id_num = 0
self.user_key = None
self.overhead = 4
self.client_over_head = 4
self.last_client_hash = b''
self.last_server_hash = b''
self.random_client = xorshift128plus()
self.random_server = xorshift128plus()
self.encryptor = None
def init_data(self):
return obfs_auth_chain_data(self.method)
def get_overhead(self, direction): # direction: true for c->s false for s->c
return self.overhead
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def trapezoid_random_float(self, d):
if d == 0:
return random.random()
s = random.random()
a = 1 - d
return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d)
def trapezoid_random_int(self, max_val, d):
v = self.trapezoid_random_float(d)
return int(v * max_val)
def rnd_data_len(self, buf_size, last_hash, random):
if buf_size > 1440:
return 0
random.init_from_bin_len(last_hash, buf_size)
if buf_size > 1300:
return random.next() % 31
if buf_size > 900:
return random.next() % 127
if buf_size > 400:
return random.next() % 521
return random.next() % 1021
def udp_rnd_data_len(self, last_hash, random):
random.init_from_bin(last_hash)
return random.next() % 127
def rnd_start_pos(self, rand_len, random):
if rand_len > 0:
return random.next() % 8589934609 % rand_len
return 0
def rnd_data(self, buf_size, buf, last_hash, random):
rand_len = self.rnd_data_len(buf_size, last_hash, random)
rnd_data_buf = os.urandom(rand_len)
if buf_size == 0:
return rnd_data_buf
else:
if rand_len > 0:
start_pos = self.rnd_start_pos(rand_len, random)
return rnd_data_buf[:start_pos] + buf + rnd_data_buf[start_pos:]
else:
return buf
def pack_client_data(self, buf):
buf = self.encryptor.encrypt(buf)
data = self.rnd_data(len(buf), buf, self.last_client_hash, self.random_client)
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
length = len(buf) ^ struct.unpack('<H', self.last_client_hash[14:])[0]
data = struct.pack('<H', length) + data
self.last_client_hash = hmac.new(mac_key, data, self.hashfunc).digest()
data += self.last_client_hash[:2]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_server_data(self, buf):
buf = self.encryptor.encrypt(buf)
data = self.rnd_data(len(buf), buf, self.last_server_hash, self.random_server)
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
length = len(buf) ^ struct.unpack('<H', self.last_server_hash[14:])[0]
data = struct.pack('<H', length) + data
self.last_server_hash = hmac.new(mac_key, data, self.hashfunc).digest()
data += self.last_server_hash[:2]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_auth_data(self, auth_data, buf):
data = auth_data
data_len = 12 + 4 + 16 + 4
data = data + (struct.pack('<H', self.server_info.overhead) + struct.pack('<H', 0))
mac_key = self.server_info.iv + self.server_info.key
check_head = os.urandom(4)
self.last_client_hash = hmac.new(mac_key, check_head, self.hashfunc).digest()
check_head += self.last_client_hash[:8]
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = items[1]
uid = struct.pack('<I', int(items[0]))
except:
uid = os.urandom(4)
else:
uid = os.urandom(4)
if self.user_key is None:
self.user_key = self.server_info.key
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc', b'\x00' * 16)
uid = struct.unpack('<I', uid)[0] ^ struct.unpack('<I', self.last_client_hash[8:12])[0]
uid = struct.pack('<I', uid)
data = uid + encryptor.encrypt(data)[16:]
self.last_server_hash = hmac.new(self.user_key, data, self.hashfunc).digest()
data = check_head + data + self.last_server_hash[:4]
self.encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(self.last_client_hash)), 'rc4')
return data + self.pack_client_data(buf)
def auth_data(self):
utc_time = int(time.time()) & 0xFFFFFFFF
if self.server_info.data.connection_id > 0xFF000000:
self.server_info.data.local_client_id = b''
if not self.server_info.data.local_client_id:
self.server_info.data.local_client_id = os.urandom(4)
logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),))
self.server_info.data.connection_id = struct.unpack('<I', os.urandom(4))[0] & 0xFFFFFF
self.server_info.data.connection_id += 1
return b''.join([struct.pack('<I', utc_time),
self.server_info.data.local_client_id,
struct.pack('<I', self.server_info.data.connection_id)])
def client_pre_encrypt(self, buf):
ret = b''
ogn_data_len = len(buf)
if not self.has_sent_header:
head_size = self.get_head_size(buf, 30)
datalen = min(len(buf), random.randint(0, 31) + head_size)
ret += self.pack_auth_data(self.auth_data(), buf[:datalen])
buf = buf[datalen:]
self.has_sent_header = True
while len(buf) > self.unit_len:
ret += self.pack_client_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_client_data(buf)
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
data_len = struct.unpack('<H', self.recv_buf[:2])[0] ^ struct.unpack('<H', self.last_server_hash[14:16])[0]
rand_len = self.rnd_data_len(data_len, self.last_server_hash, self.random_server)
length = data_len + rand_len
if length >= 4096:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length + 4 > len(self.recv_buf):
break
server_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest()
if server_hash[:2] != self.recv_buf[length + 2 : length + 4]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
                raise Exception('client_post_decrypt data incorrect checksum')
pos = 2
if data_len > 0 and rand_len > 0:
pos = 2 + self.rnd_start_pos(rand_len, self.random_server)
out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos])
self.last_server_hash = server_hash
if self.recv_id == 1:
self.server_info.tcp_mss = struct.unpack('<H', out_buf[:2])[0]
out_buf = out_buf[2:]
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
self.recv_buf = self.recv_buf[length + 4:]
return out_buf
def server_pre_encrypt(self, buf):
if self.raw_trans:
return buf
ret = b''
if self.pack_id == 1:
tcp_mss = self.server_info.tcp_mss if self.server_info.tcp_mss < 1500 else 1500
self.server_info.tcp_mss = tcp_mss
buf = struct.pack('<H', tcp_mss) + buf
self.unit_len = tcp_mss - self.client_over_head
while len(buf) > self.unit_len:
ret += self.pack_server_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_server_data(buf)
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
sendback = False
if not self.has_recv_header:
if len(self.recv_buf) >= 12 or len(self.recv_buf) in [7, 8]:
recv_len = min(len(self.recv_buf), 12)
mac_key = self.server_info.recv_iv + self.server_info.key
md5data = hmac.new(mac_key, self.recv_buf[:4], self.hashfunc).digest()
if md5data[:recv_len - 4] != self.recv_buf[4:recv_len]:
return self.not_match_return(self.recv_buf)
if len(self.recv_buf) < 12 + 24:
return (b'', False)
self.last_client_hash = md5data
uid = struct.unpack('<I', self.recv_buf[12:16])[0] ^ struct.unpack('<I', md5data[8:12])[0]
self.user_id_num = uid
uid = struct.pack('<I', uid)
if uid in self.server_info.users:
self.user_id = uid
self.user_key = self.server_info.users[uid]
self.server_info.update_user_func(uid)
else:
self.user_id_num = 0
if not self.server_info.users:
self.user_key = self.server_info.key
else:
self.user_key = self.server_info.recv_iv
md5data = hmac.new(self.user_key, self.recv_buf[12 : 12 + 20], self.hashfunc).digest()
if md5data[:4] != self.recv_buf[32:36]:
                logging.error('%s data incorrect auth HMAC-MD5 from %s:%d, data %s' % (self.no_compatible_method, self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf)))
if len(self.recv_buf) < 36:
return (b'', False)
return self.not_match_return(self.recv_buf)
self.last_server_hash = md5data
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc')
head = encryptor.decrypt(b'\x00' * 16 + self.recv_buf[16:32] + b'\x00') # need an extra byte or recv empty
self.client_over_head = struct.unpack('<H', head[12:14])[0]
utc_time = struct.unpack('<I', head[:4])[0]
client_id = struct.unpack('<I', head[4:8])[0]
connection_id = struct.unpack('<I', head[8:12])[0]
time_dif = common.int32(utc_time - (int(time.time()) & 0xffffffff))
if time_dif < -self.max_time_dif or time_dif > self.max_time_dif:
logging.info('%s: wrong timestamp, time_dif %d, data %s' % (self.no_compatible_method, time_dif, binascii.hexlify(head)))
return self.not_match_return(self.recv_buf)
elif self.server_info.data.insert(self.user_id, client_id, connection_id):
self.has_recv_header = True
self.client_id = client_id
self.connection_id = connection_id
else:
logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf)))
return self.not_match_return(self.recv_buf)
self.encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(self.last_client_hash)), 'rc4')
self.recv_buf = self.recv_buf[36:]
self.has_recv_header = True
sendback = True
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
data_len = struct.unpack('<H', self.recv_buf[:2])[0] ^ struct.unpack('<H', self.last_client_hash[14:16])[0]
rand_len = self.rnd_data_len(data_len, self.last_client_hash, self.random_client)
length = data_len + rand_len
if length >= 4096:
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
logging.info(self.no_compatible_method + ': over size')
return (b'E'*2048, False)
else:
                    raise Exception('server_post_decrypt data error')
if length + 4 > len(self.recv_buf):
break
client_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest()
if client_hash[:2] != self.recv_buf[length + 2 : length + 4]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
return (b'E'*2048, False)
else:
                    raise Exception('server_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = 2
if data_len > 0 and rand_len > 0:
pos = 2 + self.rnd_start_pos(rand_len, self.random_client)
out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos])
self.last_client_hash = client_hash
self.recv_buf = self.recv_buf[length + 4:]
if data_len == 0:
sendback = True
if out_buf:
self.server_info.data.update(self.user_id, self.client_id, self.connection_id)
return (out_buf, sendback)
def client_udp_pre_encrypt(self, buf):
if self.user_key is None:
if b':' in to_bytes(self.server_info.protocol_param):
try:
                    items = to_bytes(self.server_info.protocol_param).split(b':')  # split a bytes value with a bytes separator
self.user_key = self.hashfunc(items[1]).digest()
self.user_id = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_id = os.urandom(4)
self.user_key = self.server_info.key
authdata = os.urandom(3)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, authdata, self.hashfunc).digest()
uid = struct.unpack('<I', self.user_id)[0] ^ struct.unpack('<I', md5data[:4])[0]
uid = struct.pack('<I', uid)
rand_len = self.udp_rnd_data_len(md5data, self.random_client)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.encrypt(buf)
buf = out_buf + os.urandom(rand_len) + authdata + uid
return buf + hmac.new(self.user_key, buf, self.hashfunc).digest()[:1]
def client_udp_post_decrypt(self, buf):
if len(buf) <= 8:
return (b'', None)
if hmac.new(self.user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-1], self.hashfunc).digest()
rand_len = self.udp_rnd_data_len(md5data, self.random_server)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
return encryptor.decrypt(buf[:-8 - rand_len])
def server_udp_pre_encrypt(self, buf, uid):
if uid in self.server_info.users:
user_key = self.server_info.users[uid]
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
authdata = os.urandom(7)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, authdata, self.hashfunc).digest()
rand_len = self.udp_rnd_data_len(md5data, self.random_server)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.encrypt(buf)
buf = out_buf + os.urandom(rand_len) + authdata
return buf + hmac.new(user_key, buf, self.hashfunc).digest()[:1]
def server_udp_post_decrypt(self, buf):
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-5], self.hashfunc).digest()
uid = struct.unpack('<I', buf[-5:-1])[0] ^ struct.unpack('<I', md5data[:4])[0]
uid = struct.pack('<I', uid)
if uid in self.server_info.users:
user_key = self.server_info.users[uid]
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
rand_len = self.udp_rnd_data_len(md5data, self.random_client)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.decrypt(buf[:-8 - rand_len])
return (out_buf, uid)
def dispose(self):
self.server_info.data.remove(self.user_id, self.client_id)
def create_auth_chain_a(method):
return auth_chain_a(method) | null |
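The 2-byte length field written by pack_client_data/pack_server_data above is masked by XORing it with bytes 14..15 of the previous packet's HMAC, so lengths never appear on the wire in the clear. A standalone sketch of the mask (last_hash is a stand-in value):

```python
import struct

last_hash = bytes(range(16))  # stand-in for last_client_hash
plain_len = 1200
mask = struct.unpack('<H', last_hash[14:])[0]
header = struct.pack('<H', plain_len ^ mask)
# the receiver applies the same XOR to recover the true length
assert struct.unpack('<H', header)[0] ^ mask == plain_len
```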
161,291 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import bisect
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class auth_chain_b(auth_chain_a):
def __init__(self, method):
super(auth_chain_b, self).__init__(method)
self.salt = b"auth_chain_b"
self.no_compatible_method = 'auth_chain_b'
self.data_size_list = []
self.data_size_list2 = []
def init_data_size(self, key):
if self.data_size_list:
self.data_size_list = []
self.data_size_list2 = []
random = xorshift128plus()
random.init_from_bin(key)
list_len = random.next() % 8 + 4
for i in range(0, list_len):
            self.data_size_list.append(int(random.next() % 2340 % 2040 % 1440))
self.data_size_list.sort()
list_len = random.next() % 16 + 8
for i in range(0, list_len):
            self.data_size_list2.append(int(random.next() % 2340 % 2040 % 1440))
self.data_size_list2.sort()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
self.init_data_size(self.server_info.key)
def rnd_data_len(self, buf_size, last_hash, random):
if buf_size >= 1440:
return 0
random.init_from_bin_len(last_hash, buf_size)
pos = bisect.bisect_left(self.data_size_list, buf_size + self.server_info.overhead)
final_pos = pos + random.next() % (len(self.data_size_list))
if final_pos < len(self.data_size_list):
return self.data_size_list[final_pos] - buf_size - self.server_info.overhead
pos = bisect.bisect_left(self.data_size_list2, buf_size + self.server_info.overhead)
final_pos = pos + random.next() % (len(self.data_size_list2))
if final_pos < len(self.data_size_list2):
return self.data_size_list2[final_pos] - buf_size - self.server_info.overhead
if final_pos < pos + len(self.data_size_list2) - 1:
return 0
if buf_size > 1300:
return random.next() % 31
if buf_size > 900:
return random.next() % 127
if buf_size > 400:
return random.next() % 521
return random.next() % 1021
def create_auth_chain_b(method):
return auth_chain_b(method) | null |
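The bucket scheme in rnd_data_len above pads each packet up to the nearest "common" size drawn from a sorted list, hiding the true payload length. A hypothetical illustration with a stand-in size list:

```python
import bisect

sizes = [321, 600, 900, 1000, 1400]        # stand-in for data_size_list
buf_size, overhead = 550, 9
pos = bisect.bisect_left(sizes, buf_size + overhead)
target = sizes[pos]                        # first bucket that fits payload + overhead
assert target - buf_size - overhead == 41  # this many random bytes get appended
```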
161,292 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import bisect
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False | null |
161,293 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class verify_deflate(verify_base):
def __init__(self, method):
super(verify_deflate, self).__init__(method)
self.recv_buf = b''
self.unit_len = 32700
self.decrypt_packet_num = 0
self.raw_trans = False
def pack_data(self, buf):
if len(buf) == 0:
return b''
        data = zlib.compress(buf)
        # swap the 2-byte zlib header for a big-endian length field; the
        # receiver re-prepends b'x\x9c' before calling zlib.decompress
        data = struct.pack('>H', len(data)) + data[2:]
return data
def client_pre_encrypt(self, buf):
ret = b''
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 2:
length = struct.unpack('>H', self.recv_buf[:2])[0]
if length >= 32768 or length < 6:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length > len(self.recv_buf):
break
out_buf += zlib.decompress(b'x\x9c' + self.recv_buf[2:length])
self.recv_buf = self.recv_buf[length:]
if out_buf:
self.decrypt_packet_num += 1
return out_buf
def server_pre_encrypt(self, buf):
ret = b''
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 2:
length = struct.unpack('>H', self.recv_buf[:2])[0]
if length >= 32768 or length < 6:
self.raw_trans = True
self.recv_buf = b''
if self.decrypt_packet_num == 0:
return (b'E'*2048, False)
else:
                    raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
out_buf += zlib.decompress(b'\x78\x9c' + self.recv_buf[2:length])
self.recv_buf = self.recv_buf[length:]
if out_buf:
self.decrypt_packet_num += 1
return (out_buf, False)
def create_verify_deflate(method):
return verify_deflate(method) | null |
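A round trip of the deflate framing above (hypothetical demo): the 2-byte zlib header (b'x\x9c' at the default compression level) is replaced by a big-endian length on the wire and re-prepended before decompression:

```python
import struct
import zlib

payload = b'A' * 100
comp = zlib.compress(payload)
wire = struct.pack('>H', len(comp)) + comp[2:]  # length replaces the zlib header
length = struct.unpack('>H', wire[:2])[0]
assert zlib.decompress(b'x\x9c' + wire[2:length]) == payload
```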
161,294 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False | null |
161,295 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks.common import ord
class plain(object):
def __init__(self, method):
self.method = method
self.server_info = None
def init_data(self):
return b''
def get_overhead(self, direction): # direction: true for c->s false for s->c
return 0
def get_server_info(self):
return self.server_info
def set_server_info(self, server_info):
self.server_info = server_info
def client_pre_encrypt(self, buf):
return buf
def client_encode(self, buf):
return buf
def client_decode(self, buf):
# (buffer_to_recv, is_need_to_encode_and_send_back)
return (buf, False)
def client_post_decrypt(self, buf):
return buf
def server_pre_encrypt(self, buf):
return buf
def server_encode(self, buf):
return buf
def server_decode(self, buf):
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (buf, True, False)
def server_post_decrypt(self, buf):
return (buf, False)
def client_udp_pre_encrypt(self, buf):
return buf
def client_udp_post_decrypt(self, buf):
return buf
def server_udp_pre_encrypt(self, buf, uid):
return buf
def server_udp_post_decrypt(self, buf):
return (buf, None)
def dispose(self):
pass
def get_head_size(self, buf, def_value):
if len(buf) < 2:
return def_value
head_type = ord(buf[0]) & 0x7
if head_type == 1:
return 7
if head_type == 4:
return 19
if head_type == 3:
return 4 + ord(buf[1])
return def_value
def create_obfs(method):
return plain(method) | null |
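Spot checks for get_head_size above: address type 1 is IPv4 (7 bytes), type 4 is IPv6 (19), type 3 is a hostname (4 + name length), anything else falls back to the default (assuming the plain class above, with its compat ord import, is in scope):

```python
p = plain('plain')
assert p.get_head_size(b'\x01\x08\x08\x08\x08\x00\x35', 30) == 7     # IPv4 + port
assert p.get_head_size(b'\x03\x0ewww.google.com\x00\x50', 30) == 18  # 4 + 14-byte name
assert p.get_head_size(b'', 30) == 30                                # too short: default
```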
161,296 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class http_simple(plain.plain):
def __init__(self, method):
self.method = method
self.has_sent_header = False
self.has_recv_header = False
self.host = None
self.port = 0
self.recv_buffer = b''
self.user_agent = [b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/44.0",
b"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36",
b"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0",
b"Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)",
b"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
b"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)",
b"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
b"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36",
b"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3",
b"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3"]
def encode_head(self, buf):
hexstr = binascii.hexlify(buf)
chs = []
for i in range(0, len(hexstr), 2):
chs.append(b"%" + hexstr[i:i+2])
return b''.join(chs)
def client_encode(self, buf):
if self.has_sent_header:
return buf
head_size = len(self.server_info.iv) + self.server_info.head_len
if len(buf) - head_size > 64:
headlen = head_size + random.randint(0, 64)
else:
headlen = len(buf)
headdata = buf[:headlen]
buf = buf[headlen:]
port = b''
if self.server_info.port != 80:
port = b':' + to_bytes(str(self.server_info.port))
body = None
hosts = (self.server_info.obfs_param or self.server_info.host)
pos = hosts.find("#")
if pos >= 0:
body = hosts[pos + 1:].replace("\n", "\r\n")
body = body.replace("\\n", "\r\n")
hosts = hosts[:pos]
hosts = hosts.split(',')
host = random.choice(hosts)
http_head = b"GET /" + self.encode_head(headdata) + b" HTTP/1.1\r\n"
http_head += b"Host: " + to_bytes(host) + port + b"\r\n"
if body:
http_head += body + "\r\n\r\n"
else:
http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n"
http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nDNT: 1\r\nConnection: keep-alive\r\n\r\n"
self.has_sent_header = True
return http_head + buf
def client_decode(self, buf):
if self.has_recv_header:
return (buf, False)
pos = buf.find(b'\r\n\r\n')
if pos >= 0:
self.has_recv_header = True
return (buf[pos + 4:], False)
else:
return (b'', False)
def server_encode(self, buf):
if self.has_sent_header:
return buf
header = b'HTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Encoding: gzip\r\nContent-Type: text/html\r\nDate: '
header += to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
header += b'\r\nServer: nginx\r\nVary: Accept-Encoding\r\n\r\n'
self.has_sent_header = True
return header + buf
def get_data_from_http_header(self, buf):
ret_buf = b''
lines = buf.split(b'\r\n')
if lines and len(lines) > 1:
hex_items = lines[0].split(b'%')
if hex_items and len(hex_items) > 1:
for index in range(1, len(hex_items)):
if len(hex_items[index]) < 2:
ret_buf += binascii.unhexlify('0' + hex_items[index])
break
elif len(hex_items[index]) > 2:
ret_buf += binascii.unhexlify(hex_items[index][:2])
break
else:
ret_buf += binascii.unhexlify(hex_items[index])
return ret_buf
return b''
def get_host_from_http_header(self, buf):
ret_buf = b''
lines = buf.split(b'\r\n')
if lines and len(lines) > 1:
for line in lines:
if match_begin(line, b"Host: "):
return common.to_str(line[6:])
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http_simple':
return (b'E'*2048, False, False)
return (buf, True, False)
def error_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
return (b'E'*2048, False, False)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.recv_buffer += buf
buf = self.recv_buffer
if len(buf) > 10:
if match_begin(buf, b'GET ') or match_begin(buf, b'POST '):
if len(buf) > 65536:
self.recv_buffer = None
logging.warning('http_simple: over size')
return self.not_match_return(buf)
else:  # not an HTTP header; fall back to the original protocol
self.recv_buffer = None
logging.debug('http_simple: not match begin')
return self.not_match_return(buf)
else:
return (b'', True, False)
if b'\r\n\r\n' in buf:
datas = buf.split(b'\r\n\r\n', 1)
ret_buf = self.get_data_from_http_header(buf)
host = self.get_host_from_http_header(buf)
if host and self.server_info.obfs_param:
pos = host.find(":")
if pos >= 0:
host = host[:pos]
hosts = self.server_info.obfs_param.split(',')
if host not in hosts:
return self.not_match_return(buf)
if len(ret_buf) < 4:
return self.error_return(buf)
if len(datas) > 1:
ret_buf += datas[1]
if len(ret_buf) >= 13:
self.has_recv_header = True
return (ret_buf, True, False)
return self.not_match_return(buf)
else:
return (b'', True, False)
def create_http_simple_obfs(method):
return http_simple(method) | null |
161,297 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class http_post(http_simple):
def __init__(self, method):
super(http_post, self).__init__(method)
def boundary(self):
return to_bytes(''.join([random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") for i in range(32)]))
def client_encode(self, buf):
if self.has_sent_header:
return buf
head_size = len(self.server_info.iv) + self.server_info.head_len
if len(buf) - head_size > 64:
headlen = head_size + random.randint(0, 64)
else:
headlen = len(buf)
headdata = buf[:headlen]
buf = buf[headlen:]
port = b''
if self.server_info.port != 80:
port = b':' + to_bytes(str(self.server_info.port))
body = None
hosts = (self.server_info.obfs_param or self.server_info.host)
pos = hosts.find("#")
if pos >= 0:
body = hosts[pos + 1:].replace("\\n", "\r\n")
hosts = hosts[:pos]
hosts = hosts.split(',')
host = random.choice(hosts)
http_head = b"POST /" + self.encode_head(headdata) + b" HTTP/1.1\r\n"
http_head += b"Host: " + to_bytes(host) + port + b"\r\n"
if body:
http_head += to_bytes(body) + b"\r\n\r\n"
else:
http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n"
http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\n"
http_head += b"Content-Type: multipart/form-data; boundary=" + self.boundary() + b"\r\nDNT: 1\r\n"
http_head += b"Connection: keep-alive\r\n\r\n"
self.has_sent_header = True
return http_head + buf
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http_post':
return (b'E'*2048, False, False)
return (buf, True, False)
def create_http_post_obfs(method):
return http_post(method) | null |
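# Standalone sketch of the boundary generator used by http_post above
# (illustration only; string.ascii_letters + string.digits is the same
# 62-character alphabet as the literal in boundary()):
import string
def boundary_sketch():
    alphabet = string.ascii_letters + string.digits
    return to_bytes(''.join(random.choice(alphabet) for _ in range(32)))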
161,298 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class random_head(plain.plain):
def __init__(self, method):
super(random_head, self).__init__(method)
def client_encode(self, buf):
pass  # body elided in this dump
def client_decode(self, buf):
pass  # body elided in this dump
def server_encode(self, buf):
pass  # body elided in this dump
def server_decode(self, buf):
pass  # body elided in this dump
def create_random_head_obfs(method):
return random_head(method) | null |
161,299 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False | null |
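# Examples:
#   match_begin(b'GET / HTTP/1.1', b'GET ')  -> True
#   match_begin(b'GE', b'GET ')              -> False (prefix longer than input)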
161,300 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class auth_sha1_v4(auth_base):
def __init__(self, method):
super(auth_sha1_v4, self).__init__(method)
def init_data(self):
pass  # body elided in this dump
def set_server_info(self, server_info):
pass  # body elided in this dump
def rnd_data(self, buf_size):
pass  # body elided in this dump
def pack_data(self, buf):
pass  # body elided in this dump
def pack_auth_data(self, buf):
pass  # body elided in this dump
def auth_data(self):
pass  # body elided in this dump
def client_pre_encrypt(self, buf):
pass  # body elided in this dump
def client_post_decrypt(self, buf):
pass  # body elided in this dump
def server_pre_encrypt(self, buf):
pass  # body elided in this dump
def server_post_decrypt(self, buf):
pass  # body elided in this dump
def create_auth_sha1_v4(method):
return auth_sha1_v4(method) | null |
161,301 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class auth_aes128_sha1(auth_base):
def __init__(self, method, hashfunc):
super(auth_aes128_sha1, self).__init__(method)
self.hashfunc = hashfunc
self.recv_buf = b''
self.unit_len = 8100
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
self.salt = b"auth_aes128_md5" if hashfunc == hashlib.md5 else b"auth_aes128_sha1"
self.no_compatible_method = 'auth_aes128_md5' if hashfunc == hashlib.md5 else 'auth_aes128_sha1'
self.extra_wait_size = struct.unpack('>H', os.urandom(2))[0] % 1024
self.pack_id = 1
self.recv_id = 1
self.user_id = None
self.user_key = None
self.last_rnd_len = 0
self.overhead = 9
def init_data(self):
return obfs_auth_mu_data()
def get_overhead(self, direction):  # direction: True for client->server, False for server->client
return self.overhead
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def trapezoid_random_float(self, d):
if d == 0:
return random.random()
s = random.random()
a = 1 - d
return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d)
def trapezoid_random_int(self, max_val, d):
v = self.trapezoid_random_float(d)
return int(v * max_val)
def rnd_data_len(self, buf_size, full_buf_size):
if full_buf_size >= self.server_info.buffer_size:
return 0
tcp_mss = self.server_info.tcp_mss
rev_len = tcp_mss - buf_size - 9
if rev_len == 0:
return 0
if rev_len < 0:
if rev_len > -tcp_mss:
return self.trapezoid_random_int(rev_len + tcp_mss, -0.3)
return common.ord(os.urandom(1)[0]) % 32
if buf_size > 900:
return struct.unpack('>H', os.urandom(2))[0] % rev_len
return self.trapezoid_random_int(rev_len, -0.3)
def rnd_data(self, buf_size, full_buf_size):
data_len = self.rnd_data_len(buf_size, full_buf_size)
if data_len < 128:
return common.chr(data_len + 1) + os.urandom(data_len)
return common.chr(255) + struct.pack('<H', data_len + 1) + os.urandom(data_len - 2)
def pack_data(self, buf, full_buf_size):
data = self.rnd_data(len(buf), full_buf_size) + buf
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
mac = hmac.new(mac_key, struct.pack('<H', data_len), self.hashfunc).digest()[:2]
data = struct.pack('<H', data_len) + mac + data
data += hmac.new(mac_key, data, self.hashfunc).digest()[:4]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_auth_data(self, auth_data, buf):
if len(buf) == 0:
return b''
if len(buf) > 400:
rnd_len = struct.unpack('<H', os.urandom(2))[0] % 512
else:
rnd_len = struct.unpack('<H', os.urandom(2))[0] % 1024
data = auth_data
data_len = 7 + 4 + 16 + 4 + len(buf) + rnd_len + 4
data = data + struct.pack('<H', data_len) + struct.pack('<H', rnd_len)
mac_key = self.server_info.iv + self.server_info.key
uid = os.urandom(4)
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = self.hashfunc(items[1]).digest()
uid = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_key = self.server_info.key
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc', b'\x00' * 16)
data = uid + encryptor.encrypt(data)[16:]
data += hmac.new(mac_key, data, self.hashfunc).digest()[:4]
check_head = os.urandom(1)
check_head += hmac.new(mac_key, check_head, self.hashfunc).digest()[:6]
data = check_head + data + os.urandom(rnd_len) + buf
data += hmac.new(self.user_key, data, self.hashfunc).digest()[:4]
return data
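# Handshake frame built above (sizes in bytes):
#   [1 random][6 HMAC(mac_key, random)][4 uid][16 AES-128-CBC block]
#   [4 HMAC(mac_key, uid+block)][rnd_len padding][payload][4 HMAC(user_key, all)]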
def auth_data(self):
utc_time = int(time.time()) & 0xFFFFFFFF
if self.server_info.data.connection_id > 0xFF000000:
self.server_info.data.local_client_id = b''
if not self.server_info.data.local_client_id:
self.server_info.data.local_client_id = os.urandom(4)
logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),))
self.server_info.data.connection_id = struct.unpack('<I', os.urandom(4))[0] & 0xFFFFFF
self.server_info.data.connection_id += 1
return b''.join([struct.pack('<I', utc_time),
self.server_info.data.local_client_id,
struct.pack('<I', self.server_info.data.connection_id)])
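# auth_data() returns a fixed 12-byte blob:
#   [4 little-endian UTC timestamp][4 random local_client_id][4 connection counter]
# The client id is rotated once the counter approaches 0xFF000000.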
def client_pre_encrypt(self, buf):
ret = b''
ogn_data_len = len(buf)
if not self.has_sent_header:
head_size = self.get_head_size(buf, 30)
datalen = min(len(buf), random.randint(0, 31) + head_size)
ret += self.pack_auth_data(self.auth_data(), buf[:datalen])
buf = buf[datalen:]
self.has_sent_header = True
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len], ogn_data_len)
buf = buf[self.unit_len:]
ret += self.pack_data(buf, ogn_data_len)
self.last_rnd_len = ogn_data_len
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
mac = hmac.new(mac_key, self.recv_buf[:2], self.hashfunc).digest()[:2]
if mac != self.recv_buf[2:4]:
raise Exception('client_post_decrypt data incorrect mac')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length > len(self.recv_buf):
break
if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
return out_buf
def server_pre_encrypt(self, buf):
if self.raw_trans:
return buf
ret = b''
ogn_data_len = len(buf)
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len], ogn_data_len)
buf = buf[self.unit_len:]
ret += self.pack_data(buf, ogn_data_len)
self.last_rnd_len = ogn_data_len
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
sendback = False
if not self.has_recv_header:
if len(self.recv_buf) >= 7 or len(self.recv_buf) in [2, 3]:
recv_len = min(len(self.recv_buf), 7)
mac_key = self.server_info.recv_iv + self.server_info.key
sha1data = hmac.new(mac_key, self.recv_buf[:1], self.hashfunc).digest()[:recv_len - 1]
if sha1data != self.recv_buf[1:recv_len]:
return self.not_match_return(self.recv_buf)
if len(self.recv_buf) < 31:
return (b'', False)
sha1data = hmac.new(mac_key, self.recv_buf[7:27], self.hashfunc).digest()[:4]
if sha1data != self.recv_buf[27:31]:
logging.error('%s data incorrect auth HMAC-SHA1 from %s:%d, data %s' % (self.no_compatible_method, self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf)))
if len(self.recv_buf) < 31 + self.extra_wait_size:
return (b'', False)
return self.not_match_return(self.recv_buf)
uid = self.recv_buf[7:11]
if uid in self.server_info.users:
self.user_id = uid
self.user_key = self.hashfunc(self.server_info.users[uid]).digest()
self.server_info.update_user_func(uid)
else:
if not self.server_info.users:
self.user_key = self.server_info.key
else:
self.user_key = self.server_info.recv_iv
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc')
head = encryptor.decrypt(b'\x00' * 16 + self.recv_buf[11:27] + b'\x00')  # feed one extra byte, or the cipher returns nothing for the final block
length = struct.unpack('<H', head[12:14])[0]
if len(self.recv_buf) < length:
return (b'', False)
utc_time = struct.unpack('<I', head[:4])[0]
client_id = struct.unpack('<I', head[4:8])[0]
connection_id = struct.unpack('<I', head[8:12])[0]
rnd_len = struct.unpack('<H', head[14:16])[0]
if hmac.new(self.user_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
return self.not_match_return(self.recv_buf)
time_dif = common.int32(utc_time - (int(time.time()) & 0xffffffff))
if time_dif < -self.max_time_dif or time_dif > self.max_time_dif:
logging.info('%s: wrong timestamp, time_dif %d, data %s' % (self.no_compatible_method, time_dif, binascii.hexlify(head)))
return self.not_match_return(self.recv_buf)
elif self.server_info.data.insert(self.user_id, client_id, connection_id):
self.has_recv_header = True
out_buf = self.recv_buf[31 + rnd_len:length - 4]
self.client_id = client_id
self.connection_id = connection_id
else:
logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf)))
return self.not_match_return(self.recv_buf)
self.recv_buf = self.recv_buf[length:]
self.has_recv_header = True
sendback = True
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
mac = hmac.new(mac_key, self.recv_buf[:2], self.hashfunc).digest()[:2]
if mac != self.recv_buf[2:4]:
self.raw_trans = True
logging.info(self.no_compatible_method + ': wrong crc')
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
logging.info(self.no_compatible_method + ': over size')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
if pos == length - 4:
sendback = True
if out_buf:
self.server_info.data.update(self.user_id, self.client_id, self.connection_id)
return (out_buf, sendback)
def client_udp_pre_encrypt(self, buf):
if self.user_key is None:
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = self.hashfunc(items[1]).digest()
self.user_id = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_id = os.urandom(4)
self.user_key = self.server_info.key
buf += self.user_id
return buf + hmac.new(self.user_key, buf, self.hashfunc).digest()[:4]
def client_udp_post_decrypt(self, buf):
user_key = self.server_info.key
if hmac.new(user_key, buf[:-4], self.hashfunc).digest()[:4] != buf[-4:]:
return b''
return buf[:-4]
def server_udp_pre_encrypt(self, buf, uid):
user_key = self.server_info.key
return buf + hmac.new(user_key, buf, self.hashfunc).digest()[:4]
def server_udp_post_decrypt(self, buf):
uid = buf[-8:-4]
if uid in self.server_info.users:
user_key = self.hashfunc(self.server_info.users[uid]).digest()
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-4], self.hashfunc).digest()[:4] != buf[-4:]:
return (b'', None)
return (buf[:-8], uid)
def create_auth_aes128_md5(method):
return auth_aes128_sha1(method, hashlib.md5) | null |
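# Minimal decoder sketch for the data frames produced by pack_data above
# (illustration only; mirrors the checks in client_post_decrypt but raises
# ValueError instead of managing plugin state):
def unpack_frame_sketch(frame, user_key, recv_id, hashfunc):
    mac_key = user_key + struct.pack('<I', recv_id)
    if hmac.new(mac_key, frame[:2], hashfunc).digest()[:2] != frame[2:4]:
        raise ValueError('incorrect length MAC')
    length = struct.unpack('<H', frame[:2])[0]
    if hmac.new(mac_key, frame[:length - 4], hashfunc).digest()[:4] != frame[length - 4:length]:
        raise ValueError('incorrect frame checksum')
    pos = common.ord(frame[4])
    pos = pos + 4 if pos < 255 else struct.unpack('<H', frame[5:7])[0] + 4
    return frame[pos:length - 4], frame[length:]  # (payload, remaining bytes)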
161,302 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
class auth_aes128_sha1(auth_base):
def __init__(self, method, hashfunc):
super(auth_aes128_sha1, self).__init__(method)
self.hashfunc = hashfunc
self.recv_buf = b''
self.unit_len = 8100
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
self.salt = b"auth_aes128_md5" if hashfunc == hashlib.md5 else b"auth_aes128_sha1"
self.no_compatible_method = 'auth_aes128_md5' if hashfunc == hashlib.md5 else 'auth_aes128_sha1'
self.extra_wait_size = struct.unpack('>H', os.urandom(2))[0] % 1024
self.pack_id = 1
self.recv_id = 1
self.user_id = None
self.user_key = None
self.last_rnd_len = 0
self.overhead = 9
def init_data(self):
return obfs_auth_mu_data()
def get_overhead(self, direction):  # direction: True for client->server, False for server->client
return self.overhead
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def trapezoid_random_float(self, d):
if d == 0:
return random.random()
s = random.random()
a = 1 - d
return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d)
def trapezoid_random_int(self, max_val, d):
v = self.trapezoid_random_float(d)
return int(v * max_val)
def rnd_data_len(self, buf_size, full_buf_size):
if full_buf_size >= self.server_info.buffer_size:
return 0
tcp_mss = self.server_info.tcp_mss
rev_len = tcp_mss - buf_size - 9
if rev_len == 0:
return 0
if rev_len < 0:
if rev_len > -tcp_mss:
return self.trapezoid_random_int(rev_len + tcp_mss, -0.3)
return common.ord(os.urandom(1)[0]) % 32
if buf_size > 900:
return struct.unpack('>H', os.urandom(2))[0] % rev_len
return self.trapezoid_random_int(rev_len, -0.3)
def rnd_data(self, buf_size, full_buf_size):
data_len = self.rnd_data_len(buf_size, full_buf_size)
if data_len < 128:
return common.chr(data_len + 1) + os.urandom(data_len)
return common.chr(255) + struct.pack('<H', data_len + 1) + os.urandom(data_len - 2)
def pack_data(self, buf, full_buf_size):
data = self.rnd_data(len(buf), full_buf_size) + buf
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
mac = hmac.new(mac_key, struct.pack('<H', data_len), self.hashfunc).digest()[:2]
data = struct.pack('<H', data_len) + mac + data
data += hmac.new(mac_key, data, self.hashfunc).digest()[:4]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_auth_data(self, auth_data, buf):
if len(buf) == 0:
return b''
if len(buf) > 400:
rnd_len = struct.unpack('<H', os.urandom(2))[0] % 512
else:
rnd_len = struct.unpack('<H', os.urandom(2))[0] % 1024
data = auth_data
data_len = 7 + 4 + 16 + 4 + len(buf) + rnd_len + 4
data = data + struct.pack('<H', data_len) + struct.pack('<H', rnd_len)
mac_key = self.server_info.iv + self.server_info.key
uid = os.urandom(4)
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = self.hashfunc(items[1]).digest()
uid = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_key = self.server_info.key
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc', b'\x00' * 16)
data = uid + encryptor.encrypt(data)[16:]
data += hmac.new(mac_key, data, self.hashfunc).digest()[:4]
check_head = os.urandom(1)
check_head += hmac.new(mac_key, check_head, self.hashfunc).digest()[:6]
data = check_head + data + os.urandom(rnd_len) + buf
data += hmac.new(self.user_key, data, self.hashfunc).digest()[:4]
return data
def auth_data(self):
utc_time = int(time.time()) & 0xFFFFFFFF
if self.server_info.data.connection_id > 0xFF000000:
self.server_info.data.local_client_id = b''
if not self.server_info.data.local_client_id:
self.server_info.data.local_client_id = os.urandom(4)
logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),))
self.server_info.data.connection_id = struct.unpack('<I', os.urandom(4))[0] & 0xFFFFFF
self.server_info.data.connection_id += 1
return b''.join([struct.pack('<I', utc_time),
self.server_info.data.local_client_id,
struct.pack('<I', self.server_info.data.connection_id)])
def client_pre_encrypt(self, buf):
ret = b''
ogn_data_len = len(buf)
if not self.has_sent_header:
head_size = self.get_head_size(buf, 30)
datalen = min(len(buf), random.randint(0, 31) + head_size)
ret += self.pack_auth_data(self.auth_data(), buf[:datalen])
buf = buf[datalen:]
self.has_sent_header = True
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len], ogn_data_len)
buf = buf[self.unit_len:]
ret += self.pack_data(buf, ogn_data_len)
self.last_rnd_len = ogn_data_len
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
mac = hmac.new(mac_key, self.recv_buf[:2], self.hashfunc).digest()[:2]
if mac != self.recv_buf[2:4]:
raise Exception('client_post_decrypt data incorrect mac')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length > len(self.recv_buf):
break
if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
return out_buf
def server_pre_encrypt(self, buf):
if self.raw_trans:
return buf
ret = b''
ogn_data_len = len(buf)
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len], ogn_data_len)
buf = buf[self.unit_len:]
ret += self.pack_data(buf, ogn_data_len)
self.last_rnd_len = ogn_data_len
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
sendback = False
if not self.has_recv_header:
if len(self.recv_buf) >= 7 or len(self.recv_buf) in [2, 3]:
recv_len = min(len(self.recv_buf), 7)
mac_key = self.server_info.recv_iv + self.server_info.key
sha1data = hmac.new(mac_key, self.recv_buf[:1], self.hashfunc).digest()[:recv_len - 1]
if sha1data != self.recv_buf[1:recv_len]:
return self.not_match_return(self.recv_buf)
if len(self.recv_buf) < 31:
return (b'', False)
sha1data = hmac.new(mac_key, self.recv_buf[7:27], self.hashfunc).digest()[:4]
if sha1data != self.recv_buf[27:31]:
logging.error('%s data incorrect auth HMAC-SHA1 from %s:%d, data %s' % (self.no_compatible_method, self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf)))
if len(self.recv_buf) < 31 + self.extra_wait_size:
return (b'', False)
return self.not_match_return(self.recv_buf)
uid = self.recv_buf[7:11]
if uid in self.server_info.users:
self.user_id = uid
self.user_key = self.hashfunc(self.server_info.users[uid]).digest()
self.server_info.update_user_func(uid)
else:
if not self.server_info.users:
self.user_key = self.server_info.key
else:
self.user_key = self.server_info.recv_iv
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc')
head = encryptor.decrypt(b'\x00' * 16 + self.recv_buf[11:27] + b'\x00')  # feed one extra byte, or the cipher returns nothing for the final block
length = struct.unpack('<H', head[12:14])[0]
if len(self.recv_buf) < length:
return (b'', False)
utc_time = struct.unpack('<I', head[:4])[0]
client_id = struct.unpack('<I', head[4:8])[0]
connection_id = struct.unpack('<I', head[8:12])[0]
rnd_len = struct.unpack('<H', head[14:16])[0]
if hmac.new(self.user_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
return self.not_match_return(self.recv_buf)
time_dif = common.int32(utc_time - (int(time.time()) & 0xffffffff))
if time_dif < -self.max_time_dif or time_dif > self.max_time_dif:
logging.info('%s: wrong timestamp, time_dif %d, data %s' % (self.no_compatible_method, time_dif, binascii.hexlify(head)))
return self.not_match_return(self.recv_buf)
elif self.server_info.data.insert(self.user_id, client_id, connection_id):
self.has_recv_header = True
out_buf = self.recv_buf[31 + rnd_len:length - 4]
self.client_id = client_id
self.connection_id = connection_id
else:
logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf)))
return self.not_match_return(self.recv_buf)
self.recv_buf = self.recv_buf[length:]
self.has_recv_header = True
sendback = True
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
mac = hmac.new(mac_key, self.recv_buf[:2], self.hashfunc).digest()[:2]
if mac != self.recv_buf[2:4]:
self.raw_trans = True
logging.info(self.no_compatible_method + ': wrong crc')
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
logging.info(self.no_compatible_method + ': over size')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
if pos == length - 4:
sendback = True
if out_buf:
self.server_info.data.update(self.user_id, self.client_id, self.connection_id)
return (out_buf, sendback)
def client_udp_pre_encrypt(self, buf):
if self.user_key is None:
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = self.hashfunc(items[1]).digest()
self.user_id = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_id = os.urandom(4)
self.user_key = self.server_info.key
buf += self.user_id
return buf + hmac.new(self.user_key, buf, self.hashfunc).digest()[:4]
def client_udp_post_decrypt(self, buf):
user_key = self.server_info.key
if hmac.new(user_key, buf[:-4], self.hashfunc).digest()[:4] != buf[-4:]:
return b''
return buf[:-4]
def server_udp_pre_encrypt(self, buf, uid):
user_key = self.server_info.key
return buf + hmac.new(user_key, buf, self.hashfunc).digest()[:4]
def server_udp_post_decrypt(self, buf):
uid = buf[-8:-4]
if uid in self.server_info.users:
user_key = self.hashfunc(self.server_info.users[uid]).digest()
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-4], self.hashfunc).digest()[:4] != buf[-4:]:
return (b'', None)
return (buf[:-8], uid)
def create_auth_aes128_sha1(method):
return auth_aes128_sha1(method, hashlib.sha1) | null |
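# UDP framing used by the *_udp_* helpers above (sizes in bytes):
#   client -> server: [payload][4 user_id][4 HMAC(user_key, payload+uid)[:4]]
#   server -> client: [payload][4 HMAC(key, payload)[:4]]
# server_udp_post_decrypt resolves user_key from the trailing uid before
# verifying the checksum.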
161,303 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False | null |
161,304 | import os, sys
FG_YELLOW = 33
def reset():
return '\033[{0}m'.format(RESET)
def deco(s: str, fg_color=0x111, bg_color=None, const_deco='', **kwargs):
'''
Params:
fg_color, bg_color: int or 3-tuple
if given as a three digit hex integer, each digit
represents red, green or blue respectively, either 0 or 1.
for example:
0x100 red, 0x010 green, 0x001 blue
0x110 yellow, 0x101 magenta.
if given a value in the list `ALL_COLORS`, it is used
directly.
kwargs: all kwargs are of boolean type.
reset: same as reset() if True, else no effect.
bold: set bold.
underscore: set underscore (monochrome display adapters only).
blink: set blink.
reverse: swap background and foreground colors.
concealed: conceal the text.
'''
if not isinstance(s, str):
s = str(s)
if not isinstance(const_deco, str):
const_deco = str(const_deco)
if const_deco:
return const_deco + s
fg = _parse_color_param(fg_color, FG_COLOR_START)
bg = _parse_color_param(bg_color, BG_COLOR_START) if bg_color is not None else ''
TEXT_ATTR_MAP = {
'reset': RESET,
'bold': BOLD,
'underscore': UNDERSCORE,
'blink': BLINK,
'reverse': REVERSE,
'concealed': CONCEALED,
}
attr = []
for a in kwargs:
if kwargs.get(a, False) and a in TEXT_ATTR_MAP:
attr.append(TEXT_ATTR_MAP[a])
attr = ';'.join(attr)
return '\033[' + ';'.join([x for x in (fg, bg, attr) if x]) + 'm' + s
def warning(wrnmsg :str):
wrnmsg = 'Warning: ' + wrnmsg
print(deco(wrnmsg, FG_YELLOW, bold=True), reset()) | null |
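# Usage sketch (assumes the elided RESET/BOLD/FG_COLOR_START constants and
# the _parse_color_param helper are defined alongside FG_YELLOW):
#   warning('low disk space')          # bold yellow "Warning: low disk space"
#   print(deco('ok', 0x010), reset())  # green text, then reset attributes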
161,305 | import os, sys
if sys.platform.lower() == "win32":
os.system('color')
FG_RED = 31
def reset():
return '\033[{0}m'.format(RESET)
def deco(s: str, fg_color=0x111, bg_color=None, const_deco='', **kwargs):
'''
Params:
fg_color, bg_color: int or 3-tuple
if given as a three digit hex integer, each digit
represents red, green or blue respectively, either 0 or 1.
for example:
0x100 red, 0x010 green, 0x001 blue
0x110 yellow, 0x101 magenta.
if given a value in the list `ALL_COLORS`, it is used
directly.
kwargs: all kwargs are of boolean type.
reset: same as reset() if True, else no effect.
bold: set bold.
underscore: set underscore (monochrome display adapters only).
blink: set blink.
reverse: swap background and foreground colors.
concealed: conceal the text.
'''
if not isinstance(s, str):
s = str(s)
if not isinstance(const_deco, str):
const_deco = str(const_deco)
if const_deco:
return const_deco + s
fg = _parse_color_param(fg_color, FG_COLOR_START)
bg = _parse_color_param(bg_color, BG_COLOR_START) if bg_color is not None else ''
TEXT_ATTR_MAP = {
'reset': RESET,
'bold': BOLD,
'underscore': UNDERSCORE,
'blink': BLINK,
'reverse': REVERSE,
'concealed': CONCEALED,
}
attr = []
for a in kwargs:
if kwargs.get(a, False) and a in TEXT_ATTR_MAP:
attr.append(TEXT_ATTR_MAP[a])
attr = ';'.join(attr)
return '\033[' + ';'.join([x for x in (fg, bg, attr) if x]) + 'm' + s
def error(errmsg :str):
errmsg = 'Error: ' + errmsg
print(deco(errmsg, FG_RED, bold=True), reset())
sys.exit() | null |
161,306 | import os
import torch
from random import randint
from utils.loss_utils import l1_loss, ssim
from gaussian_renderer import render, network_gui
import sys
from scene import Scene, GaussianModel
from utils.general_utils import safe_state
import uuid
from tqdm import tqdm
from utils.image_utils import psnr
from argparse import ArgumentParser, Namespace
from arguments import ModelParams, PipelineParams, OptimizationParams
# imports needed by render() and Scene below (module paths as in the
# 3D Gaussian Splatting reference repository)
import math
import json
import random
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from scene.dataset_readers import sceneLoadTypeCallbacks
from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
from utils.sh_utils import eval_sh
from utils.system_utils import searchForMaxIteration
try:
from torch.utils.tensorboard import SummaryWriter
TENSORBOARD_FOUND = True
except ImportError:
TENSORBOARD_FOUND = False
def prepare_output_and_logger(args):
if not args.model_path:
if os.getenv('OAR_JOB_ID'):
unique_str=os.getenv('OAR_JOB_ID')
else:
unique_str = str(uuid.uuid4())
args.model_path = os.path.join("./output/", unique_str[0:10])
# Set up output folder
print("Output folder: {}".format(args.model_path))
os.makedirs(args.model_path, exist_ok = True)
with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f:
cfg_log_f.write(str(Namespace(**vars(args))))
# Create Tensorboard writer
tb_writer = None
if TENSORBOARD_FOUND:
tb_writer = SummaryWriter(args.model_path)
else:
print("Tensorboard not available: not logging progress")
return tb_writer
def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs):
if tb_writer:
tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration)
tb_writer.add_scalar('iter_time', elapsed, iteration)
# Report test and samples of training set
if iteration in testing_iterations:
torch.cuda.empty_cache()
validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()},
{'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]})
for config in validation_configs:
if config['cameras'] and len(config['cameras']) > 0:
l1_test = 0.0
psnr_test = 0.0
for idx, viewpoint in enumerate(config['cameras']):
image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0)
gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0)
if tb_writer and (idx < 5):
tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
if iteration == testing_iterations[0]:
tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
l1_test += l1_loss(image, gt_image).mean().double()
psnr_test += psnr(image, gt_image).mean().double()
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
if tb_writer:
tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
if tb_writer:
tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration)
torch.cuda.empty_cache()
def l1_loss(network_output, gt):
return torch.abs((network_output - gt)).mean()
def ssim(img1, img2, window_size=11, size_average=True):
channel = img1.size(-3)
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
"""
Render the scene.
Background tensor (bg_color) must be on GPU!
"""
# Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
try:
screenspace_points.retain_grad()
except:
pass
# Set up rasterization configuration
tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
raster_settings = GaussianRasterizationSettings(
image_height=int(viewpoint_camera.image_height),
image_width=int(viewpoint_camera.image_width),
tanfovx=tanfovx,
tanfovy=tanfovy,
bg=bg_color,
scale_modifier=scaling_modifier,
viewmatrix=viewpoint_camera.world_view_transform,
projmatrix=viewpoint_camera.full_proj_transform,
sh_degree=pc.active_sh_degree,
campos=viewpoint_camera.camera_center,
prefiltered=False,
debug=pipe.debug
)
rasterizer = GaussianRasterizer(raster_settings=raster_settings)
means3D = pc.get_xyz
means2D = screenspace_points
opacity = pc.get_opacity
# If precomputed 3d covariance is provided, use it. If not, then it will be computed from
# scaling / rotation by the rasterizer.
scales = None
rotations = None
cov3D_precomp = None
if pipe.compute_cov3D_python:
cov3D_precomp = pc.get_covariance(scaling_modifier)
else:
scales = pc.get_scaling
rotations = pc.get_rotation
# If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
# from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
shs = None
colors_precomp = None
if override_color is None:
if pipe.convert_SHs_python:
shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
else:
shs = pc.get_features
else:
colors_precomp = override_color
# Rasterize visible Gaussians to image, obtain their radii (on screen).
rendered_image, radii = rasterizer(
means3D = means3D,
means2D = means2D,
shs = shs,
colors_precomp = colors_precomp,
opacities = opacity,
scales = scales,
rotations = rotations,
cov3D_precomp = cov3D_precomp)
# Those Gaussians that were frustum culled or had a radius of 0 were not visible.
# They will be excluded from value updates used in the splitting criteria.
return {"render": rendered_image,
"viewspace_points": screenspace_points,
"visibility_filter" : radii > 0,
"radii": radii}
class Scene:
gaussians : GaussianModel
def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
"""b
:param path: Path to colmap scene main folder.
"""
self.model_path = args.model_path
self.loaded_iter = None
self.gaussians = gaussians
if load_iteration:
if load_iteration == -1:
self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
else:
self.loaded_iter = load_iteration
print("Loading trained model at iteration {}".format(self.loaded_iter))
self.train_cameras = {}
self.test_cameras = {}
if os.path.exists(os.path.join(args.source_path, "sparse")):
scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
print("Found transforms_train.json file, assuming Blender data set!")
scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
else:
assert False, "Could not recognize scene type!"
if not self.loaded_iter:
with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
dest_file.write(src_file.read())
json_cams = []
camlist = []
if scene_info.test_cameras:
camlist.extend(scene_info.test_cameras)
if scene_info.train_cameras:
camlist.extend(scene_info.train_cameras)
for id, cam in enumerate(camlist):
json_cams.append(camera_to_JSON(id, cam))
with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
json.dump(json_cams, file)
if shuffle:
random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
self.cameras_extent = scene_info.nerf_normalization["radius"]
for resolution_scale in resolution_scales:
print("Loading Training Cameras")
self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
print("Loading Test Cameras")
self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
if self.loaded_iter:
self.gaussians.load_ply(os.path.join(self.model_path,
"point_cloud",
"iteration_" + str(self.loaded_iter),
"point_cloud.ply"))
else:
self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
def save(self, iteration):
point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration))
self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
def getTrainCameras(self, scale=1.0):
return self.train_cameras[scale]
def getTestCameras(self, scale=1.0):
return self.test_cameras[scale]
def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):
first_iter = 0
tb_writer = prepare_output_and_logger(dataset)
gaussians = GaussianModel(dataset.sh_degree)
scene = Scene(dataset, gaussians)
gaussians.training_setup(opt)
if checkpoint:
(model_params, first_iter) = torch.load(checkpoint)
gaussians.restore(model_params, opt)
bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
iter_start = torch.cuda.Event(enable_timing = True)
iter_end = torch.cuda.Event(enable_timing = True)
viewpoint_stack = None
ema_loss_for_log = 0.0
progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
first_iter += 1
for iteration in range(first_iter, opt.iterations + 1):
if network_gui.conn is None:
network_gui.try_connect()
while network_gui.conn is not None:
try:
net_image_bytes = None
custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifier = network_gui.receive()
if custom_cam is not None:
net_image = render(custom_cam, gaussians, pipe, background, scaling_modifier)["render"]
net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
network_gui.send(net_image_bytes, dataset.source_path)
if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
break
except Exception as e:
network_gui.conn = None
iter_start.record()
gaussians.update_learning_rate(iteration)
# Every 1000 its we increase the levels of SH up to a maximum degree
if iteration % 1000 == 0:
gaussians.oneupSHdegree()
# Pick a random Camera
if not viewpoint_stack:
viewpoint_stack = scene.getTrainCameras().copy()
viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
# Render
if (iteration - 1) == debug_from:
pipe.debug = True
bg = torch.rand((3), device="cuda") if opt.random_background else background
render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
# Loss
gt_image = viewpoint_cam.original_image.cuda()
Ll1 = l1_loss(image, gt_image)
loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
loss.backward()
iter_end.record()
with torch.no_grad():
# Progress bar
ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
if iteration % 10 == 0:
progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
progress_bar.update(10)
if iteration == opt.iterations:
progress_bar.close()
# Log and save
training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background))
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration)
# Densification
if iteration < opt.densify_until_iter:
# Keep track of max radii in image-space for pruning
gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
size_threshold = 20 if iteration > opt.opacity_reset_interval else None
gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
gaussians.reset_opacity()
# Optimizer step
if iteration < opt.iterations:
gaussians.optimizer.step()
gaussians.optimizer.zero_grad(set_to_none = True)
if (iteration in checkpoint_iterations):
print("\n[ITER {}] Saving Checkpoint".format(iteration))
torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth") | null |
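# Hedged sketch of a minimal entry point for training() above; the reference
# script also calls safe_state(), network_gui.init(ip, port), and exposes the
# iteration lists as CLI flags (the literals here are assumptions):
if __name__ == "__main__":
    parser = ArgumentParser(description="Training script parameters")
    lp, op, pp = ModelParams(parser), OptimizationParams(parser), PipelineParams(parser)
    args = parser.parse_args(sys.argv[1:])
    training(lp.extract(args), op.extract(args), pp.extract(args),
             [7000, 30000], [7000, 30000], [], None, -1)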
161,307 | from collections import OrderedDict
import torch
def normalize_activation(x, eps=1e-10):
norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
return x / (norm_factor + eps) | null |
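# Example: LPIPS compares unit-length feature vectors at every spatial
# location; with x = torch.ones(1, 4, 2, 2), sqrt(sum over C of x**2) == 2,
# so normalize_activation(x) is ~0.5 everywhere.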
161,308 | from collections import OrderedDict
import torch
def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
# build url
url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
+ f'master/lpips/weights/v{version}/{net_type}.pth'
# download
old_state_dict = torch.hub.load_state_dict_from_url(
url, progress=True,
map_location=None if torch.cuda.is_available() else torch.device('cpu')
)
# rename keys
new_state_dict = OrderedDict()
for key, val in old_state_dict.items():
new_key = key
new_key = new_key.replace('lin', '')
new_key = new_key.replace('model.', '')
new_state_dict[new_key] = val
return new_state_dict | null |
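# Usage sketch: the renaming turns checkpoint keys such as
# 'lin0.model.1.weight' into '0.1.weight' so they load into plain layers.
#   sd = get_state_dict('alex', '0.1')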
161,309 | from typing import Sequence
from itertools import chain
import torch
import torch.nn as nn
from torchvision import models
from .utils import normalize_activation
class SqueezeNet(BaseNet):
def __init__(self):
super(SqueezeNet, self).__init__()
self.layers = models.squeezenet1_1(weights=models.SqueezeNet1_1_Weights.IMAGENET1K_V1).features
self.target_layers = [2, 5, 8, 10, 11, 12, 13]
self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
self.set_requires_grad(False)
class AlexNet(BaseNet):
def __init__(self):
super(AlexNet, self).__init__()
self.layers = models.alexnet(weights=models.AlexNet_Weights.IMAGENET1K_V1).features
self.target_layers = [2, 5, 8, 10, 12]
self.n_channels_list = [64, 192, 384, 256, 256]
self.set_requires_grad(False)
class VGG16(BaseNet):
def __init__(self):
super(VGG16, self).__init__()
self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features
self.target_layers = [4, 9, 16, 23, 30]
self.n_channels_list = [64, 128, 256, 512, 512]
self.set_requires_grad(False)
def get_network(net_type: str):
if net_type == 'alex':
return AlexNet()
elif net_type == 'squeeze':
return SqueezeNet()
elif net_type == 'vgg':
return VGG16()
else:
raise NotImplementedError('choose net_type from [alex, squeeze, vgg].') | null |
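# Example: net = get_network('vgg') yields a frozen VGG16 feature extractor;
# activations collected at net.target_layers feed the per-layer LPIPS
# distances, and n_channels_list sizes the matching 1x1 linear heads.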
161,311 | import torch
from scene import Scene
import os
from tqdm import tqdm
from os import makedirs
from gaussian_renderer import render
import torchvision
from utils.general_utils import safe_state
from argparse import ArgumentParser
from arguments import ModelParams, PipelineParams, get_combined_args
from gaussian_renderer import GaussianModel
def render_set(model_path, name, iteration, views, gaussians, pipeline, background):
render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders")
gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt")
makedirs(render_path, exist_ok=True)
makedirs(gts_path, exist_ok=True)
for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
rendering = render(view, gaussians, pipeline, background)["render"]
gt = view.original_image[0:3, :, :]
torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))
class Scene:
gaussians : GaussianModel
def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
"""b
:param path: Path to colmap scene main folder.
"""
self.model_path = args.model_path
self.loaded_iter = None
self.gaussians = gaussians
if load_iteration:
if load_iteration == -1:
self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
else:
self.loaded_iter = load_iteration
print("Loading trained model at iteration {}".format(self.loaded_iter))
self.train_cameras = {}
self.test_cameras = {}
if os.path.exists(os.path.join(args.source_path, "sparse")):
scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
print("Found transforms_train.json file, assuming Blender data set!")
scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
else:
assert False, "Could not recognize scene type!"
if not self.loaded_iter:
with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
dest_file.write(src_file.read())
json_cams = []
camlist = []
if scene_info.test_cameras:
camlist.extend(scene_info.test_cameras)
if scene_info.train_cameras:
camlist.extend(scene_info.train_cameras)
for id, cam in enumerate(camlist):
json_cams.append(camera_to_JSON(id, cam))
with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
json.dump(json_cams, file)
if shuffle:
random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
self.cameras_extent = scene_info.nerf_normalization["radius"]
for resolution_scale in resolution_scales:
print("Loading Training Cameras")
self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
print("Loading Test Cameras")
self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
if self.loaded_iter:
self.gaussians.load_ply(os.path.join(self.model_path,
"point_cloud",
"iteration_" + str(self.loaded_iter),
"point_cloud.ply"))
else:
self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
def save(self, iteration):
point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration))
self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
def getTrainCameras(self, scale=1.0):
return self.train_cameras[scale]
def getTestCameras(self, scale=1.0):
return self.test_cameras[scale]
class ModelParams(ParamGroup):
def __init__(self, parser, sentinel=False):
self.sh_degree = 3
self._source_path = ""
self._model_path = ""
self._images = "images"
self._resolution = -1
self._white_background = False
self.data_device = "cuda"
self.eval = False
super().__init__(parser, "Loading Parameters", sentinel)
def extract(self, args):
g = super().extract(args)
g.source_path = os.path.abspath(g.source_path)
return g
class PipelineParams(ParamGroup):
def __init__(self, parser):
self.convert_SHs_python = False
self.compute_cov3D_python = False
self.debug = False
super().__init__(parser, "Pipeline Parameters")
def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool):
with torch.no_grad():
gaussians = GaussianModel(dataset.sh_degree)
scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
if not skip_train:
render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background)
if not skip_test:
render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background) | null |
161,314 | import os
import sys
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
import numpy as np
import json
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# overwrite the same console line with a running progress counter
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
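# Usage sketch: xyz is a float (N, 3) array, rgb an integer (N, 3) array in
# 0..255; normals are written as zeros to satisfy the dtype above.
#   storePly('points3d.ply', xyz, rgb)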
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
num_points = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
num_points += 1
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
count = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
xyzs[count] = xyz
rgbs[count] = rgb
errors[count] = error
count += 1
return xyzs, rgbs, errors
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader supports other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images is None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info | null |
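A hedged usage sketch for `readColmapSceneInfo` above; the dataset path is hypothetical and assumes the standard COLMAP `sparse/0` layout:
# Hypothetical path; eval=True holds out every llffhold-th view for testing.
scene_info = readColmapSceneInfo("/data/scenes/garden", images=None, eval=True, llffhold=8)
print(len(scene_info.train_cameras), len(scene_info.test_cameras))
print(scene_info.nerf_normalization["radius"])  # camera-extent radius used for scene scaling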
161,315 | import os
import sys
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
import numpy as np
import json
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.uint8), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
FovY = fovy
FovX = fovx
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1]))
return cam_infos
C0 = 0.28209479177387814  # zeroth spherical-harmonic basis constant, 1 / (2 * sqrt(pi))
def SH2RGB(sh):
return sh * C0 + 0.5
def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info | null |
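The convention flip in `readCamerasFromTransforms` (`c2w[:3, 1:3] *= -1`) is the same as right-multiplying the camera-to-world matrix by diag(1, -1, -1, 1); a small numeric check of that equivalence:
rng = np.random.default_rng(0)
c2w = np.eye(4)
c2w[:3, :3] = np.linalg.qr(rng.normal(size=(3, 3)))[0]  # arbitrary orthonormal rotation
c2w[:3, 3] = rng.normal(size=3)
flipped = c2w.copy()
flipped[:3, 1:3] *= -1                                   # OpenGL/Blender -> COLMAP axes
assert np.allclose(flipped, c2w @ np.diag([1.0, -1.0, -1.0, 1.0]))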
161,316 | from scene.cameras import Camera
import numpy as np
from utils.general_utils import PILtoTorch
from utils.graphics_utils import fov2focal
WARNED = False  # module-level flag so the large-image warning is printed only once
def loadCam(args, id, cam_info, resolution_scale):
orig_w, orig_h = cam_info.image.size
if args.resolution in [1, 2, 4, 8]:
resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
else: # should be a type that converts to float
if args.resolution == -1:
if orig_w > 1600:
global WARNED
if not WARNED:
print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
"If this is not desired, please explicitly specify '--resolution/-r' as 1")
WARNED = True
global_down = orig_w / 1600
else:
global_down = 1
else:
global_down = orig_w / args.resolution
scale = float(global_down) * float(resolution_scale)
resolution = (int(orig_w / scale), int(orig_h / scale))
resized_image_rgb = PILtoTorch(cam_info.image, resolution)
gt_image = resized_image_rgb[:3, ...]
loaded_mask = None
if resized_image_rgb.shape[1] == 4:
loaded_mask = resized_image_rgb[3:4, ...]
return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
FoVx=cam_info.FovX, FoVy=cam_info.FovY,
image=gt_image, gt_alpha_mask=loaded_mask,
image_name=cam_info.image_name, uid=id, data_device=args.data_device)
def cameraList_from_camInfos(cam_infos, resolution_scale, args):
camera_list = []
for id, c in enumerate(cam_infos):
camera_list.append(loadCam(args, id, c, resolution_scale))
return camera_list | null |
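A hedged sketch of driving `cameraList_from_camInfos`: `loadCam` only reads `resolution` and `data_device` from `args`, so a stand-in namespace suffices (the real object comes from the project's argument parser, and `scene_info` is assumed from a loader such as the COLMAP reader above):
from types import SimpleNamespace
args = SimpleNamespace(resolution=-1, data_device="cuda")  # hypothetical stand-in
train_cameras = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale=1.0, args=args)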
161,317 | from scene.cameras import Camera
import math
import numpy as np
import torch
import torch.nn as nn
from utils.general_utils import PILtoTorch
from utils.graphics_utils import fov2focal, getWorld2View2, getProjectionMatrix
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
self.camera_center = self.world_view_transform.inverse()[3, :3]
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
def camera_to_JSON(id, camera : Camera):
Rt = np.zeros((4, 4))
Rt[:3, :3] = camera.R.transpose()
Rt[:3, 3] = camera.T
Rt[3, 3] = 1.0
W2C = np.linalg.inv(Rt)
pos = W2C[:3, 3]
rot = W2C[:3, :3]
serializable_array_2d = [x.tolist() for x in rot]
camera_entry = {
'id' : id,
'img_name' : camera.image_name,
'width' : camera.width,
'height' : camera.height,
'position': pos.tolist(),
'rotation': serializable_array_2d,
'fy' : fov2focal(camera.FovY, camera.height),
'fx' : fov2focal(camera.FovX, camera.width)
}
return camera_entry | null |
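`fov2focal` above inverts the `focal2fov` helper used by the scene loaders; a quick round-trip check (the `focal2fov` body is restated here from utils.graphics_utils, and the numbers are arbitrary):
def focal2fov(focal, pixels):
    return 2 * math.atan(pixels / (2 * focal))
fov = focal2fov(1111.0, 800)
assert abs(fov2focal(fov, 800) - 1111.0) < 1e-9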
161,318 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
from math import exp
def l2_loss(network_output, gt):
return ((network_output - gt) ** 2).mean() | null |
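A one-line sanity check of `l2_loss` (mean of squared errors over all elements):
out, gt = torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.0, 2.0, 5.0])
assert torch.isclose(l2_loss(out, gt), torch.tensor(4.0 / 3.0))  # mean of [0, 0, 4]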
161,323 | import torch
import math
import numpy as np
from typing import NamedTuple
def geom_transform_points(points, transf_matrix):
P, _ = points.shape
ones = torch.ones(P, 1, dtype=points.dtype, device=points.device)
points_hom = torch.cat([points, ones], dim=1)
points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0))
denom = points_out[..., 3:] + 0.0000001
return (points_out[..., :3] / denom).squeeze(dim=0) | null |
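`geom_transform_points` multiplies row-vector points, so the transform is expected transposed relative to the usual column-vector convention, and a homogeneous divide (with a small epsilon) follows; identity-transform check:
pts = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
assert torch.allclose(geom_transform_points(pts, torch.eye(4)), pts, atol=1e-5)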
161,324 | import torch
import math
import numpy as np
from typing import NamedTuple
def getWorld2View(R, t):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
return np.float32(Rt) | null |
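`getWorld2View` stores the world-to-camera matrix [R^T | t], so the camera center in world coordinates is -R t; numeric check:
rng = np.random.default_rng(1)
R = np.linalg.qr(rng.normal(size=(3, 3)))[0]  # arbitrary orthonormal matrix
t = rng.normal(size=3)
cam_center = np.linalg.inv(getWorld2View(R, t))[:3, 3]
assert np.allclose(cam_center, -R @ t, atol=1e-5)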
161,325 | import torch
import math
import numpy as np
from typing import NamedTuple
def getProjectionMatrix(znear, zfar, fovX, fovY):
tanHalfFovY = math.tan((fovY / 2))
tanHalfFovX = math.tan((fovX / 2))
top = tanHalfFovY * znear
bottom = -top
right = tanHalfFovX * znear
left = -right
P = torch.zeros(4, 4)
z_sign = 1.0
P[0, 0] = 2.0 * znear / (right - left)
P[1, 1] = 2.0 * znear / (top - bottom)
P[0, 2] = (right + left) / (right - left)
P[1, 2] = (top + bottom) / (top - bottom)
P[3, 2] = z_sign
P[2, 2] = z_sign * zfar / (zfar - znear)
P[2, 3] = -(zfar * znear) / (zfar - znear)
return P | null |
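Note this projection maps view-space depth into NDC depth [0, 1] (D3D-style) rather than OpenGL's [-1, 1]: after the perspective divide, z = znear lands at 0 and z = zfar at 1. Quick check with arbitrary FoV values:
P = getProjectionMatrix(znear=0.01, zfar=100.0, fovX=1.0, fovY=1.0)
for z, expected in [(0.01, 0.0), (100.0, 1.0)]:
    clip = P @ torch.tensor([0.0, 0.0, z, 1.0])
    assert abs(clip[2] / clip[3] - expected) < 1e-4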
161,326 | import torch
import sys
from datetime import datetime
import numpy as np
import random
def inverse_sigmoid(x):
return torch.log(x/(1-x)) | null |
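`inverse_sigmoid` is the logit function; round-trip check:
x = torch.tensor([0.1, 0.5, 0.9])
assert torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x)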
161,327 | import torch
import sys
from datetime import datetime
import numpy as np
import random
The provided code snippet includes necessary dependencies for implementing the `get_expon_lr_func` function. Write a Python function `def get_expon_lr_func( lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 )` to solve the following problem:
Copied from Plenoxels Continuous learning rate decay function. Adapted from JaxNeRF The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. :param conf: config subtree 'lr' or similar :param max_steps: int, the number of steps during optimization. :return HoF which takes step as input
Here is the function:
def get_expon_lr_func(
lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
):
"""
Copied from Plenoxels
Continuous learning rate decay function. Adapted from JaxNeRF
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
:param conf: config subtree 'lr' or similar
:param max_steps: int, the number of steps during optimization.
:return HoF which takes step as input
"""
def helper(step):
if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
# Disable this parameter
return 0.0
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
t = np.clip(step / max_steps, 0, 1)
log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
return delay_rate * log_lerp
return helper | Copied from Plenoxels Continuous learning rate decay function. Adapted from JaxNeRF The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. :param conf: config subtree 'lr' or similar :param max_steps: int, the number of steps during optimization. :return HoF which takes step as input |
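Because the schedule is log-linear, halfway through training the rate sits at the geometric mean of the endpoints; a short numeric sketch (the values are arbitrary):
lr_fn = get_expon_lr_func(lr_init=1.6e-4, lr_final=1.6e-6, max_steps=30_000)
assert abs(lr_fn(0) - 1.6e-4) < 1e-12
assert abs(lr_fn(30_000) - 1.6e-6) < 1e-12
assert abs(lr_fn(15_000) - np.sqrt(1.6e-4 * 1.6e-6)) < 1e-12  # geometric mean at midpoint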
161,328 | import torch
import sys
from datetime import datetime
import numpy as np
import random
def strip_lowerdiag(L):
uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
uncertainty[:, 0] = L[:, 0, 0]
uncertainty[:, 1] = L[:, 0, 1]
uncertainty[:, 2] = L[:, 0, 2]
uncertainty[:, 3] = L[:, 1, 1]
uncertainty[:, 4] = L[:, 1, 2]
uncertainty[:, 5] = L[:, 2, 2]
return uncertainty
def strip_symmetric(sym):
return strip_lowerdiag(sym) | null |
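Despite its name, `strip_lowerdiag` packs the six upper-triangular entries of a symmetric 3x3 matrix, which fully determine it; sketch (a CUDA device is assumed, since the helper allocates on "cuda"):
sigma = torch.tensor([[[1.0, 2.0, 3.0],
                       [2.0, 4.0, 5.0],
                       [3.0, 5.0, 6.0]]], device="cuda")
assert torch.equal(strip_symmetric(sigma)[0],
                   torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda"))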
161,329 | import torch
import sys
from datetime import datetime
import numpy as np
import random
def build_rotation(r):
norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
q = r / norm[:, None]
R = torch.zeros((q.size(0), 3, 3), device='cuda')
r = q[:, 0]
x = q[:, 1]
y = q[:, 2]
z = q[:, 3]
R[:, 0, 0] = 1 - 2 * (y*y + z*z)
R[:, 0, 1] = 2 * (x*y - r*z)
R[:, 0, 2] = 2 * (x*z + r*y)
R[:, 1, 0] = 2 * (x*y + r*z)
R[:, 1, 1] = 1 - 2 * (x*x + z*z)
R[:, 1, 2] = 2 * (y*z - r*x)
R[:, 2, 0] = 2 * (x*z - r*y)
R[:, 2, 1] = 2 * (y*z + r*x)
R[:, 2, 2] = 1 - 2 * (x*x + y*y)
return R
def build_scaling_rotation(s, r):
L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
R = build_rotation(r)
L[:,0,0] = s[:,0]
L[:,1,1] = s[:,1]
L[:,2,2] = s[:,2]
L = R @ L
return L | null |
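`build_rotation` takes quaternions in (w, x, y, z) order and normalizes them, and `build_scaling_rotation` returns the factor L with covariance Sigma = L L^T; minimal check (CUDA assumed, as in the helpers):
q = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device="cuda")  # identity quaternion (w, x, y, z)
assert torch.allclose(build_rotation(q)[0], torch.eye(3, device="cuda"))
s = torch.tensor([[1.0, 2.0, 3.0]], device="cuda")
L = build_scaling_rotation(s, q)
assert torch.allclose((L @ L.transpose(1, 2))[0].diagonal(), s[0] ** 2)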
161,330 | import torch
import sys
from datetime import datetime
import numpy as np
import random
def safe_state(silent):
old_f = sys.stdout
class F:
def __init__(self, silent):
self.silent = silent
def write(self, x):
if not self.silent:
if x.endswith("\n"):
old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
else:
old_f.write(x)
def flush(self):
old_f.flush()
sys.stdout = F(silent)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.set_device(torch.device("cuda:0")) | null |
161,331 | from pathlib import Path
import os
from PIL import Image
import torch
import torchvision.transforms.functional as tf
from utils.loss_utils import ssim
from lpipsPyTorch import lpips
import json
from tqdm import tqdm
from utils.image_utils import psnr
from argparse import ArgumentParser
# Dependency signatures only; the implementations live in the modules imported above.
def readImages(renders_dir, gt_dir): ...
def ssim(img1, img2, window_size=11, size_average=True): ...
def lpips(x: torch.Tensor, y: torch.Tensor, net_type: str = 'alex', version: str = '0.1'): ...
def psnr(img1, img2): ...
def evaluate(model_paths):
full_dict = {}
per_view_dict = {}
full_dict_polytopeonly = {}
per_view_dict_polytopeonly = {}
print("")
for scene_dir in model_paths:
try:
print("Scene:", scene_dir)
full_dict[scene_dir] = {}
per_view_dict[scene_dir] = {}
full_dict_polytopeonly[scene_dir] = {}
per_view_dict_polytopeonly[scene_dir] = {}
test_dir = Path(scene_dir) / "test"
for method in os.listdir(test_dir):
print("Method:", method)
full_dict[scene_dir][method] = {}
per_view_dict[scene_dir][method] = {}
full_dict_polytopeonly[scene_dir][method] = {}
per_view_dict_polytopeonly[scene_dir][method] = {}
method_dir = test_dir / method
gt_dir = method_dir/ "gt"
renders_dir = method_dir / "renders"
renders, gts, image_names = readImages(renders_dir, gt_dir)
ssims = []
psnrs = []
lpipss = []
for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
ssims.append(ssim(renders[idx], gts[idx]))
psnrs.append(psnr(renders[idx], gts[idx]))
lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg'))
print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean(), ".5"))
print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean(), ".5"))
print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean(), ".5"))
print("")
full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(),
"PSNR": torch.tensor(psnrs).mean().item(),
"LPIPS": torch.tensor(lpipss).mean().item()})
per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)},
"PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)},
"LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}})
with open(scene_dir + "/results.json", 'w') as fp:
json.dump(full_dict[scene_dir], fp, indent=True)
with open(scene_dir + "/per_view.json", 'w') as fp:
json.dump(per_view_dict[scene_dir], fp, indent=True)
except:
print("Unable to compute metrics for model", scene_dir) | null |
161,332 | import copy
import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.distributions as td
import torch.nn as nn
import torch.nn.functional as F
import wandb
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
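`soft_update` is Polyak averaging, theta_target <- (1 - tau) * theta_target + tau * theta_source; tau = 1.0 degenerates to a hard copy:
src, tgt = nn.Linear(2, 2), nn.Linear(2, 2)
soft_update(tgt, src, tau=1.0)  # hard copy
assert all(torch.equal(p_t, p_s) for p_t, p_s in zip(tgt.parameters(), src.parameters()))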
161,333 | import copy
import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.distributions as td
import torch.nn as nn
import torch.nn.functional as F
import wandb
def weights_init(m: nn.Module, init_w: float = 3e-3):
if isinstance(m, nn.Linear):
m.weight.data.uniform_(-init_w, init_w)
m.bias.data.uniform_(-init_w, init_w) | null |
161,334 | import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional
import wandb
from tqdm import trange
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
161,335 | import copy
import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal
from torch.optim.lr_scheduler import CosineAnnealingLR
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
161,336 | import copy
import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal
from torch.optim.lr_scheduler import CosineAnnealingLR
def asymmetric_l2_loss(u: torch.Tensor, tau: float) -> torch.Tensor:
return torch.mean(torch.abs(tau - (u < 0).float()) * u**2) | null |
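This is the expectile-regression loss from IQL: positive residuals are weighted by tau, negative ones by (1 - tau), so tau = 0.5 is just half the MSE; quick check:
u = torch.tensor([-1.0, 2.0])
assert torch.isclose(asymmetric_l2_loss(u, 0.5), 0.5 * (u ** 2).mean())
assert torch.isclose(asymmetric_l2_loss(u, 0.9), torch.tensor((0.1 * 1.0 + 0.9 * 4.0) / 2))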
161,337 | import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal, TanhTransform, TransformedDistribution
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
161,338 | import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal, TanhTransform, TransformedDistribution
def extend_and_repeat(tensor: torch.Tensor, dim: int, repeat: int) -> torch.Tensor:
return tensor.unsqueeze(dim).repeat_interleave(repeat, dim=dim) | null |
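`extend_and_repeat` inserts an axis and tiles along it, e.g. to score several sampled actions per observation; shape check:
x = torch.randn(4, 3)                       # [batch, obs_dim]
y = extend_and_repeat(x, dim=1, repeat=10)  # [batch, n_samples, obs_dim]
assert y.shape == (4, 10, 3) and torch.equal(y[:, 0], x)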
161,339 | import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal, TanhTransform, TransformedDistribution
def init_module_weights(module: torch.nn.Module, orthogonal_init: bool = False):
if isinstance(module, nn.Linear):
if orthogonal_init:
nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
nn.init.constant_(module.bias, 0.0)
else:
nn.init.xavier_uniform_(module.weight, gain=1e-2) | null |
161,343 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
The provided code snippet includes necessary dependencies for implementing the `pytorch_init` function. Write a Python function `def pytorch_init(fan_in: float) -> Callable` to solve the following problem:
Default init for PyTorch Linear layer weights and biases: https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
Here is the function:
def pytorch_init(fan_in: float) -> Callable:
"""
Default init for PyTorch Linear layer weights and biases:
https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
"""
bound = math.sqrt(1 / fan_in)
def _init(key: jax.random.PRNGKey, shape: Tuple, dtype: type) -> jax.Array:
return jax.random.uniform(
key, shape=shape, minval=-bound, maxval=bound, dtype=dtype
)
return _init | Default init for PyTorch Linear layer weights and biases: https://pytorch.org/docs/stable/generated/torch.nn.Linear.html |
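Usage sketch: handing the initializer to a Flax Dense layer reproduces PyTorch's default U(-1/sqrt(fan_in), 1/sqrt(fan_in)) for both kernel and bias (layer sizes are arbitrary):
layer = nn.Dense(256, kernel_init=pytorch_init(17), bias_init=pytorch_init(17))
params = layer.init(jax.random.PRNGKey(0), jnp.ones((1, 17)))
assert jnp.abs(params["params"]["kernel"]).max() <= 1 / math.sqrt(17)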
161,344 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def uniform_init(bound: float) -> Callable:
def _init(key: jax.random.PRNGKey, shape: Tuple, dtype: type) -> jax.Array:
return jax.random.uniform(
key, shape=shape, minval=-bound, maxval=bound, dtype=dtype
)
return _init | null |
161,345 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def identity(x: Any) -> Any:
return x | null |
161,346 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def qlearning_dataset(
env: gym.Env,
dataset: Dict = None,
terminate_on_end: bool = False,
**kwargs,
) -> Dict:
if dataset is None:
dataset = env.get_dataset(**kwargs)
N = dataset["rewards"].shape[0]
obs_ = []
next_obs_ = []
action_ = []
next_action_ = []
reward_ = []
done_ = []
# The newer version of the dataset adds an explicit
# timeouts field. Keep the old method for backwards compatibility.
use_timeouts = "timeouts" in dataset
episode_step = 0
for i in range(N - 1):
obs = dataset["observations"][i].astype(np.float32)
new_obs = dataset["observations"][i + 1].astype(np.float32)
action = dataset["actions"][i].astype(np.float32)
new_action = dataset["actions"][i + 1].astype(np.float32)
reward = dataset["rewards"][i].astype(np.float32)
done_bool = bool(dataset["terminals"][i])
if use_timeouts:
final_timestep = dataset["timeouts"][i]
else:
final_timestep = episode_step == env._max_episode_steps - 1
if (not terminate_on_end) and final_timestep:
# Skip this transition
episode_step = 0
continue
if done_bool or final_timestep:
episode_step = 0
obs_.append(obs)
next_obs_.append(new_obs)
action_.append(action)
next_action_.append(new_action)
reward_.append(reward)
done_.append(done_bool)
episode_step += 1
return {
"observations": np.array(obs_),
"actions": np.array(action_),
"next_observations": np.array(next_obs_),
"next_actions": np.array(next_action_),
"rewards": np.array(reward_),
"terminals": np.array(done_),
} | null |
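Hedged usage sketch (requires d4rl data to be available; the environment name is only an example). Note the extra `next_actions` key, which SARSA-style updates such as ReBRAC's consume:
env = gym.make("halfcheetah-medium-v2")  # example env; triggers a dataset download
data = qlearning_dataset(env)
assert data["actions"].shape == data["next_actions"].shape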
161,347 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def compute_mean_std(states: jax.Array, eps: float) -> Tuple[jax.Array, jax.Array]:
mean = states.mean(0)
std = states.std(0) + eps
return mean, std | null |
161,348 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def normalize_states(states: jax.Array, mean: jax.Array, std: jax.Array) -> jax.Array:
return (states - mean) / std | null |
161,349 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def normalize(
arr: jax.Array, mean: jax.Array, std: jax.Array, eps: float = 1e-8
) -> jax.Array:
return (arr - mean) / (std + eps) | null |
161,350 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def make_env(env_name: str, seed: int) -> gym.Env:
env = gym.make(env_name)
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
return env | null |
161,351 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def wrap_env(
env: gym.Env,
state_mean: Union[np.ndarray, float] = 0.0,
state_std: Union[np.ndarray, float] = 1.0,
reward_scale: float = 1.0,
) -> gym.Env:
# PEP 8: E731 do not assign a lambda expression, use a def
def normalize_state(state: np.ndarray) -> np.ndarray:
return (
state - state_mean
) / state_std # epsilon should be already added in std.
def scale_reward(reward: float) -> float:
# Please be careful, here reward is multiplied by scale!
return reward_scale * reward
env = gym.wrappers.TransformObservation(env, normalize_state)
if reward_scale != 1.0:
env = gym.wrappers.TransformReward(env, scale_reward)
return env | null |
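Typical composition of the helpers above, reusing `env` and `data` from the `qlearning_dataset` sketch earlier: compute normalization statistics from the dataset, then wrap the evaluation env so observations match the training distribution:
state_mean, state_std = compute_mean_std(jnp.asarray(data["observations"]), eps=1e-3)
eval_env = wrap_env(env, state_mean=np.asarray(state_mean), state_std=np.asarray(state_std))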
161,352 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def evaluate(
env: gym.Env,
params: jax.Array,
action_fn: Callable,
num_episodes: int,
seed: int,
) -> np.ndarray:
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
returns = []
for _ in trange(num_episodes, desc="Eval", leave=False):
obs, done = env.reset(), False
total_reward = 0.0
while not done:
action = np.asarray(jax.device_get(action_fn(params, obs)))
obs, reward, done, _ = env.step(action)
total_reward += reward
returns.append(total_reward)
return np.array(returns) | null |
161,353 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
@chex.dataclass(frozen=True)  # dataclass decorator (as in CORL) provides the .replace() used below
class Metrics:
accumulators: Dict[str, Tuple[jax.Array, jax.Array]]
@staticmethod
def create(metrics: Sequence[str]) -> "Metrics":
init_metrics = {key: (jnp.array([0.0]), jnp.array([0.0])) for key in metrics}
return Metrics(accumulators=init_metrics)
def update(self, updates: Dict[str, jax.Array]) -> "Metrics":
new_accumulators = deepcopy(self.accumulators)
for key, value in updates.items():
acc, steps = new_accumulators[key]
new_accumulators[key] = (acc + value, steps + 1)
return self.replace(accumulators=new_accumulators)
def compute(self) -> Dict[str, np.ndarray]:
# cumulative_value / total_steps
return {k: np.array(v[0] / v[1]) for k, v in self.accumulators.items()}
class CriticTrainState(TrainState):
target_params: FrozenDict
def update_actor(
key: jax.random.PRNGKey,
actor: TrainState,
critic: TrainState,
batch: Dict[str, jax.Array],
beta: float,
tau: float,
normalize_q: bool,
metrics: Metrics,
) -> Tuple[jax.random.PRNGKey, TrainState, TrainState, Metrics]:
key, random_action_key = jax.random.split(key, 2)
def actor_loss_fn(params: jax.Array) -> Tuple[jax.Array, Metrics]:
actions = actor.apply_fn(params, batch["states"])
bc_penalty = ((actions - batch["actions"]) ** 2).sum(-1)
q_values = critic.apply_fn(critic.params, batch["states"], actions).min(0)
lmbda = 1
if normalize_q:
lmbda = jax.lax.stop_gradient(1 / jax.numpy.abs(q_values).mean())
loss = (beta * bc_penalty - lmbda * q_values).mean()
# logging stuff
random_actions = jax.random.uniform(
random_action_key, shape=batch["actions"].shape, minval=-1.0, maxval=1.0
)
new_metrics = metrics.update(
{
"actor_loss": loss,
"bc_mse_policy": bc_penalty.mean(),
"bc_mse_random": ((random_actions - batch["actions"]) ** 2)
.sum(-1)
.mean(),
"action_mse": ((actions - batch["actions"]) ** 2).mean(),
}
)
return loss, new_metrics
grads, new_metrics = jax.grad(actor_loss_fn, has_aux=True)(actor.params)
new_actor = actor.apply_gradients(grads=grads)
new_actor = new_actor.replace(
target_params=optax.incremental_update(actor.params, actor.target_params, tau)
)
new_critic = critic.replace(
target_params=optax.incremental_update(critic.params, critic.target_params, tau)
)
return key, new_actor, new_critic, new_metrics
def update_critic(
key: jax.random.PRNGKey,
actor: TrainState,
critic: CriticTrainState,
batch: Dict[str, jax.Array],
gamma: float,
beta: float,
tau: float,
policy_noise: float,
noise_clip: float,
metrics: Metrics,
) -> Tuple[jax.random.PRNGKey, TrainState, Metrics]:
key, actions_key = jax.random.split(key)
next_actions = actor.apply_fn(actor.target_params, batch["next_states"])
noise = jax.numpy.clip(
(jax.random.normal(actions_key, next_actions.shape) * policy_noise),
-noise_clip,
noise_clip,
)
next_actions = jax.numpy.clip(next_actions + noise, -1, 1)
bc_penalty = ((next_actions - batch["next_actions"]) ** 2).sum(-1)
next_q = critic.apply_fn(
critic.target_params, batch["next_states"], next_actions
).min(0)
next_q = next_q - beta * bc_penalty
target_q = batch["rewards"] + (1 - batch["dones"]) * gamma * next_q
def critic_loss_fn(critic_params: jax.Array) -> Tuple[jax.Array, jax.Array]:
# [N, batch_size] - [1, batch_size]
q = critic.apply_fn(critic_params, batch["states"], batch["actions"])
q_min = q.min(0).mean()
loss = ((q - target_q[None, ...]) ** 2).mean(1).sum(0)
return loss, q_min
(loss, q_min), grads = jax.value_and_grad(critic_loss_fn, has_aux=True)(
critic.params
)
new_critic = critic.apply_gradients(grads=grads)
new_metrics = metrics.update(
{
"critic_loss": loss,
"q_min": q_min,
}
)
return key, new_critic, new_metrics
def update_td3(
key: jax.random.PRNGKey,
actor: TrainState,
critic: CriticTrainState,
batch: Dict[str, Any],
metrics: Metrics,
gamma: float,
actor_bc_coef: float,
critic_bc_coef: float,
tau: float,
policy_noise: float,
noise_clip: float,
normalize_q: bool,
) -> Tuple[jax.random.PRNGKey, TrainState, TrainState, Metrics]:
key, new_critic, new_metrics = update_critic(
key,
actor,
critic,
batch,
gamma,
critic_bc_coef,
tau,
policy_noise,
noise_clip,
metrics,
)
key, new_actor, new_critic, new_metrics = update_actor(
key, actor, new_critic, batch, actor_bc_coef, tau, normalize_q, new_metrics
)
return key, new_actor, new_critic, new_metrics | null |
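`optax.incremental_update(new, old, step_size)` used for the target networks above computes step_size * new + (1 - step_size) * old, i.e. the same Polyak average as the PyTorch `soft_update` helpers earlier in this file:
assert optax.incremental_update(jnp.array([1.0]), jnp.array([0.0]), step_size=0.25) == 0.25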
161,354 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
@chex.dataclass(frozen=True)  # dataclass decorator (as in CORL) provides the .replace() used below
class Metrics:
accumulators: Dict[str, Tuple[jax.Array, jax.Array]]
@staticmethod
def create(metrics: Sequence[str]) -> "Metrics":
init_metrics = {key: (jnp.array([0.0]), jnp.array([0.0])) for key in metrics}
return Metrics(accumulators=init_metrics)
def update(self, updates: Dict[str, jax.Array]) -> "Metrics":
new_accumulators = deepcopy(self.accumulators)
for key, value in updates.items():
acc, steps = new_accumulators[key]
new_accumulators[key] = (acc + value, steps + 1)
return self.replace(accumulators=new_accumulators)
def compute(self) -> Dict[str, np.ndarray]:
# cumulative_value / total_steps
return {k: np.array(v[0] / v[1]) for k, v in self.accumulators.items()}
class CriticTrainState(TrainState):
target_params: FrozenDict
def update_critic(
key: jax.random.PRNGKey,
actor: TrainState,
critic: CriticTrainState,
batch: Dict[str, jax.Array],
gamma: float,
beta: float,
tau: float,
policy_noise: float,
noise_clip: float,
metrics: Metrics,
) -> Tuple[jax.random.PRNGKey, TrainState, Metrics]:
key, actions_key = jax.random.split(key)
next_actions = actor.apply_fn(actor.target_params, batch["next_states"])
noise = jax.numpy.clip(
(jax.random.normal(actions_key, next_actions.shape) * policy_noise),
-noise_clip,
noise_clip,
)
next_actions = jax.numpy.clip(next_actions + noise, -1, 1)
bc_penalty = ((next_actions - batch["next_actions"]) ** 2).sum(-1)
next_q = critic.apply_fn(
critic.target_params, batch["next_states"], next_actions
).min(0)
next_q = next_q - beta * bc_penalty
target_q = batch["rewards"] + (1 - batch["dones"]) * gamma * next_q
def critic_loss_fn(critic_params: jax.Array) -> Tuple[jax.Array, jax.Array]:
# [N, batch_size] - [1, batch_size]
q = critic.apply_fn(critic_params, batch["states"], batch["actions"])
q_min = q.min(0).mean()
loss = ((q - target_q[None, ...]) ** 2).mean(1).sum(0)
return loss, q_min
(loss, q_min), grads = jax.value_and_grad(critic_loss_fn, has_aux=True)(
critic.params
)
new_critic = critic.apply_gradients(grads=grads)
new_metrics = metrics.update(
{
"critic_loss": loss,
"q_min": q_min,
}
)
return key, new_critic, new_metrics
def update_td3_no_targets(
key: jax.random.PRNGKey,
actor: TrainState,
critic: CriticTrainState,
batch: Dict[str, Any],
gamma: float,
metrics: Metrics,
actor_bc_coef: float,
critic_bc_coef: float,
tau: float,
policy_noise: float,
noise_clip: float,
) -> Tuple[jax.random.PRNGKey, TrainState, TrainState, Metrics]:
key, new_critic, new_metrics = update_critic(
key,
actor,
critic,
batch,
gamma,
critic_bc_coef,
tau,
policy_noise,
noise_clip,
metrics,
)
return key, actor, new_critic, new_metrics | null |
161,355 | import os
import math
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import partial
from typing import Any, Callable, Dict, Sequence, Tuple, Union
import chex
import d4rl
import flax.linen as nn
import gym
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pyrallis
import wandb
from flax.core import FrozenDict
from flax.training.train_state import TrainState
from tqdm.auto import trange
def action_fn(actor: TrainState) -> Callable:
@jax.jit
def _action_fn(obs: jax.Array) -> jax.Array:
action = actor.apply_fn(actor.params, obs)
return action
return _action_fn | null |
161,356 | import copy
import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
161,358 | import os
import random
import uuid
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
161,359 | import os
import random
import uuid
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import wandb
from torch.nn import functional as F
from torch.utils.data import DataLoader, IterableDataset
from tqdm.auto import tqdm, trange
def pad_along_axis(
arr: np.ndarray, pad_to: int, axis: int = 0, fill_value: float = 0.0
) -> np.ndarray:
pad_size = pad_to - arr.shape[axis]
if pad_size <= 0:
return arr
npad = [(0, 0)] * arr.ndim
npad[axis] = (0, pad_size)
return np.pad(arr, pad_width=npad, mode="constant", constant_values=fill_value) | null |
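`pad_along_axis` pads only at the end of the chosen axis and is a no-op when the array is already long enough:
arr = np.ones((3, 2))
padded = pad_along_axis(arr, pad_to=5, axis=0)
assert padded.shape == (5, 2) and padded[3:].sum() == 0.0
assert pad_along_axis(arr, pad_to=2, axis=0) is arr  # already long enough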
161,360 | import os
import random
import uuid
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import wandb
from torch.nn import functional as F
from torch.utils.data import DataLoader, IterableDataset
from tqdm.auto import tqdm, trange
def discounted_cumsum(x: np.ndarray, gamma: float) -> np.ndarray:
cumsum = np.zeros_like(x)
cumsum[-1] = x[-1]
for t in reversed(range(x.shape[0] - 1)):
cumsum[t] = x[t] + gamma * cumsum[t + 1]
return cumsum
def load_d4rl_trajectories(
env_name: str, gamma: float = 1.0
) -> Tuple[List[DefaultDict[str, np.ndarray]], Dict[str, Any]]:
dataset = gym.make(env_name).get_dataset()
traj, traj_len = [], []
data_ = defaultdict(list)
for i in trange(dataset["rewards"].shape[0], desc="Processing trajectories"):
data_["observations"].append(dataset["observations"][i])
data_["actions"].append(dataset["actions"][i])
data_["rewards"].append(dataset["rewards"][i])
if dataset["terminals"][i] or dataset["timeouts"][i]:
episode_data = {k: np.array(v, dtype=np.float32) for k, v in data_.items()}
# return-to-go if gamma=1.0, just discounted returns else
episode_data["returns"] = discounted_cumsum(
episode_data["rewards"], gamma=gamma
)
traj.append(episode_data)
traj_len.append(episode_data["actions"].shape[0])
# reset trajectory buffer
data_ = defaultdict(list)
# needed for normalization, weighted sampling, other stats can be added also
info = {
"obs_mean": dataset["observations"].mean(0, keepdims=True),
"obs_std": dataset["observations"].std(0, keepdims=True) + 1e-6,
"traj_lens": np.array(traj_len),
}
return traj, info | null |
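Worked example of `discounted_cumsum`: returns-to-go are accumulated backwards, so rewards [1, 1, 1] with gamma = 0.9 give [1 + 0.9 * 1.9, 1 + 0.9 * 1, 1]:
assert np.allclose(discounted_cumsum(np.array([1.0, 1.0, 1.0]), gamma=0.9), [2.71, 1.9, 1.0])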
161,361 | import os
import random
import uuid
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import wandb
from torch.nn import functional as F
from torch.utils.data import DataLoader, IterableDataset
from tqdm.auto import tqdm, trange
class TrainConfig:
# wandb params
project: str = "CORL"
group: str = "DT-D4RL"
name: str = "DT"
# model params
embedding_dim: int = 128
num_layers: int = 3
num_heads: int = 1
seq_len: int = 20
episode_len: int = 1000
attention_dropout: float = 0.1
residual_dropout: float = 0.1
embedding_dropout: float = 0.1
max_action: float = 1.0
# training params
env_name: str = "halfcheetah-medium-v2"
learning_rate: float = 1e-4
betas: Tuple[float, float] = (0.9, 0.999)
weight_decay: float = 1e-4
clip_grad: Optional[float] = 0.25
batch_size: int = 64
update_steps: int = 100_000
warmup_steps: int = 10_000
reward_scale: float = 0.001
num_workers: int = 4
# evaluation params
target_returns: Tuple[float, ...] = (12000.0, 6000.0)
eval_episodes: int = 100
eval_every: int = 10_000
# general params
checkpoints_path: Optional[str] = None
deterministic_torch: bool = False
train_seed: int = 10
eval_seed: int = 42
device: str = "cuda"
def __post_init__(self):
self.name = f"{self.name}-{self.env_name}-{str(uuid.uuid4())[:8]}"
if self.checkpoints_path is not None:
self.checkpoints_path = os.path.join(self.checkpoints_path, self.name)
def set_seed(
seed: int, env: Optional[gym.Env] = None, deterministic_torch: bool = False
):
if env is not None:
env.seed(seed)
env.action_space.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.use_deterministic_algorithms(deterministic_torch)
def wandb_init(config: dict) -> None:
wandb.init(
config=config,
project=config["project"],
group=config["group"],
name=config["name"],
id=str(uuid.uuid4()),
)
wandb.run.save()
def wrap_env(
env: gym.Env,
state_mean: Union[np.ndarray, float] = 0.0,
state_std: Union[np.ndarray, float] = 1.0,
reward_scale: float = 1.0,
) -> gym.Env:
def normalize_state(state):
return (state - state_mean) / state_std
def scale_reward(reward):
return reward_scale * reward
env = gym.wrappers.TransformObservation(env, normalize_state)
if reward_scale != 1.0:
env = gym.wrappers.TransformReward(env, scale_reward)
return env
class SequenceDataset(IterableDataset):
def __init__(self, env_name: str, seq_len: int = 10, reward_scale: float = 1.0):
self.dataset, info = load_d4rl_trajectories(env_name, gamma=1.0)
self.reward_scale = reward_scale
self.seq_len = seq_len
self.state_mean = info["obs_mean"]
self.state_std = info["obs_std"]
# https://github.com/kzl/decision-transformer/blob/e2d82e68f330c00f763507b3b01d774740bee53f/gym/experiment.py#L116 # noqa
self.sample_prob = info["traj_lens"] / info["traj_lens"].sum()
def __prepare_sample(self, traj_idx, start_idx):
traj = self.dataset[traj_idx]
# https://github.com/kzl/decision-transformer/blob/e2d82e68f330c00f763507b3b01d774740bee53f/gym/experiment.py#L128 # noqa
states = traj["observations"][start_idx : start_idx + self.seq_len]
actions = traj["actions"][start_idx : start_idx + self.seq_len]
returns = traj["returns"][start_idx : start_idx + self.seq_len]
time_steps = np.arange(start_idx, start_idx + self.seq_len)
states = (states - self.state_mean) / self.state_std
returns = returns * self.reward_scale
# pad up to seq_len if needed
mask = np.hstack(
[np.ones(states.shape[0]), np.zeros(self.seq_len - states.shape[0])]
)
if states.shape[0] < self.seq_len:
states = pad_along_axis(states, pad_to=self.seq_len)
actions = pad_along_axis(actions, pad_to=self.seq_len)
returns = pad_along_axis(returns, pad_to=self.seq_len)
return states, actions, returns, time_steps, mask
def __iter__(self):
while True:
traj_idx = np.random.choice(len(self.dataset), p=self.sample_prob)
start_idx = random.randint(0, self.dataset[traj_idx]["rewards"].shape[0] - 1)
yield self.__prepare_sample(traj_idx, start_idx)
class DecisionTransformer(nn.Module):
def __init__(
self,
state_dim: int,
action_dim: int,
seq_len: int = 10,
episode_len: int = 1000,
embedding_dim: int = 128,
num_layers: int = 4,
num_heads: int = 8,
attention_dropout: float = 0.0,
residual_dropout: float = 0.0,
embedding_dropout: float = 0.0,
max_action: float = 1.0,
):
super().__init__()
self.emb_drop = nn.Dropout(embedding_dropout)
self.emb_norm = nn.LayerNorm(embedding_dim)
self.out_norm = nn.LayerNorm(embedding_dim)
# additional seq_len embeddings for padding timesteps
self.timestep_emb = nn.Embedding(episode_len + seq_len, embedding_dim)
self.state_emb = nn.Linear(state_dim, embedding_dim)
self.action_emb = nn.Linear(action_dim, embedding_dim)
self.return_emb = nn.Linear(1, embedding_dim)
self.blocks = nn.ModuleList(
[
TransformerBlock(
seq_len=3 * seq_len,
embedding_dim=embedding_dim,
num_heads=num_heads,
attention_dropout=attention_dropout,
residual_dropout=residual_dropout,
)
for _ in range(num_layers)
]
)
self.action_head = nn.Sequential(nn.Linear(embedding_dim, action_dim), nn.Tanh())
self.seq_len = seq_len
self.embedding_dim = embedding_dim
self.state_dim = state_dim
self.action_dim = action_dim
self.episode_len = episode_len
self.max_action = max_action
self.apply(self._init_weights)
@staticmethod
def _init_weights(module: nn.Module):
if isinstance(module, (nn.Linear, nn.Embedding)):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
torch.nn.init.zeros_(module.bias)
torch.nn.init.ones_(module.weight)
def forward(
self,
states: torch.Tensor, # [batch_size, seq_len, state_dim]
actions: torch.Tensor, # [batch_size, seq_len, action_dim]
returns_to_go: torch.Tensor, # [batch_size, seq_len]
time_steps: torch.Tensor, # [batch_size, seq_len]
padding_mask: Optional[torch.Tensor] = None, # [batch_size, seq_len]
) -> torch.FloatTensor:
batch_size, seq_len = states.shape[0], states.shape[1]
# [batch_size, seq_len, emb_dim]
time_emb = self.timestep_emb(time_steps)
state_emb = self.state_emb(states) + time_emb
act_emb = self.action_emb(actions) + time_emb
returns_emb = self.return_emb(returns_to_go.unsqueeze(-1)) + time_emb
# [batch_size, seq_len * 3, emb_dim], (r_0, s_0, a_0, r_1, s_1, a_1, ...)
sequence = (
torch.stack([returns_emb, state_emb, act_emb], dim=1)
.permute(0, 2, 1, 3)
.reshape(batch_size, 3 * seq_len, self.embedding_dim)
)
if padding_mask is not None:
# [batch_size, seq_len * 3], stack mask identically to fit the sequence
padding_mask = (
torch.stack([padding_mask, padding_mask, padding_mask], dim=1)
.permute(0, 2, 1)
.reshape(batch_size, 3 * seq_len)
)
# LayerNorm and Dropout (!!!) as in original implementation,
# while minGPT & huggingface uses only embedding dropout
out = self.emb_norm(sequence)
out = self.emb_drop(out)
for block in self.blocks:
out = block(out, padding_mask=padding_mask)
out = self.out_norm(out)
# [batch_size, seq_len, action_dim]
# predict actions only from state embeddings
out = self.action_head(out[:, 1::3]) * self.max_action
return out
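# Minimal shape check for the forward pass (an illustrative sketch with toy
# dimensions, not taken from the original file). It shows that the model
# consumes 3 * seq_len interleaved (return, state, action) tokens yet emits
# exactly one predicted action per timestep, read off the state positions.
def _forward_shape_example():
    model = DecisionTransformer(state_dim=17, action_dim=6, seq_len=10)
    states = torch.randn(2, 10, 17)
    actions = torch.randn(2, 10, 6)
    returns_to_go = torch.randn(2, 10)
    time_steps = torch.arange(10).view(1, -1).repeat(2, 1)
    out = model(states, actions, returns_to_go, time_steps)
    assert out.shape == (2, 10, 6)  # one action per (return, state) pair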
@torch.no_grad()
def eval_rollout(
model: DecisionTransformer,
env: gym.Env,
target_return: float,
device: str = "cpu",
) -> Tuple[float, float]:
states = torch.zeros(
1, model.episode_len + 1, model.state_dim, dtype=torch.float, device=device
)
actions = torch.zeros(
1, model.episode_len, model.action_dim, dtype=torch.float, device=device
)
returns = torch.zeros(1, model.episode_len + 1, dtype=torch.float, device=device)
time_steps = torch.arange(model.episode_len, dtype=torch.long, device=device)
time_steps = time_steps.view(1, -1)
states[:, 0] = torch.as_tensor(env.reset(), device=device)
returns[:, 0] = torch.as_tensor(target_return, device=device)
    # we cannot step beyond model.episode_len, as the timestep embedding
    # would index out of range
episode_return, episode_len = 0.0, 0.0
for step in range(model.episode_len):
        # first select the history up to the current step, then keep only the
        # last seq_len entries; step + 1 because slicing is end-exclusive. The
        # last action is a zero-filled dummy (the model predicts it, so its
        # actual value does not matter)
        predicted_actions = model(
states[:, : step + 1][:, -model.seq_len :],
actions[:, : step + 1][:, -model.seq_len :],
returns[:, : step + 1][:, -model.seq_len :],
time_steps[:, : step + 1][:, -model.seq_len :],
)
predicted_action = predicted_actions[0, -1].cpu().numpy()
next_state, reward, done, info = env.step(predicted_action)
# at step t, we predict a_t, get s_{t + 1}, r_{t + 1}
actions[:, step] = torch.as_tensor(predicted_action)
states[:, step + 1] = torch.as_tensor(next_state)
returns[:, step + 1] = torch.as_tensor(returns[:, step] - reward)
episode_return += reward
episode_len += 1
if done:
break
return episode_return, episode_len
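# Hedged usage sketch (not from the original file): run one return-conditioned
# episode. The environment name and target return below are arbitrary example
# choices, and we reuse the raw env here without state normalization.
def _example_rollout(model: DecisionTransformer):
    env = gym.make("halfcheetah-medium-v2")
    episode_return, episode_len = eval_rollout(
        model=model, env=env, target_return=6000.0, device="cpu"
    )
    return episode_return, episode_len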
def train(config: TrainConfig):
set_seed(config.train_seed, deterministic_torch=config.deterministic_torch)
# init wandb session for logging
wandb_init(asdict(config))
# data & dataloader setup
dataset = SequenceDataset(
config.env_name, seq_len=config.seq_len, reward_scale=config.reward_scale
)
trainloader = DataLoader(
dataset,
batch_size=config.batch_size,
pin_memory=True,
num_workers=config.num_workers,
)
# evaluation environment with state & reward preprocessing (as in dataset above)
eval_env = wrap_env(
env=gym.make(config.env_name),
state_mean=dataset.state_mean,
state_std=dataset.state_std,
reward_scale=config.reward_scale,
)
# model & optimizer & scheduler setup
config.state_dim = eval_env.observation_space.shape[0]
config.action_dim = eval_env.action_space.shape[0]
model = DecisionTransformer(
state_dim=config.state_dim,
action_dim=config.action_dim,
embedding_dim=config.embedding_dim,
seq_len=config.seq_len,
episode_len=config.episode_len,
num_layers=config.num_layers,
num_heads=config.num_heads,
attention_dropout=config.attention_dropout,
residual_dropout=config.residual_dropout,
embedding_dropout=config.embedding_dropout,
max_action=config.max_action,
).to(config.device)
optim = torch.optim.AdamW(
model.parameters(),
lr=config.learning_rate,
weight_decay=config.weight_decay,
betas=config.betas,
)
scheduler = torch.optim.lr_scheduler.LambdaLR(
optim,
lambda steps: min((steps + 1) / config.warmup_steps, 1),
)
# save config to the checkpoint
if config.checkpoints_path is not None:
print(f"Checkpoints path: {config.checkpoints_path}")
os.makedirs(config.checkpoints_path, exist_ok=True)
with open(os.path.join(config.checkpoints_path, "config.yaml"), "w") as f:
pyrallis.dump(config, f)
print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")
trainloader_iter = iter(trainloader)
for step in trange(config.update_steps, desc="Training"):
batch = next(trainloader_iter)
states, actions, returns, time_steps, mask = [b.to(config.device) for b in batch]
# True value indicates that the corresponding key value will be ignored
padding_mask = ~mask.to(torch.bool)
predicted_actions = model(
states=states,
actions=actions,
returns_to_go=returns,
time_steps=time_steps,
padding_mask=padding_mask,
)
loss = F.mse_loss(predicted_actions, actions.detach(), reduction="none")
# [batch_size, seq_len, action_dim] * [batch_size, seq_len, 1]
loss = (loss * mask.unsqueeze(-1)).mean()
optim.zero_grad()
loss.backward()
if config.clip_grad is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip_grad)
optim.step()
scheduler.step()
wandb.log(
{
"train_loss": loss.item(),
"learning_rate": scheduler.get_last_lr()[0],
},
step=step,
)
# validation in the env for the actual online performance
if step % config.eval_every == 0 or step == config.update_steps - 1:
model.eval()
for target_return in config.target_returns:
eval_env.seed(config.eval_seed)
eval_returns = []
for _ in trange(config.eval_episodes, desc="Evaluation", leave=False):
eval_return, eval_len = eval_rollout(
model=model,
env=eval_env,
target_return=target_return * config.reward_scale,
device=config.device,
)
# unscale for logging & correct normalized score computation
eval_returns.append(eval_return / config.reward_scale)
normalized_scores = (
eval_env.get_normalized_score(np.array(eval_returns)) * 100
)
wandb.log(
{
f"eval/{target_return}_return_mean": np.mean(eval_returns),
f"eval/{target_return}_return_std": np.std(eval_returns),
f"eval/{target_return}_normalized_score_mean": np.mean(
normalized_scores
),
f"eval/{target_return}_normalized_score_std": np.std(
normalized_scores
),
},
step=step,
)
model.train()
if config.checkpoints_path is not None:
checkpoint = {
"model_state": model.state_dict(),
"state_mean": dataset.state_mean,
"state_std": dataset.state_std,
}
torch.save(checkpoint, os.path.join(config.checkpoints_path, "dt_checkpoint.pt")) | null |
161,364 | import math
import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import wandb
from torch.distributions import Normal
from tqdm import trange
def soft_update(target: nn.Module, source: nn.Module, tau: float):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - tau) * target_param.data + tau * source_param.data) | null |
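# Hedged sketch (not from the original snippet): soft_update above performs
# Polyak averaging of the target parameters; tau=1.0 degenerates to a hard
# copy. The layer size and tau below are arbitrary example values.
def _soft_update_example():
    source = nn.Linear(4, 4)
    target = deepcopy(source)
    with torch.no_grad():
        source.weight.add_(0.1)  # pretend one training step moved the source
    soft_update(target, source, tau=0.005)  # blend 0.5% of source into target
    return target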
161,367 | import os
import random
import uuid
from copy import deepcopy
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import d4rl
import gym
import numpy as np
import pyrallis
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch.distributions import Normal, TanhTransform, TransformedDistribution
def init_module_weights(module: torch.nn.Sequential, orthogonal_init: bool = False):
    # Specific orthogonal initialization for inner layers
    # If orthogonal init is off, we do not change the default initialization
if orthogonal_init:
for submodule in module[:-1]:
if isinstance(submodule, nn.Linear):
nn.init.orthogonal_(submodule.weight, gain=np.sqrt(2))
nn.init.constant_(submodule.bias, 0.0)
    # The last layer should be initialized differently as well
if orthogonal_init:
nn.init.orthogonal_(module[-1].weight, gain=1e-2)
else:
nn.init.xavier_uniform_(module[-1].weight, gain=1e-2)
nn.init.constant_(module[-1].bias, 0.0) | null |
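# Hedged usage sketch (not from the original snippet): build a small MLP head
# and apply the initializer above; the hidden sizes are arbitrary examples.
def _init_example() -> nn.Sequential:
    mlp = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 1))
    # inner Linear layers get orthogonal init, the output layer a small gain
    init_module_weights(mlp, orthogonal_init=True)
    return mlp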
161,370 | import pandas as pd
import wandb
collected_urls = {
"algorithm": [],
"dataset": [],
"url": [],
}
def get_urls(sweep_id, algo_name):
    api = wandb.Api(timeout=39)
    sweep = api.sweep(sweep_id)
runs = sweep.runs
for run in runs:
if "env" in run.config:
dataset = run.config["env"]
elif "env_name" in run.config:
dataset = run.config["env_name"]
name = algo_name
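        # hedged inference: run names containing "10" before the trailing seed
        # suffix are treated as 10%-dataset variants of the algorithm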
if "10" in "-".join(run.name.split("-")[:-1]):
name = "10% " + name
if "medium" not in dataset:
if "cheetah" in dataset or "hopper" in dataset or "walker" in dataset:
continue
if "v0" not in dataset and "dense" not in dataset:
print(name, dataset, run.url)
collected_urls["algorithm"].append(name)
collected_urls["dataset"].append(dataset)
collected_urls["url"].append(run.url.replace("https://wandb.ai/", "")) | null |
161,371 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
avg_scores, avg_stds = get_average_scores(full_scores)
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_scores[algo][data] is not None:
to_draw = avg_scores[algo][data]
std_draw = avg_stds[algo][data]
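            # hedged inference: curves logged at different eval frequencies
            # are thinned to a common ~200-point length before plotting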
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def get_average_scores(scores):
avg_scores = {algo: {ds: None for ds in scores[algo]} for algo in scores}
stds = {algo: {ds: None for ds in scores[algo]} for algo in scores}
for algo in scores:
for data in scores[algo]:
sc = scores[algo][data]
if len(sc) > 0:
ml = min(map(len, sc))
sc = [s[:ml] for s in sc]
scores[algo][data] = sc
avg_scores[algo][data] = np.mean(sc, axis=0)
stds[algo][data] = np.std(sc, axis=0)
return avg_scores, stds | null |
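# Hedged sketch of the expected input structure (inferred from the code above,
# not documented in the original): scores maps algorithm -> dataset -> list of
# per-seed score curves, possibly of unequal length.
def _average_scores_example():
    toy_scores = {
        "BC": {
            "hopper-medium-v2": [
                np.array([10.0, 20.0, 30.0]),
                np.array([12.0, 18.0, 28.0, 33.0]),
            ]
        }
    }
    avg, std = get_average_scores(toy_scores)
    # curves are truncated to the shortest seed before averaging
    assert avg["BC"]["hopper-medium-v2"].shape == (3,)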
161,372 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
avg_scores, avg_stds = get_average_scores(full_scores)
max_scores, max_stds = get_max_scores(full_scores)
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_scores[algo][data] is not None:
to_draw = avg_scores[algo][data]
std_draw = avg_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def get_max_scores(scores):
avg_scores = {algo: {ds: None for ds in scores[algo]} for algo in scores}
stds = {algo: {ds: None for ds in scores[algo]} for algo in scores}
for algo in scores:
for data in scores[algo]:
sc = scores[algo][data]
if len(sc) > 0:
ml = min(map(len, sc))
sc = [s[:ml] for s in sc]
scores[algo][data] = sc
max_scores = np.max(sc, axis=1)
avg_scores[algo][data] = np.mean(max_scores)
stds[algo][data] = np.std(max_scores)
return avg_scores, stds | null |
161,373 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
last_scores, last_stds = get_last_scores(avg_scores, avg_stds)
def get_last_scores(avg_scores, avg_stds):
last_scores = {
algo: {
ds: avg_scores[algo][ds][-1] if avg_scores[algo][ds] is not None else None
for ds in avg_scores[algo]
}
for algo in avg_scores
}
stds = {
algo: {
ds: avg_stds[algo][ds][-1] if avg_stds[algo][ds] is not None else None
for ds in avg_scores[algo]
}
for algo in avg_scores
}
return last_scores, stds | null |
161,374 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_scores[algo][data] is not None:
to_draw = avg_scores[algo][data]
std_draw = avg_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def add_domains_avg(scores):
for algo in scores:
locomotion = [
scores[algo][data]
for data in [
"halfcheetah-medium-v2",
"halfcheetah-medium-replay-v2",
"halfcheetah-medium-expert-v2",
"hopper-medium-v2",
"hopper-medium-replay-v2",
"hopper-medium-expert-v2",
"walker2d-medium-v2",
"walker2d-medium-replay-v2",
"walker2d-medium-expert-v2",
]
]
antmaze = [
scores[algo][data]
for data in [
"antmaze-umaze-v2",
"antmaze-umaze-diverse-v2",
"antmaze-medium-play-v2",
"antmaze-medium-diverse-v2",
"antmaze-large-play-v2",
"antmaze-large-diverse-v2",
]
]
maze2d = [
scores[algo][data]
for data in [
"maze2d-umaze-v1",
"maze2d-medium-v1",
"maze2d-large-v1",
]
]
adroit = [
scores[algo][data]
for data in [
"pen-human-v1",
"pen-cloned-v1",
"pen-expert-v1",
"door-human-v1",
"door-cloned-v1",
"door-expert-v1",
"hammer-human-v1",
"hammer-cloned-v1",
"hammer-expert-v1",
"relocate-human-v1",
"relocate-cloned-v1",
"relocate-expert-v1",
]
]
scores[algo]["locomotion avg"] = np.mean(locomotion)
scores[algo]["antmaze avg"] = np.mean(antmaze)
scores[algo]["maze2d avg"] = np.mean(maze2d)
scores[algo]["adroit avg"] = np.mean(adroit)
scores[algo]["total avg"] = np.mean(
np.hstack((locomotion, antmaze, maze2d, adroit))
) | null |
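# Hedged, runnable sketch (not from the original file): add_domains_avg mutates
# scores in place and expects every dataset key listed above to be present for
# each algorithm. The constant 50.0 below is a made-up placeholder score.
def _domains_avg_example():
    names = [
        "halfcheetah-medium-v2", "halfcheetah-medium-replay-v2",
        "halfcheetah-medium-expert-v2", "hopper-medium-v2",
        "hopper-medium-replay-v2", "hopper-medium-expert-v2",
        "walker2d-medium-v2", "walker2d-medium-replay-v2",
        "walker2d-medium-expert-v2", "antmaze-umaze-v2",
        "antmaze-umaze-diverse-v2", "antmaze-medium-play-v2",
        "antmaze-medium-diverse-v2", "antmaze-large-play-v2",
        "antmaze-large-diverse-v2", "maze2d-umaze-v1", "maze2d-medium-v1",
        "maze2d-large-v1", "pen-human-v1", "pen-cloned-v1", "pen-expert-v1",
        "door-human-v1", "door-cloned-v1", "door-expert-v1", "hammer-human-v1",
        "hammer-cloned-v1", "hammer-expert-v1", "relocate-human-v1",
        "relocate-cloned-v1", "relocate-expert-v1",
    ]
    scores = {"BC": {name: 50.0 for name in names}}
    add_domains_avg(scores)
    assert scores["BC"]["total avg"] == 50.0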
161,375 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
algorithms = [
"BC",
"10% BC",
"TD3+BC",
"AWAC",
"CQL",
"IQL",
"ReBRAC",
"SAC-N",
"EDAC",
"DT",
]
ordered_datasets = [
"halfcheetah-medium-v2",
"halfcheetah-medium-replay-v2",
"halfcheetah-medium-expert-v2",
"hopper-medium-v2",
"hopper-medium-replay-v2",
"hopper-medium-expert-v2",
"walker2d-medium-v2",
"walker2d-medium-replay-v2",
"walker2d-medium-expert-v2",
"locomotion avg",
"maze2d-umaze-v1",
"maze2d-medium-v1",
"maze2d-large-v1",
"maze2d avg",
"antmaze-umaze-v2",
"antmaze-umaze-diverse-v2",
"antmaze-medium-play-v2",
"antmaze-medium-diverse-v2",
"antmaze-large-play-v2",
"antmaze-large-diverse-v2",
"antmaze avg",
"pen-human-v1",
"pen-cloned-v1",
"pen-expert-v1",
"door-human-v1",
"door-cloned-v1",
"door-expert-v1",
"hammer-human-v1",
"hammer-cloned-v1",
"hammer-expert-v1",
"relocate-human-v1",
"relocate-cloned-v1",
"relocate-expert-v1",
"adroit avg",
"total avg",
]
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_scores[algo][data] is not None:
to_draw = avg_scores[algo][data]
std_draw = avg_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
algorithms = list(flat)
def get_table(
scores,
stds,
pm="$\\pm$",
delim=" & ",
row_delim="\\midrule",
row_end=" \\\\",
row_begin="",
):
rows = [row_begin + delim.join(["Task Name"] + algorithms) + row_end]
prev_env = "halfcheetah"
for data in ordered_datasets:
env = data.split("-")[0]
if env != prev_env:
if len(row_delim) > 0:
rows.append(row_delim)
prev_env = env
row = [data]
for algo in algorithms:
if data in stds[algo]:
row.append(f"{scores[algo][data]:.2f} {pm} {stds[algo][data]:.2f}")
else:
row.append(f"{scores[algo][data]:.2f}")
rows.append(row_begin + delim.join(row) + row_end)
return "\n".join(rows) | null |
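# Hedged usage sketch (not from the original file): get_table renders one
# "Task Name & score $\pm$ std & ..." row per dataset, inserting \midrule
# between environment groups. The constant toy values below are placeholders.
def _table_example() -> str:
    toy_scores = {a: {d: 50.0 for d in ordered_datasets} for a in algorithms}
    toy_stds = {a: {} for a in algorithms}  # no stds -> plain "50.00" cells
    return get_table(toy_scores, toy_stds)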
161,376 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
algorithms = [
"BC",
"10% BC",
"TD3+BC",
"AWAC",
"CQL",
"IQL",
"ReBRAC",
"SAC-N",
"EDAC",
"DT",
]
plt.rcParams["figure.figsize"] = (15, 8)
plt.rcParams["figure.dpi"] = 300
sns.set(style="ticks", font_scale=1.5)
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_scores[algo][data] is not None:
to_draw = avg_scores[algo][data]
std_draw = avg_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def convert_dataset_name(name):
plt.rcParams["figure.figsize"] = (10, 6)
plt.rcParams["figure.dpi"] = 300
sns.set(style="ticks", font_scale=0.5)
plt.rcParams.update(
{
# "font.family": "serif",
"font.serif": "Times New Roman"
}
)
algorithms = list(flat)
plt.savefig("out/perf_profiles_offline.pdf", dpi=300, bbox_inches="tight")
plt.close()
sns.set(style="ticks", font_scale=0.5)
plt.savefig("out/improvement_probability_offline.pdf", dpi=300, bbox_inches="tight")
plt.close()
def plot_bars(scores, save_name):
agg_l = []
for env in [
"halfcheetah",
"hopper",
"walker2d",
"maze2d",
"antmaze",
"pen",
"door",
"hammer",
"relocate",
]:
if env in ["halfcheetah", "hopper", "walker2d"]:
datas = ["medium-v2", "medium-expert-v2", "medium-replay-v2"]
elif "maze2d" in env:
datas = ["umaze-v1", "medium-v1", "large-v1"]
elif "antmaze" in env:
datas = [
"umaze-v2",
"umaze-diverse-v2",
"medium-play-v2",
"medium-diverse-v2",
"large-play-v2",
"large-diverse-v2",
]
else:
datas = ["human-v1", "cloned-v1", "expert-v1"]
for data in datas:
line = convert_dataset_name(f"{env}-{data}")
for algo in algorithms:
agg_l.append([algo, line, scores[algo][f"{env}-{data}"]])
df_agg = pd.DataFrame(agg_l, columns=["Algorithm", "Dataset", "Normalized Score"])
sns.set(style="ticks", font_scale=2)
    plt.rcParams["figure.figsize"] = (20, 10)
b = sns.barplot(
data=df_agg[
df_agg.Dataset.apply(
lambda x: "cheetah" in x or "hopper" in x or "walker" in x
)
],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
plt.grid()
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.savefig(f"out/bars_{save_name}_loco.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
b = sns.barplot(
data=df_agg[df_agg.Dataset.apply(lambda x: "maze2d" in x)],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.grid()
plt.savefig(f"out/bars_{save_name}_maze.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
b = sns.barplot(
data=df_agg[df_agg.Dataset.apply(lambda x: "ant" in x)],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.grid()
plt.savefig(f"out/bars_{save_name}_ant.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
b = sns.barplot(
data=df_agg[
df_agg.Dataset.apply(
lambda x: "pen" in x or "hammer" in x or "door" in x or "relocate" in x
)
],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
plt.grid()
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.savefig(f"out/bars_{save_name}_adroit.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close() | null |
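# Hedged usage sketch (illustrative; "last" is an assumed file-name suffix):
# plot_bars expects a scores mapping covering every "<env>-<dataset>" key used
# above, e.g. the output of get_last_scores.
def _plot_bars_example(last_scores):
    os.makedirs("out", exist_ok=True)  # PDFs are written under out/
    plot_bars(last_scores, "last")  # -> out/bars_last_{loco,maze,ant,adroit}.pdf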
161,377 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
flat = flatten(full_scores)
def flatten(data):
res = {}
for algo in data:
flat = []
for env in data[algo]:
if "avg" not in env:
env_list = np.array(data[algo][env])[:, -1]
flat.append(env_list)
res[algo] = np.array(flat).T
return res | null |
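# Hedged sketch of flatten's contract (inferred from the code above): it keeps
# only the final score of every per-seed curve and stacks them into an
# [n_seeds, n_envs] array per algorithm, skipping the "avg" aggregate keys.
def _flatten_example():
    toy = {
        "BC": {
            "hopper-medium-v2": [[1.0, 2.0], [3.0, 4.0]],  # two seeds
            "locomotion avg": 2.5,  # ignored by flatten
        }
    }
    out = flatten(toy)
    assert out["BC"].shape == (2, 1)  # two seeds, one environment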
161,378 | import pandas as pd
import wandb
collected_urls = {
"algorithm": [],
"dataset": [],
"url": [],
}
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/gae6mjr6")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3dda9gfw")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3sgbj9n0")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/67eno4ma")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3bur5hke")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/330z0l2v")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/1i05t3vj")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/k9yfle3x")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/1zreo8zw")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/18vbgvb2")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/ky3vncuf")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/3tz0z6nh")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/31dmbfoz")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1rhop7f6")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2q070txr")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/sbcrq218")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/28iujcoa")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/2f12hcq3")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1ptuak40")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/36y8187b")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3bn0h2zy")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3joz13bc")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3s9l1a83")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1q966noh")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2b85pbgd")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/ca0nxbh4")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1ipey1bk")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/x35k6x12")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1owdjob7")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/xoosoz9n")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3r09yx27")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3k5v2mso")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/39tqleqs")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/9cddvu7a")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/17v5isiw")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2a8wzq2t")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1tgqpiks")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/19yfj5xu")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2bneh6uw")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3twop214")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/rhkaisgq")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/287bzpdd")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/l2gfzbhg")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3gnugxzy")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2uwtj2md")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/60yn1nfx")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2p0w55iq")
collected_urls["algorithm"].append("BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2rv6pvln")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/84b74c6e-bc52-4083-a601-6a387726c61d")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/e22c302b-e387-4d12-a498-db1c7b787306")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/c76a5b7c-f459-498e-9aa9-6c0366ded313")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/dafaa4dc-9359-4feb-be9b-39c3dcadcdd4")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/7aff87ac-17e1-49a8-b52d-a210c9be9eee")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/d14de446-beea-413f-ad5e-c90dfd0e790c")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/d4713f18-520a-459e-80a6-0acd70d0710f")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/dfbcb740-26ca-4bbf-9065-ad3ecd60c261")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/134273d4-5eb7-4e42-a62b-b3a387a7a2a4")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/f6b33b84-b8c4-42a9-aae4-0d12db4f8b92")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/c8dff5d6-4b22-4e7f-a3b1-5913ae9b0aed")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/6d454981-bf52-4126-b4bc-436e566b76be")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/5d7df542-1567-462f-8885-8c8a0e8a5d19")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/d1d0f883-1b1d-4429-8c3c-02de6c989cdb")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/8ccf19da-a0e6-4267-a53a-276349aea3be")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2c0ea1a2-614b-414a-b6fc-baa9663891da")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3cc3a7f7-8ff0-497c-a6e0-e6c5c5ca9688")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c0de3f56-a236-44a4-a532-04064af81b18")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/f2f1507a-9066-4df1-962e-a3d9bed3015a")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/6313d5cf-9158-4585-9f48-cccbe1ff16f1")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/ba6e7a6d-2548-4d8a-a35f-286782c3658e")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/ab521663-97d4-4b00-a992-b602d495f7d7")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/f6c1e15a-23d4-472d-846f-e766a835d67b")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/482908a6-eb2e-4b3d-8254-0ef0124f488e")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/7fc8e114-0c73-4c47-977a-7f8d337dac1f")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/c2e4d867-a355-4030-b23f-e9845da0c4bf")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/cec9a1e2-a270-4270-861b-88535dcd4103")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/dcc5696c-bc69-41a3-a4f7-2865a16651ef")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/b86f27c4-05d0-43d8-b95e-b81edeb45144")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/364433ae-2974-48c7-a8e5-fc7606dbc819")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/ba1ae355-2945-4c82-a7be-49e421b59574")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c9b94c6c-8a73-4259-848b-61f7b9386309")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/323e3a40-e919-4dd6-9d97-3e6f7a01b118")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/6065ffc6-8cee-45d8-b2e5-a600922a89cc")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/b418e6f1-1fcc-43dc-b5e3-475c17d3da1a")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/9b7add9a-d916-4ac8-9538-09d82ea6a7c4")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/0155fffe-76ae-4580-ba4a-c90d8c83c8d6")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/e7ea6fec-ac94-483f-af5a-c20790569efd")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/af373d51-823c-4ebc-b863-3ffefb6ad5f0")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/82e587c5-afc5-47f3-b71c-734472174a19")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1bca103d-fa9b-405f-a4c3-f4f5aee161c1")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/706ea73c-c148-4f2f-96c6-347e600ae566")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/e51f8235-0ea3-4eb5-a2ff-67d159404783")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/5cd02078-1a5b-4721-9070-c8a5d7bce477")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/40eaf786-7305-46a0-8b4c-2dc608c9cf34")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/4bceaa03-d8e6-4ec5-b417-d1007f4a7504")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/e1f340a7-f659-4143-8c76-22d341532e9c")
collected_urls["algorithm"].append("10% BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/df22f73b-3904-4d3d-be82-8565a94f90a9")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3gmwuspv")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/hfnz06jo")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/22zd4qy5")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/2je1ydbq")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/2cn5kybz")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/4wfevsn1")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/8uc5g9vl")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3q3i7kr4")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/1383sspe")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/ujqk6bcx")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/2har775v")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/1t9zpxwq")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1manw8ou")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/glmwyvtm")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/99lixj21")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/21qd6jdk")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/13i7gvdv")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/lfnzn3ek")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/2iqxrf7v")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/28q8k0is")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2klwm3m9")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/vgj8gxc9")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1zpikd1i")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3mhuu91m")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/o9cy1xot")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/9oorg18b")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/8umnr31k")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/8ay8wua0")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/36r6bciu")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3dhx3yws")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/2xgt4p29")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/2i8f6fsw")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1pocua7w")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3apac4jp")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3axkszn9")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/iyy3p627")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2evz37in")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/rcuf9ji6")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2nguxmuw")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/563x3nqx")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3pp38z95")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c7htx54f")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/35i1e9k3")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/34kpercv")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1y6a1ghl")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1r5ja7w3")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2ksjowc8")
collected_urls["algorithm"].append("TD3+BC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1v789w9r")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/f5447eae-38f5-404e-ab97-979d12a62dba")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/063ec049-6092-46fd-8d06-5c43aa0c8933")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/517996bc-48dd-4cc5-a1a2-b599668dfb03")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/cdb110c8-baed-4b72-9338-e2df069c1999")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/863ba3ad-2e15-4027-a561-50a1ce837a2e")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/a120a194-2a4d-493f-a105-29e81c2167f3")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/db99a51a-20ec-4898-b432-7bed581b11eb")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/ef619bf1-e43f-4ca0-b26a-e44a79c8d6c4")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/d61f15f2-bb63-4b0e-8a3f-0a8397f85c99")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/bc356f6c-ff8a-4fcb-8f7d-eda711bf187f")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/e55c1f59-4a22-4adf-90db-55b761184c31")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/754eb9df-300b-4816-b483-1ecc8630d170")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/fcdf10b7-3f06-4950-89e5-0bb706d32fa2")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3149b249-61b7-42b5-b62c-560263073ceb")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/e3f4068c-2f7a-4d98-8bfe-71e5bcd37f60")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/07bafbb5-cef0-487f-9d18-43f5e6f41e5b")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/bdc16cb0-7ba1-44e5-a634-f7821849e911")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1c63037a-0f9e-4c92-8e30-f868e5899235")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/49ccdf3d-49f8-43f7-ae5e-5f2166928b08")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/86e2bdf2-bfc8-4dd8-b245-06f3c5948525")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/b7865c5a-6382-4dfe-967d-f5f41caef859")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1a9ae20a-0ef3-4517-aa21-0114606e8e44")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/68993e5b-f477-496e-ab8c-da7808851e31")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/d9682650-69b2-4cce-832c-a0a5d63d7b87")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/51b5a164-e6ab-4929-bf76-b786a3e40654")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/abd10b19-e2c5-4e27-99ac-2ca8445acd51")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/5c0c2cb0-2457-40dc-905b-8bf32b8a75fe")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/98977940-fab9-462c-ac70-3fcd10bc55cb")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/a513ea52-a879-47a6-ab4c-ac1a046b5cc2")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/0cffd41b-d983-4b45-93c8-2e22fc5801c0")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c7b8a1c8-170f-4060-860c-62553ff67911")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/7df0497b-d805-47ce-91ba-485d7bff6fb6")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3db49470-beba-49f8-963b-bc7fbe79d107")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/21fea44e-168d-4356-a72c-1ac09a482d05")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/60a8e98b-5933-491e-83c7-f48b777fb52e")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/7eaf035d-9394-4eee-97f0-50347b108b6a")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/76b97aeb-4327-4fb1-bbd4-572f84b9ac6c")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2eaf20df-c7d2-42c7-9d6f-5f29e240b99f")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/fa033830-cec7-4144-894d-741391fdb81d")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/04917eeb-b7a5-4e02-9e89-7eed774cd00b")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/d296d6ef-8a37-4c39-be14-ab54eb85a0ee")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/825a83d5-0ed4-4c97-9c79-13edfa43e6cc")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/277df654-7035-4469-8150-ff3df3f6230e")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/6428588e-c9bc-43ba-a945-285248e0664b")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/0d1ae046-abcb-4da1-b2d3-1360bbd8f54f")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/9eb231d9-6c25-4d42-9564-90164b7e680b")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/f4c212ba-7b8e-428e-9953-71606fd84d67")
collected_urls["algorithm"].append("DT")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3bc164b8-1fc0-4ce5-a32d-701e522ad5b1")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/a7e3d2a0-2dbc-4eba-b28d-8315f992bae3")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/65981364-10fc-47d3-bb35-ccc67254ca23")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/ceb4bd07-50d4-426c-9e2b-a54fc4a1092a")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/a2fe5d76-b680-42b1-aafa-4f7fae8e9575")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/342b9c5e-eb78-45b1-99fc-97654d2d619a")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/eaab4d73-b002-4587-89e9-b101efc5c385")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/f83b4b8c-bddd-469a-acf5-c2c59b80fd3c")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/4c2065f4-e773-4760-a045-18958aff4685")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/eef336bc-42f0-46bc-90df-17d6b5647263")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/16b37de3-9011-4a20-b58a-d1d97946125a")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/81bdccf5-1ce7-4ab5-9228-1193209b9f85")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/700bc2bd-3ae8-4845-a5a7-ea9ce5a5bf68")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/c0015d64-2bce-4bf7-a804-92390d022ec9")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/f7a045fb-89de-4df1-a827-0b0aff6fa803")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/c61cc412-51fa-41ef-be06-5e8eaba5272e")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/e08593b0-edc7-49a7-bf68-e66e613ed20f")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3be8a859-82e5-4cc2-899d-4ff7f88a90ed")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c5dd3800-eed4-4711-8172-0d22bc985ed9")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/ff761882-9f47-4f3b-8cf9-0f5cf0b40339")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/0257eae7-716d-4c68-b8a2-1d99c74d79d0")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/8c18b80d-028d-48dd-a371-b2fab308469a")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/c86ba1cc-8b4c-4dd8-b64d-8f57a8131d95")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/bc5fda0c-2f5c-4391-8bd5-c4f2e15c2e0c")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/c3fdffef-f3cb-4d18-9d94-af4e0651ba21")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/95c7d8e0-f634-403a-8edb-ea00afd5c69c")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/4580d97f-15b0-4d54-887c-91cf0a3368ea")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/ad47291b-1469-48b5-ba20-266a05bc9326")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/16f77985-8033-4953-8066-c33c49141581")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/69bf1797-94b0-43fa-b22c-a6406a93d222")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/dadbb413-ae11-48bb-a4bb-94c8b4c7d53f")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c1db8aa9-9bfc-4687-a8b5-6096c90f6e9b")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/b6ff762e-c0be-4b6d-ac23-8b5ffcb28a56")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/ab688db2-ab1d-4d96-ba40-6186c7ecb16b")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/d0a5c6be-7b64-4ddb-b965-1ae8e0533363")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/9f67f421-c55b-4527-8ea0-8e6579a3bb61")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/ab44a4d1-6aee-420e-b691-307bd083d2ea")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3394eb73-a8b3-463c-9a57-8dd65833ecdd")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/89527361-8f90-47a5-8882-ac3459de0d0a")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/f02528e5-86d6-4242-961f-106cb0e5df14")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/132a99bc-386a-4eb4-a64c-74699d0563b5")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/33ce900d-b858-4bc3-a6dc-71f9615cfad5")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/87addd3a-42bd-45b7-8dcb-a921dfa6dad5")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/bcfb639c-1d44-4228-bbd8-e560b48bb5d6")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/249f88e4-c98f-401f-bb36-4d5f239fff74")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/fc7fa907-ab00-457d-a00d-2bdd65688379")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/20f7258d-0f07-4002-86b2-4c3ec65ee067")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/c3e71147-80a2-4ae8-bb59-9b994daaa516")
collected_urls["algorithm"].append("SAC-N")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/e36a72da-482f-4a70-803f-1a0d7eccb265")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/1m3k2bd1")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3jzf46zg")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/exlzrv4v")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3r2qku3k")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3crj1urn")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/25vxky59")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/258aw9fy")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3oc7jc1q")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/31ak0z9b")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/hjl7pxfa")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/2qq9dfgc")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/c0pdrw6f")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/5d588f87-fe51-4253-b310-a75fbf8d3702")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/10aa52ac-b2f4-43c4-97f1-4bee57fdab24")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3500687d-84c6-4cc6-88a9-ac432fe83f42")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2108ebe3-d55d-418a-9fda-f78a8337909a")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/8853c87c-9bdc-411e-8128-f0976c510485")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/b86adeb5-282b-4f9b-bd4f-361b576c9988")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/6b675ca0-3fed-498a-ae54-e964673158d4")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/48813224-53a2-495e-86a2-d72a5b95ba94")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/996be0e1-ae88-492d-b261-15f034cc6203")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/62bcf801-db79-438e-b0f4-74436f3c67b1")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/ffddfea8-2e9b-493b-88df-04a15f97d7a8")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/b07eb900-8653-4688-a10f-111f3eb3c84a")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/59f743f9-3b3a-4306-83b5-98721508bf2f")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/74a7e942-ca43-44e8-85f7-976fa7dd2edd")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/20425c80-a0f3-4e1a-9991-a85db7012417")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/6fb1e9e2-9485-40c9-ac77-b118cd9cc55b")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/6145c71a-ce9b-4817-bf94-a6eef9b79377")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c7d59200-7e0f-47a4-846a-123fb23d3c30")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/00379327-06d9-4117-9abb-0f4fef0d6f38")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/dc1c3646-d8fd-4671-b43c-b987441f70cf")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/a58fedea-d5fe-4481-bca4-0e44989f049e")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/05dc4e17-4c73-4f71-b5c3-2eb39aae36c8")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/155aa581-5e1f-4d32-acd5-edde7c5e3c6a")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/5e5e6d1a-59c4-4044-9d50-7d1b920bb626")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/ffb22753-338f-4d2a-ba45-aaeba6a5eed3")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/6d1e8c3f-bd50-4e02-8adc-bf7db13d15ad")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/f99181eb-499d-48be-b1e3-5349f8fe3731")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/fd8b7f41-48cc-4578-8fc8-55ec5e5884df")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/a0a92721-04b1-4868-809e-2ce37358516b")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/c484e9cd-ee4d-427a-941d-80926caa3128")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/5790cb46-ea8c-42b6-abe6-a70faa0f4633")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/ed665d8c-1bb5-4858-9136-574bf523b39a")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1e6e9a77-a335-41e0-9e29-6271f5a4fcda")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/d6492463-82f1-4512-99fa-b23073d6b418")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/96027203-781b-46ee-bf59-e565227f2f7b")
collected_urls["algorithm"].append("EDAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/d5f5f415-9d1b-4d35-b4e5-c1cf278af46c")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3me14n0w")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/8671xq2j")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3keq4k8a")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-large-v1")
collected_urls["url"].append("tlab/CORL/runs/3jq85ti0")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/1vvutaak")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/16nzq1ng")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3552gil2")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-medium-v1")
collected_urls["url"].append("tlab/CORL/runs/3l3dpq11")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/3usi5cuh")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/2vvw9y8h")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/2vcog7cq")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("maze2d-umaze-v1")
collected_urls["url"].append("tlab/CORL/runs/qp93j6we")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1n8ttdck")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/1bpgemq2")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/39wb3kat")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/w9i9g39x")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3gfpaz8e")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3aerk47s")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/275nzj65")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/2fxchaks")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/220xo7sy")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/186848oq")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2qcui7s9")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("halfcheetah-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3izk7ats")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3p8nop3c")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2n4njt2r")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/cfgxmidd")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/o3jqikii")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1jg2th4m")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3qqk3v1v")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1og7e8w1")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/1hg2vtf9")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3b6t3c8p")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/i15nczq4")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3v7jt3p7")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("hopper-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2uvghydj")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3v1rznw2")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/2ov8rc9w")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3funjmu4")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-v2")
collected_urls["url"].append("tlab/CORL/runs/3o823qdi")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/21coamdv")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/35cmwtdl")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/3pvuqbr5")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-replay-v2")
collected_urls["url"].append("tlab/CORL/runs/ic2e00s6")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/2utgl834")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3hvawfk9")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/3mo9ld3q")
collected_urls["algorithm"].append("AWAC")
collected_urls["dataset"].append("walker2d-medium-expert-v2")
collected_urls["url"].append("tlab/CORL/runs/1aihv0tw")
def get_urls(sweep_id, algo_name):
    s = sweep_id
    api = wandb.Api(timeout=39)
    sweep = api.sweep(s)
    runs = sweep.runs
    for run in runs:
        if "env" in run.config:
            dataset = run.config["env"]
        elif "env_name" in run.config:
            dataset = run.config["env_name"]
        elif "dataset_name" in run.config:
            dataset = run.config["dataset_name"]
        else:
            continue  # skip runs without a recognizable dataset key
        name = algo_name
        # a "10" in the run name (seed suffix dropped) marks the 10%-dataset runs
        if "10" in "-".join(run.name.split("-")[:-1]):
            name = "10% " + name
        # for the locomotion tasks keep only the medium* datasets
        if "medium" not in dataset:
            if "cheetah" in dataset or "hopper" in dataset or "walker" in dataset:
                continue
        if "v0" not in dataset and "dense" not in dataset:
            print(name, dataset, run.url)
            collected_urls["algorithm"].append(name)
            collected_urls["dataset"].append(dataset)
            collected_urls["url"].append(run.url.replace("https://wandb.ai/", "")) | null |
161,379 | import os
import pickle
import pandas as pd
import wandb
from tqdm import tqdm
api = wandb.Api()  # shared API client used by get_run_scores


def get_run_scores(run_id, is_dt=False):
    run = api.run(run_id)
    score_key = None
    full_scores = []
    regret = None
    max_dt = -1e10
    # find the normalized-score metric; DT logs one metric per target return,
    # so for DT keep the key with the highest target
    for k in run.history().keys():
        if "normalized" in k and "score" in k and "std" not in k:
            if is_dt:
                st = k
                if "eval/" in st:
                    st = st.replace("eval/", "")
                target = float(st.split("_")[0])
                if target > max_dt:
                    max_dt = target
                    score_key = k
            else:
                score_key = k
                break
    for _, row in run.history(keys=[score_key], samples=5000).iterrows():
        full_scores.append(row[score_key])
    # keep only the last logged value of the online regret
    for _, row in run.history(keys=["eval/regret"], samples=5000).iterrows():
        if "eval/regret" in row:
            regret = row["eval/regret"]
    # the first half of the evaluations is offline pretraining,
    # the second half is online tuning
    offline_iters = len(full_scores) // 2
    return full_scores[:offline_iters], full_scores[offline_iters:], regret


def process_runs(df):
    algorithms = df["algorithm"].unique()
    datasets = df["dataset"].unique()
    full_scores = {algo: {ds: [] for ds in datasets} for algo in algorithms}
    for _, row in tqdm(
        df.iterrows(), desc="Runs scores downloading", position=0, leave=True
    ):
        full_scores[row["algorithm"]][row["dataset"]].append(
            get_run_scores(row["url"], row["algorithm"] == "DT")
        )
    return full_scores


# `dataframe` is the DataFrame built from the collected run urls
full_scores = process_runs(dataframe) | null |
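A hedged caching sketch for the snippet above (the "bin/full_scores.pickle" path is an assumption; it would use the otherwise-unused `os`/`pickle` imports so re-runs load from disk instead of repeating the slow wandb downloads):
import os
import pickle

if os.path.exists("bin/full_scores.pickle"):
    with open("bin/full_scores.pickle", "rb") as f:
        full_scores = pickle.load(f)
else:
    os.makedirs("bin", exist_ok=True)
    full_scores = process_runs(dataframe)
    with open("bin/full_scores.pickle", "wb") as f:
        pickle.dump(full_scores, f)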
161,380 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
# `full_scores` maps algo -> dataset -> list of (offline curve, online curve,
# regret) tuples from get_run_scores; split them into per-phase dicts (the
# empty full_offline_scores / full_online_scores / regrets / regrets_std
# dicts are assumed to be initialized earlier)
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
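A hedged initialization sketch for names the plotting loop below assumes but this snippet never defines (`algorithms`, `datasets`, `linestyles`; `avg_online_scores`/`avg_online_stds` come from `get_average_scores` further down):
algorithms = sorted(full_offline_scores)
datasets = sorted({ds for algo in full_offline_scores for ds in full_offline_scores[algo]})
# (name, style) pairs; the loop below uses index [1]
linestyles = [("solid", "-"), ("dashed", "--"), ("dashdot", "-."), ("dotted", ":")]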
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_online_scores[algo][data] is not None:
to_draw = avg_online_scores[algo][data]
std_draw = avg_online_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total tuning steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/tuning_{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def get_average_scores(scores):
    avg_scores = {algo: {ds: None for ds in scores[algo]} for algo in scores}
    stds = {algo: {ds: None for ds in scores[algo]} for algo in scores}
    for algo in scores:
        for data in scores[algo]:
            sc = scores[algo][data]
            if len(sc) > 0:
                # truncate all seeds to the shortest curve before averaging
                ml = min(map(len, sc))
                sc = [s[:ml] for s in sc]
                scores[algo][data] = sc
                avg_scores[algo][data] = np.mean(sc, axis=0)
                stds[algo][data] = np.std(sc, axis=0)
    return avg_scores, stds | null |
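A hedged usage sketch (applies the averaging above to the per-phase score dicts built earlier):
avg_offline_scores, avg_offline_stds = get_average_scores(full_offline_scores)
avg_online_scores, avg_online_stds = get_average_scores(full_online_scores)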
161,381 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_online_scores[algo][data] is not None:
to_draw = avg_online_scores[algo][data]
std_draw = avg_online_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total tuning steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/tuning_{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def get_max_scores(scores):
    avg_scores = {algo: {ds: None for ds in scores[algo]} for algo in scores}
    stds = {algo: {ds: None for ds in scores[algo]} for algo in scores}
    for algo in scores:
        for data in scores[algo]:
            sc = scores[algo][data]
            if len(sc) > 0:
                # truncate all seeds to the shortest curve, then take each
                # seed's best score over training and aggregate across seeds
                ml = min(map(len, sc))
                sc = [s[:ml] for s in sc]
                scores[algo][data] = sc
                max_scores = np.max(sc, axis=1)
                avg_scores[algo][data] = np.mean(max_scores)
                stds[algo][data] = np.std(max_scores)
    return avg_scores, stds | null |
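A hedged usage sketch (mean and std across seeds of each seed's best online score):
max_online_scores, max_online_stds = get_max_scores(full_online_scores)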
161,382 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
def get_last_scores(avg_scores, avg_stds):
last_scores = {
algo: {
ds: avg_scores[algo][ds][-1] if avg_scores[algo][ds] is not None else None
for ds in avg_scores[algo]
}
for algo in avg_scores
}
stds = {
algo: {
ds: avg_stds[algo][ds][-1] if avg_stds[algo][ds] is not None else None
for ds in avg_scores[algo]
}
for algo in avg_scores
}
return last_scores, stds | null |
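A hedged usage sketch (final point of each averaged curve, i.e. the score after all tuning steps):
last_scores, last_stds = get_last_scores(avg_online_scores, avg_online_stds)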
161,383 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_online_scores[algo][data] is not None:
to_draw = avg_online_scores[algo][data]
std_draw = avg_online_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total tuning steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/tuning_{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def add_domains_avg(scores):
for algo in scores:
antmaze = [
scores[algo][data]
for data in [
"antmaze-umaze-v2",
"antmaze-umaze-diverse-v2",
"antmaze-medium-play-v2",
"antmaze-medium-diverse-v2",
"antmaze-large-play-v2",
"antmaze-large-diverse-v2",
]
]
adroit = [
scores[algo][data]
for data in [
"pen-cloned-v1",
"door-cloned-v1",
"hammer-cloned-v1",
"relocate-cloned-v1",
]
]
scores[algo]["antmaze avg"] = np.mean(antmaze)
scores[algo]["adroit avg"] = np.mean(adroit)
scores[algo]["total avg"] = np.mean(np.hstack((antmaze, adroit))) | null |
161,384 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
algorithms = ["AWAC", "CQL", "IQL", "SPOT", "Cal-QL"]
ordered_datasets = [
"antmaze-umaze-v2",
"antmaze-umaze-diverse-v2",
"antmaze-medium-play-v2",
"antmaze-medium-diverse-v2",
"antmaze-large-play-v2",
"antmaze-large-diverse-v2",
"antmaze avg",
"pen-cloned-v1",
"door-cloned-v1",
"hammer-cloned-v1",
"relocate-cloned-v1",
"adroit avg",
"total avg",
]
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_online_scores[algo][data] is not None:
to_draw = avg_online_scores[algo][data]
std_draw = avg_online_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total tuning steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/tuning_{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
algorithms = list(flat)  # `flat` is not defined in this snippet; it appears to come from an rliable aggregation step
def get_table(
scores,
stds,
pm="$\\pm$",
delim=" & ",
row_delim="\\midrule",
row_end=" \\\\",
row_begin="",
scores2=None,
stds2=None,
scores_delim=" $\\to$ ",
):
rows = [row_begin + delim.join(["Task Name"] + algorithms) + row_end]
prev_env = "halfcheetah"
for data in ordered_datasets:
env = data.split("-")[0]
if env != prev_env:
if len(row_delim) > 0:
rows.append(row_delim)
prev_env = env
row = [data]
for algo in algorithms:
if data in stds[algo]:
row.append(
f"{scores[algo][data]:.2f} {pm} {stds[algo][data]:.2f}"
+ (
""
if scores2 is None
else f"{scores_delim} {scores2[algo][data]:.2f} {pm} {stds2[algo][data]:.2f}" # noqa
)
)
else:
row.append(
f"{scores[algo][data]:.2f}"
+ (
""
if scores2 is None
else f"{scores_delim} {scores2[algo][data]:.2f}"
)
)
rows.append(row_begin + delim.join(row) + row_end)
return "\n".join(rows) | null |
161,385 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils
for algo in full_offline_scores:
for data in full_offline_scores[algo]:
full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])
algorithms = ["AWAC", "CQL", "IQL", "SPOT", "Cal-QL"]
plt.rcParams["figure.figsize"] = (15, 8)
plt.rcParams["figure.dpi"] = 300
sns.set(style="ticks", font_scale=1.5)
for data in datasets:
min_score = 1e6
max_score = -1e6
for i, algo in enumerate(algorithms):
if avg_online_scores[algo][data] is not None:
to_draw = avg_online_scores[algo][data]
std_draw = avg_online_stds[algo][data]
if len(to_draw) == 600 or len(to_draw) == 601:
to_draw = to_draw[::3]
std_draw = std_draw[::3]
if len(to_draw) == 1000:
to_draw = to_draw[::5]
std_draw = std_draw[::5]
if len(to_draw) == 3000:
to_draw = to_draw[::15]
std_draw = std_draw[::15]
steps = np.linspace(0, 1, len(to_draw))
min_score = min(min_score, np.min(to_draw))
max_score = max(max_score, np.max(to_draw))
plt.plot(
steps, to_draw, label=algo, linestyle=linestyles[i % len(linestyles)][1]
)
plt.fill_between(steps, to_draw - std_draw, to_draw + std_draw, alpha=0.1)
plt.title(data)
plt.xlabel("Fraction of total tuning steps")
plt.ylabel("Normalized score")
plt.ylim([min_score - 3, max_score + 3])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.grid()
plt.savefig(f"out/tuning_{data}.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
def convert_dataset_name(name):
    # NOTE: the original body is not present in this snippet; this is a
    # plausible reconstruction that strips the version suffix for display,
    # e.g. "antmaze-umaze-v2" -> "antmaze-umaze"
    return "-".join(name.split("-")[:-1])
plt.rcParams["figure.figsize"] = (10, 6)
plt.rcParams["figure.dpi"] = 300
sns.set(style="ticks", font_scale=0.5)
plt.rcParams.update(
{
# "font.family": "serif",
"font.serif": "Times New Roman"
}
)
algorithms = list(flat)  # `flat` is not defined in this snippet
# the rliable performance-profile / improvement-probability plotting code is
# elided here; only the figure save calls remain
plt.savefig("out/perf_profiles_online.pdf", dpi=300, bbox_inches="tight")
sns.set(style="ticks", font_scale=0.5)
plt.savefig("out/improvement_probability_online.pdf", dpi=300, bbox_inches="tight")
def plot_bars(scores, save_name):
agg_l = []
for env in ["antmaze", "pen", "door", "hammer", "relocate"]:
if env in ["halfcheetah", "hopper", "walker2d"]:
datas = ["medium-v2", "medium-expert-v2", "medium-replay-v2"]
elif "maze2d" in env:
datas = ["umaze-v1", "medium-v1", "large-v1"]
elif "antmaze" in env:
datas = [
"umaze-v2",
"umaze-diverse-v2",
"medium-play-v2",
"medium-diverse-v2",
"large-play-v2",
"large-diverse-v2",
]
else:
datas = ["cloned-v1"]
for data in datas:
line = convert_dataset_name(f"{env}-{data}")
for algo in algorithms:
agg_l.append([algo, line, scores[algo][f"{env}-{data}"]])
df_agg = pd.DataFrame(agg_l, columns=["Algorithm", "Dataset", "Normalized Score"])
sns.set(style="ticks", font_scale=2)
plt.rcParams["figure.figsize"] = (20, 10) # (10, 6)
b = sns.barplot(
data=df_agg[df_agg.Dataset.apply(lambda x: "ant" in x)],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.grid()
plt.savefig(f"out/bars_{save_name}_ant.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close()
b = sns.barplot(
data=df_agg[
df_agg.Dataset.apply(
lambda x: "pen" in x or "hammer" in x or "door" in x or "relocate" in x
)
],
x="Dataset",
y="Normalized Score",
hue="Algorithm",
)
plt.grid()
# plt.tight_layout()
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(fontsize=10)
plt.xticks(rotation=45)
sns.move_legend(b, "upper left", bbox_to_anchor=(1, 1))
plt.savefig(f"out/bars_{save_name}_adroit.pdf", dpi=300, bbox_inches="tight")
# plt.show()
plt.close() | null |
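A hedged usage sketch (writes out/bars_<save_name>_ant.pdf and out/bars_<save_name>_adroit.pdf; the "out" directory is assumed to exist):
plot_bars(last_scores, "online")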