| Instruction (string, lengths 362–7.83k) | output_code (string, lengths 1–945) |
|---|---|
Given the code snippet: <|code_start|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EC2 = get_driver(Provider.EC2_US_EAST)
Slicehost = get_driver(Provider.SLICEHOST)
Rackspace = get_driver(Provider.RACKSPACE)
drivers = [ EC2('access key id', 'secret key'),
Slicehost('api key'),
Rackspace('username', 'api key') ]
nodes = [ driver.list_nodes() for driver in drivers ]
print nodes
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Slicehost, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]
# grab the node named "test"
<|code_end|>
, generate the next line using the imports in this file:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
and context (functions, classes, or occasionally code) from other files:
# Path: libcloud/compute/types.py
# class Provider(object):
# """
# Defines for each of the supported providers
#
# @cvar DUMMY: Example provider
# @cvar EC2_US_EAST: Amazon AWS US N. Virgina
# @cvar EC2_US_WEST: Amazon AWS US N. California
# @cvar EC2_EU_WEST: Amazon AWS EU Ireland
# @cvar RACKSPACE: Rackspace Cloud Servers
# @cvar RACKSPACE_UK: Rackspace UK Cloud Servers
# @cvar SLICEHOST: Slicehost.com
# @cvar GOGRID: GoGrid
# @cvar VPSNET: VPS.net
# @cvar LINODE: Linode.com
# @cvar VCLOUD: vmware vCloud
# @cvar RIMUHOSTING: RimuHosting.com
# @cvar ECP: Enomaly
# @cvar IBM: IBM Developer Cloud
# @cvar OPENNEBULA: OpenNebula.org
# @cvar DREAMHOST: DreamHost Private Server
# @cvar CLOUDSIGMA: CloudSigma
# """
# DUMMY = 0
# EC2 = 1 # deprecated name
# EC2_US_EAST = 1
# EC2_EU = 2 # deprecated name
# EC2_EU_WEST = 2
# RACKSPACE = 3
# SLICEHOST = 4
# GOGRID = 5
# VPSNET = 6
# LINODE = 7
# VCLOUD = 8
# RIMUHOSTING = 9
# EC2_US_WEST = 10
# VOXEL = 11
# SOFTLAYER = 12
# EUCALYPTUS = 13
# ECP = 14
# IBM = 15
# OPENNEBULA = 16
# DREAMHOST = 17
# ELASTICHOSTS = 18
# ELASTICHOSTS_UK1 = 19
# ELASTICHOSTS_UK2 = 20
# ELASTICHOSTS_US1 = 21
# EC2_AP_SOUTHEAST = 22
# RACKSPACE_UK = 23
# BRIGHTBOX = 24
# CLOUDSIGMA = 25
# EC2_AP_NORTHEAST = 26
#
# Path: libcloud/compute/providers.py
# def get_driver(provider):
# return get_provider_driver(DRIVERS, provider)
. Output only the next line. | node = filter(lambda x: x.name == 'test', nodes)[0] |
Given snippet: <|code_start|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EC2 = get_driver(Provider.EC2_US_EAST)
Slicehost = get_driver(Provider.SLICEHOST)
Rackspace = get_driver(Provider.RACKSPACE)
drivers = [ EC2('access key id', 'secret key'),
Slicehost('api key'),
Rackspace('username', 'api key') ]
nodes = [ driver.list_nodes() for driver in drivers ]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
and context:
# Path: libcloud/compute/types.py
# class Provider(object):
# """
# Defines for each of the supported providers
#
# @cvar DUMMY: Example provider
# @cvar EC2_US_EAST: Amazon AWS US N. Virgina
# @cvar EC2_US_WEST: Amazon AWS US N. California
# @cvar EC2_EU_WEST: Amazon AWS EU Ireland
# @cvar RACKSPACE: Rackspace Cloud Servers
# @cvar RACKSPACE_UK: Rackspace UK Cloud Servers
# @cvar SLICEHOST: Slicehost.com
# @cvar GOGRID: GoGrid
# @cvar VPSNET: VPS.net
# @cvar LINODE: Linode.com
# @cvar VCLOUD: vmware vCloud
# @cvar RIMUHOSTING: RimuHosting.com
# @cvar ECP: Enomaly
# @cvar IBM: IBM Developer Cloud
# @cvar OPENNEBULA: OpenNebula.org
# @cvar DREAMHOST: DreamHost Private Server
# @cvar CLOUDSIGMA: CloudSigma
# """
# DUMMY = 0
# EC2 = 1 # deprecated name
# EC2_US_EAST = 1
# EC2_EU = 2 # deprecated name
# EC2_EU_WEST = 2
# RACKSPACE = 3
# SLICEHOST = 4
# GOGRID = 5
# VPSNET = 6
# LINODE = 7
# VCLOUD = 8
# RIMUHOSTING = 9
# EC2_US_WEST = 10
# VOXEL = 11
# SOFTLAYER = 12
# EUCALYPTUS = 13
# ECP = 14
# IBM = 15
# OPENNEBULA = 16
# DREAMHOST = 17
# ELASTICHOSTS = 18
# ELASTICHOSTS_UK1 = 19
# ELASTICHOSTS_UK2 = 20
# ELASTICHOSTS_US1 = 21
# EC2_AP_SOUTHEAST = 22
# RACKSPACE_UK = 23
# BRIGHTBOX = 24
# CLOUDSIGMA = 25
# EC2_AP_NORTHEAST = 26
#
# Path: libcloud/compute/providers.py
# def get_driver(provider):
# return get_provider_driver(DRIVERS, provider)
which might include code, classes, or functions. Output only the next line. | print nodes |
Predict the next line after this snippet: <|code_start|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FileFixturesTests(unittest.TestCase):
def test_success(self):
f = ComputeFileFixtures('meta')
self.assertEqual("Hello, World!", f.load('helloworld.txt'))
def test_failure(self):
f = ComputeFileFixtures('meta')
self.assertRaises(IOError, f.load, 'nil')
if __name__ == '__main__':
<|code_end|>
using the current file's imports:
import sys
import unittest
from test.file_fixtures import ComputeFileFixtures
and any relevant context from other files:
# Path: test/file_fixtures.py
# class ComputeFileFixtures(FileFixtures):
# def __init__(self, sub_dir=''):
# super(ComputeFileFixtures, self).__init__(fixtures_type='compute',
# sub_dir=sub_dir)
. Output only the next line. | sys.exit(unittest.main()) |
Using the snippet: <|code_start|>#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example provides both a running script (invoke from command line)
# and an importable module one can play with in Interactive Mode.
#
# See docstrings for usage examples.
#
try:
except:
pass
<|code_end|>
, determine the next line of code. You have imports:
import secrets
import sys; sys.path.append('..')
from libcloud.compute.types import Provider
from libcloud.providers import get_driver
from pprint import pprint
and context (class names, function names, or code) available:
# Path: libcloud/compute/types.py
# class Provider(object):
# """
# Defines for each of the supported providers
#
# @cvar DUMMY: Example provider
# @cvar EC2_US_EAST: Amazon AWS US N. Virgina
# @cvar EC2_US_WEST: Amazon AWS US N. California
# @cvar EC2_EU_WEST: Amazon AWS EU Ireland
# @cvar RACKSPACE: Rackspace Cloud Servers
# @cvar RACKSPACE_UK: Rackspace UK Cloud Servers
# @cvar SLICEHOST: Slicehost.com
# @cvar GOGRID: GoGrid
# @cvar VPSNET: VPS.net
# @cvar LINODE: Linode.com
# @cvar VCLOUD: vmware vCloud
# @cvar RIMUHOSTING: RimuHosting.com
# @cvar ECP: Enomaly
# @cvar IBM: IBM Developer Cloud
# @cvar OPENNEBULA: OpenNebula.org
# @cvar DREAMHOST: DreamHost Private Server
# @cvar CLOUDSIGMA: CloudSigma
# """
# DUMMY = 0
# EC2 = 1 # deprecated name
# EC2_US_EAST = 1
# EC2_EU = 2 # deprecated name
# EC2_EU_WEST = 2
# RACKSPACE = 3
# SLICEHOST = 4
# GOGRID = 5
# VPSNET = 6
# LINODE = 7
# VCLOUD = 8
# RIMUHOSTING = 9
# EC2_US_WEST = 10
# VOXEL = 11
# SOFTLAYER = 12
# EUCALYPTUS = 13
# ECP = 14
# IBM = 15
# OPENNEBULA = 16
# DREAMHOST = 17
# ELASTICHOSTS = 18
# ELASTICHOSTS_UK1 = 19
# ELASTICHOSTS_UK2 = 20
# ELASTICHOSTS_US1 = 21
# EC2_AP_SOUTHEAST = 22
# RACKSPACE_UK = 23
# BRIGHTBOX = 24
# CLOUDSIGMA = 25
# EC2_AP_NORTHEAST = 26
#
# Path: libcloud/providers.py
. Output only the next line. | def main(argv): |
Based on the snippet: <|code_start|>
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
click = sys.modules[__name__.rsplit('.', 1)[0]]
def _find_unicode_literals_frame():
if not hasattr(sys, '_getframe'): # not all Python implementations have it
return 0
frm = sys._getframe(1)
idx = 1
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import codecs
import __future__
import locale
import subprocess
from ._compat import PY2
from warnings import warn
and context (classes, functions, sometimes code) from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
. Output only the next line. | while frm is not None: |
Given the code snippet: <|code_start|>"""Test compute_inv_problem."""
matplotlib.use('Agg') # for testing don't use X server
data_path = mne.datasets.testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
def test_compute_noise_cov():
"""Test compute noise covariance data from a continuous segment
of raw data."""
# read raw data
raw = mne.io.read_raw_fif(raw_fname)
# save short segment of raw data
segment_raw_fname = raw_fname.replace('raw.fif', '0_10s_raw.fif')
raw.save(segment_raw_fname, tmin=0, tmax=10, overwrite=True)
noise_cov_fpath = compute_noise_cov('', segment_raw_fname)
<|code_end|>
, generate the next line using the imports in this file:
import mne
import numpy as np
import os.path as op
import matplotlib
from ephypype.compute_inv_problem import compute_noise_cov
from ephypype.compute_inv_problem import compute_cov_identity
and context (functions, classes, or occasionally code) from other files:
# Path: ephypype/compute_inv_problem.py
# def compute_noise_cov(fname_template, raw_filename):
# """
# Compute noise covariance data from a continuous segment of raw data.
# Employ empty room data (collected without the subject) to calculate
# the full noise covariance matrix.
# This is recommended for analyzing ongoing spontaneous activity.
#
# Inputs
# cov_fname : str
# noise covariance file name template
# raw_filename : str
# raw filename
#
# Output
# cov_fname : str
# noise covariance file name in which is saved the noise covariance
# matrix
# """
# # Check if cov matrix exists
# cov_fname = _get_cov_fname(fname_template)
#
# if not op.isfile(cov_fname):
# er_raw, cov_fname = _get_er_data(fname_template)
#
# if not op.isfile(cov_fname) and er_raw:
# reject = _create_reject_dict(er_raw.info)
# picks = pick_types(er_raw.info, meg=True,
# ref_meg=False, exclude='bads')
#
# noise_cov = compute_raw_covariance(er_raw, picks=picks,
# reject=reject)
#
# write_cov(cov_fname, noise_cov)
# elif op.isfile(cov_fname):
# print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
# elif not er_raw:
# cov_fname = compute_cov_identity(raw_filename)
#
# else:
# print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
#
# return cov_fname
#
# Path: ephypype/compute_inv_problem.py
# def compute_cov_identity(raw_filename):
# "Compute Identity Noise Covariance matrix."
# raw = read_raw_fif(raw_filename)
#
# data_path, basename, ext = split_f(raw_filename)
# cov_fname = op.join(data_path, 'identity_noise-cov.fif')
#
# if not op.isfile(cov_fname):
# picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
#
# ch_names = [raw.info['ch_names'][k] for k in picks]
# bads = [b for b in raw.info['bads'] if b in ch_names]
# noise_cov = mne.Covariance(np.identity(len(picks)), ch_names, bads,
# raw.info['projs'], nfree=0)
#
# write_cov(cov_fname, noise_cov)
#
# return cov_fname
. Output only the next line. | noise_cov = mne.read_cov(noise_cov_fpath) |
Based on the snippet: <|code_start|>"""Test compute_inv_problem."""
matplotlib.use('Agg') # for testing don't use X server
data_path = mne.datasets.testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
def test_compute_noise_cov():
"""Test compute noise covariance data from a continuous segment
of raw data."""
# read raw data
raw = mne.io.read_raw_fif(raw_fname)
# save short segment of raw data
segment_raw_fname = raw_fname.replace('raw.fif', '0_10s_raw.fif')
raw.save(segment_raw_fname, tmin=0, tmax=10, overwrite=True)
noise_cov_fpath = compute_noise_cov('', segment_raw_fname)
noise_cov = mne.read_cov(noise_cov_fpath)
picks = mne.pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
<|code_end|>
, predict the immediate next line with the help of imports:
import mne
import numpy as np
import os.path as op
import matplotlib
from ephypype.compute_inv_problem import compute_noise_cov
from ephypype.compute_inv_problem import compute_cov_identity
and context (classes, functions, sometimes code) from other files:
# Path: ephypype/compute_inv_problem.py
# def compute_noise_cov(fname_template, raw_filename):
# """
# Compute noise covariance data from a continuous segment of raw data.
# Employ empty room data (collected without the subject) to calculate
# the full noise covariance matrix.
# This is recommended for analyzing ongoing spontaneous activity.
#
# Inputs
# cov_fname : str
# noise covariance file name template
# raw_filename : str
# raw filename
#
# Output
# cov_fname : str
# noise covariance file name in which is saved the noise covariance
# matrix
# """
# # Check if cov matrix exists
# cov_fname = _get_cov_fname(fname_template)
#
# if not op.isfile(cov_fname):
# er_raw, cov_fname = _get_er_data(fname_template)
#
# if not op.isfile(cov_fname) and er_raw:
# reject = _create_reject_dict(er_raw.info)
# picks = pick_types(er_raw.info, meg=True,
# ref_meg=False, exclude='bads')
#
# noise_cov = compute_raw_covariance(er_raw, picks=picks,
# reject=reject)
#
# write_cov(cov_fname, noise_cov)
# elif op.isfile(cov_fname):
# print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
# elif not er_raw:
# cov_fname = compute_cov_identity(raw_filename)
#
# else:
# print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
#
# return cov_fname
#
# Path: ephypype/compute_inv_problem.py
# def compute_cov_identity(raw_filename):
# "Compute Identity Noise Covariance matrix."
# raw = read_raw_fif(raw_filename)
#
# data_path, basename, ext = split_f(raw_filename)
# cov_fname = op.join(data_path, 'identity_noise-cov.fif')
#
# if not op.isfile(cov_fname):
# picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
#
# ch_names = [raw.info['ch_names'][k] for k in picks]
# bads = [b for b in raw.info['bads'] if b in ch_names]
# noise_cov = mne.Covariance(np.identity(len(picks)), ch_names, bads,
# raw.info['projs'], nfree=0)
#
# write_cov(cov_fname, noise_cov)
#
# return cov_fname
. Output only the next line. | assert len(picks) == noise_cov['dim'] |
Based on the snippet: <|code_start|> buffer = get_buffer(b, writable=True)
code_units_to_be_read = bytes_to_be_read // 2
code_units_read = c_ulong()
rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
byref(code_units_read), None)
if GetLastError() == ERROR_OPERATION_ABORTED:
# wait for KeyboardInterrupt
time.sleep(0.1)
if not rv:
raise OSError('Windows error: %s' % GetLastError())
if buffer[0] == EOF:
return 0
return 2 * code_units_read.value
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
def writable(self):
return True
@staticmethod
def _get_error_message(errno):
if errno == ERROR_SUCCESS:
return 'ERROR_SUCCESS'
elif errno == ERROR_NOT_ENOUGH_MEMORY:
return 'ERROR_NOT_ENOUGH_MEMORY'
return 'Windows error %s' % errno
<|code_end|>
, predict the immediate next line with the help of imports:
import io
import os
import sys
import zlib
import time
import ctypes
import msvcrt
from ._compat import _NonClosingTextIOWrapper, text_type, PY2
from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
from ctypes import pythonapi
from ctypes.wintypes import LPWSTR, LPCWSTR
and context (classes, functions, sometimes code) from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | def write(self, b): |
Continue the code snippet: <|code_start|> def _get_error_message(errno):
if errno == ERROR_SUCCESS:
return 'ERROR_SUCCESS'
elif errno == ERROR_NOT_ENOUGH_MEMORY:
return 'ERROR_NOT_ENOUGH_MEMORY'
return 'Windows error %s' % errno
def write(self, b):
bytes_to_be_written = len(b)
buf = get_buffer(b)
code_units_to_be_written = min(bytes_to_be_written,
MAX_BYTES_WRITTEN) // 2
code_units_written = c_ulong()
WriteConsoleW(self.handle, buf, code_units_to_be_written,
byref(code_units_written), None)
bytes_written = 2 * code_units_written.value
if bytes_written == 0 and bytes_to_be_written > 0:
raise OSError(self._get_error_message(GetLastError()))
return bytes_written
class ConsoleStream(object):
def __init__(self, text_stream, byte_stream):
self._text_stream = text_stream
self.buffer = byte_stream
@property
<|code_end|>
. Use current file imports:
import io
import os
import sys
import zlib
import time
import ctypes
import msvcrt
from ._compat import _NonClosingTextIOWrapper, text_type, PY2
from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
from ctypes import pythonapi
from ctypes.wintypes import LPWSTR, LPCWSTR
and context (classes, functions, or code) from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | def name(self): |
Given snippet: <|code_start|> return ConsoleStream(text_stream, buffer_stream)
def _get_text_stdout(buffer_stream):
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
'utf-16-le', 'strict', line_buffering=True)
return ConsoleStream(text_stream, buffer_stream)
def _get_text_stderr(buffer_stream):
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
'utf-16-le', 'strict', line_buffering=True)
return ConsoleStream(text_stream, buffer_stream)
if PY2:
def _hash_py_argv():
return zlib.crc32('\x00'.join(sys.argv[1:]))
_initial_argv_hash = _hash_py_argv()
def _get_windows_argv():
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [argv_unicode[i] for i in range(0, argc.value)]
if not hasattr(sys, 'frozen'):
argv = argv[1:]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import io
import os
import sys
import zlib
import time
import ctypes
import msvcrt
from ._compat import _NonClosingTextIOWrapper, text_type, PY2
from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
from ctypes import pythonapi
from ctypes.wintypes import LPWSTR, LPCWSTR
and context:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
which might include code, classes, or functions. Output only the next line. | while len(argv) > 0: |
Continue the code snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
<|code_end|>
. Use current file imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and context (classes, functions, or code) from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | 'blue': 34, |
Predict the next line for this snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
'red': 31,
'green': 32,
<|code_end|>
with the help of current file imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and context from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
, which may contain function names, class names, or code. Output only the next line. | 'yellow': 33, |
Predict the next line after this snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
<|code_end|>
using the current file's imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and any relevant context from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | 'red': 31, |
Predict the next line after this snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'reset': 39,
'bright_black': 90,
<|code_end|>
using the current file's imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and any relevant context from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | 'bright_red': 91, |
Given snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'reset': 39,
'bright_black': 90,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and context:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
which might include code, classes, or functions. Output only the next line. | 'bright_red': 91, |
Using the snippet: <|code_start|>
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input
_ansi_colors = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'reset': 39,
'bright_black': 90,
'bright_red': 91,
'bright_green': 92,
'bright_yellow': 93,
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and context (class names, function names, or code) available:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | 'bright_blue': 94, |
Predict the next line after this snippet: <|code_start|> 'magenta': 35,
'cyan': 36,
'white': 37,
'reset': 39,
'bright_black': 90,
'bright_red': 91,
'bright_green': 92,
'bright_yellow': 93,
'bright_blue': 94,
'bright_magenta': 95,
'bright_cyan': 96,
'bright_white': 97,
}
_ansi_reset_all = '\033[0m'
def hidden_prompt_func(prompt):
return getpass.getpass(prompt)
def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None):
prompt = text
if type is not None and show_choices and isinstance(type, Choice):
prompt += ' (' + ", ".join(map(str, type.choices)) + ')'
if default is not None and show_default:
prompt = '%s [%s]' % (prompt, default)
return prompt + suffix
def prompt(text, default=None, hide_input=False, confirmation_prompt=False,
<|code_end|>
using the current file's imports:
import os
import sys
import struct
import inspect
import itertools
import getpass
import shutil
import fcntl
import termios
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
from ._termui_impl import pager
from ._termui_impl import ProgressBar
from ._termui_impl import Editor
from ._termui_impl import open_url
from ._termui_impl import getchar as f
from ._termui_impl import raw_terminal as f
and any relevant context from other files:
# Path: ephypype/externals/click/_compat.py
# PY2 = sys.version_info[0] == 2
# CYGWIN = sys.platform.startswith('cygwin')
# APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
# 'Development/' in os.environ['SERVER_SOFTWARE'])
# WIN = sys.platform.startswith('win') and not APP_ENGINE
# DEFAULT_COLUMNS = 80
# DEFAULT_COLUMNS = 79
# def get_filesystem_encoding():
# def _make_text_stream(stream, encoding, errors,
# force_readable=False, force_writable=False):
# def is_ascii_encoding(encoding):
# def get_best_encoding(stream):
# def __init__(self, stream, encoding, errors,
# force_readable=False, force_writable=False, **extra):
# def write(self, x):
# def writelines(self, lines):
# def __del__(self):
# def isatty(self):
# def __init__(self, stream, force_readable=False, force_writable=False):
# def __getattr__(self, name):
# def read1(self, size):
# def readable(self):
# def writable(self):
# def seekable(self):
# def is_bytes(x):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def set_binary_mode(f):
# def isidentifier(x):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def is_bytes(x):
# def _is_binary_reader(stream, default=False):
# def _is_binary_writer(stream, default=False):
# def _find_binary_reader(stream):
# def _find_binary_writer(stream):
# def _stream_is_misconfigured(stream):
# def _is_compatible_text_stream(stream, encoding, errors):
# def _force_correct_text_reader(text_reader, encoding, errors,
# force_readable=False):
# def _force_correct_text_writer(text_writer, encoding, errors,
# force_writable=False):
# def get_binary_stdin():
# def get_binary_stdout():
# def get_binary_stderr():
# def get_text_stdin(encoding=None, errors=None):
# def get_text_stdout(encoding=None, errors=None):
# def get_text_stderr(encoding=None, errors=None):
# def filename_to_ui(value):
# def get_streerror(e, default=None):
# def open_stream(filename, mode='r', encoding=None, errors='strict',
# atomic=False):
# def __init__(self, f, tmp_filename, real_filename):
# def name(self):
# def close(self, delete=False):
# def __getattr__(self, name):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, tb):
# def __repr__(self):
# def strip_ansi(value):
# def should_strip_ansi(stream=None, color=None):
# def _get_argv_encoding():
# def raw_input(prompt=''):
# def auto_wrap_for_ansi(stream, color=None):
# def _safe_write(s):
# def get_winterm_size():
# def _get_argv_encoding():
# def term_len(x):
# def isatty(stream):
# def _make_cached_stream_func(src_func, wrapper_func):
# def func():
# class _NonClosingTextIOWrapper(io.TextIOWrapper):
# class _FixupStream(object):
# class _AtomicFile(object):
. Output only the next line. | type=None, value_proc=None, prompt_suffix=': ', show_default=True, |
Predict the next line for this snippet: <|code_start|> def __init__(self, http_client: HTTPClient = None) -> None:
if http_client is None:
self._client = HTTPClient()
else:
self._client = http_client
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
_validate_get_image = Query.has("url").as_(str)
@get.register(Image)
def get_image(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> Image:
<|code_end|>
with the help of current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from io import BytesIO
from datapipelines import DataSource, PipelineContext, Query, NotFoundError
from PIL import Image as ImageLoader
from PIL.Image import Image
from .common import HTTPClient
and context from other files:
# Path: cassiopeia/datastores/common.py
# class HTTPClient(object):
# @staticmethod
# def _execute(curl: Curl, close_connection: bool) -> int:
# curl.perform()
# status_code = curl.getinfo(curl.HTTP_CODE)
# if close_connection:
# curl.close()
# return status_code
#
# @staticmethod
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# if not headers:
# request_headers = ["Accept-Encoding: gzip"]
# else:
# request_headers = [
# "{header}: {value}".format(header=key, value=value)
# for key, value in headers.items()
# ]
# if "Accept-Encoding" not in headers:
# request_headers.append("Accept-Encoding: gzip")
#
# response_headers = {}
#
# def get_response_headers(header_line: bytes) -> None:
# header_line = header_line.decode("ISO-8859-1")
#
# if ":" not in header_line:
# return
#
# name, value = header_line.split(":", 1)
# response_headers[name.strip()] = value.strip()
#
# buffer = BytesIO()
#
# curl = connection if connection is not None else Curl()
#
# curl.setopt(curl.URL, url)
# curl.setopt(curl.WRITEDATA, buffer)
# curl.setopt(curl.HEADERFUNCTION, get_response_headers)
# curl.setopt(curl.HTTPHEADER, request_headers)
# if certifi:
# curl.setopt(curl.CAINFO, certifi.where())
#
# if _print_calls:
# _url = url
# if isinstance(_url, bytes):
# _url = str(_url)[2:-1]
# if _print_api_key and ".api.riotgames.com/lol" in _url:
# if "?" not in _url:
# _url += "?api_key={}".format(headers["X-Riot-Token"])
# else:
# _url += "&api_key={}".format(headers["X-Riot-Token"])
# print("Making call: {}".format(_url))
# if rate_limiters:
# with ExitStack() as stack:
# # Enter each context manager / rate limiter
# limiters = [
# stack.enter_context(rate_limiter)
# for rate_limiter in rate_limiters
# ]
# exit_limiters = stack.pop_all().__exit__
# status_code = HTTPClient._execute(curl, connection is None)
# exit_limiters(None, None, None)
# else:
# status_code = HTTPClient._execute(curl, connection is None)
#
# body = buffer.getvalue()
#
# # Decompress if we got gzipped data
# try:
# content_encoding = response_headers["Content-Encoding"].upper()
# if "GZIP" == content_encoding:
# body = zlib.decompress(body, zlib.MAX_WBITS | 16)
# except KeyError:
# pass
#
# return status_code, body, response_headers
#
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# if parameters:
# if encode_parameters:
# parameters = {
# k: str(v).lower() if isinstance(v, bool) else v
# for k, v in parameters.items()
# }
# parameters = urlencode(parameters, doseq=True)
# url = "{url}?{params}".format(url=url, params=parameters)
#
# status_code, body, response_headers = HTTPClient._get(
# url, headers, rate_limiters, connection
# )
#
# content_type = response_headers.get(
# "Content-Type", "application/octet-stream"
# ).upper()
#
# # Decode to text if a charset is included
# match = re.search("CHARSET=(\S+)", content_type)
# if match:
# encoding = match.group(1)
# body = body.decode(encoding)
#
# # Load JSON if necessary
# if "APPLICATION/JSON" in content_type:
# body = json.loads(body)
#
# # Handle errors
# if status_code >= 400:
# if isinstance(body, dict):
# message = body.get("status", {}).get("message", "")
# elif isinstance(body, str):
# message = body
# else:
# message = ""
#
# raise HTTPError(message, status_code, response_headers)
#
# return body, response_headers
#
# @contextmanager
# def new_session(self) -> Curl:
# session = Curl()
# yield session
# session.close()
, which may contain function names, class names, or code. Output only the next line. | ImageDataSource._validate_get_image(query, context) |
Using the snippet: <|code_start|> query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
#######################
# Verification String #
#######################
_validate_get_verification_string_query = (
Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
<|code_end|>
, determine the next line of code. You have imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | ) |
Given the code snippet: <|code_start|>
T = TypeVar("T")
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
<|code_end|>
, generate the next line using the imports in this file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
and context (functions, classes, or occasionally code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | ) -> Iterable[T]: |
Next line prediction: <|code_start|>
T = TypeVar("T")
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
<|code_end|>
. Use current file imports:
(from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform)
and context including class names, function names, or small code snippets from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | self, |
Using the snippet: <|code_start|>
T = TypeVar("T")
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
<|code_end|>
, determine the next line of code. You have imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | type: Type[T], |
Predict the next line for this snippet: <|code_start|>
T = TypeVar("T")
F = TypeVar("F")
class StatusTransformer(DataTransformer):
@DataTransformer.dispatch
def transform(
self, target_type: Type[T], value: F, context: PipelineContext = None
) -> T:
pass
# Dto to Data
@transform.register(ShardStatusDto, ShardStatusData)
def shard_status_dto_to_data(
self, value: ShardStatusDto, context: PipelineContext = None
) -> ShardStatusData:
return ShardStatusData(**value)
# Data to Core
# @transform.register(ShardStatusData, ShardStatus)
<|code_end|>
with the help of current file imports:
from typing import Type, TypeVar
from datapipelines import DataTransformer, PipelineContext
from ..core.status import ShardStatusData, ShardStatus
from ..dto.status import ShardStatusDto
and context from other files:
# Path: cassiopeia/core/status.py
# class ShardStatusData(CoreData):
# _renamed = {"region_tag": "platform"}
#
# def __call__(self, **kwargs):
# if "services" in kwargs:
# self.services = [
# ServiceData(**service) for service in kwargs.pop("services")
# ]
# super().__call__(**kwargs)
# return self
#
# class ShardStatus(CassiopeiaGhost):
# _data_types = {ShardStatusData}
#
# def __init__(self, region: Union[Region, str] = None):
# kwargs = {"region": region}
# super().__init__(**kwargs)
#
# def __get_query__(self):
# return {"region": self.region, "platform": self.platform}
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# @lazy
# def region(self) -> Region:
# return Region(self._data[ShardStatusData].region)
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# @lazy
# def platform(self) -> Platform:
# return self.region.platform
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def name(self) -> str:
# return self._data[ShardStatusData].name
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def hostname(self) -> str:
# return self._data[ShardStatusData].hostname
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def services(self) -> List[Service]:
# return SearchableList(
# [
# Service.from_data(service)
# for service in self._data[ShardStatusData].services
# ]
# )
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def slug(self) -> str:
# return self._data[ShardStatusData].slug
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def locales(self) -> List[str]:
# return self._data[ShardStatusData].locales
, which may contain function names, class names, or code. Output only the next line. | def shard_status_data_to_core( |
Predict the next line after this snippet: <|code_start|>
T = TypeVar("T")
F = TypeVar("F")
class StatusTransformer(DataTransformer):
@DataTransformer.dispatch
def transform(
self, target_type: Type[T], value: F, context: PipelineContext = None
) -> T:
pass
# Dto to Data
@transform.register(ShardStatusDto, ShardStatusData)
def shard_status_dto_to_data(
self, value: ShardStatusDto, context: PipelineContext = None
) -> ShardStatusData:
return ShardStatusData(**value)
# Data to Core
# @transform.register(ShardStatusData, ShardStatus)
<|code_end|>
using the current file's imports:
from typing import Type, TypeVar
from datapipelines import DataTransformer, PipelineContext
from ..core.status import ShardStatusData, ShardStatus
from ..dto.status import ShardStatusDto
and any relevant context from other files:
# Path: cassiopeia/core/status.py
# class ShardStatusData(CoreData):
# _renamed = {"region_tag": "platform"}
#
# def __call__(self, **kwargs):
# if "services" in kwargs:
# self.services = [
# ServiceData(**service) for service in kwargs.pop("services")
# ]
# super().__call__(**kwargs)
# return self
#
# class ShardStatus(CassiopeiaGhost):
# _data_types = {ShardStatusData}
#
# def __init__(self, region: Union[Region, str] = None):
# kwargs = {"region": region}
# super().__init__(**kwargs)
#
# def __get_query__(self):
# return {"region": self.region, "platform": self.platform}
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# @lazy
# def region(self) -> Region:
# return Region(self._data[ShardStatusData].region)
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# @lazy
# def platform(self) -> Platform:
# return self.region.platform
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def name(self) -> str:
# return self._data[ShardStatusData].name
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def hostname(self) -> str:
# return self._data[ShardStatusData].hostname
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def services(self) -> List[Service]:
# return SearchableList(
# [
# Service.from_data(service)
# for service in self._data[ShardStatusData].services
# ]
# )
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def slug(self) -> str:
# return self._data[ShardStatusData].slug
#
# @CassiopeiaGhost.property(ShardStatusData)
# @ghost_load_on
# def locales(self) -> List[str]:
# return self._data[ShardStatusData].locales
. Output only the next line. | def shard_status_data_to_core( |
Given the code snippet: <|code_start|>
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class StatusAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
<|code_end|>
, generate the next line using the imports in this file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.status import ShardStatusDto
from ..uniquekeys import convert_region_to_platform
and context (functions, classes, or occasionally code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | query: MutableMapping[str, Any], |
Given the following code snippet before the placeholder: <|code_start|>
T = TypeVar("T")
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class StatusAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
<|code_end|>
, predict the next line using imports from the current file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.status import ShardStatusDto
from ..uniquekeys import convert_region_to_platform
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | def get_many( |
Predict the next line after this snippet: <|code_start|>
T = TypeVar("T")
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
<|code_end|>
using the current file's imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.status import ShardStatusDto
from ..uniquekeys import convert_region_to_platform
and any relevant context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | ) -> str: |
Given snippet: <|code_start|> def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
##########
# Status #
##########
_validate_get_status_query = Query.has("platform").as_(Platform)
@get.register(ShardStatusDto)
@validate_query(_validate_get_status_query, convert_region_to_platform)
def get_status(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ShardStatusDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/status/v3/shard-data".format()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.status import ShardStatusDto
from ..uniquekeys import convert_region_to_platform
and context:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
which might include code, classes, or functions. Output only the next line. | try: |
Given snippet: <|code_start|> Query.has("platform")
.as_(Platform)
.also.has("summoner.id")
.as_(str)
.also.has("champion.id")
.as_(int)
)
@get.register(ChampionMasteryDto)
@validate_query(_validate_get_champion_mastery_query, convert_region_to_platform)
def get_champion_mastery(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionMasteryDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/champion-mastery/v4/champion-masteries/by-summoner/{summonerId}/by-champion/{championId}".format(
summonerId=query["summoner.id"], championId=query["champion.id"]
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["region"] = query["platform"].region.value
data["summonerId"] = query["summoner.id"]
return ChampionMasteryDto(data)
_validate_get_many_champion_mastery_query = (
Query.has("platform")
.as_(Platform)
.also.has("summonerId")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.championmastery import (
ChampionMasteryDto,
ChampionMasteryListDto,
ChampionMasteryScoreDto,
)
from ..uniquekeys import convert_region_to_platform
and context:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
which might include code, classes, or functions. Output only the next line. | .as_(str) |
Given the following code snippet before the placeholder: <|code_start|>
mastery["summonerId"] = query["summonerId"]
mastery["region"] = query["platform"].region.value
yield ChampionMasteryDto(mastery)
return generator()
_validate_get_champion_mastery_list_query = (
Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
)
@get.register(ChampionMasteryListDto)
@validate_query(
_validate_get_champion_mastery_list_query, convert_region_to_platform
)
def get_champion_mastery_list(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionMasteryListDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/champion-mastery/v4/champion-masteries/by-summoner/{summonerId}".format(
summonerId=query["summoner.id"]
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
for cm in data:
cm["region"] = query["region"]
return ChampionMasteryListDto(
<|code_end|>
, predict the next line using imports from the current file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.championmastery import (
ChampionMasteryDto,
ChampionMasteryListDto,
ChampionMasteryScoreDto,
)
from ..uniquekeys import convert_region_to_platform
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | { |
Here is a snippet: <|code_start|> )
@get_many.register(ChampionMasteryListDto)
@validate_query(
_validate_get_many_champion_mastery_list_query, convert_region_to_platform
)
def get_many_champion_mastery_list(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> Generator[ChampionMasteryListDto, None, None]:
def generator():
parameters = {"platform": query["platform"].value}
for summoner_id in query["summoner.ids"]:
endpoint = "lol/champion-mastery/v4/champion-masteries/by-summoner/{summonerId}".format(
summonerId=summoner_id
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
yield ChampionMasteryListDto(
{
"masteries": data,
"summonerId": summoner_id,
"region": query["platform"].region.value,
}
)
return generator()
<|code_end|>
. Write the next line using the current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.championmastery import (
ChampionMasteryDto,
ChampionMasteryListDto,
ChampionMasteryScoreDto,
)
from ..uniquekeys import convert_region_to_platform
and context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
, which may include functions, classes, or code. Output only the next line. | _validate_get_champion_mastery_score_query = ( |
Given the following code snippet before the placeholder: <|code_start|> def get_champion_mastery_list(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionMasteryListDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/champion-mastery/v4/champion-masteries/by-summoner/{summonerId}".format(
summonerId=query["summoner.id"]
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
for cm in data:
cm["region"] = query["region"]
return ChampionMasteryListDto(
{
"masteries": data,
"summonerId": query["summoner.id"],
"region": query["platform"].region.value,
}
)
_validate_get_many_champion_mastery_list_query = (
Query.has("platform").as_(Platform).also.has("summoner.ids").as_(Iterable)
)
@get_many.register(ChampionMasteryListDto)
@validate_query(
_validate_get_many_champion_mastery_list_query, convert_region_to_platform
)
<|code_end|>
, predict the next line using imports from the current file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.championmastery import (
ChampionMasteryDto,
ChampionMasteryListDto,
ChampionMasteryScoreDto,
)
from ..uniquekeys import convert_region_to_platform
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | def get_many_champion_mastery_list( |
Here is a snippet: <|code_start|>
try:
except ImportError:
json.decode = json.loads
T = TypeVar("T")
class MerakiAnalyticsCDN(DataSource):
def __init__(self, http_client: HTTPClient = None) -> None:
if http_client is None:
self._client = HTTPClient()
else:
self._client = http_client
self._cache = {}
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
<|code_end|>
. Write the next line using the current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, PipelineContext, NotFoundError
from ..dto.patch import PatchListDto
from ..dto.staticdata.champion import ChampionAllRatesDto, ChampionRatesDto
from .common import HTTPClient, HTTPError
import ujson as json
import json
and context from other files:
# Path: cassiopeia/datastores/common.py
# class HTTPClient(object):
# @staticmethod
# def _execute(curl: Curl, close_connection: bool) -> int:
# curl.perform()
# status_code = curl.getinfo(curl.HTTP_CODE)
# if close_connection:
# curl.close()
# return status_code
#
# @staticmethod
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# if not headers:
# request_headers = ["Accept-Encoding: gzip"]
# else:
# request_headers = [
# "{header}: {value}".format(header=key, value=value)
# for key, value in headers.items()
# ]
# if "Accept-Encoding" not in headers:
# request_headers.append("Accept-Encoding: gzip")
#
# response_headers = {}
#
# def get_response_headers(header_line: bytes) -> None:
# header_line = header_line.decode("ISO-8859-1")
#
# if ":" not in header_line:
# return
#
# name, value = header_line.split(":", 1)
# response_headers[name.strip()] = value.strip()
#
# buffer = BytesIO()
#
# curl = connection if connection is not None else Curl()
#
# curl.setopt(curl.URL, url)
# curl.setopt(curl.WRITEDATA, buffer)
# curl.setopt(curl.HEADERFUNCTION, get_response_headers)
# curl.setopt(curl.HTTPHEADER, request_headers)
# if certifi:
# curl.setopt(curl.CAINFO, certifi.where())
#
# if _print_calls:
# _url = url
# if isinstance(_url, bytes):
# _url = str(_url)[2:-1]
# if _print_api_key and ".api.riotgames.com/lol" in _url:
# if "?" not in _url:
# _url += "?api_key={}".format(headers["X-Riot-Token"])
# else:
# _url += "&api_key={}".format(headers["X-Riot-Token"])
# print("Making call: {}".format(_url))
# if rate_limiters:
# with ExitStack() as stack:
# # Enter each context manager / rate limiter
# limiters = [
# stack.enter_context(rate_limiter)
# for rate_limiter in rate_limiters
# ]
# exit_limiters = stack.pop_all().__exit__
# status_code = HTTPClient._execute(curl, connection is None)
# exit_limiters(None, None, None)
# else:
# status_code = HTTPClient._execute(curl, connection is None)
#
# body = buffer.getvalue()
#
# # Decompress if we got gzipped data
# try:
# content_encoding = response_headers["Content-Encoding"].upper()
# if "GZIP" == content_encoding:
# body = zlib.decompress(body, zlib.MAX_WBITS | 16)
# except KeyError:
# pass
#
# return status_code, body, response_headers
#
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# if parameters:
# if encode_parameters:
# parameters = {
# k: str(v).lower() if isinstance(v, bool) else v
# for k, v in parameters.items()
# }
# parameters = urlencode(parameters, doseq=True)
# url = "{url}?{params}".format(url=url, params=parameters)
#
# status_code, body, response_headers = HTTPClient._get(
# url, headers, rate_limiters, connection
# )
#
# content_type = response_headers.get(
# "Content-Type", "application/octet-stream"
# ).upper()
#
# # Decode to text if a charset is included
# match = re.search("CHARSET=(\S+)", content_type)
# if match:
# encoding = match.group(1)
# body = body.decode(encoding)
#
# # Load JSON if necessary
# if "APPLICATION/JSON" in content_type:
# body = json.loads(body)
#
# # Handle errors
# if status_code >= 400:
# if isinstance(body, dict):
# message = body.get("status", {}).get("message", "")
# elif isinstance(body, str):
# message = body
# else:
# message = ""
#
# raise HTTPError(message, status_code, response_headers)
#
# return body, response_headers
#
# @contextmanager
# def new_session(self) -> Curl:
# session = Curl()
# yield session
# session.close()
#
# class HTTPError(RuntimeError):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# super().__init__(message)
# self.code = code
# self.response_headers = response_headers or {}
, which may include functions, classes, or code. Output only the next line. | pass |
Given the code snippet: <|code_start|>
T = TypeVar("T")
class MerakiAnalyticsCDN(DataSource):
def __init__(self, http_client: HTTPClient = None) -> None:
if http_client is None:
self._client = HTTPClient()
else:
self._client = http_client
self._cache = {}
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
<|code_end|>
, generate the next line using the imports in this file:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, PipelineContext, NotFoundError
from ..dto.patch import PatchListDto
from ..dto.staticdata.champion import ChampionAllRatesDto, ChampionRatesDto
from .common import HTTPClient, HTTPError
import ujson as json
import json
and context (functions, classes, or occasionally code) from other files:
# Path: cassiopeia/datastores/common.py
# class HTTPClient(object):
# @staticmethod
# def _execute(curl: Curl, close_connection: bool) -> int:
# curl.perform()
# status_code = curl.getinfo(curl.HTTP_CODE)
# if close_connection:
# curl.close()
# return status_code
#
# @staticmethod
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# if not headers:
# request_headers = ["Accept-Encoding: gzip"]
# else:
# request_headers = [
# "{header}: {value}".format(header=key, value=value)
# for key, value in headers.items()
# ]
# if "Accept-Encoding" not in headers:
# request_headers.append("Accept-Encoding: gzip")
#
# response_headers = {}
#
# def get_response_headers(header_line: bytes) -> None:
# header_line = header_line.decode("ISO-8859-1")
#
# if ":" not in header_line:
# return
#
# name, value = header_line.split(":", 1)
# response_headers[name.strip()] = value.strip()
#
# buffer = BytesIO()
#
# curl = connection if connection is not None else Curl()
#
# curl.setopt(curl.URL, url)
# curl.setopt(curl.WRITEDATA, buffer)
# curl.setopt(curl.HEADERFUNCTION, get_response_headers)
# curl.setopt(curl.HTTPHEADER, request_headers)
# if certifi:
# curl.setopt(curl.CAINFO, certifi.where())
#
# if _print_calls:
# _url = url
# if isinstance(_url, bytes):
# _url = str(_url)[2:-1]
# if _print_api_key and ".api.riotgames.com/lol" in _url:
# if "?" not in _url:
# _url += "?api_key={}".format(headers["X-Riot-Token"])
# else:
# _url += "&api_key={}".format(headers["X-Riot-Token"])
# print("Making call: {}".format(_url))
# if rate_limiters:
# with ExitStack() as stack:
# # Enter each context manager / rate limiter
# limiters = [
# stack.enter_context(rate_limiter)
# for rate_limiter in rate_limiters
# ]
# exit_limiters = stack.pop_all().__exit__
# status_code = HTTPClient._execute(curl, connection is None)
# exit_limiters(None, None, None)
# else:
# status_code = HTTPClient._execute(curl, connection is None)
#
# body = buffer.getvalue()
#
# # Decompress if we got gzipped data
# try:
# content_encoding = response_headers["Content-Encoding"].upper()
# if "GZIP" == content_encoding:
# body = zlib.decompress(body, zlib.MAX_WBITS | 16)
# except KeyError:
# pass
#
# return status_code, body, response_headers
#
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# if parameters:
# if encode_parameters:
# parameters = {
# k: str(v).lower() if isinstance(v, bool) else v
# for k, v in parameters.items()
# }
# parameters = urlencode(parameters, doseq=True)
# url = "{url}?{params}".format(url=url, params=parameters)
#
# status_code, body, response_headers = HTTPClient._get(
# url, headers, rate_limiters, connection
# )
#
# content_type = response_headers.get(
# "Content-Type", "application/octet-stream"
# ).upper()
#
# # Decode to text if a charset is included
# match = re.search("CHARSET=(\S+)", content_type)
# if match:
# encoding = match.group(1)
# body = body.decode(encoding)
#
# # Load JSON if necessary
# if "APPLICATION/JSON" in content_type:
# body = json.loads(body)
#
# # Handle errors
# if status_code >= 400:
# if isinstance(body, dict):
# message = body.get("status", {}).get("message", "")
# elif isinstance(body, str):
# message = body
# else:
# message = ""
#
# raise HTTPError(message, status_code, response_headers)
#
# return body, response_headers
#
# @contextmanager
# def new_session(self) -> Curl:
# session = Curl()
# yield session
# session.close()
#
# class HTTPError(RuntimeError):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# super().__init__(message)
# self.code = code
# self.response_headers = response_headers or {}
. Output only the next line. | def calculate_hash(self, query): |
Continue the code snippet: <|code_start|>
T = TypeVar("T")
class SummonerAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
<|code_end|>
. Use current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.summoner import SummonerDto
from ..uniquekeys import convert_region_to_platform
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | @DataSource.dispatch |
Here is a snippet: <|code_start|> def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
_validate_get_summoner_query = (
Query.has("id")
.as_(str)
.or_("accountId")
.as_(str)
.or_("puuid")
.as_(str)
.or_("name")
.as_(str)
.also.has("platform")
.as_(Platform)
)
@get.register(SummonerDto)
@validate_query(_validate_get_summoner_query, convert_region_to_platform)
def get_summoner(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> SummonerDto:
parameters = {"platform": query["platform"].value}
if "id" in query:
endpoint = "lol/summoner/v4/summoners/{summonerId}".format(
summonerId=query["id"]
<|code_end|>
. Write the next line using the current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.summoner import SummonerDto
from ..uniquekeys import convert_region_to_platform
and context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
, which may include functions, classes, or code. Output only the next line. | ) |
Continue the code snippet: <|code_start|> @get.register(SummonerDto)
@validate_query(_validate_get_summoner_query, convert_region_to_platform)
def get_summoner(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> SummonerDto:
parameters = {"platform": query["platform"].value}
if "id" in query:
endpoint = "lol/summoner/v4/summoners/{summonerId}".format(
summonerId=query["id"]
)
elif "accountId" in query:
endpoint = "lol/summoner/v4/summoners/by-account/{accountId}".format(
accountId=query["accountId"]
)
elif "name" in query:
endpoint = "lol/summoner/v4/summoners/by-name/{name}".format(
name=query["name"].replace(" ", "%20")
)
elif "puuid" in query:
endpoint = "lol/summoner/v4/summoners/by-puuid/{puuid}".format(
puuid=query["puuid"]
)
else:
RuntimeError("Impossible")
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
<|code_end|>
. Use current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.summoner import SummonerDto
from ..uniquekeys import convert_region_to_platform
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | data["region"] = query["platform"].region.value |
Using the snippet: <|code_start|> .or_("puuid")
.as_(str)
.or_("name")
.as_(str)
.also.has("platform")
.as_(Platform)
)
@get.register(SummonerDto)
@validate_query(_validate_get_summoner_query, convert_region_to_platform)
def get_summoner(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> SummonerDto:
parameters = {"platform": query["platform"].value}
if "id" in query:
endpoint = "lol/summoner/v4/summoners/{summonerId}".format(
summonerId=query["id"]
)
elif "accountId" in query:
endpoint = "lol/summoner/v4/summoners/by-account/{accountId}".format(
accountId=query["accountId"]
)
elif "name" in query:
endpoint = "lol/summoner/v4/summoners/by-name/{name}".format(
name=query["name"].replace(" ", "%20")
)
elif "puuid" in query:
endpoint = "lol/summoner/v4/summoners/by-puuid/{puuid}".format(
puuid=query["puuid"]
)
<|code_end|>
, determine the next line of code. You have imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.summoner import SummonerDto
from ..uniquekeys import convert_region_to_platform
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | else: |
Given the following code snippet before the placeholder: <|code_start|>
class TestLeague(unittest.TestCase):
def setUp(self):
cassiopeia.apply_settings(cassiopeia.get_default_config())
cassiopeia.set_riot_api_key(os.environ.get("RIOT_API_KEY"))
def test_access_league_properties(self):
lg = cassiopeia.League(id=LEAGUE_UUID)
self.assertIsNotNone(lg.region)
self.assertIsNotNone(lg.platform)
self.assertEqual(lg.id, LEAGUE_UUID)
self.assertIsNotNone(lg.tier)
<|code_end|>
, predict the next line using imports from the current file:
import io
import os
import unittest
from unittest.mock import patch
from cassiopeia import cassiopeia
from .constants import LEAGUE_UUID, SUMMONER_NAME
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/cassiopeia.py
# def apply_settings(config: Union[str, TextIO, Dict, Settings]):
# def set_riot_api_key(key: str):
# def print_calls(calls: bool, api_key: bool = False):
# def get_league_entries(summoner: Summoner) -> LeagueEntries:
# def get_paginated_league_entries(
# queue: Queue, tier: Tier, division: Division, region: Union[Region, str] = None
# ) -> LeagueEntries:
# def get_master_league(
# queue: Union[Queue, int, str], region: Union[Region, str] = None
# ) -> MasterLeague:
# def get_grandmaster_league(
# queue: Union[Queue, int, str], region: Union[Region, str] = None
# ) -> GrandmasterLeague:
# def get_challenger_league(
# queue: Union[Queue, int, str], region: Union[Region, str] = None
# ) -> ChallengerLeague:
# def get_match_history(
# continent: Continent = None,
# region: Region = None,
# platform: Platform = None,
# puuid: str = None,
# begin_index: int = None,
# end_index: int = None,
# begin_time: arrow.Arrow = None,
# end_time: arrow.Arrow = None,
# queue: Queue = None,
# type: MatchType = None,
# ):
# def get_match(id: int, region: Union[Region, str] = None) -> Match:
# def get_featured_matches(region: Union[Region, str] = None) -> FeaturedMatches:
# def get_current_match(
# summoner: Summoner, region: Union[Region, str] = None
# ) -> CurrentMatch:
# def get_champion_masteries(
# summoner: Summoner, region: Union[Region, str] = None
# ) -> ChampionMasteries:
# def get_champion_mastery(
# summoner: Summoner,
# champion: Union[Champion, int, str],
# region: Union[Region, str] = None,
# ) -> ChampionMastery:
# def get_summoner(
# *,
# id: str = None,
# account_id: str = None,
# name: str = None,
# region: Union[Region, str] = None
# ) -> Summoner:
# def get_champion(key: Union[str, int], region: Union[Region, str] = None) -> Champion:
# def get_champions(region: Union[Region, str] = None) -> Champions:
# def get_runes(region: Union[Region, str] = None) -> Runes:
# def get_summoner_spells(region: Union[Region, str] = None) -> SummonerSpells:
# def get_items(region: Union[Region, str] = None) -> Items:
# def get_maps(region: Union[Region, str] = None) -> Maps:
# def get_profile_icons(region: Union[Region, str] = None) -> ProfileIcons:
# def get_realms(region: Union[Region, str] = None) -> Realms:
# def get_status(region: Union[Region, str] = None) -> ShardStatus:
# def get_language_strings(region: Union[Region, str] = None) -> LanguageStrings:
# def get_locales(region: Union[Region, str] = None) -> List[str]:
# def get_versions(region: Union[Region, str] = None) -> List[str]:
# def get_version(
# date: datetime.date = None, region: Union[Region, str] = None
# ) -> Union[None, str]:
# def get_verification_string(summoner: Summoner) -> VerificationString:
# def get_champion_rotations(region: Union[Region, str] = None) -> ChampionRotation:
# def _get_pipeline():
. Output only the next line. | self.assertIsNotNone(lg.queue) |
Predict the next line for this snippet: <|code_start|> context: PipelineContext = None,
) -> Iterable[T]:
pass
_validate_get_champion_rotation_query = Query.has("platform").as_(Platform)
@get.register(ChampionRotationDto)
@validate_query(_validate_get_champion_rotation_query, convert_region_to_platform)
def get_champion_rotation(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionRotationDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/platform/v3/champion-rotations"
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["platform"] = query["platform"].value
data["region"] = query["platform"].region.value
return ChampionRotationDto(**data)
_validate_get_many_champion_rotation_query = Query.has("platforms").as_(Iterable)
@get_many.register(ChampionRotationDto)
@validate_query(
_validate_get_many_champion_rotation_query, convert_region_to_platform
)
def get_many_champion_rotations(
self, query: MutableMapping[str, Any], context: PipelineContext = None
<|code_end|>
with the help of current file imports:
import copy
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.champion import ChampionRotationDto
from ..uniquekeys import convert_region_to_platform
and context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
, which may contain function names, class names, or code. Output only the next line. | ) -> Generator[ChampionRotationDto, None, None]: |
Using the snippet: <|code_start|>
T = TypeVar("T")
class ChampionAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
_validate_get_champion_rotation_query = Query.has("platform").as_(Platform)
@get.register(ChampionRotationDto)
@validate_query(_validate_get_champion_rotation_query, convert_region_to_platform)
def get_champion_rotation(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionRotationDto:
<|code_end|>
, determine the next line of code. You have imports:
import copy
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.champion import ChampionRotationDto
from ..uniquekeys import convert_region_to_platform
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | parameters = {"platform": query["platform"].value} |
Continue the code snippet: <|code_start|>
T = TypeVar("T")
class ChampionAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
<|code_end|>
. Use current file imports:
import copy
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.champion import ChampionRotationDto
from ..uniquekeys import convert_region_to_platform
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | pass |
Continue the code snippet: <|code_start|>
class ChampionAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
_validate_get_champion_rotation_query = Query.has("platform").as_(Platform)
@get.register(ChampionRotationDto)
@validate_query(_validate_get_champion_rotation_query, convert_region_to_platform)
def get_champion_rotation(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> ChampionRotationDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/platform/v3/champion-rotations"
try:
<|code_end|>
. Use current file imports:
import copy
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.champion import ChampionRotationDto
from ..uniquekeys import convert_region_to_platform
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | data = self._get(endpoint=endpoint, parameters=parameters) |
Given the code snippet: <|code_start|>
def _retry_request_by_handling_error(self, error: HTTPError, handlers=None):
if handlers is None:
handlers = []
# Try to properly handling the 429 and retry the call after the appropriate time limit.
if error.code == 429:
# Identify which rate limit was hit (application, method, or service)
if "X-Rate-Limit-Type" not in error.response_headers:
rate_limiting_type = "service"
elif error.response_headers["X-Rate-Limit-Type"] == "application":
rate_limiting_type = "application"
elif error.response_headers["X-Rate-Limit-Type"] == "method":
rate_limiting_type = "method"
elif error.response_headers["X-Rate-Limit-Type"] == "service":
rate_limiting_type = "service"
else:
raise ValueError(
"Unknown cause of rate limit; aborting. Headers were: {}".format(
error.response_headers
)
)
# Create a new handler
new_handler = self.service._handlers[429][
rate_limiting_type
]() # type: FailedRequestHandler
else:
new_handler = self.service._handlers[error.code]()
# If we will handle the new error in the same way as we did previously, don't use a new instance
<|code_end|>
, generate the next line using the imports in this file:
import time
import copy
import functools
import collections.abc
from abc import abstractmethod, ABC
from typing import (
MutableMapping,
Any,
Union,
TypeVar,
Iterable,
Type,
List,
Tuple,
Dict,
Callable,
)
from datapipelines import DataSource, PipelineContext
from merakicommons.ratelimits import FixedWindowRateLimiter, MultiRateLimiter
from ..common import HTTPClient, HTTPError, Curl
from ...data import Platform
from ...dto.staticdata.realm import RealmDto
and context (functions, classes, or occasionally code) from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
. Output only the next line. | for handler in handlers: |
Here is a snippet: <|code_start|> code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'The Riot API returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
else:
new_error = new_error_type(str(error))
raise new_error from error
@abstractmethod
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@abstractmethod
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
<|code_end|>
. Write the next line using the current file imports:
import time
import copy
import functools
import collections.abc
from abc import abstractmethod, ABC
from typing import (
MutableMapping,
Any,
Union,
TypeVar,
Iterable,
Type,
List,
Tuple,
Dict,
Callable,
)
from datapipelines import DataSource, PipelineContext
from merakicommons.ratelimits import FixedWindowRateLimiter, MultiRateLimiter
from ..common import HTTPClient, HTTPError, Curl
from ...data import Platform
from ...dto.staticdata.realm import RealmDto
and context from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
, which may include functions, classes, or code. Output only the next line. | context: PipelineContext = None, |
Continue the code snippet: <|code_start|> body, response_headers = self.service._client.get(
url=self.url,
parameters=self.parameters,
headers=self.service._headers,
rate_limiters=[self.app_limiter, self.method_limiter],
connection=self.connection,
)
self.service._adjust_rate_limiters_from_headers(
app_limiter=self.app_limiter,
method_limiter=self.method_limiter,
response_headers=response_headers,
)
return body
except HTTPError as error:
return self._retry_request_by_handling_error(error)
def _retry_request_by_handling_error(self, error: HTTPError, handlers=None):
if handlers is None:
handlers = []
# Try to properly handling the 429 and retry the call after the appropriate time limit.
if error.code == 429:
# Identify which rate limit was hit (application, method, or service)
if "X-Rate-Limit-Type" not in error.response_headers:
rate_limiting_type = "service"
elif error.response_headers["X-Rate-Limit-Type"] == "application":
rate_limiting_type = "application"
elif error.response_headers["X-Rate-Limit-Type"] == "method":
rate_limiting_type = "method"
elif error.response_headers["X-Rate-Limit-Type"] == "service":
rate_limiting_type = "service"
<|code_end|>
. Use current file imports:
import time
import copy
import functools
import collections.abc
from abc import abstractmethod, ABC
from typing import (
MutableMapping,
Any,
Union,
TypeVar,
Iterable,
Type,
List,
Tuple,
Dict,
Callable,
)
from datapipelines import DataSource, PipelineContext
from merakicommons.ratelimits import FixedWindowRateLimiter, MultiRateLimiter
from ..common import HTTPClient, HTTPError, Curl
from ...data import Platform
from ...dto.staticdata.realm import RealmDto
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
. Output only the next line. | else: |
Based on the snippet: <|code_start|>
class RetryFromHeaders(object):
def __init__(self, max_attempts: int):
self.max_attempts = int(max_attempts)
self.attempts = 0
self.stop = False
def __call__(
self, error, requester, url, parameters, headers, rate_limiters, connection
) -> Tuple[Union[dict, list, str, bytes], dict]:
if self.attempts >= self.max_attempts:
self.stop = True
raise error
backoff = int(error.response_headers["Retry-After"])
print(
"INFO: Unexpected {} rate limit, backing off for {} seconds (from headers).".format(
headers.get("X-Rate-Limit-Type", "service"), backoff
)
)
for rate_limiter in rate_limiters:
rate_limiter.restrict_for(backoff)
self.attempts += 1
return requester(url, parameters, headers, rate_limiters, connection)
class ThrowException(FailedRequestHandler):
def __init__(self):
self.stop = True
def __call__(
<|code_end|>
, predict the immediate next line with the help of imports:
import time
import copy
import functools
import collections.abc
from abc import abstractmethod, ABC
from typing import (
MutableMapping,
Any,
Union,
TypeVar,
Iterable,
Type,
List,
Tuple,
Dict,
Callable,
)
from datapipelines import DataSource, PipelineContext
from merakicommons.ratelimits import FixedWindowRateLimiter, MultiRateLimiter
from ..common import HTTPClient, HTTPError, Curl
from ...data import Platform
from ...dto.staticdata.realm import RealmDto
and context (classes, functions, sometimes code) from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
. Output only the next line. | self, error, requester, url, parameters, headers, rate_limiters, connection |
Given the code snippet: <|code_start|> self._client = http_client
def _get(
self,
endpoint: str,
parameters: MutableMapping[str, Any] = None,
connection: Curl = None,
) -> Union[dict, list, Any]:
url = f"{self._server_url}:{self._port}/{endpoint}"
try:
result, headers = self._client.get(
url=url, parameters=parameters, connection=connection
)
# Ensure compatibility with both Curl and Requests based clients
if isinstance(result, bytes):
result = json.loads(result.decode())
elif isinstance(result, str):
result = json.loads(result)
if not isinstance(result, (dict, list)):
raise ValueError(
"Unexpected type returned from HTTPClient: {}".format(type(result))
)
except HTTPError as error:
# The error handlers didn't work, so raise an appropriate error.
new_error_type = _ERROR_CODES[error.code]
if new_error_type is RuntimeError:
new_error = RuntimeError(
'Encountered an HTTP error code {code} with message "{message}" which should have already been handled. Report this to the Cassiopeia team.'.format(
<|code_end|>
, generate the next line using the imports in this file:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context (functions, classes, or occasionally code) from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | code=error.code, message=str(error) |
Using the snippet: <|code_start|> )
except HTTPError as error:
# The error handlers didn't work, so raise an appropriate error.
new_error_type = _ERROR_CODES[error.code]
if new_error_type is RuntimeError:
new_error = RuntimeError(
'Encountered an HTTP error code {code} with message "{message}" which should have already been handled. Report this to the Cassiopeia team.'.format(
code=error.code, message=str(error)
)
)
elif new_error_type is APIError:
new_error = APIError(
'Kernel experienced an internal error on the request. You may want to retry the request after a short wait or continue without the result. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APINotFoundError:
new_error = APINotFoundError(
'Kernel returned a NOT FOUND error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
<|code_end|>
, determine the next line of code. You have imports:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | ) |
Next line prediction: <|code_start|> new_error = RuntimeError(
'Encountered an HTTP error code {code} with message "{message}" which should have already been handled. Report this to the Cassiopeia team.'.format(
code=error.code, message=str(error)
)
)
elif new_error_type is APIError:
new_error = APIError(
'Kernel experienced an internal error on the request. You may want to retry the request after a short wait or continue without the result. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APINotFoundError:
new_error = APINotFoundError(
'Kernel returned a NOT FOUND error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'Kernel returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
<|code_end|>
. Use current file imports:
(from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json)
and context including class names, function names, or small code snippets from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | ), |
Next line prediction: <|code_start|> error.code,
)
elif new_error_type is APINotFoundError:
new_error = APINotFoundError(
'Kernel returned a NOT FOUND error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'Kernel returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
else:
new_error = new_error_type(str(error))
raise new_error from error
return result
@abstractmethod
<|code_end|>
. Use current file imports:
(from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json)
and context including class names, function names, or small code snippets from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | def get( |
Continue the code snippet: <|code_start|> ) -> Union[dict, list, Any]:
url = f"{self._server_url}:{self._port}/{endpoint}"
try:
result, headers = self._client.get(
url=url, parameters=parameters, connection=connection
)
# Ensure compatibility with both Curl and Requests based clients
if isinstance(result, bytes):
result = json.loads(result.decode())
elif isinstance(result, str):
result = json.loads(result)
if not isinstance(result, (dict, list)):
raise ValueError(
"Unexpected type returned from HTTPClient: {}".format(type(result))
)
except HTTPError as error:
# The error handlers didn't work, so raise an appropriate error.
new_error_type = _ERROR_CODES[error.code]
if new_error_type is RuntimeError:
new_error = RuntimeError(
'Encountered an HTTP error code {code} with message "{message}" which should have already been handled. Report this to the Cassiopeia team.'.format(
code=error.code, message=str(error)
)
)
elif new_error_type is APIError:
new_error = APIError(
'Kernel experienced an internal error on the request. You may want to retry the request after a short wait or continue without the result. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
<|code_end|>
. Use current file imports:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context (classes, functions, or code) from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | ), |
Given the following code snippet before the placeholder: <|code_start|> 'Kernel returned a NOT FOUND error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'Kernel returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
else:
new_error = new_error_type(str(error))
raise new_error from error
return result
@abstractmethod
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
<|code_end|>
, predict the next line using imports from the current file:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | context: PipelineContext = None, |
Given snippet: <|code_start|> elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'Kernel returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
else:
new_error = new_error_type(str(error))
raise new_error from error
return result
@abstractmethod
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@abstractmethod
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
which might include code, classes, or functions. Output only the next line. | def get_many( |
Given the following code snippet before the placeholder: <|code_start|> )
)
elif new_error_type is APIError:
new_error = APIError(
'Kernel experienced an internal error on the request. You may want to retry the request after a short wait or continue without the result. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APINotFoundError:
new_error = APINotFoundError(
'Kernel returned a NOT FOUND error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIRequestError:
new_error = APIRequestError(
'Kernel returned an error on the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
elif new_error_type is APIForbiddenError:
new_error = APIForbiddenError(
'Kernel returned a FORBIDDEN error for the request. The received error was {code}: "{message}"'.format(
code=error.code, message=str(error)
),
error.code,
)
<|code_end|>
, predict the next line using imports from the current file:
from abc import abstractmethod
from typing import MutableMapping, Any, Union, TypeVar, Iterable, Type
from datapipelines import DataSource, PipelineContext
from ..common import HTTPClient, HTTPError, Curl
from ..riotapi.common import (
APIForbiddenError,
APINotFoundError,
APIRequestError,
APIError,
_ERROR_CODES,
)
from ...dto.staticdata.realm import RealmDto
import ujson as json
import json
and context including class names, function names, and sometimes code from other files:
# Path: cassiopeia/datastores/common.py
# USE_PYCURL = True
# USE_PYCURL = False
# class HTTPError(RuntimeError):
# class HTTPClient(object):
# class HTTPClient(object):
# def __init__(self, message, code, response_headers: Dict[str, str] = None):
# def _execute(curl: Curl, close_connection: bool) -> int:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# ) -> (int, bytes, dict):
# def get_response_headers(header_line: bytes) -> None:
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> Curl:
# def _get(
# url: str,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# ) -> (int, bytes, dict):
# def get(
# self,
# url: str,
# parameters: MutableMapping[str, Any] = None,
# headers: Mapping[str, str] = None,
# rate_limiters: List[RateLimiter] = None,
# connection: Curl = None,
# encode_parameters: bool = True,
# ) -> (Union[dict, list, str, bytes], dict):
# def new_session(self) -> requests.Session:
#
# Path: cassiopeia/datastores/riotapi/common.py
# class APIForbiddenError(APINotFoundError):
# pass
#
# class APINotFoundError(HTTPError):
# pass
#
# class APIRequestError(HTTPError):
# pass
#
# class APIError(HTTPError):
# pass
#
# _ERROR_CODES = {
# 400: APIRequestError,
# 401: APIForbiddenError,
# 403: APIRequestError,
# 404: APINotFoundError,
# 415: RuntimeError,
# 429: RuntimeError,
# 500: APIError,
# 502: APIError,
# 503: APIError,
# 504: APIError,
# }
. Output only the next line. | else: |
Using the snippet: <|code_start|>
T = TypeVar("T")
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class SpectatorAPI(KernelSource):
@DataSource.dispatch
def get(
self,
<|code_end|>
, determine the next line of code. You have imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.spectator import CurrentGameInfoDto, FeaturedGamesDto
from ..uniquekeys import convert_region_to_platform
and context (class names, function names, or code) available:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
. Output only the next line. | type: Type[T], |
Predict the next line for this snippet: <|code_start|>
T = TypeVar("T")
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class SpectatorAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
<|code_end|>
with the help of current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.spectator import CurrentGameInfoDto, FeaturedGamesDto
from ..uniquekeys import convert_region_to_platform
and context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
, which may contain function names, class names, or code. Output only the next line. | ) -> T: |
Predict the next line for this snippet: <|code_start|>T = TypeVar("T")
def _get_default_version(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
pipeline = context[PipelineContext.Keys.PIPELINE]
versions = pipeline.get(VersionListDto, {"platform": query["platform"]})
return versions["versions"][0]
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class SpectatorAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
<|code_end|>
with the help of current file imports:
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform, Region
from ...dto.staticdata.version import VersionListDto
from ...dto.spectator import CurrentGameInfoDto, FeaturedGamesDto
from ..uniquekeys import convert_region_to_platform
and context from other files:
# Path: cassiopeia/datastores/kernel/common.py
# def _get_latest_version(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def _get_default_locale(
# query: MutableMapping[str, Any], context: PipelineContext
# ) -> str:
# def __init__(self, server_url: str, port: int, http_client: HTTPClient = None):
# def _get(
# self,
# endpoint: str,
# parameters: MutableMapping[str, Any] = None,
# connection: Curl = None,
# ) -> Union[dict, list, Any]:
# def get(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> T:
# def get_many(
# self,
# type: Type[T],
# query: MutableMapping[str, Any],
# context: PipelineContext = None,
# ) -> Iterable[T]:
# T = TypeVar("T")
# class KernelSource(DataSource):
#
# Path: cassiopeia/data.py
# class Platform(Enum):
# brazil = "BR1"
# europe_north_east = "EUN1"
# europe_west = "EUW1"
# japan = "JP1"
# korea = "KR"
# latin_america_north = "LA1"
# latin_america_south = "LA2"
# north_america = "NA1"
# oceania = "OC1"
# turkey = "TR1"
# russia = "RU"
#
# @property
# def region(self) -> "Region":
# return getattr(Region, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_region(region):
# try:
# return region.platform
# except AttributeError:
# return Region(region).platform
#
# @property
# def continent(self):
# return self.region.continent
#
# class Region(Enum):
# brazil = "BR"
# europe_north_east = "EUNE"
# europe_west = "EUW"
# japan = "JP"
# korea = "KR"
# latin_america_north = "LAN"
# latin_america_south = "LAS"
# north_america = "NA"
# oceania = "OCE"
# turkey = "TR"
# russia = "RU"
#
# @property
# def platform(self) -> "Platform":
# return getattr(Platform, self.name)
#
# @property
# def default_locale(self) -> str:
# return DEFAULT_LOCALE[self]
#
# @staticmethod
# def from_platform(platform):
# try:
# return platform.region
# except AttributeError:
# return Platform(platform).region
#
# @property
# def timezone(self) -> str:
# tzs = {
# "NA": "GMT-8",
# "LAN": "GMT-7",
# "LAS": "GMT-5",
# "BR": "GMT-4",
# "EUW": "GMT-2",
# "TR": "GMT-0",
# "EUNE": "GMT+1",
# "RU": "GMT+3",
# "KR": "GMT+6",
# "JP": "GMT+7",
# "OCE": "GMT+8",
# }
# return tzs[self.value]
#
# @property
# def continent(self) -> "Continent":
# if self is Region.brazil:
# return Continent.americas
# if self is Region.europe_north_east:
# return Continent.europe
# if self is Region.europe_west:
# return Continent.europe
# if self is Region.japan:
# return Continent.asia
# if self is Region.korea:
# return Continent.asia
# if self is Region.latin_america_north:
# return Continent.americas
# if self is Region.latin_america_south:
# return Continent.americas
# if self is Region.north_america:
# return Continent.americas
# if self is Region.oceania:
# return (
# Continent.americas
# ) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
# if self is Region.turkey:
# return Continent.europe
# if self is Region.russia:
# return Continent.europe
#
# Path: cassiopeia/datastores/uniquekeys.py
# def convert_region_to_platform(query: MutableMapping[str, Any]) -> None:
# if "region" in query and "platform" not in query:
# try:
# query["platform"] = Region(query["region"]).platform
# except ValueError as e:
# raise QueryValidationError from e
#
# if "regions" in query and "platforms" not in query:
# query["platforms"] = _region_to_platform_generator(query["regions"])
#
# if "region" in query and not isinstance(query["region"], Region):
# query["region"] = Region(query["region"])
, which may contain function names, class names, or code. Output only the next line. | type: Type[T], |
Next line prediction: <|code_start|>__copyright__ = "Copyright (C) 2013 Kristoffer Carlsson"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
<|code_end|>
. Use current file imports:
(from collections import defaultdict
from phon.mesh_objects.element import Element
from phon.mesh_objects.element_set import ElementSet
from phon.mesh_objects.node import Node
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: phon/mesh_objects/element.py
# class Element:
#
# """ Represents a finite element. """
#
# def __init__(self, elem_type, vertices):
# """
# :param elem_type: The type of the element in Abaqus format.
# :type elem_type: str
# :param vertices: The vertices of the element given as node identifiers.
# :type vertices: list of ints
#
# """
# self.elem_type = elem_type
# self.vertices = vertices
#
# def __str__(self):
# """
# Returns a string representation of the element.
#
# :return: The string representation
# :rtype: string
#
# """
# return ("Element of type {0} containing the vertices with "
# "id {1}.".format(self.elem_type, self.vertices))
#
# Path: phon/mesh_objects/element_set.py
# class ElementSet:
#
# """ Represents a set of elements """
#
# def __init__(self, name, dimension, ids=None):
# """
# :param name: Name of the element set.
# :type name: str
# :param dimension: The dimension of the elements in the set
# :type dimension: int
# :param ids: Identifiers of the elements in the set.
# :type ids: list of ints
#
# """
# self.name = name
# self.dimension = dimension
#
# if ids is None:
# ids = []
# self.ids = ids
#
# # Specific properties assigned to the set, e.g. material properties.
# self.set_properties = {}
#
# def get_dimension(self):
# """
# Get the dimension of the element set.
#
# :return: The dimension.
# :rtype: int
#
# """
# return self.dimension
#
# def __str__(self):
# """
# Returns a string representation of the element set.
#
# :return: The string representation
# :rtype: str
#
# """
# return ("Element set with name {0} containing elements with the "
# "following ids {1}".format(self.name, self.ids))
#
# def get_all_node_ids(self, mesh):
# """
# Gets all the nodes identifiers for the elements in the element set.
#
# :param mesh: The mesh
# :type mesh: :class:`phon.mesh_objects.mesh.Mesh()`
# :return: The node identifiers
# :rtype: list of ints
#
# """
# all_node_ids = []
#
# for element_id in self.ids:
# all_node_ids += mesh.elements[element_id].vertices
#
# return list(set(all_node_ids))
#
# Path: phon/mesh_objects/node.py
# class Node:
#
# """ Represents a node."""
#
# def __init__(self, c):
# """
# :param c: The coordinates of the node
# :type c: ndarray
#
# """
# self.c = c
#
# def __str__(self):
# """
# Returns a string representation of the node.
#
# :return: The string representation.
# :rtype: str
#
# """
# return "Node located at (%f, %f, %f)." % (
# self.c[0], self.c[1], self.c[2])
. Output only the next line. | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
Predict the next line for this snippet: <|code_start|>__copyright__ = "Copyright (C) 2013 Kristoffer Carlsson"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
<|code_end|>
with the help of current file imports:
from collections import defaultdict
from phon.mesh_objects.element import Element
from phon.mesh_objects.element_set import ElementSet
from phon.mesh_objects.node import Node
import numpy as np
and context from other files:
# Path: phon/mesh_objects/element.py
# class Element:
#
# """ Represents a finite element. """
#
# def __init__(self, elem_type, vertices):
# """
# :param elem_type: The type of the element in Abaqus format.
# :type elem_type: str
# :param vertices: The vertices of the element given as node identifiers.
# :type vertices: list of ints
#
# """
# self.elem_type = elem_type
# self.vertices = vertices
#
# def __str__(self):
# """
# Returns a string representation of the element.
#
# :return: The string representation
# :rtype: string
#
# """
# return ("Element of type {0} containing the vertices with "
# "id {1}.".format(self.elem_type, self.vertices))
#
# Path: phon/mesh_objects/element_set.py
# class ElementSet:
#
# """ Represents a set of elements """
#
# def __init__(self, name, dimension, ids=None):
# """
# :param name: Name of the element set.
# :type name: str
# :param dimension: The dimension of the elements in the set
# :type dimension: int
# :param ids: Identifiers of the elements in the set.
# :type ids: list of ints
#
# """
# self.name = name
# self.dimension = dimension
#
# if ids is None:
# ids = []
# self.ids = ids
#
# # Specific properties assigned to the set, e.g. material properties.
# self.set_properties = {}
#
# def get_dimension(self):
# """
# Get the dimension of the element set.
#
# :return: The dimension.
# :rtype: int
#
# """
# return self.dimension
#
# def __str__(self):
# """
# Returns a string representation of the element set.
#
# :return: The string representation
# :rtype: str
#
# """
# return ("Element set with name {0} containing elements with the "
# "following ids {1}".format(self.name, self.ids))
#
# def get_all_node_ids(self, mesh):
# """
# Gets all the nodes identifiers for the elements in the element set.
#
# :param mesh: The mesh
# :type mesh: :class:`phon.mesh_objects.mesh.Mesh()`
# :return: The node identifiers
# :rtype: list of ints
#
# """
# all_node_ids = []
#
# for element_id in self.ids:
# all_node_ids += mesh.elements[element_id].vertices
#
# return list(set(all_node_ids))
#
# Path: phon/mesh_objects/node.py
# class Node:
#
# """ Represents a node."""
#
# def __init__(self, c):
# """
# :param c: The coordinates of the node
# :type c: ndarray
#
# """
# self.c = c
#
# def __str__(self):
# """
# Returns a string representation of the node.
#
# :return: The string representation.
# :rtype: str
#
# """
# return "Node located at (%f, %f, %f)." % (
# self.c[0], self.c[1], self.c[2])
, which may contain function names, class names, or code. Output only the next line. | of this software and associated documentation files (the "Software"), to deal |
Next line prediction: <|code_start|>__copyright__ = "Copyright (C) 2013 Kristoffer Carlsson"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
<|code_end|>
. Use current file imports:
(import unittest
import os
from phon.io_tools.read.read_from_abaqus_inp import read_from_abaqus_inp
from phon.io_tools.read.read_from_abaqus_inp import to_number)
and context including class names, function names, or small code snippets from other files:
# Path: phon/io_tools/read/read_from_abaqus_inp.py
# def read_from_abaqus_inp(filename, verbose=0):
# """
# Reads a mesh file in Abaqus .inp format and stores it into a
# Mesh class object.
#
# :param filename: The name of the file from where to read the mesh from.
# :type filename: string
# :param verbose: Determines what level of print out to the console.
# :type verbose: 0, 1 or 2
# :return: A mesh class containing the read mesh objects.
# :rtype: :class:`phon.mesh_objects.mesh()`
# :raises ReadInpFileError: If specific syntax error are found.
#
# """
#
# with open(filename, "rU") as f:
#
# # Read mesh objects
# num_elems = 0
# mesh = Mesh("temp_name")
# while True:
# start_of_line = f.tell()
# keyword = f.readline()
# if f.readline() == "":
# break
# keyword = keyword.strip().split(",")[0]
# f.seek(start_of_line)
#
# if keyword.lower() == "*part":
# _read_part(f, mesh, verbose)
# elif keyword.lower() == "*node":
# _read_nodes(f, mesh, verbose)
# elif keyword.lower() == "*element":
# num_elems += _read_elements(f, mesh, num_elems, verbose)
# elif keyword.lower() == "*elset":
# _read_element_set(f, mesh, verbose)
# elif keyword.lower() == "*nset":
# _read_node_set(f, mesh, verbose)
# elif keyword.lower() == "*end part":
# break
# else:
# f.readline()
#
# continue
#
# f.close()
#
# return mesh
#
# Path: phon/io_tools/read/read_from_abaqus_inp.py
# def to_number(number):
# """
# Converts a string to a int if possible, else a float.
#
# :param number: The string to convert to a number
# :type number: string
#
# :return: The converted number
# :rtype: : int or float depending on the format of the string
#
# """
# try:
# return int(number)
# except ValueError:
# return float(number)
. Output only the next line. | furnished to do so, subject to the following conditions: |
Next line prediction: <|code_start|>__copyright__ = "Copyright (C) 2013 Kristoffer Carlsson"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
<|code_end|>
. Use current file imports:
(import unittest
import os
from phon.io_tools.read.read_from_abaqus_inp import read_from_abaqus_inp
from phon.io_tools.read.read_from_abaqus_inp import to_number)
and context including class names, function names, or small code snippets from other files:
# Path: phon/io_tools/read/read_from_abaqus_inp.py
# def read_from_abaqus_inp(filename, verbose=0):
# """
# Reads a mesh file in Abaqus .inp format and stores it into a
# Mesh class object.
#
# :param filename: The name of the file from where to read the mesh from.
# :type filename: string
# :param verbose: Determines what level of print out to the console.
# :type verbose: 0, 1 or 2
# :return: A mesh class containing the read mesh objects.
# :rtype: :class:`phon.mesh_objects.mesh()`
# :raises ReadInpFileError: If specific syntax error are found.
#
# """
#
# with open(filename, "rU") as f:
#
# # Read mesh objects
# num_elems = 0
# mesh = Mesh("temp_name")
# while True:
# start_of_line = f.tell()
# keyword = f.readline()
# if f.readline() == "":
# break
# keyword = keyword.strip().split(",")[0]
# f.seek(start_of_line)
#
# if keyword.lower() == "*part":
# _read_part(f, mesh, verbose)
# elif keyword.lower() == "*node":
# _read_nodes(f, mesh, verbose)
# elif keyword.lower() == "*element":
# num_elems += _read_elements(f, mesh, num_elems, verbose)
# elif keyword.lower() == "*elset":
# _read_element_set(f, mesh, verbose)
# elif keyword.lower() == "*nset":
# _read_node_set(f, mesh, verbose)
# elif keyword.lower() == "*end part":
# break
# else:
# f.readline()
#
# continue
#
# f.close()
#
# return mesh
#
# Path: phon/io_tools/read/read_from_abaqus_inp.py
# def to_number(number):
# """
# Converts a string to a int if possible, else a float.
#
# :param number: The string to convert to a number
# :type number: string
#
# :return: The converted number
# :rtype: : int or float depending on the format of the string
#
# """
# try:
# return int(number)
# except ValueError:
# return float(number)
. Output only the next line. | all copies or substantial portions of the Software. |
Predict the next line for this snippet: <|code_start|>
matplotlib.use('TkAgg')
def display_dataset():
logging.basicConfig(level=logging.DEBUG)
cfg = load_config()
dataset = dataset_create(cfg)
dataset.set_shuffle(False)
while True:
batch = dataset.next_batch()
for frame_id in range(1):
img = batch[Batch.inputs][frame_id,:,:,:]
img = np.squeeze(img).astype('uint8')
scmap = batch[Batch.part_score_targets][frame_id,:,:,:]
scmap = np.squeeze(scmap)
# scmask = batch[Batch.part_score_weights]
# if scmask.size > 1:
# scmask = np.squeeze(scmask).astype('uint8')
# else:
# scmask = np.zeros(img.shape)
subplot_height = 4
subplot_width = 5
<|code_end|>
with the help of current file imports:
import logging
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imresize
from util.config import load_config
from dataset.pose_dataset import Batch
from dataset.factory import create as dataset_create
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
, which may contain function names, class names, or code. Output only the next line. | num_plots = subplot_width * subplot_height |
Here is a snippet: <|code_start|> cfg = load_config()
dataset = dataset_create(cfg)
dataset.set_shuffle(False)
while True:
batch = dataset.next_batch()
for frame_id in range(1):
img = batch[Batch.inputs][frame_id,:,:,:]
img = np.squeeze(img).astype('uint8')
scmap = batch[Batch.part_score_targets][frame_id,:,:,:]
scmap = np.squeeze(scmap)
# scmask = batch[Batch.part_score_weights]
# if scmask.size > 1:
# scmask = np.squeeze(scmask).astype('uint8')
# else:
# scmask = np.zeros(img.shape)
subplot_height = 4
subplot_width = 5
num_plots = subplot_width * subplot_height
f, axarr = plt.subplots(subplot_height, subplot_width)
for j in range(num_plots):
plot_j = j // subplot_width
plot_i = j % subplot_width
curr_plot = axarr[plot_j, plot_i]
<|code_end|>
. Write the next line using the current file imports:
import logging
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imresize
from util.config import load_config
from dataset.pose_dataset import Batch
from dataset.factory import create as dataset_create
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
, which may include functions, classes, or code. Output only the next line. | curr_plot.axis('off') |
Using the snippet: <|code_start|> img = np.squeeze(img).astype('uint8')
scmap = batch[Batch.part_score_targets][frame_id,:,:,:]
scmap = np.squeeze(scmap)
# scmask = batch[Batch.part_score_weights]
# if scmask.size > 1:
# scmask = np.squeeze(scmask).astype('uint8')
# else:
# scmask = np.zeros(img.shape)
subplot_height = 4
subplot_width = 5
num_plots = subplot_width * subplot_height
f, axarr = plt.subplots(subplot_height, subplot_width)
for j in range(num_plots):
plot_j = j // subplot_width
plot_i = j % subplot_width
curr_plot = axarr[plot_j, plot_i]
curr_plot.axis('off')
if j >= cfg.num_joints:
continue
scmap_part = scmap[:,:,j]
scmap_part = imresize(scmap_part, 8.0, interp='nearest')
scmap_part = np.lib.pad(scmap_part, ((4, 0), (4, 0)), 'minimum')
<|code_end|>
, determine the next line of code. You have imports:
import logging
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imresize
from util.config import load_config
from dataset.pose_dataset import Batch
from dataset.factory import create as dataset_create
and context (class names, function names, or code) available:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
. Output only the next line. | curr_plot.set_title("{}".format(j+1)) |
Given the following code snippet before the placeholder: <|code_start|>def rect_size(rect):
return np.array([rect[2]-rect[0], rect[3]-rect[1]])
def print_results(pck, cfg):
str = ""
for heading in (cfg.all_joints_names + ["total"]):
str += " & " + heading
print(str)
str = ""
all_joint_ids = cfg.all_joints + [np.arange(cfg.num_joints)]
for j_ids in all_joint_ids:
j_ids_np = arr(j_ids)
pck_av = np.mean(pck[j_ids_np])
str += " & {0:.1f}".format(pck_av)
print(str)
def eval_pck(cfg):
dataset = dataset_create(cfg)
filename = 'predictions.mat'
pred = sio.loadmat(filename)
joints = pred['joints']
pck_ratio_thresh = cfg.pck_threshold
num_joints = cfg.num_joints
num_images = joints.shape[1]
<|code_end|>
, predict the next line using imports from the current file:
import argparse
import numpy as np
import scipy.io as sio
from numpy import array as arr
from util.config import load_config
from dataset.factory import create as dataset_create
and context including class names, function names, and sometimes code from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
. Output only the next line. | pred_joints = np.zeros((num_images, num_joints, 2)) |
Based on the snippet: <|code_start|>
def rect_size(rect):
return np.array([rect[2]-rect[0], rect[3]-rect[1]])
def print_results(pck, cfg):
str = ""
for heading in (cfg.all_joints_names + ["total"]):
str += " & " + heading
print(str)
str = ""
all_joint_ids = cfg.all_joints + [np.arange(cfg.num_joints)]
for j_ids in all_joint_ids:
j_ids_np = arr(j_ids)
pck_av = np.mean(pck[j_ids_np])
str += " & {0:.1f}".format(pck_av)
print(str)
def eval_pck(cfg):
dataset = dataset_create(cfg)
filename = 'predictions.mat'
pred = sio.loadmat(filename)
joints = pred['joints']
pck_ratio_thresh = cfg.pck_threshold
num_joints = cfg.num_joints
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import numpy as np
import scipy.io as sio
from numpy import array as arr
from util.config import load_config
from dataset.factory import create as dataset_create
and context (classes, functions, sometimes code) from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
. Output only the next line. | num_images = joints.shape[1] |
Next line prediction: <|code_start|>
sys.path.append(os.path.dirname(__file__) + "/../")
cfg = load_config("demo/pose_cfg.yaml")
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Read image from file
<|code_end|>
. Use current file imports:
(import os
import sys
from scipy.misc import imread
from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input)
and context including class names, function names, or small code snippets from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# def argmax_pose_predict(scmap, offmat, stride):
# def argmax_arrows_predict(scmap, offmat, pairwise_diff, stride):
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
#
# Path: dataset/pose_dataset.py
# def data_to_input(data):
# return np.expand_dims(data, axis=0).astype(float)
. Output only the next line. | file_name = "demo/image.png" |
Predict the next line for this snippet: <|code_start|>
sys.path.append(os.path.dirname(__file__) + "/../")
cfg = load_config("demo/pose_cfg.yaml")
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Read image from file
<|code_end|>
with the help of current file imports:
import os
import sys
from scipy.misc import imread
from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# def argmax_pose_predict(scmap, offmat, stride):
# def argmax_arrows_predict(scmap, offmat, pairwise_diff, stride):
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
#
# Path: dataset/pose_dataset.py
# def data_to_input(data):
# return np.expand_dims(data, axis=0).astype(float)
, which may contain function names, class names, or code. Output only the next line. | file_name = "demo/image.png" |
Predict the next line for this snippet: <|code_start|>
sys.path.append(os.path.dirname(__file__) + "/../")
cfg = load_config("demo/pose_cfg.yaml")
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Read image from file
file_name = "demo/image.png"
<|code_end|>
with the help of current file imports:
import os
import sys
from scipy.misc import imread
from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# def argmax_pose_predict(scmap, offmat, stride):
# def argmax_arrows_predict(scmap, offmat, pairwise_diff, stride):
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
#
# Path: dataset/pose_dataset.py
# def data_to_input(data):
# return np.expand_dims(data, axis=0).astype(float)
, which may contain function names, class names, or code. Output only the next line. | image = imread(file_name, mode='RGB') |
Next line prediction: <|code_start|> self.prev_color_assignment = None
# generated colors from http://tools.medialab.sciences-po.fr/iwanthue/
track_colors_str = ["#F5591E",
"#3870FB",
"#FE5DB0",
"#B4A691",
"#43053F",
"#3475B1",
"#642612",
"#B3B43D",
"#DD9BFE",
"#28948D",
"#E99D53",
"#012B46",
"#9D2DA3",
"#04220A",
"#62CB22",
"#EE8F91",
"#D71638",
"#00613A",
"#318918",
"#B770FF",
"#82C091",
"#6C1333",
"#973405",
"#B19CB2",
"#F6267B",
"#284489",
"#97BF17",
<|code_end|>
. Use current file imports:
(import math
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import munkres
from util.visualize import check_point, _npcircle
from util import visualize)
and context including class names, function names, or small code snippets from other files:
# Path: util/visualize.py
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# return minx < cur_x < maxx and miny < cur_y < maxy
#
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# """Draw a circle on an image using only numpy methods."""
# radius = int(radius)
# cx = int(cx)
# cy = int(cy)
# y, x = np.ogrid[-radius: radius, -radius: radius]
# index = x**2 + y**2 <= radius**2
# image[cy-radius:cy+radius, cx-radius:cx+radius][index] = (
# image[cy-radius:cy+radius, cx-radius:cx+radius][index].astype('float32') * transparency +
# np.array(color).astype('float32') * (1.0 - transparency)).astype('uint8')
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
. Output only the next line. | "#3B899C", |
Given snippet: <|code_start|> conf_assign = [(idx2, idx1) for idx1, idx2 in conf_assign]
cost_matrix = cost_matrix.T
for pidx1, pidx2 in conf_assign:
if cost_matrix[pidx1][pidx2] < min_match_dist:
color_assignment[pidx1] = self.prev_color_assignment[pidx2]
print("#tracked objects:", len(color_assignment))
free_coloridx = sorted(list(set(range(len(self.track_colors))).difference(set(color_assignment.values()))),
reverse=True)
for pidx in range(num_people):
# color_idx = pidx % len(self.track_colors)
if pidx in color_assignment:
color_idx = color_assignment[pidx]
else:
if len(free_coloridx) > 0:
color_idx = free_coloridx[-1]
free_coloridx = free_coloridx[:-1]
else:
color_idx = np.random.randint(len(self.track_colors))
color_assignment[pidx] = color_idx
assert (color_idx < len(self.track_colors))
if np.sum(person_conf[pidx, :, 0] > 0) < draw_conf_min_count:
continue
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import math
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import munkres
from util.visualize import check_point, _npcircle
from util import visualize
and context:
# Path: util/visualize.py
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# return minx < cur_x < maxx and miny < cur_y < maxy
#
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# """Draw a circle on an image using only numpy methods."""
# radius = int(radius)
# cx = int(cx)
# cy = int(cy)
# y, x = np.ogrid[-radius: radius, -radius: radius]
# index = x**2 + y**2 <= radius**2
# image[cy-radius:cy+radius, cx-radius:cx+radius][index] = (
# image[cy-radius:cy+radius, cx-radius:cx+radius][index].astype('float32') * transparency +
# np.array(color).astype('float32') * (1.0 - transparency)).astype('uint8')
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
which might include code, classes, or functions. Output only the next line. | for kidx1, kidx2 in dataset.get_pose_segments(): |
Here is a snippet: <|code_start|>
def remap_keys(mapping):
return [{'key': k, 'value': v} for k, v in mapping.items()]
def save_stats(stats, cfg):
mat_stats = {}
mat_stats["graph"] = []
mat_stats["means"] = []
mat_stats["std_devs"] = []
for start in range(cfg.num_joints):
for end in range(cfg.num_joints):
if start != end:
joint_pair = (start, end)
mat_stats["graph"].append([start, end])
mat_stats["means"].append(stats[joint_pair]["mean"])
mat_stats["std_devs"].append(stats[joint_pair]["std"])
print(mat_stats)
scipy.io.savemat(cfg.pairwise_stats_fn, mat_stats)
# Compute pairwise statistics at reference scale
def pairwise_stats():
cfg = load_config()
dataset = create_dataset(cfg)
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import scipy.io
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
, which may include functions, classes, or code. Output only the next line. | dataset.set_shuffle(True) |
Based on the snippet: <|code_start|> scipy.io.savemat(cfg.pairwise_stats_fn, mat_stats)
# Compute pairwise statistics at reference scale
def pairwise_stats():
cfg = load_config()
dataset = create_dataset(cfg)
dataset.set_shuffle(True)
dataset.set_pairwise_stats_collect(True)
num_images = dataset.num_images
all_pairwise_differences = {}
if cfg.mirror:
num_images *= 2
for k in range(num_images):
print('processing image {}/{}'.format(k, num_images-1))
batch = dataset.next_batch()
batch_stats = batch[Batch.data_item].pairwise_stats
for joint_pair in batch_stats:
if joint_pair not in all_pairwise_differences:
all_pairwise_differences[joint_pair] = []
all_pairwise_differences[joint_pair] += batch_stats[joint_pair]
stats = {}
for joint_pair in all_pairwise_differences:
stats[joint_pair] = {}
stats[joint_pair]["mean"] = np.mean(all_pairwise_differences[joint_pair], axis=0)
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
import scipy.io
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
and context (classes, functions, sometimes code) from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
. Output only the next line. | stats[joint_pair]["std"] = np.std(all_pairwise_differences[joint_pair], axis=0) |
Given snippet: <|code_start|> return [{'key': k, 'value': v} for k, v in mapping.items()]
def save_stats(stats, cfg):
mat_stats = {}
mat_stats["graph"] = []
mat_stats["means"] = []
mat_stats["std_devs"] = []
for start in range(cfg.num_joints):
for end in range(cfg.num_joints):
if start != end:
joint_pair = (start, end)
mat_stats["graph"].append([start, end])
mat_stats["means"].append(stats[joint_pair]["mean"])
mat_stats["std_devs"].append(stats[joint_pair]["std"])
print(mat_stats)
scipy.io.savemat(cfg.pairwise_stats_fn, mat_stats)
# Compute pairwise statistics at reference scale
def pairwise_stats():
cfg = load_config()
dataset = create_dataset(cfg)
dataset.set_shuffle(True)
dataset.set_pairwise_stats_collect(True)
num_images = dataset.num_images
all_pairwise_differences = {}
if cfg.mirror:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import scipy.io
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
and context:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
which might include code, classes, or functions. Output only the next line. | num_images *= 2 |
Given the code snippet: <|code_start|>
def setup_pose_prediction(cfg):
inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
outputs = pose_net(cfg).test(inputs)
restorer = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Restore variables from disk.
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import tensorflow as tf
from nnet.net_factory import pose_net
and context (functions, classes, or occasionally code) from other files:
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
. Output only the next line. | restorer.restore(sess, cfg.init_weights) |
Next line prediction: <|code_start|> scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg)
pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
out_fn = os.path.join(out_dir, raw_name + '.mat')
scipy.io.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
if cfg.location_refinement:
scipy.io.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
scipy.io.savemat('predictions.mat', mdict={'joints': predictions})
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
<|code_end|>
. Use current file imports:
(import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize)
and context including class names, function names, or small code snippets from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
. Output only the next line. | parser.add_argument('--novis', default=False, action='store_true') |
Predict the next line for this snippet: <|code_start|>
pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
out_fn = os.path.join(out_dir, raw_name + '.mat')
scipy.io.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
if cfg.location_refinement:
scipy.io.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
scipy.io.savemat('predictions.mat', mdict={'joints': predictions})
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--novis', default=False, action='store_true')
<|code_end|>
with the help of current file imports:
import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
, which may contain function names, class names, or code. Output only the next line. | parser.add_argument('--cache', default=False, action='store_true') |
Given snippet: <|code_start|> outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg)
pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
out_fn = os.path.join(out_dir, raw_name + '.mat')
scipy.io.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
if cfg.location_refinement:
scipy.io.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
scipy.io.savemat('predictions.mat', mdict={'joints': predictions})
sess.close()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize
and context:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
which might include code, classes, or functions. Output only the next line. | if __name__ == '__main__': |
Given the following code snippet before the placeholder: <|code_start|> pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
out_fn = os.path.join(out_dir, raw_name + '.mat')
scipy.io.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
if cfg.location_refinement:
scipy.io.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
scipy.io.savemat('predictions.mat', mdict={'joints': predictions})
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--novis', default=False, action='store_true')
parser.add_argument('--cache', default=False, action='store_true')
<|code_end|>
, predict the next line using imports from the current file:
import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize
and context including class names, function names, and sometimes code from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
. Output only the next line. | args, unparsed = parser.parse_known_args() |
Based on the snippet: <|code_start|> out_dir = cfg.scoremap_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
num_images = dataset.num_images
predictions = np.zeros((num_images,), dtype=np.object)
for k in range(num_images):
print('processing image {}/{}'.format(k, num_images-1))
batch = dataset.next_batch()
outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg)
pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize
and context (classes, functions, sometimes code) from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
. Output only the next line. | out_fn = os.path.join(out_dir, raw_name + '.mat') |
Next line prediction: <|code_start|>
def test_net(visualise, cache_scoremaps):
logging.basicConfig(level=logging.INFO)
cfg = load_config()
dataset = create_dataset(cfg)
dataset.set_shuffle(False)
<|code_end|>
. Use current file imports:
(import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize)
and context including class names, function names, or small code snippets from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
. Output only the next line. | dataset.set_test_mode(True) |
Given snippet: <|code_start|> scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg)
pose = argmax_pose_predict(scmap, locref, cfg.stride)
pose_refscale = np.copy(pose)
pose_refscale[:, 0:2] /= cfg.global_scale
predictions[k] = pose_refscale
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
visualize.show_heatmaps(cfg, img, scmap, pose)
visualize.waitforbuttonpress()
if cache_scoremaps:
base = os.path.basename(batch[Batch.data_item].im_path)
raw_name = os.path.splitext(base)[0]
out_fn = os.path.join(out_dir, raw_name + '.mat')
scipy.io.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
if cfg.location_refinement:
scipy.io.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
scipy.io.savemat('predictions.mat', mdict={'joints': predictions})
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from nnet.predict import setup_pose_prediction, extract_cnn_output, argmax_pose_predict
from util import visualize
and context:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/predict.py
# def setup_pose_prediction(cfg):
# inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
#
# outputs = pose_net(cfg).test(inputs)
#
# restorer = tf.train.Saver()
#
# sess = tf.Session()
#
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
#
# # Restore variables from disk.
# restorer.restore(sess, cfg.init_weights)
#
# return sess, inputs, outputs
#
# def extract_cnn_output(outputs_np, cfg, pairwise_stats = None):
# scmap = outputs_np['part_prob']
# scmap = np.squeeze(scmap)
# locref = None
# pairwise_diff = None
# if cfg.location_refinement:
# locref = np.squeeze(outputs_np['locref'])
# shape = locref.shape
# locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
# locref *= cfg.locref_stdev
# if cfg.pairwise_predict:
# pairwise_diff = np.squeeze(outputs_np['pairwise_pred'])
# shape = pairwise_diff.shape
# pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
# num_joints = cfg.num_joints
# for pair in pairwise_stats:
# pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
# pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
# pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
# pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
# pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
# return scmap, locref, pairwise_diff
#
# def argmax_pose_predict(scmap, offmat, stride):
# """Combine scoremat and offsets to the final pose."""
# num_joints = scmap.shape[2]
# pose = []
# for joint_idx in range(num_joints):
# maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
# scmap[:, :, joint_idx].shape)
# offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
# pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
# offset)
# pose.append(np.hstack((pos_f8[::-1],
# [scmap[maxloc][joint_idx]])))
# return np.array(pose)
#
# Path: util/visualize.py
# def _npcircle(image, cx, cy, radius, color, transparency=0.0):
# def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
# def visualize_joints(image, pose):
# def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
# def show_arrows(cfg, img, pose, arrows):
# def waitforbuttonpress():
which might include code, classes, or functions. Output only the next line. | parser.add_argument('--novis', default=False, action='store_true') |
Given snippet: <|code_start|>
def train():
setup_logging()
cfg = load_config()
dataset = create_dataset(cfg)
batch_spec = get_batch_spec(cfg)
batch, enqueue_op, placeholders = setup_preloading(batch_spec)
losses = pose_net(cfg).train(batch)
total_loss = losses['total_loss']
for k, t in losses.items():
tf.summary.scalar(k, t)
merged_summaries = tf.summary.merge_all()
variables_to_restore = slim.get_variables_to_restore(include=["resnet_v1"])
restorer = tf.train.Saver(variables_to_restore)
saver = tf.train.Saver(max_to_keep=5)
sess = tf.Session()
coord, thread = start_preloading(sess, enqueue_op, dataset, placeholders)
train_writer = tf.summary.FileWriter(cfg.log_dir, sess.graph)
learning_rate, train_op = get_optimizer(total_loss, cfg)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import threading
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.config import load_config
from dataset.factory import create as create_dataset
from nnet.net_factory import pose_net
from nnet.pose_net import get_batch_spec
from util.logging import setup_logging
and context:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
#
# Path: nnet/pose_net.py
# def get_batch_spec(cfg):
# num_joints = cfg.num_joints
# batch_size = cfg.batch_size
# batch_spec = {
# Batch.inputs: [batch_size, None, None, 3],
# Batch.part_score_targets: [batch_size, None, None, num_joints],
# Batch.part_score_weights: [batch_size, None, None, num_joints]
# }
# if cfg.location_refinement:
# batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
# batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
# if cfg.pairwise_predict:
# batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# return batch_spec
#
# Path: util/logging.py
# def setup_logging():
# FORMAT = '%(asctime)-15s %(message)s'
# logging.basicConfig(filename='log.txt', filemode='w',
# datefmt='%Y-%m-%d %H:%M:%S',
# level=logging.INFO, format=FORMAT)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
which might include code, classes, or functions. Output only the next line. | sess.run(tf.global_variables_initializer()) |
Next line prediction: <|code_start|> def __init__(self, cfg):
self.steps = cfg.multi_step
self.current_step = 0
def get_lr(self, iteration):
lr = self.steps[self.current_step][0]
if iteration == self.steps[self.current_step][1]:
self.current_step += 1
return lr
def setup_preloading(batch_spec):
placeholders = {name: tf.placeholder(tf.float32, shape=spec) for (name, spec) in batch_spec.items()}
names = placeholders.keys()
placeholders_list = list(placeholders.values())
QUEUE_SIZE = 20
q = tf.FIFOQueue(QUEUE_SIZE, [tf.float32]*len(batch_spec))
enqueue_op = q.enqueue(placeholders_list)
batch_list = q.dequeue()
batch = {}
for idx, name in enumerate(names):
batch[name] = batch_list[idx]
batch[name].set_shape(batch_spec[name])
return batch, enqueue_op, placeholders
<|code_end|>
. Use current file imports:
(import logging
import threading
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.config import load_config
from dataset.factory import create as create_dataset
from nnet.net_factory import pose_net
from nnet.pose_net import get_batch_spec
from util.logging import setup_logging)
and context including class names, function names, or small code snippets from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
#
# Path: nnet/pose_net.py
# def get_batch_spec(cfg):
# num_joints = cfg.num_joints
# batch_size = cfg.batch_size
# batch_spec = {
# Batch.inputs: [batch_size, None, None, 3],
# Batch.part_score_targets: [batch_size, None, None, num_joints],
# Batch.part_score_weights: [batch_size, None, None, num_joints]
# }
# if cfg.location_refinement:
# batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
# batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
# if cfg.pairwise_predict:
# batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# return batch_spec
#
# Path: util/logging.py
# def setup_logging():
# FORMAT = '%(asctime)-15s %(message)s'
# logging.basicConfig(filename='log.txt', filemode='w',
# datefmt='%Y-%m-%d %H:%M:%S',
# level=logging.INFO, format=FORMAT)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
. Output only the next line. | def load_and_enqueue(sess, enqueue_op, coord, dataset, placeholders): |
Continue the code snippet: <|code_start|>
class LearningRate(object):
def __init__(self, cfg):
self.steps = cfg.multi_step
self.current_step = 0
def get_lr(self, iteration):
lr = self.steps[self.current_step][0]
if iteration == self.steps[self.current_step][1]:
self.current_step += 1
return lr
def setup_preloading(batch_spec):
placeholders = {name: tf.placeholder(tf.float32, shape=spec) for (name, spec) in batch_spec.items()}
names = placeholders.keys()
placeholders_list = list(placeholders.values())
<|code_end|>
. Use current file imports:
import logging
import threading
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.config import load_config
from dataset.factory import create as create_dataset
from nnet.net_factory import pose_net
from nnet.pose_net import get_batch_spec
from util.logging import setup_logging
and context (classes, functions, or code) from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
#
# Path: nnet/pose_net.py
# def get_batch_spec(cfg):
# num_joints = cfg.num_joints
# batch_size = cfg.batch_size
# batch_spec = {
# Batch.inputs: [batch_size, None, None, 3],
# Batch.part_score_targets: [batch_size, None, None, num_joints],
# Batch.part_score_weights: [batch_size, None, None, num_joints]
# }
# if cfg.location_refinement:
# batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
# batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
# if cfg.pairwise_predict:
# batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# return batch_spec
#
# Path: util/logging.py
# def setup_logging():
# FORMAT = '%(asctime)-15s %(message)s'
# logging.basicConfig(filename='log.txt', filemode='w',
# datefmt='%Y-%m-%d %H:%M:%S',
# level=logging.INFO, format=FORMAT)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
. Output only the next line. | QUEUE_SIZE = 20 |
Predict the next line after this snippet: <|code_start|> dataset = create_dataset(cfg)
batch_spec = get_batch_spec(cfg)
batch, enqueue_op, placeholders = setup_preloading(batch_spec)
losses = pose_net(cfg).train(batch)
total_loss = losses['total_loss']
for k, t in losses.items():
tf.summary.scalar(k, t)
merged_summaries = tf.summary.merge_all()
variables_to_restore = slim.get_variables_to_restore(include=["resnet_v1"])
restorer = tf.train.Saver(variables_to_restore)
saver = tf.train.Saver(max_to_keep=5)
sess = tf.Session()
coord, thread = start_preloading(sess, enqueue_op, dataset, placeholders)
train_writer = tf.summary.FileWriter(cfg.log_dir, sess.graph)
learning_rate, train_op = get_optimizer(total_loss, cfg)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Restore variables from disk.
restorer.restore(sess, cfg.init_weights)
<|code_end|>
using the current file's imports:
import logging
import threading
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.config import load_config
from dataset.factory import create as create_dataset
from nnet.net_factory import pose_net
from nnet.pose_net import get_batch_spec
from util.logging import setup_logging
and any relevant context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
#
# Path: nnet/pose_net.py
# def get_batch_spec(cfg):
# num_joints = cfg.num_joints
# batch_size = cfg.batch_size
# batch_spec = {
# Batch.inputs: [batch_size, None, None, 3],
# Batch.part_score_targets: [batch_size, None, None, num_joints],
# Batch.part_score_weights: [batch_size, None, None, num_joints]
# }
# if cfg.location_refinement:
# batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
# batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
# if cfg.pairwise_predict:
# batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# return batch_spec
#
# Path: util/logging.py
# def setup_logging():
# FORMAT = '%(asctime)-15s %(message)s'
# logging.basicConfig(filename='log.txt', filemode='w',
# datefmt='%Y-%m-%d %H:%M:%S',
# level=logging.INFO, format=FORMAT)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
. Output only the next line. | max_iter = int(cfg.multi_step[-1][1]) |
Next line prediction: <|code_start|> enqueue_op = q.enqueue(placeholders_list)
batch_list = q.dequeue()
batch = {}
for idx, name in enumerate(names):
batch[name] = batch_list[idx]
batch[name].set_shape(batch_spec[name])
return batch, enqueue_op, placeholders
def load_and_enqueue(sess, enqueue_op, coord, dataset, placeholders):
while not coord.should_stop():
batch_np = dataset.next_batch()
food = {pl: batch_np[name] for (name, pl) in placeholders.items()}
sess.run(enqueue_op, feed_dict=food)
def start_preloading(sess, enqueue_op, dataset, placeholders):
coord = tf.train.Coordinator()
t = threading.Thread(target=load_and_enqueue,
args=(sess, enqueue_op, coord, dataset, placeholders))
t.start()
return coord, t
def get_optimizer(loss_op, cfg):
learning_rate = tf.placeholder(tf.float32, shape=[])
<|code_end|>
. Use current file imports:
(import logging
import threading
import tensorflow as tf
import tensorflow.contrib.slim as slim
from util.config import load_config
from dataset.factory import create as create_dataset
from nnet.net_factory import pose_net
from nnet.pose_net import get_batch_spec
from util.logging import setup_logging)
and context including class names, function names, or small code snippets from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
#
# Path: dataset/factory.py
# def create(cfg):
# dataset_type = cfg.dataset_type
# if dataset_type == "mpii":
# from dataset.mpii import MPII
# data = MPII(cfg)
# elif dataset_type == "coco":
# from dataset.mscoco import MSCOCO
# data = MSCOCO(cfg)
# elif dataset_type == "penn_action":
# from dataset.penn_action import PennAction
# data = PennAction(cfg)
# elif dataset_type == "default":
# data = PoseDataset(cfg)
# else:
# raise Exception("Unsupported dataset_type: \"{}\"".format(dataset_type))
# return data
#
# Path: nnet/net_factory.py
# def pose_net(cfg):
# if cfg.video:
# from nnet.pose_seq_net import PoseSeqNet
# cls = PoseSeqNet
# else:
# cls = PoseNet
# return cls(cfg)
#
# Path: nnet/pose_net.py
# def get_batch_spec(cfg):
# num_joints = cfg.num_joints
# batch_size = cfg.batch_size
# batch_spec = {
# Batch.inputs: [batch_size, None, None, 3],
# Batch.part_score_targets: [batch_size, None, None, num_joints],
# Batch.part_score_weights: [batch_size, None, None, num_joints]
# }
# if cfg.location_refinement:
# batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
# batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
# if cfg.pairwise_predict:
# batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
# return batch_spec
#
# Path: util/logging.py
# def setup_logging():
# FORMAT = '%(asctime)-15s %(message)s'
# logging.basicConfig(filename='log.txt', filemode='w',
# datefmt='%Y-%m-%d %H:%M:%S',
# level=logging.INFO, format=FORMAT)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
. Output only the next line. | if cfg.optimizer == "sgd": |
Given snippet: <|code_start|>
net_funcs = {'resnet_50': resnet_v1.resnet_v1_50,
'resnet_101': resnet_v1.resnet_v1_101}
def prediction_layer(cfg, input, name, num_outputs):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], padding='SAME',
activation_fn=None, normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(cfg.weight_decay)):
with tf.variable_scope(name):
pred = slim.conv2d_transpose(input, num_outputs,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1
from dataset.pose_dataset import Batch
from nnet import losses
and context:
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/losses.py
# def huber_loss(labels, predictions, weight=1.0, k=1.0, scope=None):
which might include code, classes, or functions. Output only the next line. | kernel_size=[3, 3], stride=2, |
Predict the next line after this snippet: <|code_start|> layer_name = 'resnet_v1_{}'.format(num_layers) + '/block{}/unit_{}/bottleneck_v1'
out = {}
with tf.variable_scope(scope, reuse=reuse):
out['part_pred'] = prediction_layer(cfg, features, 'part_pred',
cfg.num_joints)
if cfg.location_refinement:
out['locref'] = prediction_layer(cfg, features, 'locref_pred',
cfg.num_joints * 2)
if cfg.pairwise_predict:
out['pairwise_pred'] = prediction_layer(cfg, features, 'pairwise_pred',
cfg.num_joints * (cfg.num_joints - 1) * 2)
if cfg.intermediate_supervision and not no_interm:
interm_name = layer_name.format(3, cfg.intermediate_supervision_layer)
block_interm_out = end_points[interm_name]
out['part_pred_interm'] = prediction_layer(cfg, block_interm_out,
'intermediate_supervision',
cfg.num_joints)
return out
def get_net(self, inputs):
net, end_points = self.extract_features(inputs)
return self.prediction_layers(net, end_points)
def test(self, inputs):
heads = self.get_net(inputs)
return self.add_test_layers(heads)
def add_test_layers(self, heads):
<|code_end|>
using the current file's imports:
import re
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1
from dataset.pose_dataset import Batch
from nnet import losses
and any relevant context from other files:
# Path: dataset/pose_dataset.py
# class Batch(Enum):
# inputs = 0
# part_score_targets = 1
# part_score_weights = 2
# locref_targets = 3
# locref_mask = 4
# pairwise_targets = 5
# pairwise_mask = 6
# data_item = 7
#
# Path: nnet/losses.py
# def huber_loss(labels, predictions, weight=1.0, k=1.0, scope=None):
. Output only the next line. | prob = tf.sigmoid(heads['part_pred']) |
Given the following code snippet before the placeholder: <|code_start|> return not sparse_graph or [cidx1, cidx2] in sparse_graph
def eval(self, cidx1, cidx2, detections):
unPos = detections.coord
idx_type1 = np.array(range(unPos[cidx1].shape[0]))
idx_type2 = np.array(range(unPos[cidx2].shape[0]))
assert(idx_type1.shape[0] > 0)
assert(idx_type2.shape[0] > 0)
num_edges = len(idx_type1) * len(idx_type2)
tmpidx1, tmpidx2 = np.meshgrid(idx_type1, idx_type2)
ptidx = np.hstack((tmpidx1.T.reshape((num_edges, 1)), tmpidx2.T.reshape((num_edges, 1))))
if cidx1 != cidx2:
cur_prob = self.compute_different_part_pairwise(cidx1, cidx2, detections, ptidx, num_edges)
else:
cur_prob = None
ptidx = ptidx[ptidx[:, 0] < ptidx[:, 1]]
delta = unPos[cidx2][ptidx[:, 1], :] - unPos[cidx1][ptidx[:, 0], :]
dists = np.linalg.norm(delta, axis=1, ord=2)
cur_prob = 1./(1 + np.exp(self.same_part_pw_coef*dists-7.5))
return cur_prob, ptidx
def compute_different_part_pairwise(self, cidx1, cidx2, detections, ptidx, num_edges):
<|code_end|>
, predict the next line using imports from the current file:
import math
import time
import sys
import os
import numpy as np
import scipy.io as sio
from collections import namedtuple
from multicut import solve_nl_lmp
from dataset.pose_dataset import get_pairwise_index
and context including class names, function names, and sometimes code from other files:
# Path: dataset/pose_dataset.py
# def get_pairwise_index(j_id, j_id_end, num_joints):
# return (num_joints - 1) * j_id + j_id_end - int(j_id < j_id_end)
. Output only the next line. | unPos = detections.coord |
Here is a snippet: <|code_start|> dataset_phase = cfg.dataset_phase
dataset_ann = cfg.dataset_ann
threshold = 0
# initialize cocoGT api
annFile = '%s/annotations/%s_%s.json' % (dataset, dataset_ann, dataset_phase)
cocoGT = COCO(annFile)
# initialize cocoPred api
inFile = "predictions_with_segm.json"
predFile = apply_threhsold(inFile, threshold)
cocoPred = cocoGT.loadRes(predFile)
return cocoGT, cocoPred
def eval_mscoco_with_segm(cocoGT, cocoPred):
# running evaluation
cocoEval = COCOeval(cocoGT, cocoPred, "keypoints")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args, unparsed = parser.parse_known_args()
cfg = load_config()
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
import argparse
import json
from util.config import load_config
from pycocotools.coco import COCO as COCO
from pycocotools.cocoeval import COCOeval
and context from other files:
# Path: util/config.py
# def load_config(filename = "pose_cfg.yaml"):
# if 'POSE_PARAM_PATH' in os.environ:
# filename = os.environ['POSE_PARAM_PATH'] + '/' + filename
# return cfg_from_file(filename)
, which may include functions, classes, or code. Output only the next line. | cocoGT, cocoPred = eval_init(cfg) |
Here is a snippet: <|code_start|> # vThunder defaults
for vk, vv in defaults.VTHUNDER_OPTIONAL_DEFAULTS.items():
if vk not in self._vthunder:
self._vthunder[vk] = vv
for dk, dv in defaults.DEVICE_OPTIONAL_DEFAULTS.items():
if dk not in self._vthunder:
self._vthunder[dk] = dv
# Setup db foo
if self._config.use_database and self._config.database_connection is None:
self._config.database_connection = self._get_neutron_db_string()
if self._config.keystone_auth_url is None:
self._config.keystone_auth_url = self._get_neutron_conf(
'keystone_authtoken', 'auth_uri')
# TODO(mdurrant) - There's a way to do this with getattr/setattr
self._vport_defaults = {}
if hasattr(self._config, "vport_defaults"):
self._vport_defaults = self._config.vport_defaults
self._vport_expressions = {}
if hasattr(self._config, "vport_expressions"):
self._vport_expressions = self._config.vport_expressions
self._virtual_server_expressions = {}
if hasattr(self._config, "virtual_server_expressions"):
self._virtual_server_expressions = self._config.virtual_server_expressions
self._service_group_expressions = {}
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
import runpy
import sys
import ConfigParser as ini
import configparser as ini
from debtcollector import removals
from a10_neutron_lbaas import a10_exceptions as a10_ex
from a10_neutron_lbaas.etc import config as blank_config
from a10_neutron_lbaas.etc import defaults
from a10_neutron_lbaas.db import models
from a10_neutron_lbaas.db import models
and context from other files:
# Path: a10_neutron_lbaas/a10_exceptions.py
# class InternalError(Exception):
# class InvalidConfig(Exception):
# class InvalidDeviceConfig(Exception):
# class UnsupportedFeature(Exception):
# class DeviceConfigMissing(Exception):
# class InstanceMissing(Exception):
# class NoDatabaseURL(Exception):
# class RequiresDatabase(Exception):
# class ImageNotFoundError(Exception):
# class FlavorNotFoundError(Exception):
# class NetworksNotFoundError(Exception):
# class IdentifierUnspecifiedError(Exception):
# class ServiceUnavailableError(Exception):
# class FeatureNotConfiguredError(Exception):
# class NoDevicesAvailableError(Exception):
# class NotImplemented(Exception):
# class ConnLimitOutOfBounds(Exception):
#
# Path: a10_neutron_lbaas/etc/config.py
#
# Path: a10_neutron_lbaas/etc/defaults.py
# GLOBAL_DEFAULTS = {
# "verify_appliances": False,
# "use_database": False,
# "database_connection": None,
# "neutron_conf_dir": '/etc/neutron',
# "member_name_use_uuid": False,
# "keystone_auth_url": None,
# "keystone_version": 2,
# "plumbing_hooks_class": a10_neutron_lbaas.plumbing_hooks.PlumbingHooks,
# "nova_api_version": "2.1",
# "vport_defaults": {},
# "use_parent_project": False
# }
# DEVICE_REQUIRED_FIELDS = [
# "host",
# "username",
# "password",
# ]
# DEVICE_OPTIONAL_DEFAULTS = {
# "protocol": "https",
# "port": 443,
# "api_version": "2.1",
# "status": True,
# "autosnat": False,
# "v_method": "LSI",
# "shared_partition": "shared",
# "use_float": False,
# "default_virtual_server_vrid": None,
# "ipinip": False,
# "ha_sync_list": [],
# "write_memory": True,
# "vport_defaults": {},
# # "max_instance": 5000,
# # "method": "hash",
#
# # "max_partitions": 10,
# # "per_partition_lif_max": 10,
# # "peer_mgmt_ip": "",
# # "peer_mgmt_port": "",
# # "default_virtual_server_vrid": 1,
# # "vlan": 0,
# # "gateway_mode": 1,
# }
# VTHUNDER_REQUIRED_FIELDS = [
# 'username',
# 'password',
#
# 'nova_flavor',
# 'glance_image',
#
# 'vthunder_management_network',
# 'vthunder_data_networks',
# ]
# VTHUNDER_OPTIONAL_DEFAULTS = DEVICE_OPTIONAL_DEFAULTS
# VTHUNDER_OPTIONAL_DEFAULTS = dict(DEVICE_OPTIONAL_DEFAULTS)
, which may include functions, classes, or code. Output only the next line. | if hasattr(self._config, "service_group_expressions"): |
Given the code snippet: <|code_start|> neutron_conf_dir = os.environ.get('NEUTRON_CONF_DIR', self._config.neutron_conf_dir)
neutron_conf = '%s/neutron.conf' % neutron_conf_dir
if os.path.exists(neutron_conf):
LOG.debug("found neutron.conf file in /etc")
n = ini.ConfigParser()
n.read(neutron_conf)
try:
return n.get(section, option)
except (ini.NoSectionError, ini.NoOptionError):
pass
def _get_neutron_db_string(self):
z = self._get_neutron_conf('database', 'connection')
if z is None:
raise a10_ex.NoDatabaseURL('must set db connection url or neutron dir in config.py')
LOG.debug("using %s as db connect string", z)
return z
def get(self, key):
return getattr(self._config, key)
def get_device(self, device_name, db_session=None):
if device_name in self._devices:
return self._devices.get(device_name, {})
if self.get('use_database'):
instance = models.A10DeviceInstance.find_by(name=device_name, db_session=db_session)
<|code_end|>
, generate the next line using the imports in this file:
import logging
import os
import runpy
import sys
import ConfigParser as ini
import configparser as ini
from debtcollector import removals
from a10_neutron_lbaas import a10_exceptions as a10_ex
from a10_neutron_lbaas.etc import config as blank_config
from a10_neutron_lbaas.etc import defaults
from a10_neutron_lbaas.db import models
from a10_neutron_lbaas.db import models
and context (functions, classes, or occasionally code) from other files:
# Path: a10_neutron_lbaas/a10_exceptions.py
# class InternalError(Exception):
# class InvalidConfig(Exception):
# class InvalidDeviceConfig(Exception):
# class UnsupportedFeature(Exception):
# class DeviceConfigMissing(Exception):
# class InstanceMissing(Exception):
# class NoDatabaseURL(Exception):
# class RequiresDatabase(Exception):
# class ImageNotFoundError(Exception):
# class FlavorNotFoundError(Exception):
# class NetworksNotFoundError(Exception):
# class IdentifierUnspecifiedError(Exception):
# class ServiceUnavailableError(Exception):
# class FeatureNotConfiguredError(Exception):
# class NoDevicesAvailableError(Exception):
# class NotImplemented(Exception):
# class ConnLimitOutOfBounds(Exception):
#
# Path: a10_neutron_lbaas/etc/config.py
#
# Path: a10_neutron_lbaas/etc/defaults.py
# GLOBAL_DEFAULTS = {
# "verify_appliances": False,
# "use_database": False,
# "database_connection": None,
# "neutron_conf_dir": '/etc/neutron',
# "member_name_use_uuid": False,
# "keystone_auth_url": None,
# "keystone_version": 2,
# "plumbing_hooks_class": a10_neutron_lbaas.plumbing_hooks.PlumbingHooks,
# "nova_api_version": "2.1",
# "vport_defaults": {},
# "use_parent_project": False
# }
# DEVICE_REQUIRED_FIELDS = [
# "host",
# "username",
# "password",
# ]
# DEVICE_OPTIONAL_DEFAULTS = {
# "protocol": "https",
# "port": 443,
# "api_version": "2.1",
# "status": True,
# "autosnat": False,
# "v_method": "LSI",
# "shared_partition": "shared",
# "use_float": False,
# "default_virtual_server_vrid": None,
# "ipinip": False,
# "ha_sync_list": [],
# "write_memory": True,
# "vport_defaults": {},
# # "max_instance": 5000,
# # "method": "hash",
#
# # "max_partitions": 10,
# # "per_partition_lif_max": 10,
# # "peer_mgmt_ip": "",
# # "peer_mgmt_port": "",
# # "default_virtual_server_vrid": 1,
# # "vlan": 0,
# # "gateway_mode": 1,
# }
# VTHUNDER_REQUIRED_FIELDS = [
# 'username',
# 'password',
#
# 'nova_flavor',
# 'glance_image',
#
# 'vthunder_management_network',
# 'vthunder_data_networks',
# ]
# VTHUNDER_OPTIONAL_DEFAULTS = DEVICE_OPTIONAL_DEFAULTS
# VTHUNDER_OPTIONAL_DEFAULTS = dict(DEVICE_OPTIONAL_DEFAULTS)
. Output only the next line. | if instance is not None: |
Predict the next line for this snippet: <|code_start|> LOG.error(msg)
raise a10_ex.InvalidDeviceConfig(msg)
# vThunder defaults
for vk, vv in defaults.VTHUNDER_OPTIONAL_DEFAULTS.items():
if vk not in self._vthunder:
self._vthunder[vk] = vv
for dk, dv in defaults.DEVICE_OPTIONAL_DEFAULTS.items():
if dk not in self._vthunder:
self._vthunder[dk] = dv
# Setup db foo
if self._config.use_database and self._config.database_connection is None:
self._config.database_connection = self._get_neutron_db_string()
if self._config.keystone_auth_url is None:
self._config.keystone_auth_url = self._get_neutron_conf(
'keystone_authtoken', 'auth_uri')
# TODO(mdurrant) - There's a way to do this with getattr/setattr
self._vport_defaults = {}
if hasattr(self._config, "vport_defaults"):
self._vport_defaults = self._config.vport_defaults
self._vport_expressions = {}
if hasattr(self._config, "vport_expressions"):
self._vport_expressions = self._config.vport_expressions
self._virtual_server_expressions = {}
if hasattr(self._config, "virtual_server_expressions"):
<|code_end|>
with the help of current file imports:
import logging
import os
import runpy
import sys
import ConfigParser as ini
import configparser as ini
from debtcollector import removals
from a10_neutron_lbaas import a10_exceptions as a10_ex
from a10_neutron_lbaas.etc import config as blank_config
from a10_neutron_lbaas.etc import defaults
from a10_neutron_lbaas.db import models
from a10_neutron_lbaas.db import models
and context from other files:
# Path: a10_neutron_lbaas/a10_exceptions.py
# class InternalError(Exception):
# class InvalidConfig(Exception):
# class InvalidDeviceConfig(Exception):
# class UnsupportedFeature(Exception):
# class DeviceConfigMissing(Exception):
# class InstanceMissing(Exception):
# class NoDatabaseURL(Exception):
# class RequiresDatabase(Exception):
# class ImageNotFoundError(Exception):
# class FlavorNotFoundError(Exception):
# class NetworksNotFoundError(Exception):
# class IdentifierUnspecifiedError(Exception):
# class ServiceUnavailableError(Exception):
# class FeatureNotConfiguredError(Exception):
# class NoDevicesAvailableError(Exception):
# class NotImplemented(Exception):
# class ConnLimitOutOfBounds(Exception):
#
# Path: a10_neutron_lbaas/etc/config.py
#
# Path: a10_neutron_lbaas/etc/defaults.py
# GLOBAL_DEFAULTS = {
# "verify_appliances": False,
# "use_database": False,
# "database_connection": None,
# "neutron_conf_dir": '/etc/neutron',
# "member_name_use_uuid": False,
# "keystone_auth_url": None,
# "keystone_version": 2,
# "plumbing_hooks_class": a10_neutron_lbaas.plumbing_hooks.PlumbingHooks,
# "nova_api_version": "2.1",
# "vport_defaults": {},
# "use_parent_project": False
# }
# DEVICE_REQUIRED_FIELDS = [
# "host",
# "username",
# "password",
# ]
# DEVICE_OPTIONAL_DEFAULTS = {
# "protocol": "https",
# "port": 443,
# "api_version": "2.1",
# "status": True,
# "autosnat": False,
# "v_method": "LSI",
# "shared_partition": "shared",
# "use_float": False,
# "default_virtual_server_vrid": None,
# "ipinip": False,
# "ha_sync_list": [],
# "write_memory": True,
# "vport_defaults": {},
# # "max_instance": 5000,
# # "method": "hash",
#
# # "max_partitions": 10,
# # "per_partition_lif_max": 10,
# # "peer_mgmt_ip": "",
# # "peer_mgmt_port": "",
# # "default_virtual_server_vrid": 1,
# # "vlan": 0,
# # "gateway_mode": 1,
# }
# VTHUNDER_REQUIRED_FIELDS = [
# 'username',
# 'password',
#
# 'nova_flavor',
# 'glance_image',
#
# 'vthunder_management_network',
# 'vthunder_data_networks',
# ]
# VTHUNDER_OPTIONAL_DEFAULTS = DEVICE_OPTIONAL_DEFAULTS
# VTHUNDER_OPTIONAL_DEFAULTS = dict(DEVICE_OPTIONAL_DEFAULTS)
, which may contain function names, class names, or code. Output only the next line. | self._virtual_server_expressions = self._config.virtual_server_expressions |
Continue the code snippet: <|code_start|> missing_tables = [model.__tablename__ for model in a10_models if
model.__tablename__ not in tables]
self.assertEqual([], missing_tables,
"The following tables weren't created by installing {0}".
format(missing_tables))
dialect = self.connection.dialect
ddl_compiler = dialect.ddl_compiler(dialect, None)
def normalize(schema_type):
copied_type = copy.copy(schema_type)
# We don't care about display width
if getattr(copied_type, 'display_width', None) is not None:
copied_type.display_width = None
if type(schema_type) is sqlalchemy.sql.sqltypes.Text:
copied_type.length = None
normalized_type = copied_type.compile(dialect=dialect)
# mysql has some weird synonyms
if dialect.name == 'mysql':
weird_synonyms = {
'BOOL': 'TINYINT',
'BOOLEAN': 'TINYINT',
'TEXT()': 'TEXT'
}
normalized_type = weird_synonyms.get(normalized_type, normalized_type)
return normalized_type
<|code_end|>
. Use current file imports:
import copy
import mock
import os
import alembic.command as alembic_command
import alembic.config as alembic_config
import alembic.op as op
import sqlalchemy
from nose.plugins.attrib import attr
from a10_neutron_lbaas.db import migration
from a10_neutron_lbaas.tests.db.migration import test_base
from a10_neutron_lbaas.tests.db import session
and context (classes, functions, or code) from other files:
# Path: a10_neutron_lbaas/tests/db/migration/test_base.py
# class UnitTestBase(test_base.DbTestBase):
# def setUp(self):
. Output only the next line. | for model in a10_models: |
Continue the code snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def empty_config():
return a10_config.A10Config(config=blank_config)
def config(config_dict):
config_constructor = type('config', (object,), config_dict)
return a10_config.A10Config(config=config_constructor())
def use_config_dir(config_dir=None):
if config_dir is None:
config_dir = os.path.dirname(__file__)
<|code_end|>
. Use current file imports:
import os
from a10_neutron_lbaas import a10_config
from a10_neutron_lbaas.etc import config as blank_config
and context (classes, functions, or code) from other files:
# Path: a10_neutron_lbaas/a10_config.py
# LOG = logging.getLogger(__name__)
# class ConfigModule(object):
# class A10Config(object):
# class OldConfig(object):
# def __init__(self, d, provider=None):
# def load(cls, path, provider=None):
# def __init__(self, config_dir=None, config=None, provider=None):
# def _find_config_dir(self, config_dir):
# def _load_config(self):
# def _get_neutron_conf(self, section, option):
# def _get_neutron_db_string(self):
# def get(self, key):
# def get_device(self, device_name, db_session=None):
# def get_devices(self, db_session=None):
# def get_vthunder_config(self):
# def get_vport_defaults(self):
# def get_vport_expressions(self):
# def get_virtual_server_expressions(self):
# def get_service_group_expressions(self):
# def get_member_expressions(self):
# def get_monitor_expressions(self):
# def devices(self):
# def use_database(self):
# def database_connection(self):
# def verify_appliances(self):
# def __init__(self, main_config):
# def devices(self):
# def use_database(self):
# def database_connection(self):
# def verify_appliances(self):
#
# Path: a10_neutron_lbaas/etc/config.py
. Output only the next line. | if 'A10_CONFIG_DIR' in os.environ: |
Next line prediction: <|code_start|># WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def empty_config():
return a10_config.A10Config(config=blank_config)
def config(config_dict):
config_constructor = type('config', (object,), config_dict)
return a10_config.A10Config(config=config_constructor())
def use_config_dir(config_dir=None):
if config_dir is None:
config_dir = os.path.dirname(__file__)
if 'A10_CONFIG_DIR' in os.environ:
current = os.environ['A10_CONFIG_DIR']
def cleanup():
os.environ['A10_CONFIG_DIR'] = current
else:
def cleanup():
del os.environ['A10_CONFIG_DIR']
<|code_end|>
. Use current file imports:
(import os
from a10_neutron_lbaas import a10_config
from a10_neutron_lbaas.etc import config as blank_config)
and context including class names, function names, or small code snippets from other files:
# Path: a10_neutron_lbaas/a10_config.py
# LOG = logging.getLogger(__name__)
# class ConfigModule(object):
# class A10Config(object):
# class OldConfig(object):
# def __init__(self, d, provider=None):
# def load(cls, path, provider=None):
# def __init__(self, config_dir=None, config=None, provider=None):
# def _find_config_dir(self, config_dir):
# def _load_config(self):
# def _get_neutron_conf(self, section, option):
# def _get_neutron_db_string(self):
# def get(self, key):
# def get_device(self, device_name, db_session=None):
# def get_devices(self, db_session=None):
# def get_vthunder_config(self):
# def get_vport_defaults(self):
# def get_vport_expressions(self):
# def get_virtual_server_expressions(self):
# def get_service_group_expressions(self):
# def get_member_expressions(self):
# def get_monitor_expressions(self):
# def devices(self):
# def use_database(self):
# def database_connection(self):
# def verify_appliances(self):
# def __init__(self, main_config):
# def devices(self):
# def use_database(self):
# def database_connection(self):
# def verify_appliances(self):
#
# Path: a10_neutron_lbaas/etc/config.py
. Output only the next line. | os.environ['A10_CONFIG_DIR'] = config_dir |
Given the following code snippet before the placeholder: <|code_start|>
def split_items(s): # pragma: no cover
if not s:
return set()
r = []
for ss in set(s.strip().split()):
if '**:' in ss:
ss = ss.split('**:')[0] + '**'
if ss.endswith(','):
ss = ss[:-1].strip()
r.append(ss)
<|code_end|>
, predict the next line using imports from the current file:
import json
import pathlib
from clld.db.meta import DBSession
from clld.db.models import common
from clldutils import misc
from clldutils.text import split_text
from glottolog3.models import TreeClosureTable
and context including class names, function names, and sometimes code from other files:
# Path: glottolog3/models.py
# class TreeClosureTable(Base):
# __table_args__ = (UniqueConstraint('parent_pk', 'child_pk'),)
# parent_pk = Column(Integer, ForeignKey('languoid.pk'))
# child_pk = Column(Integer, ForeignKey('languoid.pk'))
# depth = Column(Integer)
. Output only the next line. | return set(r) |
Predict the next line after this snippet: <|code_start|>
def test_normalize_language_explanation():
#for s in [' X [aaa]', 'L [aaa] = "X"', 'X = L [aaa]']:
# assert normalize_language_explanation(s) == 'X [aaa]'
assert normalize_language_explanation(' abcdefg') == 'abcdefg'
def test_ModelInstance():
mi = ModelInstance(Doctype)
assert mi.serialize(None, colander.null) == colander.null
with pytest.raises(colander.Invalid):
mi.serialize(None, '')
assert mi.serialize(None, Doctype(id='id')) == 'id'
<|code_end|>
using the current file's imports:
import pytest
import colander
from glottolog3.models import Doctype
from glottolog3.util import normalize_language_explanation, ModelInstance
and any relevant context from other files:
# Path: glottolog3/models.py
# class Doctype(Base, IdNameDescriptionMixin):
# """
# id -> pk
# id -> id
# abbr -> abbr
# name -> name
# """
# abbr = Column(Unicode)
#
# ord = Column(Integer)
#
# def __str__(self):
# return capwords(self.name.replace('_', ' '))
#
# Path: glottolog3/util.py
# def normalize_language_explanation(chunk):
# """
# i) X [aaa]
# ii) L [aaa] = "X"
# iii) X = L [aaa]
#
# :return: X [aaa]
# """
# if '[' in chunk and not chunk.endswith(']'):
# chunk += ']'
# chunk = chunk.strip()
# if '=' not in chunk:
# return chunk
# chunks = chunk.split('=')
# left = '='.join(chunks[:-1]).strip()
# right = chunks[-1].strip()
# if right.startswith('"') and right.endswith('"') and '[' not in right and '[' in left:
# # case ii)
# return right[1:-1].strip() + ' [' + left.split('[', 1)[1]
# if '[' in right and '[' not in left:
# # case iii)
# return left + ' [' + right.split('[', 1)[1]
# return chunk
#
# class ModelInstance(object):
# def __init__(self, cls, attr='id', collection=None, alias=None):
# self.cls = cls
# self.attr = attr
# self.alias = alias
# self.collection = collection
#
# def serialize(self, node, appstruct):
# if appstruct is colander.null:
# return colander.null
# if self.cls and not isinstance(appstruct, self.cls):
# raise colander.Invalid(node, '%r is not a %s' % (appstruct, self.cls))
# return getattr(appstruct, self.attr)
#
# def deserialize(self, node, cstruct):
# if cstruct is colander.null:
# return colander.null
# value = None
# if self.collection:
# for obj in self.collection:
# if getattr(obj, self.attr) == cstruct \
# or (self.alias and getattr(obj, self.alias) == cstruct):
# value = obj
# else:
# value = self.cls.get(cstruct, key=self.attr, default=None) if self.cls else cstruct
# if self.alias and value is None:
# value = self.cls.get(cstruct, key=self.alias, default=None) if self.cls else cstruct
# if value is None:
# raise colander.Invalid(node, 'no single result found')
# return value
#
# def cstruct_children(self, node, cstruct): # pragma: no cover
# return []
. Output only the next line. | assert mi.deserialize(None, colander.null) == colander.null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.