Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|>'''
Test Tiger columns
'''
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
@with_setup(setup, teardown)
def test_geom_columns_run():
runtask(GeomColumns(year='2015'))
@with_setup(setup, teardown)
def test_geoid_columns_run():
<|code_end|>
, generate the next line using the imports in this file:
from tasks.util import shell
from nose.tools import with_setup
from tasks.us.census.tiger import GeoidColumns, GeomColumns
from tests.util import runtask, setup, teardown
and context (functions, classes, or occasionally code) from other files:
# Path: tasks/util.py
# def shell(cmd, encoding='utf-8'):
# '''
# Run a shell command, uses :py:func:`subprocess.check_output(cmd,
# shell=True)` under the hood.
#
# Returns the ``STDOUT`` output, and raises an error if there is a
# non-zero exit code.
# '''
# try:
# return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode(encoding)
# except subprocess.CalledProcessError as err:
# LOGGER.error(err.output)
# raise
#
# Path: tasks/us/census/tiger.py
# class GeoidColumns(ColumnsTask):
# '''
# Used for external dependencies on Tiger.
#
# This creates two different geoid columns
# (GEOID_SUMLEVEL_COLUMN for the SumLevels geometries and
# GEOID_SHORELINECLIPPED_COLUMN for the ShorelineClipped geometries).
#
# This allows external tables to depend on both shoreline clipped and non-shoreline clipped geometries.
# '''
#
# year = IntParameter()
#
# def version(self):
# return 8
#
# def requires(self):
# return {
# 'raw': GeomColumns(year=self.year),
# 'clipped': ClippedGeomColumns(year=self.year)
# }
#
# def columns(self):
# cols = OrderedDict()
# clipped = self.input()['clipped']
# for colname, coltarget in self.input()['raw'].items():
# col = coltarget._column
# cols[colname + GEOID_SHORELINECLIPPED_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# clipped[colname + '_clipped']._column: GEOM_REF
# }
# )
# cols[colname + GEOID_SUMLEVEL_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# col: GEOM_REF,
# }
# )
#
# return cols
#
# class GeomColumns(ColumnsTask):
# year = IntParameter()
#
# def version(self):
# return 18
#
# def requires(self):
# return {
# 'sections': SectionTags(),
# 'subsections': SubsectionTags(),
# 'source': TigerSourceTags(),
# 'license': LicenseTags(),
# 'boundary': BoundaryTags(),
# }
#
# def _generate_desc(self, sumlevel):
# '''
# Add figure to the description
# '''
# return SUMLEVELS[sumlevel]['census_description']
#
# def columns(self):
# input_ = self.input()
# sections = input_['sections']
# subsections = input_['subsections']
# source = input_['source']['tiger-source']
# license = input_['license']['no-restrictions']
#
# columns = {}
# for level in list(SUMLEVELS.values()):
# columns[level['slug'] + '_{}'.format(self.year)] = OBSColumn(
# type='Geometry',
# name=level['name'] + '_{}'.format(self.year),
# description=level['census_description'],
# weight=level['weight'],
# tags=[sections['united_states'], subsections['boundary'], source, license]
# )
#
# return columns
#
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
. Output only the next line. | runtask(GeoidColumns(year='2015')) |
Based on the snippet: <|code_start|>'''
Test Tiger columns
'''
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
@with_setup(setup, teardown)
def test_geom_columns_run():
<|code_end|>
, predict the immediate next line with the help of imports:
from tasks.util import shell
from nose.tools import with_setup
from tasks.us.census.tiger import GeoidColumns, GeomColumns
from tests.util import runtask, setup, teardown
and context (classes, functions, sometimes code) from other files:
# Path: tasks/util.py
# def shell(cmd, encoding='utf-8'):
# '''
# Run a shell command, uses :py:func:`subprocess.check_output(cmd,
# shell=True)` under the hood.
#
# Returns the ``STDOUT`` output, and raises an error if there is a
# non-zero exit code.
# '''
# try:
# return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode(encoding)
# except subprocess.CalledProcessError as err:
# LOGGER.error(err.output)
# raise
#
# Path: tasks/us/census/tiger.py
# class GeoidColumns(ColumnsTask):
# '''
# Used for external dependencies on Tiger.
#
# This creates two different geoid columns
# (GEOID_SUMLEVEL_COLUMN for the SumLevels geometries and
# GEOID_SHORELINECLIPPED_COLUMN for the ShorelineClipped geometries).
#
# This allows external tables to depend on both shoreline clipped and non-shoreline clipped geometries.
# '''
#
# year = IntParameter()
#
# def version(self):
# return 8
#
# def requires(self):
# return {
# 'raw': GeomColumns(year=self.year),
# 'clipped': ClippedGeomColumns(year=self.year)
# }
#
# def columns(self):
# cols = OrderedDict()
# clipped = self.input()['clipped']
# for colname, coltarget in self.input()['raw'].items():
# col = coltarget._column
# cols[colname + GEOID_SHORELINECLIPPED_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# clipped[colname + '_clipped']._column: GEOM_REF
# }
# )
# cols[colname + GEOID_SUMLEVEL_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# col: GEOM_REF,
# }
# )
#
# return cols
#
# class GeomColumns(ColumnsTask):
# year = IntParameter()
#
# def version(self):
# return 18
#
# def requires(self):
# return {
# 'sections': SectionTags(),
# 'subsections': SubsectionTags(),
# 'source': TigerSourceTags(),
# 'license': LicenseTags(),
# 'boundary': BoundaryTags(),
# }
#
# def _generate_desc(self, sumlevel):
# '''
# Add figure to the description
# '''
# return SUMLEVELS[sumlevel]['census_description']
#
# def columns(self):
# input_ = self.input()
# sections = input_['sections']
# subsections = input_['subsections']
# source = input_['source']['tiger-source']
# license = input_['license']['no-restrictions']
#
# columns = {}
# for level in list(SUMLEVELS.values()):
# columns[level['slug'] + '_{}'.format(self.year)] = OBSColumn(
# type='Geometry',
# name=level['name'] + '_{}'.format(self.year),
# description=level['census_description'],
# weight=level['weight'],
# tags=[sections['united_states'], subsections['boundary'], source, license]
# )
#
# return columns
#
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
. Output only the next line. | runtask(GeomColumns(year='2015')) |
Here is a snippet: <|code_start|>'''
Test Tiger columns
'''
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
@with_setup(setup, teardown)
def test_geom_columns_run():
<|code_end|>
. Write the next line using the current file imports:
from tasks.util import shell
from nose.tools import with_setup
from tasks.us.census.tiger import GeoidColumns, GeomColumns
from tests.util import runtask, setup, teardown
and context from other files:
# Path: tasks/util.py
# def shell(cmd, encoding='utf-8'):
# '''
# Run a shell command, uses :py:func:`subprocess.check_output(cmd,
# shell=True)` under the hood.
#
# Returns the ``STDOUT`` output, and raises an error if there is a
# non-zero exit code.
# '''
# try:
# return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode(encoding)
# except subprocess.CalledProcessError as err:
# LOGGER.error(err.output)
# raise
#
# Path: tasks/us/census/tiger.py
# class GeoidColumns(ColumnsTask):
# '''
# Used for external dependencies on Tiger.
#
# This creates two different geoid columns
# (GEOID_SUMLEVEL_COLUMN for the SumLevels geometries and
# GEOID_SHORELINECLIPPED_COLUMN for the ShorelineClipped geometries).
#
# This allows external tables to depend on both shoreline clipped and non-shoreline clipped geometries.
# '''
#
# year = IntParameter()
#
# def version(self):
# return 8
#
# def requires(self):
# return {
# 'raw': GeomColumns(year=self.year),
# 'clipped': ClippedGeomColumns(year=self.year)
# }
#
# def columns(self):
# cols = OrderedDict()
# clipped = self.input()['clipped']
# for colname, coltarget in self.input()['raw'].items():
# col = coltarget._column
# cols[colname + GEOID_SHORELINECLIPPED_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# clipped[colname + '_clipped']._column: GEOM_REF
# }
# )
# cols[colname + GEOID_SUMLEVEL_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# col: GEOM_REF,
# }
# )
#
# return cols
#
# class GeomColumns(ColumnsTask):
# year = IntParameter()
#
# def version(self):
# return 18
#
# def requires(self):
# return {
# 'sections': SectionTags(),
# 'subsections': SubsectionTags(),
# 'source': TigerSourceTags(),
# 'license': LicenseTags(),
# 'boundary': BoundaryTags(),
# }
#
# def _generate_desc(self, sumlevel):
# '''
# Add figure to the description
# '''
# return SUMLEVELS[sumlevel]['census_description']
#
# def columns(self):
# input_ = self.input()
# sections = input_['sections']
# subsections = input_['subsections']
# source = input_['source']['tiger-source']
# license = input_['license']['no-restrictions']
#
# columns = {}
# for level in list(SUMLEVELS.values()):
# columns[level['slug'] + '_{}'.format(self.year)] = OBSColumn(
# type='Geometry',
# name=level['name'] + '_{}'.format(self.year),
# description=level['census_description'],
# weight=level['weight'],
# tags=[sections['united_states'], subsections['boundary'], source, license]
# )
#
# return columns
#
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
, which may include functions, classes, or code. Output only the next line. | runtask(GeomColumns(year='2015')) |
Based on the snippet: <|code_start|>'''
Test Tiger columns
'''
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
<|code_end|>
, predict the immediate next line with the help of imports:
from tasks.util import shell
from nose.tools import with_setup
from tasks.us.census.tiger import GeoidColumns, GeomColumns
from tests.util import runtask, setup, teardown
and context (classes, functions, sometimes code) from other files:
# Path: tasks/util.py
# def shell(cmd, encoding='utf-8'):
# '''
# Run a shell command, uses :py:func:`subprocess.check_output(cmd,
# shell=True)` under the hood.
#
# Returns the ``STDOUT`` output, and raises an error if there is a
# non-zero exit code.
# '''
# try:
# return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode(encoding)
# except subprocess.CalledProcessError as err:
# LOGGER.error(err.output)
# raise
#
# Path: tasks/us/census/tiger.py
# class GeoidColumns(ColumnsTask):
# '''
# Used for external dependencies on Tiger.
#
# This creates two different geoid columns
# (GEOID_SUMLEVEL_COLUMN for the SumLevels geometries and
# GEOID_SHORELINECLIPPED_COLUMN for the ShorelineClipped geometries).
#
# This allows external tables to depend on both shoreline clipped and non-shoreline clipped geometries.
# '''
#
# year = IntParameter()
#
# def version(self):
# return 8
#
# def requires(self):
# return {
# 'raw': GeomColumns(year=self.year),
# 'clipped': ClippedGeomColumns(year=self.year)
# }
#
# def columns(self):
# cols = OrderedDict()
# clipped = self.input()['clipped']
# for colname, coltarget in self.input()['raw'].items():
# col = coltarget._column
# cols[colname + GEOID_SHORELINECLIPPED_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# clipped[colname + '_clipped']._column: GEOM_REF
# }
# )
# cols[colname + GEOID_SUMLEVEL_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# col: GEOM_REF,
# }
# )
#
# return cols
#
# class GeomColumns(ColumnsTask):
# year = IntParameter()
#
# def version(self):
# return 18
#
# def requires(self):
# return {
# 'sections': SectionTags(),
# 'subsections': SubsectionTags(),
# 'source': TigerSourceTags(),
# 'license': LicenseTags(),
# 'boundary': BoundaryTags(),
# }
#
# def _generate_desc(self, sumlevel):
# '''
# Add figure to the description
# '''
# return SUMLEVELS[sumlevel]['census_description']
#
# def columns(self):
# input_ = self.input()
# sections = input_['sections']
# subsections = input_['subsections']
# source = input_['source']['tiger-source']
# license = input_['license']['no-restrictions']
#
# columns = {}
# for level in list(SUMLEVELS.values()):
# columns[level['slug'] + '_{}'.format(self.year)] = OBSColumn(
# type='Geometry',
# name=level['name'] + '_{}'.format(self.year),
# description=level['census_description'],
# weight=level['weight'],
# tags=[sections['united_states'], subsections['boundary'], source, license]
# )
#
# return columns
#
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
. Output only the next line. | @with_setup(setup, teardown) |
Next line prediction: <|code_start|>'''
Test Tiger columns
'''
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
<|code_end|>
. Use current file imports:
(from tasks.util import shell
from nose.tools import with_setup
from tasks.us.census.tiger import GeoidColumns, GeomColumns
from tests.util import runtask, setup, teardown)
and context including class names, function names, or small code snippets from other files:
# Path: tasks/util.py
# def shell(cmd, encoding='utf-8'):
# '''
# Run a shell command, uses :py:func:`subprocess.check_output(cmd,
# shell=True)` under the hood.
#
# Returns the ``STDOUT`` output, and raises an error if there is a
# non-zero exit code.
# '''
# try:
# return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode(encoding)
# except subprocess.CalledProcessError as err:
# LOGGER.error(err.output)
# raise
#
# Path: tasks/us/census/tiger.py
# class GeoidColumns(ColumnsTask):
# '''
# Used for external dependencies on Tiger.
#
# This creates two different geoid columns
# (GEOID_SUMLEVEL_COLUMN for the SumLevels geometries and
# GEOID_SHORELINECLIPPED_COLUMN for the ShorelineClipped geometries).
#
# This allows external tables to depend on both shoreline clipped and non-shoreline clipped geometries.
# '''
#
# year = IntParameter()
#
# def version(self):
# return 8
#
# def requires(self):
# return {
# 'raw': GeomColumns(year=self.year),
# 'clipped': ClippedGeomColumns(year=self.year)
# }
#
# def columns(self):
# cols = OrderedDict()
# clipped = self.input()['clipped']
# for colname, coltarget in self.input()['raw'].items():
# col = coltarget._column
# cols[colname + GEOID_SHORELINECLIPPED_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# clipped[colname + '_clipped']._column: GEOM_REF
# }
# )
# cols[colname + GEOID_SUMLEVEL_COLUMN] = OBSColumn(
# type='Text',
# name=col.name + ' Geoids',
# weight=0,
# targets={
# col: GEOM_REF,
# }
# )
#
# return cols
#
# class GeomColumns(ColumnsTask):
# year = IntParameter()
#
# def version(self):
# return 18
#
# def requires(self):
# return {
# 'sections': SectionTags(),
# 'subsections': SubsectionTags(),
# 'source': TigerSourceTags(),
# 'license': LicenseTags(),
# 'boundary': BoundaryTags(),
# }
#
# def _generate_desc(self, sumlevel):
# '''
# Add figure to the description
# '''
# return SUMLEVELS[sumlevel]['census_description']
#
# def columns(self):
# input_ = self.input()
# sections = input_['sections']
# subsections = input_['subsections']
# source = input_['source']['tiger-source']
# license = input_['license']['no-restrictions']
#
# columns = {}
# for level in list(SUMLEVELS.values()):
# columns[level['slug'] + '_{}'.format(self.year)] = OBSColumn(
# type='Geometry',
# name=level['name'] + '_{}'.format(self.year),
# description=level['census_description'],
# weight=level['weight'],
# tags=[sections['united_states'], subsections['boundary'], source, license]
# )
#
# return columns
#
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
. Output only the next line. | @with_setup(setup, teardown) |
Predict the next line for this snippet: <|code_start|>
LOGGER = get_logger(__name__)
# TODO Add block level when we have ACS for block
GEOGRAPHY_LEVELS = {'state': 'us.census.tiger.state',
'county': 'us.census.tiger.county',
'census_tract': 'us.census.tiger.census_tract',
'zcta5': 'us.census.tiger.zcta5',
'block_group': 'us.census.tiger.block_group',
'block': 'us.census.tiger.block'}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
<|code_end|>
with the help of current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.us.census.tiger import ShorelineClip
from tasks.us.census.acs import ACSMetaWrapper
from tasks.meta import current_session
from lib.logger import get_logger
and context from other files:
# Path: tasks/us/census/tiger.py
# class ShorelineClip(TableTask):
# '''
# Clip the provided geography to shoreline.
# '''
#
# # MTFCC meanings:
# # http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2009/TGRSHP09AF.pdf
#
# year = IntParameter()
# geography = Parameter()
#
# def version(self):
# return 10
#
# def requires(self):
# if self.geography == BLOCK:
# tiger = SimplifiedUnionTigerWaterGeomsByState(year=self.year, geography=self.geography)
# else:
# tiger = SimplifiedUnionTigerWaterGeoms(year=self.year, geography=self.geography)
# return {
# 'data': tiger,
# 'geoms': ClippedGeomColumns(year=self.year),
# 'geoids': GeoidColumns(year=self.year),
# 'attributes': Attributes(),
# 'geonames': GeonameColumns(year=self.year),
# }
#
# def columns(self):
# return OrderedDict([
# ('geoid', self.input()['geoids'][self.geography + '_{}'.format(self.year) + GEOID_SHORELINECLIPPED_COLUMN]),
# ('the_geom', self.input()['geoms'][self.geography + '_{}'.format(self.year) + '_clipped']),
# ('aland', self.input()['attributes']['aland'])
# ])
#
# def table_timespan(self):
# return get_timespan(str(self.year))
#
# # TODO: https://github.com/CartoDB/bigmetadata/issues/435
# def targets(self):
# return {
# OBSTable(id='.'.join([self.schema(), self.name()])): GEOM_REF,
# }
#
# def populate(self):
# session = current_session()
#
# stmt = ('''INSERT INTO {output}
# SELECT
# geoid,
# ST_Union(ARRAY(
# SELECT ST_MakePolygon(ST_ExteriorRing(
# (ST_Dump(ST_CollectionExtract(the_geom, 3))).geom
# ))
# )),
# aland
# FROM {input}'''.format(
# output=self.output().table,
# input=self.input()['data'].table), )[0]
# session.execute(stmt)
#
# Path: tasks/us/census/acs.py
# class ACSMetaWrapper(MetaWrapper):
#
# geography = Parameter()
# year = Parameter()
# sample = Parameter()
#
# params = {
# 'geography': GEOGRAPHIES,
# 'year': YEARS,
# 'sample': SAMPLES
# }
#
# def tables(self):
# # no ZCTA for 2010
# if self.year == '2010' and self.geography == ZCTA5:
# pass
# # 1yr sample doesn't have block group or census_tract
# elif self.sample == SAMPLE_1YR and self.geography in (CENSUS_TRACT, BLOCK_GROUP, BLOCK, ZCTA5):
# pass
# else:
# yield Quantiles(geography=self.geography, year=self.year, sample=self.sample)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
#     Obtain a logger outputting to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
, which may contain function names, class names, or code. Output only the next line. | 'shorelineclip': ShorelineClip(geography=self.geography, year='2015'), |
Next line prediction: <|code_start|>
LOGGER = get_logger(__name__)
# TODO Add block level when we have ACS for block
GEOGRAPHY_LEVELS = {'state': 'us.census.tiger.state',
'county': 'us.census.tiger.county',
'census_tract': 'us.census.tiger.census_tract',
'zcta5': 'us.census.tiger.zcta5',
'block_group': 'us.census.tiger.block_group',
'block': 'us.census.tiger.block'}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
'shorelineclip': ShorelineClip(geography=self.geography, year='2015'),
<|code_end|>
. Use current file imports:
(import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.us.census.tiger import ShorelineClip
from tasks.us.census.acs import ACSMetaWrapper
from tasks.meta import current_session
from lib.logger import get_logger)
and context including class names, function names, or small code snippets from other files:
# Path: tasks/us/census/tiger.py
# class ShorelineClip(TableTask):
# '''
# Clip the provided geography to shoreline.
# '''
#
# # MTFCC meanings:
# # http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2009/TGRSHP09AF.pdf
#
# year = IntParameter()
# geography = Parameter()
#
# def version(self):
# return 10
#
# def requires(self):
# if self.geography == BLOCK:
# tiger = SimplifiedUnionTigerWaterGeomsByState(year=self.year, geography=self.geography)
# else:
# tiger = SimplifiedUnionTigerWaterGeoms(year=self.year, geography=self.geography)
# return {
# 'data': tiger,
# 'geoms': ClippedGeomColumns(year=self.year),
# 'geoids': GeoidColumns(year=self.year),
# 'attributes': Attributes(),
# 'geonames': GeonameColumns(year=self.year),
# }
#
# def columns(self):
# return OrderedDict([
# ('geoid', self.input()['geoids'][self.geography + '_{}'.format(self.year) + GEOID_SHORELINECLIPPED_COLUMN]),
# ('the_geom', self.input()['geoms'][self.geography + '_{}'.format(self.year) + '_clipped']),
# ('aland', self.input()['attributes']['aland'])
# ])
#
# def table_timespan(self):
# return get_timespan(str(self.year))
#
# # TODO: https://github.com/CartoDB/bigmetadata/issues/435
# def targets(self):
# return {
# OBSTable(id='.'.join([self.schema(), self.name()])): GEOM_REF,
# }
#
# def populate(self):
# session = current_session()
#
# stmt = ('''INSERT INTO {output}
# SELECT
# geoid,
# ST_Union(ARRAY(
# SELECT ST_MakePolygon(ST_ExteriorRing(
# (ST_Dump(ST_CollectionExtract(the_geom, 3))).geom
# ))
# )),
# aland
# FROM {input}'''.format(
# output=self.output().table,
# input=self.input()['data'].table), )[0]
# session.execute(stmt)
#
# Path: tasks/us/census/acs.py
# class ACSMetaWrapper(MetaWrapper):
#
# geography = Parameter()
# year = Parameter()
# sample = Parameter()
#
# params = {
# 'geography': GEOGRAPHIES,
# 'year': YEARS,
# 'sample': SAMPLES
# }
#
# def tables(self):
# # no ZCTA for 2010
# if self.year == '2010' and self.geography == ZCTA5:
# pass
# # 1yr sample doesn't have block group or census_tract
# elif self.sample == SAMPLE_1YR and self.geography in (CENSUS_TRACT, BLOCK_GROUP, BLOCK, ZCTA5):
# pass
# else:
# yield Quantiles(geography=self.geography, year=self.year, sample=self.sample)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | 'acs5yr': ACSMetaWrapper(geography=self.geography, year='2015', sample='5yr'), |
Next line prediction: <|code_start|>
# TODO Add block level when we have ACS for block
GEOGRAPHY_LEVELS = {'state': 'us.census.tiger.state',
'county': 'us.census.tiger.county',
'census_tract': 'us.census.tiger.census_tract',
'zcta5': 'us.census.tiger.zcta5',
'block_group': 'us.census.tiger.block_group',
'block': 'us.census.tiger.block'}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
'shorelineclip': ShorelineClip(geography=self.geography, year='2015'),
'acs5yr': ACSMetaWrapper(geography=self.geography, year='2015', sample='5yr'),
'acs1yr': ACSMetaWrapper(geography=self.geography, year='2015', sample='1yr'),
}
def _get_config_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with (open('{}/{}'.format(dir_path, 'measurements.json'))) as f:
return json.load(f)
def run(self):
<|code_end|>
. Use current file imports:
(import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.us.census.tiger import ShorelineClip
from tasks.us.census.acs import ACSMetaWrapper
from tasks.meta import current_session
from lib.logger import get_logger)
and context including class names, function names, or small code snippets from other files:
# Path: tasks/us/census/tiger.py
# class ShorelineClip(TableTask):
# '''
# Clip the provided geography to shoreline.
# '''
#
# # MTFCC meanings:
# # http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2009/TGRSHP09AF.pdf
#
# year = IntParameter()
# geography = Parameter()
#
# def version(self):
# return 10
#
# def requires(self):
# if self.geography == BLOCK:
# tiger = SimplifiedUnionTigerWaterGeomsByState(year=self.year, geography=self.geography)
# else:
# tiger = SimplifiedUnionTigerWaterGeoms(year=self.year, geography=self.geography)
# return {
# 'data': tiger,
# 'geoms': ClippedGeomColumns(year=self.year),
# 'geoids': GeoidColumns(year=self.year),
# 'attributes': Attributes(),
# 'geonames': GeonameColumns(year=self.year),
# }
#
# def columns(self):
# return OrderedDict([
# ('geoid', self.input()['geoids'][self.geography + '_{}'.format(self.year) + GEOID_SHORELINECLIPPED_COLUMN]),
# ('the_geom', self.input()['geoms'][self.geography + '_{}'.format(self.year) + '_clipped']),
# ('aland', self.input()['attributes']['aland'])
# ])
#
# def table_timespan(self):
# return get_timespan(str(self.year))
#
# # TODO: https://github.com/CartoDB/bigmetadata/issues/435
# def targets(self):
# return {
# OBSTable(id='.'.join([self.schema(), self.name()])): GEOM_REF,
# }
#
# def populate(self):
# session = current_session()
#
# stmt = ('''INSERT INTO {output}
# SELECT
# geoid,
# ST_Union(ARRAY(
# SELECT ST_MakePolygon(ST_ExteriorRing(
# (ST_Dump(ST_CollectionExtract(the_geom, 3))).geom
# ))
# )),
# aland
# FROM {input}'''.format(
# output=self.output().table,
# input=self.input()['data'].table), )[0]
# session.execute(stmt)
#
# Path: tasks/us/census/acs.py
# class ACSMetaWrapper(MetaWrapper):
#
# geography = Parameter()
# year = Parameter()
# sample = Parameter()
#
# params = {
# 'geography': GEOGRAPHIES,
# 'year': YEARS,
# 'sample': SAMPLES
# }
#
# def tables(self):
# # no ZCTA for 2010
# if self.year == '2010' and self.geography == ZCTA5:
# pass
# # 1yr sample doesn't have block group or census_tract
# elif self.sample == SAMPLE_1YR and self.geography in (CENSUS_TRACT, BLOCK_GROUP, BLOCK, ZCTA5):
# pass
# else:
# yield Quantiles(geography=self.geography, year=self.year, sample=self.sample)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | session = current_session() |
Here is a snippet: <|code_start|>
@pytest.mark.parametrize('attr', list(attrs.keys()))
def test_attrs_exist_and_type(self, sim, attr):
assert hasattr(sim, attr)
d_type = self.attrs[attr]
assert isinstance(getattr(sim, attr), d_type)
@pytest.mark.parametrize('step', list(mdps.keys()))
def test_methods_exist_and_callable(self, sim, step):
assert hasattr(sim, step)
assert callable(getattr(sim, step))
def test_fp(self, sim):
sample_file = 'tests/__init__.py'
fp = sim._fp(sample_file)
assert fp.exists()
assert fp.is_absolute()
assert fp.is_file()
assert fp.samefile(sample_file)
def test_last_geom(self, sim):
gro = sim.last_geometry
assert isinstance(gro, pathlib.Path)
assert gro.suffix == '.gro'
assert gro.is_absolute()
assert gro.is_file()
def test_next_folder_index(self, sim):
assert sim._next_folder_index == 1
path = sim.base_folder
<|code_end|>
. Write the next line using the current file imports:
import pathlib
import pytest
from paratemp.tools import cd
from paratemp.sim_setup import Simulation
from paratemp.sim_setup import Simulation
and context from other files:
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
, which may include functions, classes, or code. Output only the next line. | with cd(path): |
Given snippet: <|code_start|>########################################################################
# #
# This script was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017-18 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
class TestGetGroFiles(object):
def test_get_gro_files(self, pt_run_dir):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import py
import pytest
import re
from paratemp.tools import cd
from paratemp.sim_setup import get_gro_files
from paratemp.sim_setup import get_gro_files
from paratemp.sim_setup.sim_setup import _job_info_from_qsub
from paratemp.sim_setup.sim_setup import _update_num
from paratemp.sim_setup.sim_setup import _update_num
from paratemp.exceptions import InputError
from paratemp.sim_setup import get_n_solvent
from paratemp.sim_setup import get_n_solvent
from paratemp.sim_setup import get_n_solvent
from paratemp.sim_setup import get_solv_count_top
from paratemp.sim_setup import get_solv_count_top
from paratemp.sim_setup import set_solv_count_top, get_solv_count_top
from paratemp.sim_setup import set_solv_count_top, get_solv_count_top
from paratemp.sim_setup import set_solv_count_top, \
get_solv_count_top
from paratemp.sim_setup import set_solv_count_top
from paratemp.sim_setup.sim_setup import _get_n_top
from paratemp.exceptions import InputError
from paratemp.sim_setup.sim_setup import _make_sge_line
from paratemp.sim_setup import make_gromacs_sub_script
from paratemp.sim_setup import make_gromacs_sub_script
from paratemp.sim_setup.sim_setup import _get_mdrun_line
from paratemp.sim_setup.sim_setup import _get_sge_basic_lines
from paratemp.sim_setup import make_gromacs_sub_script
and context:
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
which might include code, classes, or functions. Output only the next line. | with cd(pt_run_dir): |
Predict the next line for this snippet: <|code_start|>
@pytest.fixture
def grompp():
if distutils.spawn.find_executable('gmx'):
return 'gmx grompp'
if distutils.spawn.find_executable('gmx_mpi'):
return 'gmx_mpi grompp'
elif distutils.spawn.find_executable('grompp'):
return 'grompp'
else:
raise OSError(errno.ENOENT, 'No GROMACS executable found')
class TestCompileTPRs(object):
def test_pt_dir_blank(self, pt_blank_dir):
files_present = {f.name for f in pt_blank_dir.glob('*')}
must_contain = {n_top, n_gro, n_template, n_ndx, n_gro_o1, n_gro_o2}
assert must_contain - files_present == set()
def test_basic(self, pt_blank_dir, grompp):
"""
:param pathlib.PosixPath pt_blank_dir:
:return:
"""
dir_topo = pt_blank_dir.joinpath('TOPO')
dir_topo.mkdir()
number = 2
<|code_end|>
with the help of current file imports:
import distutils.spawn
import errno
import os
import pathlib
import pytest
import shutil
from paratemp.tools import cd
from paratemp.sim_setup import compile_tprs
from paratemp.tools import get_temperatures
from paratemp.sim_setup import compile_tprs
from paratemp.tools import get_temperatures
from paratemp.sim_setup import compile_tprs
from paratemp.sim_setup import compile_tprs
from paratemp.sim_setup import compile_tprs
from paratemp.tools import get_temperatures
from paratemp.sim_setup.para_temp_setup import \
_add_cpt_to_sub_script as acpt
from paratemp.sim_setup.para_temp_setup import \
_add_cpt_to_sub_script as acpt
from paratemp.sim_setup.para_temp_setup import \
_add_cpt_to_sub_script as acpt
from paratemp.sim_setup.para_temp_setup import \
_add_cpt_to_sub_script as acpt
from paratemp.sim_setup.para_temp_setup import _find_cpt_base
from paratemp.sim_setup.para_temp_setup import _find_cpt_base
and context from other files:
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
, which may contain function names, class names, or code. Output only the next line. | with cd(dir_topo): |
Here is a snippet: <|code_start|> vec_x = Vector(1, 0, 0)
angle = self.coords[index].diff_angle(vec_x)
axis = self.coords[index].cross(vec_x)
self.coords = [coord.rotate(angle, axis) for coord in self.coords]
def center_and_rotate_on(self, index1, index2):
self.center_on(index1)
self.rotate_to_x_axis_on(index2)
def __str__(self):
f_string = " {0: <10s} {1.x: > 10.5f} {1.y: > 10.5f} " "{1.z: > 10.5f}\n"
output_list = list(self._header)
output_list += [
f_string.format(self.atoms[i], self.coords[i])
for i in range(len(self.atoms))
]
return "".join(output_list)
@property
def n_atoms(self):
_n_atoms = len(self.atoms)
_n_coords = len(self.coords)
if _n_atoms != _n_coords:
print("!!n atoms != n coords!! ({} != {})".format(_n_atoms, _n_coords))
else:
return _n_atoms
@property
def energy(self):
if self._energy is None:
<|code_end|>
. Write the next line using the current file imports:
import re
import numpy as np
from numpy.linalg import norm
from .exceptions import UnknownEnergyError, InputError
and context from other files:
# Path: paratemp/exceptions.py
# class UnknownEnergyError(Exception):
# """
# Exception raised when an object does not know it's energy, but it's queried
#
# """
#
# def __init__(self, msg=None):
# self.msg = msg
#
# def __str__(self):
# standard_response = (
# "The energy is unknown either because it wasn't "
# "in the original file or the coordinates have "
# "changed.\nCould try XYZ.original_energy"
# )
# if self.msg is None:
# return repr(standard_response)
# else:
# return repr(self.msg)
#
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
, which may include functions, classes, or code. Output only the next line. | raise UnknownEnergyError() |
Here is a snippet: <|code_start|>
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
copied from
https://stackoverflow.com/questions/6802577/python-rotation-of-3d-vector
"""
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0)
b, c, d = -axis * np.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array(
[
[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],
]
)
class Vector(np.ndarray):
def __new__(cls, *xyz):
if len(xyz) != 3:
try:
xyz = xyz[0]
except IndexError:
<|code_end|>
. Write the next line using the current file imports:
import re
import numpy as np
from numpy.linalg import norm
from .exceptions import UnknownEnergyError, InputError
and context from other files:
# Path: paratemp/exceptions.py
# class UnknownEnergyError(Exception):
# """
# Exception raised when an object does not know it's energy, but it's queried
#
# """
#
# def __init__(self, msg=None):
# self.msg = msg
#
# def __str__(self):
# standard_response = (
# "The energy is unknown either because it wasn't "
# "in the original file or the coordinates have "
# "changed.\nCould try XYZ.original_energy"
# )
# if self.msg is None:
# return repr(standard_response)
# else:
# return repr(self.msg)
#
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
, which may include functions, classes, or code. Output only the next line. | raise InputError(xyz, "3 values are required to make a vector") |
Given the following code snippet before the placeholder: <|code_start|> out_top.write(line)
if not done:
# Not the right error, but fine for now
# Also, this should not be accessible: if anything, get_solv_count_top
# will fail with a RuntimeError first for any issue.
raise RuntimeError(
"Did not find a line with the solvent count" " in {}".format(n_top)
)
elif verbose:
print(
"Solvent count in {} set at {}".format(os.path.relpath(n_top), str(s_count))
+ "\nOriginal copied to {}.".format(os.path.relpath(bak_name))
)
return None
def copy_topology(f_from, f_to, overwrite=False):
try:
os.makedirs(f_to)
except OSError as e:
if e.errno == 17:
pass # Ignore FileExistsError
else:
raise
to_copy = glob.glob(f_from + "/*.top")
to_copy += glob.glob(f_from + "/*.itp")
for path in to_copy:
copy_no_overwrite(path, f_to, silent=overwrite)
<|code_end|>
, predict the next line using imports from the current file:
import errno
import glob
import os
import pathlib
import py
import re
import subprocess
import warnings
from typing import Callable, Iterable, Match
from paratemp.tools import _BlankStream
from paratemp.exceptions import InputError
from paratemp.tools import cd, copy_no_overwrite
from glob import glob
from gromacs.tools import Trjconv
and context including class names, function names, and sometimes code from other files:
# Path: paratemp/tools.py
# class _BlankStream(object):
# """
# A class for use when not actually wanting to write to a file.
# """
#
# def write(self, string):
# pass
#
# def fileno(self):
# return 0 # Not sure if this works. Maybe None would be better
#
# def flush(self):
# pass
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
#
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
#
# def copy_no_overwrite(src, dst, silent=False):
# exists = False
# if os.path.isdir(src):
# raise OSError(errno.EISDIR, "Is a directory: {}".format(src))
# elif os.path.isdir(dst):
# if os.path.isfile(os.path.join(dst, os.path.basename(src))):
# exists = True
# elif os.path.isfile(dst):
# exists = True
# if exists:
# if silent:
# return dst
# else:
# raise OSError(errno.EEXIST, "File already exists", dst)
# else:
# return shutil.copy(src, dst)
. Output only the next line. | def _submit_script(script_name, log_stream=_BlankStream()): |
Next line prediction: <|code_start|> mol_section = False
for line in in_top:
if line.strip().startswith(";"):
pass
elif not mol_section:
if re.search(r"\[\s*molecules\s*\]", line, flags=re.IGNORECASE):
mol_section = True
else:
solv_match = re_n_solv.search(line)
if solv_match:
return int(solv_match.group(1))
# Not the right error, but fine for now
raise RuntimeError(
"Did not find a line with the solvent count in " "{}".format(n_top)
)
def _get_n_top(n_top, folder):
"""
Get path and name of topology file
:param str n_top: None or path and file name of topology file.
:param str folder: None or folder containing one topology file.
:return: path to the topology file
:rtype: str
:raises ValueError: This is raised if more than one topology is found in
the given folder.
"""
if n_top is None:
if folder is None:
<|code_end|>
. Use current file imports:
(import errno
import glob
import os
import pathlib
import py
import re
import subprocess
import warnings
from typing import Callable, Iterable, Match
from paratemp.tools import _BlankStream
from paratemp.exceptions import InputError
from paratemp.tools import cd, copy_no_overwrite
from glob import glob
from gromacs.tools import Trjconv)
and context including class names, function names, or small code snippets from other files:
# Path: paratemp/tools.py
# class _BlankStream(object):
# """
# A class for use when not actually wanting to write to a file.
# """
#
# def write(self, string):
# pass
#
# def fileno(self):
# return 0 # Not sure if this works. Maybe None would be better
#
# def flush(self):
# pass
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
#
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
#
# def copy_no_overwrite(src, dst, silent=False):
# exists = False
# if os.path.isdir(src):
# raise OSError(errno.EISDIR, "Is a directory: {}".format(src))
# elif os.path.isdir(dst):
# if os.path.isfile(os.path.join(dst, os.path.basename(src))):
# exists = True
# elif os.path.isfile(dst):
# exists = True
# if exists:
# if silent:
# return dst
# else:
# raise OSError(errno.EEXIST, "File already exists", dst)
# else:
# return shutil.copy(src, dst)
. Output only the next line. | raise InputError("None", "Either folder or n_top must be " "specified") |
Continue the code snippet: <|code_start|> "Number of trr and tpr files not equal: "
"{} != {}".format(len(trr_files), len(tpr_files))
)
out_files = list()
for tpr_file, trr_file in zip(tpr_files, trr_files):
out_file = trr_file.replace("trr", "gro")
Trjconv(s=tpr_file, f=trr_file, o=out_file, dump=time, input="0")()
out_files.append(out_file)
return out_files
def get_n_solvent(folder, solvent="DCM"):
"""
Find the number of solvent molecules of given type in topology file.
Note, this function is being deprecated in favor of the more general
:func:`get_solv_count_top`, which takes the strengths of this function
while also allowing for specification of an exact top file.
:param str folder: The folder in which to look for a file ending in '.top'.
:param str solvent: Default: 'DCM'
:return: The number of solvent molecules.
:rtype: int
"""
warnings.warn(
"This function is deprecated. Please use " "get_solv_count_top",
DeprecationWarning,
)
re_n_solv = re.compile(r"(?:^\s*{}\s+)(\d+)".format(solvent))
<|code_end|>
. Use current file imports:
import errno
import glob
import os
import pathlib
import py
import re
import subprocess
import warnings
from typing import Callable, Iterable, Match
from paratemp.tools import _BlankStream
from paratemp.exceptions import InputError
from paratemp.tools import cd, copy_no_overwrite
from glob import glob
from gromacs.tools import Trjconv
and context (classes, functions, or code) from other files:
# Path: paratemp/tools.py
# class _BlankStream(object):
# """
# A class for use when not actually wanting to write to a file.
# """
#
# def write(self, string):
# pass
#
# def fileno(self):
# return 0 # Not sure if this works. Maybe None would be better
#
# def flush(self):
# pass
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
#
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
#
# def copy_no_overwrite(src, dst, silent=False):
# exists = False
# if os.path.isdir(src):
# raise OSError(errno.EISDIR, "Is a directory: {}".format(src))
# elif os.path.isdir(dst):
# if os.path.isfile(os.path.join(dst, os.path.basename(src))):
# exists = True
# elif os.path.isfile(dst):
# exists = True
# if exists:
# if silent:
# return dst
# else:
# raise OSError(errno.EEXIST, "File already exists", dst)
# else:
# return shutil.copy(src, dst)
. Output only the next line. | with cd(folder): |
Here is a snippet: <|code_start|> :param str n_top: Default: None. Name (and path) of the topology file. If
None, folder will be used, but this argument takes priority.
:param str folder: Default: None. If n_top is not provided, this is the
folder that will be searched for a file ending in '.top' to be used.
:param int s_count: Default: 0. The count of solvent molecules to be set
in the topology file.
:param str res_name: Default: 'DCM'. Name of the residue to look for in
the topology file. This is case insensitive (this and the line will
be made lower case when searching for it).
:param str prepend: Default: 'unequal-'. The string to prepend to the
topology file name when copying it (to keep a copy of the original).
:param bool verbose: Default: True. If True, messages will be printed if
no changes need to be made or after the changes have successfully been
made.
:return: None
:raises RuntimeError: This is raised if the it is unable to find the
line with the solvent count. This could also be raised if it cannot find
the molecules section.
"""
n_top = _get_n_top(n_top, folder)
if s_count == get_solv_count_top(n_top=n_top, res_name=res_name):
if verbose:
print(
"Solvent count in {} already set at {}".format(
os.path.relpath(n_top), str(s_count)
)
+ "\nNot copying or changing file."
)
return None
bak_name = os.path.join(os.path.dirname(n_top), prepend + os.path.basename(n_top))
<|code_end|>
. Write the next line using the current file imports:
import errno
import glob
import os
import pathlib
import py
import re
import subprocess
import warnings
from typing import Callable, Iterable, Match
from paratemp.tools import _BlankStream
from paratemp.exceptions import InputError
from paratemp.tools import cd, copy_no_overwrite
from glob import glob
from gromacs.tools import Trjconv
and context from other files:
# Path: paratemp/tools.py
# class _BlankStream(object):
# """
# A class for use when not actually wanting to write to a file.
# """
#
# def write(self, string):
# pass
#
# def fileno(self):
# return 0 # Not sure if this works. Maybe None would be better
#
# def flush(self):
# pass
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
#
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
#
# def copy_no_overwrite(src, dst, silent=False):
# exists = False
# if os.path.isdir(src):
# raise OSError(errno.EISDIR, "Is a directory: {}".format(src))
# elif os.path.isdir(dst):
# if os.path.isfile(os.path.join(dst, os.path.basename(src))):
# exists = True
# elif os.path.isfile(dst):
# exists = True
# if exists:
# if silent:
# return dst
# else:
# raise OSError(errno.EEXIST, "File already exists", dst)
# else:
# return shutil.copy(src, dst)
, which may include functions, classes, or code. Output only the next line. | copy_no_overwrite(n_top, bak_name) |
Given snippet: <|code_start|># #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import, division, print_function
__all__ = ["calc_fes_2d", "calc_fes_1d"]
def _parse_bin_input(bins):
if bins is None:
return dict()
return dict(bins=bins)
def calc_fes_2d(x, y, temp, bins=None):
d_bins = _parse_bin_input(bins)
counts, xedges, yedges = np.histogram2d(x, y, **d_bins)
probs = np.array([[i / counts.max() for i in j] for j in counts]) + 1e-40
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
from matplotlib import pyplot as plt
from .constants import r
from .tools import running_mean
and context:
# Path: paratemp/constants.py
#
# Path: paratemp/tools.py
# def running_mean(x, n=2):
# """
# Calculate running mean over an iterable
#
# Taken from https://stackoverflow.com/a/22621523/3961920
#
# :param Iterable x: List over which to calculate the mean.
# :param int n: Default: 2. Width for the means.
# :return: Array of the running mean values.
# :rtype: np.ndarray
# """
# if len(x) != 0:
# return np.convolve(x, np.ones((n,)) / n, mode="valid")
# else:
# raise ValueError("x cannot be empty")
which might include code, classes, or functions. Output only the next line. | delta_g = np.array([[-r * temp * np.log(p) for p in j] for j in probs]) |
Here is a snippet: <|code_start|># http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import, division, print_function
__all__ = ["calc_fes_2d", "calc_fes_1d"]
def _parse_bin_input(bins):
if bins is None:
return dict()
return dict(bins=bins)
def calc_fes_2d(x, y, temp, bins=None):
d_bins = _parse_bin_input(bins)
counts, xedges, yedges = np.histogram2d(x, y, **d_bins)
probs = np.array([[i / counts.max() for i in j] for j in counts]) + 1e-40
delta_g = np.array([[-r * temp * np.log(p) for p in j] for j in probs])
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
from matplotlib import pyplot as plt
from .constants import r
from .tools import running_mean
and context from other files:
# Path: paratemp/constants.py
#
# Path: paratemp/tools.py
# def running_mean(x, n=2):
# """
# Calculate running mean over an iterable
#
# Taken from https://stackoverflow.com/a/22621523/3961920
#
# :param Iterable x: List over which to calculate the mean.
# :param int n: Default: 2. Width for the means.
# :return: Array of the running mean values.
# :rtype: np.ndarray
# """
# if len(x) != 0:
# return np.convolve(x, np.ones((n,)) / n, mode="valid")
# else:
# raise ValueError("x cannot be empty")
, which may include functions, classes, or code. Output only the next line. | xmids, ymids = running_mean(xedges), running_mean(yedges) |
Continue the code snippet: <|code_start|> def _get_amber_env() -> Dict[str, str]:
log.info("Using special environment variables for Amber executables")
amber_env_stream = pkg_resources.resource_stream(
__name__, "SimpleSim_data/amber_env.json"
) # type: TextIOBase
amber_env = json.load(amber_env_stream)
curr_env = dict(os.environ)
curr_env.update(amber_env)
try:
conda_prefix = Path(curr_env["CONDA_PREFIX"])
python_bin = conda_prefix / "bin"
except KeyError:
python_bin = Path(sys.executable).parent
curr_env["PATH"] += os.pathsep + str(python_bin)
return curr_env
@property
def topology(self) -> parmed.Structure:
return self._ptop
@property
def directory(self) -> Path:
return self._directory
@property
def name(self) -> str:
return self._name
def _run_in_dir(self, cl, **kwargs) -> subprocess.CompletedProcess:
<|code_end|>
. Use current file imports:
from io import TextIOBase
from pathlib import Path
from typing import Union, Dict, Any
from ..tools import cd
import json
import logging
import os
import shlex
import shutil
import subprocess
import parmed
import pkg_resources
import sys
and context (classes, functions, or code) from other files:
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
. Output only the next line. | with cd(self._directory): |
Predict the next line for this snippet: <|code_start|> :param str xlabel: Default: 'distance / $\\mathrm{\\AA}$'. The label for
the x axis.
:type ax: matplotlib.axes.Axes
:param ax: Default: None. The axes objects on which to make the plots.
If None is supplied, new axes objects will be created.
:param data: Default: None.
If given, this must be an object that can be indexed by `x` to give
the series from which the FES should be made.
For example, these are equivalent:
>>> fes_1d(data[x], temp)
>>> fes_1d(x, temp, data=data)
:param kwargs: keyword arguments to pass to the plotter
:rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,
matplotlib.figure.Figure, matplotlib.axes.Axes)
:return: The delta G values, the bin centers, the lines object, the
figure and the axes
"""
if data is not None:
_x = data[x]
else:
_x = x
_fig, _ax = _parse_ax_input(ax)
<|code_end|>
with the help of current file imports:
import math
import matplotlib as mpl
from matplotlib import pyplot as plt
from six.moves import range
from .utils import calc_fes_1d, _parse_ax_input
from .exceptions import InputError
and context from other files:
# Path: paratemp/utils.py
# def calc_fes_1d(data, temp, bins=None):
# d_bins = _parse_bin_input(bins)
# n, _bins = np.histogram(data, **d_bins)
# n = [float(j) for j in n]
# # TODO find better way to account for zeros here rather than
# # just adding a small amount to each.
# prob = np.array([j / max(n) for j in n]) + 1e-40
# delta_g = np.array([-r * temp * np.log(p) for p in prob])
# bin_mids = running_mean(_bins, 2)
# return delta_g, bin_mids
#
# def _parse_ax_input(ax):
# if ax is None:
# fig, ax = plt.subplots()
# else:
# fig = ax.figure
# return fig, ax
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
, which may contain function names, class names, or code. Output only the next line. | delta_g, bin_mids = calc_fes_1d(_x, temp=temp, bins=bins) |
Given the code snippet: <|code_start|>
:param str xlabel: Default: 'distance / $\\mathrm{\\AA}$'. The label for
the x axis.
:type ax: matplotlib.axes.Axes
:param ax: Default: None. The axes objects on which to make the plots.
If None is supplied, new axes objects will be created.
:param data: Default: None.
If given, this must be an object that can be indexed by `x` to give
the series from which the FES should be made.
For example, these are equivalent:
>>> fes_1d(data[x], temp)
>>> fes_1d(x, temp, data=data)
:param kwargs: keyword arguments to pass to the plotter
:rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,
matplotlib.figure.Figure, matplotlib.axes.Axes)
:return: The delta G values, the bin centers, the lines object, the
figure and the axes
"""
if data is not None:
_x = data[x]
else:
_x = x
<|code_end|>
, generate the next line using the imports in this file:
import math
import matplotlib as mpl
from matplotlib import pyplot as plt
from six.moves import range
from .utils import calc_fes_1d, _parse_ax_input
from .exceptions import InputError
and context (functions, classes, or occasionally code) from other files:
# Path: paratemp/utils.py
# def calc_fes_1d(data, temp, bins=None):
# d_bins = _parse_bin_input(bins)
# n, _bins = np.histogram(data, **d_bins)
# n = [float(j) for j in n]
# # TODO find better way to account for zeros here rather than
# # just adding a small amount to each.
# prob = np.array([j / max(n) for j in n]) + 1e-40
# delta_g = np.array([-r * temp * np.log(p) for p in prob])
# bin_mids = running_mean(_bins, 2)
# return delta_g, bin_mids
#
# def _parse_ax_input(ax):
# if ax is None:
# fig, ax = plt.subplots()
# else:
# fig = ax.figure
# return fig, ax
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
. Output only the next line. | _fig, _ax = _parse_ax_input(ax) |
Using the snippet: <|code_start|> given, these must correspond to columns in `data`. If `labels` is
None, the first three columns of `data` will be used with their column
names as the labels.
:param float temp: Temperature at which to calculate the free energy
surface. This should be the temperature at which the simulation was run.
:param Iterable[str] labels: An iterable of at least length three or None.
If this is not None, the first three elements of it will be used as
column names to pick with data to plot from `data`.
If it is None, the first three column names from `data` will be used.
These values will also be used as labels for the legend.
:param np.array(matplotlib.axes.Axes) axes: A set of axes on which to
make the FESes and the legend. If this is given, it must support up to
axes.flat[3].
If None, a new figure with 2x2 axes will be created.
:type bins: int or Sequence[int or float] or str
:param bins: Default: None. The bins argument to be passed to
np.histogram
:param kwargs: keyword arguments to pass to the plot function
:rtype: Tuple(List(np.ndarray), List(np.ndarray),
List(matplotlib.lines.Line2D), matplotlib.figure.Figure,
matplotlib.axes.Axes)
:return: The delta G values, the bin centers, the lines objects, the
figure and the axes
"""
if axes is None:
fig, axes = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
else:
try:
fig = axes.flat[3].figure
except (IndexError, TypeError):
<|code_end|>
, determine the next line of code. You have imports:
import math
import matplotlib as mpl
from matplotlib import pyplot as plt
from six.moves import range
from .utils import calc_fes_1d, _parse_ax_input
from .exceptions import InputError
and context (class names, function names, or code) available:
# Path: paratemp/utils.py
# def calc_fes_1d(data, temp, bins=None):
# d_bins = _parse_bin_input(bins)
# n, _bins = np.histogram(data, **d_bins)
# n = [float(j) for j in n]
# # TODO find better way to account for zeros here rather than
# # just adding a small amount to each.
# prob = np.array([j / max(n) for j in n]) + 1e-40
# delta_g = np.array([-r * temp * np.log(p) for p in prob])
# bin_mids = running_mean(_bins, 2)
# return delta_g, bin_mids
#
# def _parse_ax_input(ax):
# if ax is None:
# fig, ax = plt.subplots()
# else:
# fig = ax.figure
# return fig, ax
#
# Path: paratemp/exceptions.py
# class InputError(Exception):
# """Exception raised for errors in the input.
#
# Attributes:
# expr -- input expression in which the error occurred
# msg -- explanation of the error
# """
#
# def __init__(self, expr, msg):
# self.expr = expr
# self.msg = msg
#
# def __str__(self):
# output = 'Incorrect input "{}". {}'.format(self.expr, self.msg)
# return repr(output)
. Output only the next line. | raise InputError( |
Predict the next line for this snippet: <|code_start|># See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
n_gro, n_top, n_template, n_ndx = ('spc-and-methanol.gro',
'spc-and-methanol.top',
'templatemdp.txt',
'index.ndx')
n_gro_o1, n_gro_o2 = 'PT-out0.gro', 'PT-out1.gro'
n_edr_o1, n_edr_o2 = 'PT-out0.edr', 'PT-out1.edr'
n_log_o1, n_log_o2 = 'PT-out0.log', 'PT-out1.log'
n_trr_o1, n_trr_o2 = 'PT-out0.trr', 'PT-out1.trr'
def test_pt_run_dir(pt_run_dir: pathlib.PosixPath):
files_present = {f.name for f in pt_run_dir.iterdir()}
must_contain = {n_gro_o1, n_gro_o2, n_log_o1, n_log_o2,
n_edr_o1, n_edr_o2, n_trr_o1, n_trr_o2}
assert must_contain - files_present == set()
def test_find_energies(pt_run_dir: pathlib.PosixPath):
# Doesn't currently test:
# content of the outputs
# what happens if they already exist
<|code_end|>
with the help of current file imports:
import numpy as np
import pathlib
import re
from paratemp.tools import cd
from paratemp.energy_histo import find_energies
from paratemp.energy_histo import make_indices
from paratemp.energy_histo import import_energies, find_energies
and context from other files:
# Path: paratemp/tools.py
# @contextmanager
# def cd(new_dir):
# prev_dir = os.getcwd()
# os.chdir(os.path.expanduser(new_dir))
# try:
# yield
# finally:
# os.chdir(prev_dir)
, which may contain function names, class names, or code. Output only the next line. | with cd(pt_run_dir): |
Here is a snippet: <|code_start|>gbsa_itp = pkg_resources.resource_string(__name__, "SimpleSim_data/gbsa_all.itp")
def get_gbsa_itp(directory: Path):
to_path = directory / "gbsa_all.itp"
to_path.write_bytes(gbsa_itp)
return to_path.resolve()
class System(object):
"""
The System class is intended to combine several Molecules.
It will add the molecules together into one box and optionally shift the
molecules in the z direction to make sure the molecules are no longer
overlapping.
It can also optionally add GBSA implicit solvation parameters based on
Amber03.
:param args: molecules to combine into this object
:param name: name of the system
:param shift: If True, the molecules will be moved to be non-overlapping
:param spacing: distance to put between the molecules (in angstroms)
:param include_gbsa: If True, GBSA parameters for implicit solvation will
be included in the topology file
:param box_length: Length of cubic box (in angstroms)
"""
def __init__(
self,
<|code_end|>
. Write the next line using the current file imports:
import logging
import re
import parmed
import pkg_resources
from pathlib import Path
from typing import Dict
from . import Molecule
and context from other files:
# Path: paratemp/sim_setup/molecule.py
# class Molecule(object):
# """
# Molecule class will make a GAFF-parameterized Structure from an input
# """
#
# def __init__(
# self,
# geometry: Union[str, Path],
# charge: int = 0,
# name: str = None,
# resname: str = "MOL",
# ):
# log.debug("Initializing Molecule with {}".format(geometry))
# self._input_geo_path = Path(geometry)
# self._name = self._input_geo_path.stem if name is None else name
# self.resname = resname
# self._directory = Path(self._name).resolve()
# self._directory.mkdir(exist_ok=True)
# shutil.copy(self._input_geo_path, self._directory)
# self.charge = int(charge)
# self._parameterized = False
# self._gro = None
# self._top = None
# self._ptop = None
# self.atom_types = None
#
# @classmethod
# def from_make_mol_inputs(cls, mol_inputs):
# return cls(**mol_inputs)
#
# @classmethod
# def assisted(cls):
# return cls(**make_mol_inputs())
#
# def parameterize(self):
# # could take keywords for FF
# # could use charges from QM calc
# # TODO convert from whatever to PDB, MDL, or MOL2
# log.debug("Parameterizing {} with acpype".format(self._name))
# env_to_load = self._get_amber_env()
# cl = shlex.split(
# "acpype -i {} "
# "-o gmx "
# "-n {} "
# "-c user "
# "-b {} ".format(self._input_geo_path.resolve(), self.charge, self._name)
# )
# log.warning("Running acpype; this may take a few minutes")
# proc = self._run_in_dir(cl, env=env_to_load)
# log.info("acpype said:\n {}".format(proc.stdout))
# proc.check_returncode()
# ac_dir = self._directory / "{}.acpype".format(self._name)
# gro = ac_dir / "{}_GMX.gro".format(self._name)
# top = ac_dir / "{}_GMX.top".format(self._name)
# if not gro.is_file() or not top.is_file():
# mes = "gro or top file not created in {}".format(ac_dir)
# log.error(mes)
# raise FileNotFoundError(mes)
# self._gro = gro
# self._top = top
# ptop = parmed.gromacs.GromacsTopologyFile(str(top), xyz=str(gro))
# self._ptop = ptop
# for res in ptop.residues:
# res.name = self.resname
# self.atom_types = set(a.type for a in ptop.atoms)
# ptop.write(str(self._directory / "{}.top".format(self._name)))
# ptop.save(str(self._directory / "{}.gro".format(self._name)))
# log.info("Wrote top and gro files in {}".format(self._directory))
# self._parameterized = True
#
# @staticmethod
# def _get_amber_env() -> Dict[str, str]:
# log.info("Using special environment variables for Amber executables")
# amber_env_stream = pkg_resources.resource_stream(
# __name__, "SimpleSim_data/amber_env.json"
# ) # type: TextIOBase
# amber_env = json.load(amber_env_stream)
# curr_env = dict(os.environ)
# curr_env.update(amber_env)
# try:
# conda_prefix = Path(curr_env["CONDA_PREFIX"])
# python_bin = conda_prefix / "bin"
# except KeyError:
# import sys
#
# python_bin = Path(sys.executable).parent
# curr_env["PATH"] += os.pathsep + str(python_bin)
# return curr_env
#
# @property
# def topology(self) -> parmed.Structure:
# return self._ptop
#
# @property
# def directory(self) -> Path:
# return self._directory
#
# @property
# def name(self) -> str:
# return self._name
#
# def _run_in_dir(self, cl, **kwargs) -> subprocess.CompletedProcess:
# with cd(self._directory):
# proc = subprocess.run(
# cl,
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# universal_newlines=True,
# **kwargs
# )
# return proc
#
# def __repr__(self):
# return "<{} Molecule; parameterized: {}>".format(self.name, self._parameterized)
, which may include functions, classes, or code. Output only the next line. | *args: Molecule, |
Continue the code snippet: <|code_start|>
return contract._status_class(**attributes)
def register_contract(contract, description=''):
"""
Register a contract and make it
:param contract:
:param description:
:return:
"""
global contract_registry
if contract.__class__.__name__ in contract_registry:
raise ValueError('A contract with the same name already registered')
else:
contract_registry[contract.__class__.__name__] = contract
db_contract = Contract()
db_contract.name = contract.__class__.__name__
db_contract.created = datetime.datetime.now()
db_contract.description = description
first_status = Status()
first_status.contract = db_contract
first_status.when = datetime.datetime.now()
first_status.attributes = status(contract).dump()
# Genesis key is the name of the contract
first_status.key = contract.__class__.__name__.encode('utf-8')
<|code_end|>
. Use current file imports:
import abc
import datetime
import inspect
from pyledger.server.db import DB, Contract, Status
from pyledger.server.status import SimpleStatus
and context (classes, functions, or code) from other files:
# Path: pyledger/server/db.py
# DB = Handler()
#
# class Contract(Model):
# __tablename__ = 'contracts'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# description = Column(String)
# created = Column(DateTime)
# status = relationship("Status", lazy="subquery")
# user_id = Column(Integer, ForeignKey('users.id'))
# user = relationship("User", back_populates="contracts")
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return Contract.query().filter(Contract.name == name).one_or_none()
#
# def last_status(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc(Status.when)).first()
#
# def last_statuses(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc.Status.when).limit(2).all()
#
# class Status(Model):
# __tablename__ = 'status'
# id = Column(Integer, primary_key=True)
# contract_id = Column(Integer, ForeignKey('contracts.id'))
# contract = relationship("Contract", back_populates="status")
# attributes = Column(LargeBinary)
# key = Column(LargeBinary, unique=True) # Crash if there is a key collision.
# when = Column(DateTime)
# owner = Column(String)
#
# def __repr__(self):
# return '<Status key: {}>'.format(self.key)
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# Path: pyledger/server/status.py
# class SimpleStatus(BaseStatus):
# """
# Simple status for the smart contract based on a dictionary.
# """
# def __init__(self, **kwargs):
# self.args_list = [a for a in kwargs]
#
# for k, v in kwargs.items():
# setattr(self, k, v)
#
# def dump(self):
# return pickle.dumps({k: getattr(self, k) for k in self.args_list})
#
# def load(self, dump: bytes):
# status = pickle.loads(dump)
# self.args_list = [a for a in status]
#
# for k, v in status.items():
# setattr(self, k, v)
#
# def to_dict(self):
# return {k: getattr(self, k) for k in self.args_list}
#
# def __contains__(self, item):
# return item in self.__dict__
#
# def __repr__(self):
# return 'Pyledger status with attributes {}'.format(
# self.args_list)
. Output only the next line. | DB.session.add(db_contract) |
Predict the next line for this snippet: <|code_start|>
def status(contract):
all_attributes = inspect.getmembers(
contract,
predicate=lambda a: not(inspect.isroutine(a)))
attributes = {}
for attribute in all_attributes:
if not attribute[0].startswith('_'):
attributes[attribute[0]] = attribute[1]
return contract._status_class(**attributes)
def register_contract(contract, description=''):
"""
Register a contract and make it
:param contract:
:param description:
:return:
"""
global contract_registry
if contract.__class__.__name__ in contract_registry:
raise ValueError('A contract with the same name already registered')
else:
contract_registry[contract.__class__.__name__] = contract
<|code_end|>
with the help of current file imports:
import abc
import datetime
import inspect
from pyledger.server.db import DB, Contract, Status
from pyledger.server.status import SimpleStatus
and context from other files:
# Path: pyledger/server/db.py
# DB = Handler()
#
# class Contract(Model):
# __tablename__ = 'contracts'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# description = Column(String)
# created = Column(DateTime)
# status = relationship("Status", lazy="subquery")
# user_id = Column(Integer, ForeignKey('users.id'))
# user = relationship("User", back_populates="contracts")
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return Contract.query().filter(Contract.name == name).one_or_none()
#
# def last_status(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc(Status.when)).first()
#
# def last_statuses(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc.Status.when).limit(2).all()
#
# class Status(Model):
# __tablename__ = 'status'
# id = Column(Integer, primary_key=True)
# contract_id = Column(Integer, ForeignKey('contracts.id'))
# contract = relationship("Contract", back_populates="status")
# attributes = Column(LargeBinary)
# key = Column(LargeBinary, unique=True) # Crash if there is a key collision.
# when = Column(DateTime)
# owner = Column(String)
#
# def __repr__(self):
# return '<Status key: {}>'.format(self.key)
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# Path: pyledger/server/status.py
# class SimpleStatus(BaseStatus):
# """
# Simple status for the smart contract based on a dictionary.
# """
# def __init__(self, **kwargs):
# self.args_list = [a for a in kwargs]
#
# for k, v in kwargs.items():
# setattr(self, k, v)
#
# def dump(self):
# return pickle.dumps({k: getattr(self, k) for k in self.args_list})
#
# def load(self, dump: bytes):
# status = pickle.loads(dump)
# self.args_list = [a for a in status]
#
# for k, v in status.items():
# setattr(self, k, v)
#
# def to_dict(self):
# return {k: getattr(self, k) for k in self.args_list}
#
# def __contains__(self, item):
# return item in self.__dict__
#
# def __repr__(self):
# return 'Pyledger status with attributes {}'.format(
# self.args_list)
, which may contain function names, class names, or code. Output only the next line. | db_contract = Contract() |
Given snippet: <|code_start|> predicate=lambda a: not(inspect.isroutine(a)))
attributes = {}
for attribute in all_attributes:
if not attribute[0].startswith('_'):
attributes[attribute[0]] = attribute[1]
return contract._status_class(**attributes)
def register_contract(contract, description=''):
"""
Register a contract and make it
:param contract:
:param description:
:return:
"""
global contract_registry
if contract.__class__.__name__ in contract_registry:
raise ValueError('A contract with the same name already registered')
else:
contract_registry[contract.__class__.__name__] = contract
db_contract = Contract()
db_contract.name = contract.__class__.__name__
db_contract.created = datetime.datetime.now()
db_contract.description = description
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import abc
import datetime
import inspect
from pyledger.server.db import DB, Contract, Status
from pyledger.server.status import SimpleStatus
and context:
# Path: pyledger/server/db.py
# DB = Handler()
#
# class Contract(Model):
# __tablename__ = 'contracts'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# description = Column(String)
# created = Column(DateTime)
# status = relationship("Status", lazy="subquery")
# user_id = Column(Integer, ForeignKey('users.id'))
# user = relationship("User", back_populates="contracts")
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return Contract.query().filter(Contract.name == name).one_or_none()
#
# def last_status(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc(Status.when)).first()
#
# def last_statuses(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc.Status.when).limit(2).all()
#
# class Status(Model):
# __tablename__ = 'status'
# id = Column(Integer, primary_key=True)
# contract_id = Column(Integer, ForeignKey('contracts.id'))
# contract = relationship("Contract", back_populates="status")
# attributes = Column(LargeBinary)
# key = Column(LargeBinary, unique=True) # Crash if there is a key collision.
# when = Column(DateTime)
# owner = Column(String)
#
# def __repr__(self):
# return '<Status key: {}>'.format(self.key)
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# Path: pyledger/server/status.py
# class SimpleStatus(BaseStatus):
# """
# Simple status for the smart contract based on a dictionary.
# """
# def __init__(self, **kwargs):
# self.args_list = [a for a in kwargs]
#
# for k, v in kwargs.items():
# setattr(self, k, v)
#
# def dump(self):
# return pickle.dumps({k: getattr(self, k) for k in self.args_list})
#
# def load(self, dump: bytes):
# status = pickle.loads(dump)
# self.args_list = [a for a in status]
#
# for k, v in status.items():
# setattr(self, k, v)
#
# def to_dict(self):
# return {k: getattr(self, k) for k in self.args_list}
#
# def __contains__(self, item):
# return item in self.__dict__
#
# def __repr__(self):
# return 'Pyledger status with attributes {}'.format(
# self.args_list)
which might include code, classes, or functions. Output only the next line. | first_status = Status() |
Based on the snippet: <|code_start|># Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
contract_registry = {}
class BaseContract(abc.ABC):
pass
class SimpleContract(BaseContract):
"""
Contract that uses SimpleStatus for serialization.
The goal of this class is to make a contact feel just like a Python class.
"""
<|code_end|>
, predict the immediate next line with the help of imports:
import abc
import datetime
import inspect
from pyledger.server.db import DB, Contract, Status
from pyledger.server.status import SimpleStatus
and context (classes, functions, sometimes code) from other files:
# Path: pyledger/server/db.py
# DB = Handler()
#
# class Contract(Model):
# __tablename__ = 'contracts'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# description = Column(String)
# created = Column(DateTime)
# status = relationship("Status", lazy="subquery")
# user_id = Column(Integer, ForeignKey('users.id'))
# user = relationship("User", back_populates="contracts")
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return Contract.query().filter(Contract.name == name).one_or_none()
#
# def last_status(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc(Status.when)).first()
#
# def last_statuses(self):
# return Status.query().filter(
# Status.contract == self
# ).order_by(desc.Status.when).limit(2).all()
#
# class Status(Model):
# __tablename__ = 'status'
# id = Column(Integer, primary_key=True)
# contract_id = Column(Integer, ForeignKey('contracts.id'))
# contract = relationship("Contract", back_populates="status")
# attributes = Column(LargeBinary)
# key = Column(LargeBinary, unique=True) # Crash if there is a key collision.
# when = Column(DateTime)
# owner = Column(String)
#
# def __repr__(self):
# return '<Status key: {}>'.format(self.key)
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# Path: pyledger/server/status.py
# class SimpleStatus(BaseStatus):
# """
# Simple status for the smart contract based on a dictionary.
# """
# def __init__(self, **kwargs):
# self.args_list = [a for a in kwargs]
#
# for k, v in kwargs.items():
# setattr(self, k, v)
#
# def dump(self):
# return pickle.dumps({k: getattr(self, k) for k in self.args_list})
#
# def load(self, dump: bytes):
# status = pickle.loads(dump)
# self.args_list = [a for a in status]
#
# for k, v in status.items():
# setattr(self, k, v)
#
# def to_dict(self):
# return {k: getattr(self, k) for k in self.args_list}
#
# def __contains__(self, item):
# return item in self.__dict__
#
# def __repr__(self):
# return 'Pyledger status with attributes {}'.format(
# self.args_list)
. Output only the next line. | _status_class = SimpleStatus |
Using the snippet: <|code_start|># (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
loop = asyncio.get_event_loop()
class Protocol(WebSocketServerProtocol):
contract = None
bcast_topic = 36*b'0'
def onConnect(self, request):
self.factory.register(self)
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
try:
topic = payload[:36]
payload = payload[36:]
<|code_end|>
, determine the next line of code. You have imports:
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from pyledger.server.handlers import handle_request
from pyledger.server.contract import register_contract, SimpleContract
from pyledger.server.db import DB
from pyledger.server.auth import Permissions, method_allow
import asyncio
and context (class names, function names, or code) available:
# Path: pyledger/server/handlers.py
# def handle_request(payload: bytes):
# """
# Handle a single request
#
# :param payload: Serialized PyledgerRequest message
# :return:
# """
# handler = Handler()
# message = PyledgerRequest()
# response = PyledgerResponse()
#
# try:
# message.ParseFromString(payload)
# except DecodeError:
# response.successful = False
# response.data = b'Message not properly formatted'
# return response.SerializeToString()
#
# if message.request not in handler_methods(handler):
# response.successful = False
# response.data = 'Request type {} not available'.format(message.request).encode()
# return response.SerializeToString()
#
# else:
# # Handle authentication
# if message.request in permissions_registry:
# user = User.from_name(message.user)
# permission_required = permissions_registry[message.request]
#
# if not user.check_password(message.password):
# response.successful = False
# response.data = b'Wrong user and/or password'
# return response.SerializeToString()
#
# if user.get_permissions().value > permission_required.value:
# response.successful = False
# response.data = b'Not enough permissions'
# return response.SerializeToString()
#
# session = Session.from_key(message.session_key)
#
# if not session:
# response.successful = False
# response.data = b'Session not available'
# return response.SerializeToString()
#
# if not session.user == user:
# response.successful = False
# response.data = b'Session not owned by this user'
# return response.SerializeToString()
#
# if session.until < datetime.datetime.now():
# response.successful = False
# response.data = b'Session expired, restart your client'
# return response.SerializeToString()
#
# # Select the function from the handler
# try:
# print('Handling message', message)
# successful, result = getattr(handler, message.request)(message)
# except Exception as exc:
# successful = False
# result = b'Exception in user function: ' + repr(exc).encode('utf-8')
#
# response.successful = successful
# response.data = result
# return response.SerializeToString()
#
# Path: pyledger/server/contract.py
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# Path: pyledger/server/db.py
# DB = Handler()
#
# Path: pyledger/server/auth.py
# def create_master(password):
# def create_user(name, password):
# def allow(permission):
# def decorator(func):
# def method_allow(permission):
# def decorator(func):
. Output only the next line. | response = handle_request(payload) |
Here is a snippet: <|code_start|>
self.accounts[key] = 0.0
return key
def increment(self, key: str, quantity: float):
if key not in self.accounts:
raise Exception('Account not found')
self.accounts[key] += quantity
def transfer(self, source: str, dest: str, quantity: float):
if source not in self.accounts:
raise Exception('Source account not found')
if dest not in self.accounts:
raise Exception('Destination account not found')
if self.accounts[source] < quantity:
raise Exception('Not enough funds in source account')
if quantity < 0:
raise Exception('You cannot transfer negative currency')
self.accounts[source] -= quantity
self.accounts[dest] += quantity
def balance(self, key: str):
if key not in self.accounts:
print(self.accounts)
raise Exception('Account not found')
return str(self.accounts[key])
<|code_end|>
. Write the next line using the current file imports:
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from pyledger.server.handlers import handle_request
from pyledger.server.contract import register_contract, SimpleContract
from pyledger.server.db import DB
from pyledger.server.auth import Permissions, method_allow
import asyncio
and context from other files:
# Path: pyledger/server/handlers.py
# def handle_request(payload: bytes):
# """
# Handle a single request
#
# :param payload: Serialized PyledgerRequest message
# :return:
# """
# handler = Handler()
# message = PyledgerRequest()
# response = PyledgerResponse()
#
# try:
# message.ParseFromString(payload)
# except DecodeError:
# response.successful = False
# response.data = b'Message not properly formatted'
# return response.SerializeToString()
#
# if message.request not in handler_methods(handler):
# response.successful = False
# response.data = 'Request type {} not available'.format(message.request).encode()
# return response.SerializeToString()
#
# else:
# # Handle authentication
# if message.request in permissions_registry:
# user = User.from_name(message.user)
# permission_required = permissions_registry[message.request]
#
# if not user.check_password(message.password):
# response.successful = False
# response.data = b'Wrong user and/or password'
# return response.SerializeToString()
#
# if user.get_permissions().value > permission_required.value:
# response.successful = False
# response.data = b'Not enough permissions'
# return response.SerializeToString()
#
# session = Session.from_key(message.session_key)
#
# if not session:
# response.successful = False
# response.data = b'Session not available'
# return response.SerializeToString()
#
# if not session.user == user:
# response.successful = False
# response.data = b'Session not owned by this user'
# return response.SerializeToString()
#
# if session.until < datetime.datetime.now():
# response.successful = False
# response.data = b'Session expired, restart your client'
# return response.SerializeToString()
#
# # Select the function from the handler
# try:
# print('Handling message', message)
# successful, result = getattr(handler, message.request)(message)
# except Exception as exc:
# successful = False
# result = b'Exception in user function: ' + repr(exc).encode('utf-8')
#
# response.successful = successful
# response.data = result
# return response.SerializeToString()
#
# Path: pyledger/server/contract.py
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# Path: pyledger/server/db.py
# DB = Handler()
#
# Path: pyledger/server/auth.py
# def create_master(password):
# def create_user(name, password):
# def allow(permission):
# def decorator(func):
# def method_allow(permission):
# def decorator(func):
, which may include functions, classes, or code. Output only the next line.
register_contract(DigitalCurrency())
Based on the snippet: <|code_start|> print("broadcasting message '{}' ..".format(msg))
for c in self.clients:
c.sendMessage(msg, True)
print("message sent to {}".format(c.peer))
def run_server(address="ws://127.0.0.1:9000"):
factory = BroadcastServerFactory(address)
factory.protocol = Protocol
server = loop.create_server(factory,
'0.0.0.0',
int(address.split(':')[2])
)
task = loop.run_until_complete(server)
try:
print('Starting event loop...')
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
task.close()
loop.close()
if __name__ == '__main__':
DB.sync_tables()
# Base contract for testing
<|code_end|>
, predict the immediate next line with the help of imports:
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from pyledger.server.handlers import handle_request
from pyledger.server.contract import register_contract, SimpleContract
from pyledger.server.db import DB
from pyledger.server.auth import Permissions, method_allow
import asyncio
and context (classes, functions, sometimes code) from other files:
# Path: pyledger/server/handlers.py
# def handle_request(payload: bytes):
# """
# Handle a single request
#
# :param payload: Serialized PyledgerRequest message
# :return:
# """
# handler = Handler()
# message = PyledgerRequest()
# response = PyledgerResponse()
#
# try:
# message.ParseFromString(payload)
# except DecodeError:
# response.successful = False
# response.data = b'Message not properly formatted'
# return response.SerializeToString()
#
# if message.request not in handler_methods(handler):
# response.successful = False
# response.data = 'Request type {} not available'.format(message.request).encode()
# return response.SerializeToString()
#
# else:
# # Handle authentication
# if message.request in permissions_registry:
# user = User.from_name(message.user)
# permission_required = permissions_registry[message.request]
#
# if not user.check_password(message.password):
# response.successful = False
# response.data = b'Wrong user and/or password'
# return response.SerializeToString()
#
# if user.get_permissions().value > permission_required.value:
# response.successful = False
# response.data = b'Not enough permissions'
# return response.SerializeToString()
#
# session = Session.from_key(message.session_key)
#
# if not session:
# response.successful = False
# response.data = b'Session not available'
# return response.SerializeToString()
#
# if not session.user == user:
# response.successful = False
# response.data = b'Session not owned by this user'
# return response.SerializeToString()
#
# if session.until < datetime.datetime.now():
# response.successful = False
# response.data = b'Session expired, restart your client'
# return response.SerializeToString()
#
# # Select the function from the handler
# try:
# print('Handling message', message)
# successful, result = getattr(handler, message.request)(message)
# except Exception as exc:
# successful = False
# result = b'Exception in user function: ' + repr(exc).encode('utf-8')
#
# response.successful = successful
# response.data = result
# return response.SerializeToString()
#
# Path: pyledger/server/contract.py
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# Path: pyledger/server/db.py
# DB = Handler()
#
# Path: pyledger/server/auth.py
# def create_master(password):
# def create_user(name, password):
# def allow(permission):
# def decorator(func):
# def method_allow(permission):
# def decorator(func):
. Output only the next line.
class DigitalCurrency(SimpleContract):
Predict the next line after this snippet: <|code_start|> print("unregistered client {}".format(client.peer))
self.clients.remove(client)
def broadcast(self, msg):
print("broadcasting message '{}' ..".format(msg))
for c in self.clients:
c.sendMessage(msg, True)
print("message sent to {}".format(c.peer))
def run_server(address="ws://127.0.0.1:9000"):
factory = BroadcastServerFactory(address)
factory.protocol = Protocol
server = loop.create_server(factory,
'0.0.0.0',
int(address.split(':')[2])
)
task = loop.run_until_complete(server)
try:
print('Starting event loop...')
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
task.close()
loop.close()
if __name__ == '__main__':
<|code_end|>
using the current file's imports:
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from pyledger.server.handlers import handle_request
from pyledger.server.contract import register_contract, SimpleContract
from pyledger.server.db import DB
from pyledger.server.auth import Permissions, method_allow
import asyncio
and any relevant context from other files:
# Path: pyledger/server/handlers.py
# def handle_request(payload: bytes):
# """
# Handle a single request
#
# :param payload: Serialized PyledgerRequest message
# :return:
# """
# handler = Handler()
# message = PyledgerRequest()
# response = PyledgerResponse()
#
# try:
# message.ParseFromString(payload)
# except DecodeError:
# response.successful = False
# response.data = b'Message not properly formatted'
# return response.SerializeToString()
#
# if message.request not in handler_methods(handler):
# response.successful = False
# response.data = 'Request type {} not available'.format(message.request).encode()
# return response.SerializeToString()
#
# else:
# # Handle authentication
# if message.request in permissions_registry:
# user = User.from_name(message.user)
# permission_required = permissions_registry[message.request]
#
# if not user.check_password(message.password):
# response.successful = False
# response.data = b'Wrong user and/or password'
# return response.SerializeToString()
#
# if user.get_permissions().value > permission_required.value:
# response.successful = False
# response.data = b'Not enough permissions'
# return response.SerializeToString()
#
# session = Session.from_key(message.session_key)
#
# if not session:
# response.successful = False
# response.data = b'Session not available'
# return response.SerializeToString()
#
# if not session.user == user:
# response.successful = False
# response.data = b'Session not owned by this user'
# return response.SerializeToString()
#
# if session.until < datetime.datetime.now():
# response.successful = False
# response.data = b'Session expired, restart your client'
# return response.SerializeToString()
#
# # Select the function from the handler
# try:
# print('Handling message', message)
# successful, result = getattr(handler, message.request)(message)
# except Exception as exc:
# successful = False
# result = b'Exception in user function: ' + repr(exc).encode('utf-8')
#
# response.successful = successful
# response.data = result
# return response.SerializeToString()
#
# Path: pyledger/server/contract.py
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# Path: pyledger/server/db.py
# DB = Handler()
#
# Path: pyledger/server/auth.py
# def create_master(password):
# def create_user(name, password):
# def allow(permission):
# def decorator(func):
# def method_allow(permission):
# def decorator(func):
. Output only the next line.
DB.sync_tables()
Continue the code snippet: <|code_start|># Pyledger. A simple ledger for smart contracts implemented in Python
# Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def test_clientlib_call_request():
request = call_request(call='add_account', user='master',
password='password', contract='AuthDigitalCurrency',
data={'key': 'another_account'})
<|code_end|>
. Use current file imports:
from pyledger.client.lib import *
from pyledger.server.handlers import handle_request
and context (classes, functions, or code) from other files:
# Path: pyledger/server/handlers.py
# def handle_request(payload: bytes):
# """
# Handle a single request
#
# :param payload: Serialized PyledgerRequest message
# :return:
# """
# handler = Handler()
# message = PyledgerRequest()
# response = PyledgerResponse()
#
# try:
# message.ParseFromString(payload)
# except DecodeError:
# response.successful = False
# response.data = b'Message not properly formatted'
# return response.SerializeToString()
#
# if message.request not in handler_methods(handler):
# response.successful = False
# response.data = 'Request type {} not available'.format(message.request).encode()
# return response.SerializeToString()
#
# else:
# # Handle authentication
# if message.request in permissions_registry:
# user = User.from_name(message.user)
# permission_required = permissions_registry[message.request]
#
# if not user.check_password(message.password):
# response.successful = False
# response.data = b'Wrong user and/or password'
# return response.SerializeToString()
#
# if user.get_permissions().value > permission_required.value:
# response.successful = False
# response.data = b'Not enough permissions'
# return response.SerializeToString()
#
# session = Session.from_key(message.session_key)
#
# if not session:
# response.successful = False
# response.data = b'Session not available'
# return response.SerializeToString()
#
# if not session.user == user:
# response.successful = False
# response.data = b'Session not owned by this user'
# return response.SerializeToString()
#
# if session.until < datetime.datetime.now():
# response.successful = False
# response.data = b'Session expired, restart your client'
# return response.SerializeToString()
#
# # Select the function from the handler
# try:
# print('Handling message', message)
# successful, result = getattr(handler, message.request)(message)
# except Exception as exc:
# successful = False
# result = b'Exception in user function: ' + repr(exc).encode('utf-8')
#
# response.successful = successful
# response.data = result
# return response.SerializeToString()
. Output only the next line.
response = handle_request(request)
Predict the next line after this snippet: <|code_start|>
permissions_registry = {}
method_permissions_registry = {}
def create_master(password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=SECRET,
iterations=1000000,
<|code_end|>
using the current file's imports:
import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from pyledger.server.config import password_backend, SECRET
from pyledger.server.db import User, DB, Permissions
and any relevant context from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/db.py
# class User(Model):
# __tablename__ = 'users'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# when = Column(DateTime)
# info = Column(LargeBinary)
# key = Column(String)
# password = Column(String)
# profile = Column(Integer)
# contracts = relationship("Contract", back_populates="user")
# sessions = relationship("Session", back_populates='user')
#
# def __repr__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def __str__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def set_password(self, password):
# self.password = base64.b64encode(password)
#
# def get_password(self):
# return base64.b64decode(self.password)
#
# def check_password(self, password):
# kpdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=SECRET,
# iterations=1000000,
# backend=password_backend
# )
# try:
# kpdf.verify(password.encode('utf-8'), self.get_password())
# correct = True
# except InvalidKey as e:
# print(e)
# correct = False
#
# return correct
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return User.query().filter(User.name == name).one_or_none()
#
# def get_permissions(self):
# return Permissions(self.profile)
#
# def set_permissions(self, permissions):
# self.profile = permissions.value
#
# DB = Handler()
#
# class Permissions(Enum):
# ROOT = 1
# USER = 2
# ANON = 3
. Output only the next line.
backend=password_backend
Based on the snippet: <|code_start|>
permissions_registry = {}
method_permissions_registry = {}
def create_master(password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from pyledger.server.config import password_backend, SECRET
from pyledger.server.db import User, DB, Permissions
and context (classes, functions, sometimes code) from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/db.py
# class User(Model):
# __tablename__ = 'users'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# when = Column(DateTime)
# info = Column(LargeBinary)
# key = Column(String)
# password = Column(String)
# profile = Column(Integer)
# contracts = relationship("Contract", back_populates="user")
# sessions = relationship("Session", back_populates='user')
#
# def __repr__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def __str__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def set_password(self, password):
# self.password = base64.b64encode(password)
#
# def get_password(self):
# return base64.b64decode(self.password)
#
# def check_password(self, password):
# kpdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=SECRET,
# iterations=1000000,
# backend=password_backend
# )
# try:
# kpdf.verify(password.encode('utf-8'), self.get_password())
# correct = True
# except InvalidKey as e:
# print(e)
# correct = False
#
# return correct
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return User.query().filter(User.name == name).one_or_none()
#
# def get_permissions(self):
# return Permissions(self.profile)
#
# def set_permissions(self, permissions):
# self.profile = permissions.value
#
# DB = Handler()
#
# class Permissions(Enum):
# ROOT = 1
# USER = 2
# ANON = 3
. Output only the next line.
salt=SECRET,
Next line prediction: <|code_start|>
permissions_registry = {}
method_permissions_registry = {}
def create_master(password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=SECRET,
iterations=1000000,
backend=password_backend
)
<|code_end|>
. Use current file imports:
(import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from pyledger.server.config import password_backend, SECRET
from pyledger.server.db import User, DB, Permissions)
and context including class names, function names, or small code snippets from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/db.py
# class User(Model):
# __tablename__ = 'users'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# when = Column(DateTime)
# info = Column(LargeBinary)
# key = Column(String)
# password = Column(String)
# profile = Column(Integer)
# contracts = relationship("Contract", back_populates="user")
# sessions = relationship("Session", back_populates='user')
#
# def __repr__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def __str__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def set_password(self, password):
# self.password = base64.b64encode(password)
#
# def get_password(self):
# return base64.b64decode(self.password)
#
# def check_password(self, password):
# kpdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=SECRET,
# iterations=1000000,
# backend=password_backend
# )
# try:
# kpdf.verify(password.encode('utf-8'), self.get_password())
# correct = True
# except InvalidKey as e:
# print(e)
# correct = False
#
# return correct
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return User.query().filter(User.name == name).one_or_none()
#
# def get_permissions(self):
# return Permissions(self.profile)
#
# def set_permissions(self, permissions):
# self.profile = permissions.value
#
# DB = Handler()
#
# class Permissions(Enum):
# ROOT = 1
# USER = 2
# ANON = 3
. Output only the next line.
master_user = User()
Predict the next line for this snippet: <|code_start|>
permissions_registry = {}
method_permissions_registry = {}
def create_master(password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=SECRET,
iterations=1000000,
backend=password_backend
)
master_user = User()
master_user.name = 'master'
master_user.when = datetime.datetime.now()
master_user.set_permissions(Permissions.ROOT)
master_user.set_password(kpdf.derive(password.encode('utf-8')))
<|code_end|>
with the help of current file imports:
import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from pyledger.server.config import password_backend, SECRET
from pyledger.server.db import User, DB, Permissions
and context from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/db.py
# class User(Model):
# __tablename__ = 'users'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# when = Column(DateTime)
# info = Column(LargeBinary)
# key = Column(String)
# password = Column(String)
# profile = Column(Integer)
# contracts = relationship("Contract", back_populates="user")
# sessions = relationship("Session", back_populates='user')
#
# def __repr__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def __str__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def set_password(self, password):
# self.password = base64.b64encode(password)
#
# def get_password(self):
# return base64.b64decode(self.password)
#
# def check_password(self, password):
# kpdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=SECRET,
# iterations=1000000,
# backend=password_backend
# )
# try:
# kpdf.verify(password.encode('utf-8'), self.get_password())
# correct = True
# except InvalidKey as e:
# print(e)
# correct = False
#
# return correct
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return User.query().filter(User.name == name).one_or_none()
#
# def get_permissions(self):
# return Permissions(self.profile)
#
# def set_permissions(self, permissions):
# self.profile = permissions.value
#
# DB = Handler()
#
# class Permissions(Enum):
# ROOT = 1
# USER = 2
# ANON = 3
, which may contain function names, class names, or code. Output only the next line.
DB.session.add(master_user)
Here is a snippet: <|code_start|>
permissions_registry = {}
method_permissions_registry = {}
def create_master(password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=SECRET,
iterations=1000000,
backend=password_backend
)
master_user = User()
master_user.name = 'master'
master_user.when = datetime.datetime.now()
<|code_end|>
. Write the next line using the current file imports:
import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from pyledger.server.config import password_backend, SECRET
from pyledger.server.db import User, DB, Permissions
and context from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/db.py
# class User(Model):
# __tablename__ = 'users'
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# when = Column(DateTime)
# info = Column(LargeBinary)
# key = Column(String)
# password = Column(String)
# profile = Column(Integer)
# contracts = relationship("Contract", back_populates="user")
# sessions = relationship("Session", back_populates='user')
#
# def __repr__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def __str__(self):
# return '<User {} with key: {}>'.format(self.name, self.key)
#
# def set_password(self, password):
# self.password = base64.b64encode(password)
#
# def get_password(self):
# return base64.b64decode(self.password)
#
# def check_password(self, password):
# kpdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=SECRET,
# iterations=1000000,
# backend=password_backend
# )
# try:
# kpdf.verify(password.encode('utf-8'), self.get_password())
# correct = True
# except InvalidKey as e:
# print(e)
# correct = False
#
# return correct
#
# @classmethod
# def query(cls):
# return DB.session.query(cls)
#
# @staticmethod
# def from_name(name):
# return User.query().filter(User.name == name).one_or_none()
#
# def get_permissions(self):
# return Permissions(self.profile)
#
# def set_permissions(self, permissions):
# self.profile = permissions.value
#
# DB = Handler()
#
# class Permissions(Enum):
# ROOT = 1
# USER = 2
# ANON = 3
, which may include functions, classes, or code. Output only the next line.
master_user.set_permissions(Permissions.ROOT)
Continue the code snippet: <|code_start|>
async def stdio(loop=None):
if loop is None:
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(reader)
writer_transport, writer_protocol = await loop.connect_write_pipe(
FlowControlMixin, os.fdopen(0, 'wb'))
writer = StreamWriter(writer_transport, writer_protocol, None, loop)
await loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
return reader, writer
async def async_input(message, protocol):
if isinstance(message, str):
message = message.encode('utf8')
global reader, writer
if (reader, writer) == (None, None):
reader, writer = await stdio()
writer.write(message)
await writer.drain()
line = await reader.readline()
# This is where everything happens in the client side
<|code_end|>
. Use current file imports:
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from asyncio.streams import StreamWriter, FlowControlMixin
from pyledger.client.repl import parse
from pyledger.client.lib import handle_response
from uuid import uuid4
from pprint import pprint
import os
import sys
import asyncio
and context (classes, functions, or code) from other files:
# Path: pyledger/client/repl.py
# def disconnect(*args, protocol=None):
# def contracts(*args, protocol=None):
# def api(*args, protocol=None):
# def broadcast(*args, protocol=None):
# def call(*args, protocol=None):
# def general_parser(line, protocol=None, instruction_dict=None,
# user_instruction_dict=None):
#
# Path: pyledger/client/lib.py
# def handle_response(bin_response, callback=None):
# response = PyledgerResponse()
# response.ParseFromString(bin_response)
#
# if response.successful:
# if callback:
# response_data = pickle.loads(response.data)
# print('Executing callback...')
# callback(response_data)
# return True, response_data
# else:
# return True, pickle.loads(response.data)
#
# else:
# return False, response.data.decode('utf-8')
. Output only the next line.
return parse(line, protocol=protocol)
Given snippet: <|code_start|> print("Pyledger REPL client, write 'help' for help or 'help command' "
"for help on a specific command")
while True:
success, message = await async_input('PL >>> ', self)
if success:
# Create topic for subscription.
if message.startswith(36*b'0'):
topic = message[:36]
message = message[36:]
else:
topic = str(uuid4()).encode()
self.topics.append(topic)
self.sendMessage(topic + message, isBinary=True)
else:
print(message)
if message == 'Successfully closed, you can kill this with Ctrl-C':
break
await asyncio.sleep(0.1)
def onMessage(self, payload, isBinary):
topic = payload[:36]
payload = payload[36:]
# 36 zero-bytes means broadcast
if topic in self.topics or topic == 36*b'0':
if topic != 36*b'0':
self.topics.remove(topic)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from asyncio.streams import StreamWriter, FlowControlMixin
from pyledger.client.repl import parse
from pyledger.client.lib import handle_response
from uuid import uuid4
from pprint import pprint
import os
import sys
import asyncio
and context:
# Path: pyledger/client/repl.py
# def disconnect(*args, protocol=None):
# def contracts(*args, protocol=None):
# def api(*args, protocol=None):
# def broadcast(*args, protocol=None):
# def call(*args, protocol=None):
# def general_parser(line, protocol=None, instruction_dict=None,
# user_instruction_dict=None):
#
# Path: pyledger/client/lib.py
# def handle_response(bin_response, callback=None):
# response = PyledgerResponse()
# response.ParseFromString(bin_response)
#
# if response.successful:
# if callback:
# response_data = pickle.loads(response.data)
# print('Executing callback...')
# callback(response_data)
# return True, response_data
# else:
# return True, pickle.loads(response.data)
#
# else:
# return False, response.data.decode('utf-8')
which might include code, classes, or functions. Output only the next line.
success, response = handle_response(payload)
Continue the code snippet: <|code_start|>class User(Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
when = Column(DateTime)
info = Column(LargeBinary)
key = Column(String)
password = Column(String)
profile = Column(Integer)
contracts = relationship("Contract", back_populates="user")
sessions = relationship("Session", back_populates='user')
def __repr__(self):
return '<User {} with key: {}>'.format(self.name, self.key)
def __str__(self):
return '<User {} with key: {}>'.format(self.name, self.key)
def set_password(self, password):
self.password = base64.b64encode(password)
def get_password(self):
return base64.b64decode(self.password)
def check_password(self, password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=SECRET,
iterations=1000000,
<|code_end|>
. Use current file imports:
import base64
from enum import Enum
from cryptography.exceptions import InvalidKey
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, \
LargeBinary
from sqlalchemy import create_engine, desc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker, scoped_session
from pyledger.server.config import args
from pyledger.server.config import password_backend, SECRET
and context (classes, functions, or code) from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
. Output only the next line. | backend=password_backend |
Continue the code snippet: <|code_start|>
class User(Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
when = Column(DateTime)
info = Column(LargeBinary)
key = Column(String)
password = Column(String)
profile = Column(Integer)
contracts = relationship("Contract", back_populates="user")
sessions = relationship("Session", back_populates='user')
def __repr__(self):
return '<User {} with key: {}>'.format(self.name, self.key)
def __str__(self):
return '<User {} with key: {}>'.format(self.name, self.key)
def set_password(self, password):
self.password = base64.b64encode(password)
def get_password(self):
return base64.b64decode(self.password)
def check_password(self, password):
kpdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
<|code_end|>
. Use current file imports:
import base64
from enum import Enum
from cryptography.exceptions import InvalidKey
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, \
LargeBinary
from sqlalchemy import create_engine, desc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker, scoped_session
from pyledger.server.config import args
from pyledger.server.config import password_backend, SECRET
and context (classes, functions, or code) from other files:
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
#
# Path: pyledger/server/config.py
# SECRET = b'test'
# LIFETIME = 1
. Output only the next line. | salt=SECRET, |
Next line prediction: <|code_start|> assert signatures(contract) == {
'add_account': Signature(parameters=[
Parameter('key', Parameter.POSITIONAL_OR_KEYWORD, annotation=str)]),
'balance': Signature(parameters=[
Parameter('key', Parameter.POSITIONAL_OR_KEYWORD, annotation=str)]),
'increment': Signature(parameters=[
Parameter('key', Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
Parameter('quantity', Parameter.POSITIONAL_OR_KEYWORD, annotation=float)
]),
'transfer': Signature(parameters=[
Parameter('source', Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
Parameter('dest', Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
Parameter('quantity', Parameter.POSITIONAL_OR_KEYWORD, annotation=float)
])
}
contract.add_account('key1')
contract.increment('key1', 100.0)
assert contract.balance('key1') == '100.0'
def test_register_contract():
class MyContract(SimpleContract):
counter = 0
def greet(self, name: str):
self.counter += 1
return "hello, " + name
this_contract = MyContract()
<|code_end|>
. Use current file imports:
(from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus)
and context including class names, function names, or small code snippets from other files:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | register_contract(this_contract) |
Based on the snippet: <|code_start|> raise Exception('Account already exists')
self.accounts[key] = 0.0
def increment(self, key: str, quantity: float):
if key not in self.accounts:
raise Exception('Account not found')
self.accounts[key] += quantity
def transfer(self, source: str, dest: str, quantity: float):
if source not in self.accounts:
raise Exception('Source account not found')
if dest not in self.accounts:
raise Exception('Destination account not found')
if self.accounts[source] < quantity:
raise Exception('Not enough funds in source account')
self.accounts[source] -= quantity
self.accounts[dest] += quantity
def balance(self, key: str):
if key not in self.accounts:
print(self.accounts)
raise Exception('Account not found')
return str(self.accounts[key])
contract = DigitalCurrency()
<|code_end|>
, predict the immediate next line with the help of imports:
from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus
and context (classes, functions, sometimes code) from other files:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | assert [k for k in methods(contract)] == [ |
Next line prediction: <|code_start|>
def increment(self, key: str, quantity: float):
if key not in self.accounts:
raise Exception('Account not found')
self.accounts[key] += quantity
def transfer(self, source: str, dest: str, quantity: float):
if source not in self.accounts:
raise Exception('Source account not found')
if dest not in self.accounts:
raise Exception('Destination account not found')
if self.accounts[source] < quantity:
raise Exception('Not enough funds in source account')
self.accounts[source] -= quantity
self.accounts[dest] += quantity
def balance(self, key: str):
if key not in self.accounts:
print(self.accounts)
raise Exception('Account not found')
return str(self.accounts[key])
contract = DigitalCurrency()
assert [k for k in methods(contract)] == [
'add_account', 'balance', 'increment', 'transfer']
<|code_end|>
. Use current file imports:
(from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus)
and context including class names, function names, or small code snippets from other files:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | assert api(contract) == { |
Based on the snippet: <|code_start|> def transfer(self, source: str, dest: str, quantity: float):
if source not in self.accounts:
raise Exception('Source account not found')
if dest not in self.accounts:
raise Exception('Destination account not found')
if self.accounts[source] < quantity:
raise Exception('Not enough funds in source account')
self.accounts[source] -= quantity
self.accounts[dest] += quantity
def balance(self, key: str):
if key not in self.accounts:
print(self.accounts)
raise Exception('Account not found')
return str(self.accounts[key])
contract = DigitalCurrency()
assert [k for k in methods(contract)] == [
'add_account', 'balance', 'increment', 'transfer']
assert api(contract) == {
'add_account': {'key': str},
'balance': {'key': str},
'increment': {'key': str, 'quantity': float},
'transfer': {'dest': str, 'quantity': float, 'source': str}
}
<|code_end|>
, predict the immediate next line with the help of imports:
from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus
and context (classes, functions, sometimes code) from other files:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | assert signatures(contract) == { |
Using the snippet: <|code_start|># Pyledger. A simple ledger for smart contracts implemented in Python
# Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def test_contract_status():
"""Check if returns a status"""
class MyContract(SimpleContract):
counter = 0
def greet(self, name: str):
self.counter += 1
return "hello, " + name
this_contract = MyContract()
<|code_end|>
, determine the next line of code. You have imports:
from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus
and context (class names, function names, or code) available:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | this_contract_status = status(this_contract) |
Predict the next line after this snippet: <|code_start|># Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def test_contract_status():
"""Check if returns a status"""
class MyContract(SimpleContract):
counter = 0
def greet(self, name: str):
self.counter += 1
return "hello, " + name
this_contract = MyContract()
this_contract_status = status(this_contract)
<|code_end|>
using the current file's imports:
from inspect import Signature, Parameter
from pyledger.server.contract import SimpleContract, register_contract, \
methods, api, signatures, status
from pyledger.server.status import BaseStatus
and any relevant context from other files:
# Path: pyledger/server/contract.py
# class SimpleContract(BaseContract):
# """
# Contract that uses SimpleStatus for serialization.
#
# The goal of this class is to make a contact feel just like a Python class.
# """
# _status_class = SimpleStatus
#
# def register_contract(contract, description=''):
# """
# Register a contract and make it
# :param contract:
# :param description:
# :return:
# """
# global contract_registry
#
# if contract.__class__.__name__ in contract_registry:
# raise ValueError('A contract with the same name already registered')
# else:
# contract_registry[contract.__class__.__name__] = contract
#
# db_contract = Contract()
# db_contract.name = contract.__class__.__name__
# db_contract.created = datetime.datetime.now()
# db_contract.description = description
#
# first_status = Status()
# first_status.contract = db_contract
# first_status.when = datetime.datetime.now()
# first_status.attributes = status(contract).dump()
# # Genesis key is the name of the contract
# first_status.key = contract.__class__.__name__.encode('utf-8')
#
# DB.session.add(db_contract)
# DB.session.add(first_status)
# DB.session.commit()
#
# def methods(contract):
# """
# Obtain methods from the contract
#
# :param contract:
# :return:
# """
# methods = {}
#
# for name, function in inspect.getmembers(contract,
# predicate=inspect.ismethod):
# if not name == '__init__':
# methods[name] = function
#
# return methods
#
# def api(contract):
# api_spec = {}
# contract_methods = methods(contract)
# for method in contract_methods:
# function_spec = {}
# sig = inspect.signature(contract_methods[method])
# for param in sig.parameters:
# function_spec[param] = sig.parameters[param].annotation
#
# api_spec[method] = function_spec
#
# return api_spec
#
# def signatures(contract):
# contract_signatures = {}
# contract_methods = methods(contract)
# for k, method in contract_methods.items():
# contract_signatures[k] = inspect.signature(method)
#
# return contract_signatures
#
# def status(contract):
# all_attributes = inspect.getmembers(
# contract,
# predicate=lambda a: not(inspect.isroutine(a)))
#
# attributes = {}
#
# for attribute in all_attributes:
# if not attribute[0].startswith('_'):
# attributes[attribute[0]] = attribute[1]
#
# return contract._status_class(**attributes)
#
# Path: pyledger/server/status.py
# class BaseStatus(abc.ABC):
# """
# Status abstract class
# """
# @abc.abstractmethod
# def dump(self):
# pass
#
# @abc.abstractmethod
# def load(self, dump: bytes):
# pass
#
# @abc.abstractmethod
# def to_dict(self):
# pass
. Output only the next line. | assert isinstance(this_contract_status, BaseStatus) == True |
Here is a snippet: <|code_start|>
class Voter(State):
def __init__(self, timeout=1.0):
super(Voter, self).__init__(timeout=timeout)
self._last_vote = None
def on_vote_request(self, message):
if(self._last_vote is None and
message.data["lastLogIndex"] >= self._server._lastLogIndex):
self._last_vote = message.sender
self._send_vote_response_message(message)
else:
self._send_vote_response_message(message, yes=False)
return self, None
def _send_vote_response_message(self, msg, yes=True):
<|code_end|>
. Write the next line using the current file imports:
from .state import State
from ..messages.request_vote import RequestVoteResponseMessage
from ..messages.client import ClientFollowerResponse
and context from other files:
# Path: src/lifeRaft/states/state.py
# class State(object):
#
# def __init__(self, timeout=1.0):
# self._timeout = timeout
#
# def set_server(self, server):
# self._server = server
#
# def on_message(self, message):
# """This method is called when a message is received,
# and calls one of the other corrosponding methods
# that this state reacts to.
#
# """
# _type = message.type
# # Convert to follower if heigher term received
# if(message.term > self._server._currentTerm):
# self._server._currentTerm = message.term
# from .follower import Follower
# if not issubclass(type(self), Follower):
# follower = Follower(timeout=self._timeout)
# follower.set_server(self._server)
# return follower.on_message(message)
#
# # Is the messages.term < ours? If so we need to tell
# # them this so they don't get left behind.
# elif(message.term < self._server._currentTerm):
# self._send_response_message(message, yes=False)
# return self, None
#
# if(_type == BaseMessage.AppendEntries):
# return self.on_append_entries(message)
# elif(_type == BaseMessage.RequestVote):
# return self.on_vote_request(message)
# elif(_type == BaseMessage.RequestVoteResponse):
# return self.on_vote_received(message)
# elif(_type == BaseMessage.Response):
# return self.on_response_received(message)
#
# def on_leader_timeout(self):
# """This is called when the leader timeout is reached."""
# raise NotImplemented
#
# def on_vote_request(self, message):
# """This is called when there is a vote request."""
# return self, None
#
# def on_vote_received(self, message):
# """This is called when this node recieves a vote."""
# return self, None
#
# def on_append_entries(self, message):
# """This is called when there is a request to
# append an entry to the log.
# """
# raise NotImplemented
#
# def on_response_received(self, message):
# """This is called when a response is sent back to the Leader"""
# return self, None
#
# def on_client_command(self, message):
# """This is called when there is a client request."""
# raise NotImplemented
#
# @property
# def timeout(self):
# return random.uniform(self._timeout, 2.0 * self._timeout)
#
# def _send_response_message(self, msg, yes=True):
# response = ResponseMessage(self._server._name, msg.sender, msg.term, {
# "response": yes,
# "currentTerm": self._server._currentTerm,
# })
# self._server.send_message_response(response)
#
# Path: src/lifeRaft/messages/request_vote.py
# class RequestVoteResponseMessage(BaseMessage):
#
# _type = BaseMessage.RequestVoteResponse
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
#
# Path: src/lifeRaft/messages/client.py
# class ClientFollowerResponse(ClientResponse):
# def __init__(self, receiver_id, leader_id):
# super(ClientFollowerResponse, self).__init__(receiver_id, leader_id, False)
, which may include functions, classes, or code. Output only the next line. | voteResponse = RequestVoteResponseMessage( |
Given the code snippet: <|code_start|>
class Voter(State):
def __init__(self, timeout=1.0):
super(Voter, self).__init__(timeout=timeout)
self._last_vote = None
def on_vote_request(self, message):
if(self._last_vote is None and
message.data["lastLogIndex"] >= self._server._lastLogIndex):
self._last_vote = message.sender
self._send_vote_response_message(message)
else:
self._send_vote_response_message(message, yes=False)
return self, None
def _send_vote_response_message(self, msg, yes=True):
voteResponse = RequestVoteResponseMessage(
self._server._name,
msg.sender,
msg.term,
{"response": yes})
self._server.send_message_response(voteResponse)
def on_client_message(self, message):
''' return the message response for the client '''
<|code_end|>
, generate the next line using the imports in this file:
from .state import State
from ..messages.request_vote import RequestVoteResponseMessage
from ..messages.client import ClientFollowerResponse
and context (functions, classes, or occasionally code) from other files:
# Path: src/lifeRaft/states/state.py
# class State(object):
#
# def __init__(self, timeout=1.0):
# self._timeout = timeout
#
# def set_server(self, server):
# self._server = server
#
# def on_message(self, message):
# """This method is called when a message is received,
# and calls one of the other corrosponding methods
# that this state reacts to.
#
# """
# _type = message.type
# # Convert to follower if heigher term received
# if(message.term > self._server._currentTerm):
# self._server._currentTerm = message.term
# from .follower import Follower
# if not issubclass(type(self), Follower):
# follower = Follower(timeout=self._timeout)
# follower.set_server(self._server)
# return follower.on_message(message)
#
# # Is the messages.term < ours? If so we need to tell
# # them this so they don't get left behind.
# elif(message.term < self._server._currentTerm):
# self._send_response_message(message, yes=False)
# return self, None
#
# if(_type == BaseMessage.AppendEntries):
# return self.on_append_entries(message)
# elif(_type == BaseMessage.RequestVote):
# return self.on_vote_request(message)
# elif(_type == BaseMessage.RequestVoteResponse):
# return self.on_vote_received(message)
# elif(_type == BaseMessage.Response):
# return self.on_response_received(message)
#
# def on_leader_timeout(self):
# """This is called when the leader timeout is reached."""
# raise NotImplemented
#
# def on_vote_request(self, message):
# """This is called when there is a vote request."""
# return self, None
#
# def on_vote_received(self, message):
# """This is called when this node recieves a vote."""
# return self, None
#
# def on_append_entries(self, message):
# """This is called when there is a request to
# append an entry to the log.
# """
# raise NotImplemented
#
# def on_response_received(self, message):
# """This is called when a response is sent back to the Leader"""
# return self, None
#
# def on_client_command(self, message):
# """This is called when there is a client request."""
# raise NotImplemented
#
# @property
# def timeout(self):
# return random.uniform(self._timeout, 2.0 * self._timeout)
#
# def _send_response_message(self, msg, yes=True):
# response = ResponseMessage(self._server._name, msg.sender, msg.term, {
# "response": yes,
# "currentTerm": self._server._currentTerm,
# })
# self._server.send_message_response(response)
#
# Path: src/lifeRaft/messages/request_vote.py
# class RequestVoteResponseMessage(BaseMessage):
#
# _type = BaseMessage.RequestVoteResponse
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
#
# Path: src/lifeRaft/messages/client.py
# class ClientFollowerResponse(ClientResponse):
# def __init__(self, receiver_id, leader_id):
# super(ClientFollowerResponse, self).__init__(receiver_id, leader_id, False)
. Output only the next line. | return ClientFollowerResponse(message.sender, self._last_vote) |
Given the code snippet: <|code_start|> self._nextIndexes[n._name] = self._server._lastLogIndex + 1
self._matchIndex[n._name] = 0
def on_append_entries(self, message):
if message.sender == self._server._name:
# bcast
return self, None
if message.term > self._server._currentTerm:
## If we're getting append messages from the future, we're not the leader
follower = Follower()
follower.set_server(self._server)
return follower, None
raise RuntimeError
def on_vote_received(self, message):
''' Well I find it dandy you're still voting for me '''
return self, None
def on_response_received(self, message):
# Was the last AppendEntries good?
if(not message.data["response"]):
# No, so lets back up the log for this node
self._nextIndexes[message.sender] -= 1
# Get the next log entry to send to the client.
previousIndex = max(0, self._nextIndexes[message.sender] - 1)
previous = self._server._log[previousIndex]
current = self._server._log[self._nextIndexes[message.sender]]
# Send the new log to the client and wait for it to respond.
<|code_end|>
, generate the next line using the imports in this file:
from collections import defaultdict
from .voter import Voter
from ..messages.append_entries import AppendEntriesMessage
from ..messages.client import ClientMessage, ClientLeaderResponse
from .follower import Follower
and context (functions, classes, or occasionally code) from other files:
# Path: src/lifeRaft/states/voter.py
# class Voter(State):
#
# def __init__(self, timeout=1.0):
# super(Voter, self).__init__(timeout=timeout)
# self._last_vote = None
#
# def on_vote_request(self, message):
# if(self._last_vote is None and
# message.data["lastLogIndex"] >= self._server._lastLogIndex):
# self._last_vote = message.sender
# self._send_vote_response_message(message)
# else:
# self._send_vote_response_message(message, yes=False)
#
# return self, None
#
# def _send_vote_response_message(self, msg, yes=True):
# voteResponse = RequestVoteResponseMessage(
# self._server._name,
# msg.sender,
# msg.term,
# {"response": yes})
# self._server.send_message_response(voteResponse)
#
# def on_client_message(self, message):
# ''' return the message response for the client '''
# return ClientFollowerResponse(message.sender, self._last_vote)
#
# Path: src/lifeRaft/messages/append_entries.py
# class AppendEntriesMessage(BaseMessage):
#
# _type = BaseMessage.AppendEntries
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
#
# Path: src/lifeRaft/messages/client.py
# class ClientMessage(object):
# # Types
# Leader = 0
# Append = 1
#
# def __init__(self, sender, receiver_host, receiver_port, command):
# self._snd_id = sender
# self._rcv_host = receiver_host
# self._rcv_port = receiver_port
# self._command = command
#
# @property
# def command(self):
# return self._command
#
# @property
# def sender(self):
# return self._snd_id
#
# @property
# def receiver_host(self):
# return self._receiver_host
#
# @property
# def receiver_port(self):
# return self._receiver_port
#
# class ClientLeaderResponse(ClientResponse):
# def __init__(self, receiver_id, leader_id, response):
# super(ClientLeaderResponse, self).__init__(receiver_id, leader_id, response)
. Output only the next line. | appendEntry = AppendEntriesMessage( |
Given the code snippet: <|code_start|> if(self._nextIndexes[message.sender] > self._server._lastLogIndex):
self._nextIndexes[message.sender] = self._server._lastLogIndex
return self, None
@property
def timeout(self):
''' Nyquist rate on avg election timeout; bcast freq >= 3x timeout freq ==> no timeouts '''
return self._timeout * .75
def on_leader_timeout(self):
''' While leader, bcast heartbeat on timeout '''
self._send_heart_beat()
return self, None
def _send_heart_beat(self):
message = AppendEntriesMessage(
self._server._name,
None,
self._server._currentTerm,
{
"leaderId": self._server._name,
"prevLogIndex": self._server._lastLogIndex,
"prevLogTerm": self._server._lastLogTerm,
"entries": [],
"leaderCommit": self._server._commitIndex,
})
self._server.send_message(message)
def on_client_message(self, message):
<|code_end|>
, generate the next line using the imports in this file:
from collections import defaultdict
from .voter import Voter
from ..messages.append_entries import AppendEntriesMessage
from ..messages.client import ClientMessage, ClientLeaderResponse
from .follower import Follower
and context (functions, classes, or occasionally code) from other files:
# Path: src/lifeRaft/states/voter.py
# class Voter(State):
#
# def __init__(self, timeout=1.0):
# super(Voter, self).__init__(timeout=timeout)
# self._last_vote = None
#
# def on_vote_request(self, message):
# if(self._last_vote is None and
# message.data["lastLogIndex"] >= self._server._lastLogIndex):
# self._last_vote = message.sender
# self._send_vote_response_message(message)
# else:
# self._send_vote_response_message(message, yes=False)
#
# return self, None
#
# def _send_vote_response_message(self, msg, yes=True):
# voteResponse = RequestVoteResponseMessage(
# self._server._name,
# msg.sender,
# msg.term,
# {"response": yes})
# self._server.send_message_response(voteResponse)
#
# def on_client_message(self, message):
# ''' return the message response for the client '''
# return ClientFollowerResponse(message.sender, self._last_vote)
#
# Path: src/lifeRaft/messages/append_entries.py
# class AppendEntriesMessage(BaseMessage):
#
# _type = BaseMessage.AppendEntries
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
#
# Path: src/lifeRaft/messages/client.py
# class ClientMessage(object):
# # Types
# Leader = 0
# Append = 1
#
# def __init__(self, sender, receiver_host, receiver_port, command):
# self._snd_id = sender
# self._rcv_host = receiver_host
# self._rcv_port = receiver_port
# self._command = command
#
# @property
# def command(self):
# return self._command
#
# @property
# def sender(self):
# return self._snd_id
#
# @property
# def receiver_host(self):
# return self._receiver_host
#
# @property
# def receiver_port(self):
# return self._receiver_port
#
# class ClientLeaderResponse(ClientResponse):
# def __init__(self, receiver_id, leader_id, response):
# super(ClientLeaderResponse, self).__init__(receiver_id, leader_id, response)
. Output only the next line. | if message.command == ClientMessage.Leader: |
Given the following code snippet before the placeholder: <|code_start|> self._nextIndexes[message.sender] = self._server._lastLogIndex
return self, None
@property
def timeout(self):
''' Nyquist rate on avg election timeout; bcast freq >= 3x timeout freq ==> no timeouts '''
return self._timeout * .75
def on_leader_timeout(self):
''' While leader, bcast heartbeat on timeout '''
self._send_heart_beat()
return self, None
def _send_heart_beat(self):
message = AppendEntriesMessage(
self._server._name,
None,
self._server._currentTerm,
{
"leaderId": self._server._name,
"prevLogIndex": self._server._lastLogIndex,
"prevLogTerm": self._server._lastLogTerm,
"entries": [],
"leaderCommit": self._server._commitIndex,
})
self._server.send_message(message)
def on_client_message(self, message):
if message.command == ClientMessage.Leader:
<|code_end|>
, predict the next line using imports from the current file:
from collections import defaultdict
from .voter import Voter
from ..messages.append_entries import AppendEntriesMessage
from ..messages.client import ClientMessage, ClientLeaderResponse
from .follower import Follower
and context including class names, function names, and sometimes code from other files:
# Path: src/lifeRaft/states/voter.py
# class Voter(State):
#
# def __init__(self, timeout=1.0):
# super(Voter, self).__init__(timeout=timeout)
# self._last_vote = None
#
# def on_vote_request(self, message):
# if(self._last_vote is None and
# message.data["lastLogIndex"] >= self._server._lastLogIndex):
# self._last_vote = message.sender
# self._send_vote_response_message(message)
# else:
# self._send_vote_response_message(message, yes=False)
#
# return self, None
#
# def _send_vote_response_message(self, msg, yes=True):
# voteResponse = RequestVoteResponseMessage(
# self._server._name,
# msg.sender,
# msg.term,
# {"response": yes})
# self._server.send_message_response(voteResponse)
#
# def on_client_message(self, message):
# ''' return the message response for the client '''
# return ClientFollowerResponse(message.sender, self._last_vote)
#
# Path: src/lifeRaft/messages/append_entries.py
# class AppendEntriesMessage(BaseMessage):
#
# _type = BaseMessage.AppendEntries
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
#
# Path: src/lifeRaft/messages/client.py
# class ClientMessage(object):
# # Types
# Leader = 0
# Append = 1
#
# def __init__(self, sender, receiver_host, receiver_port, command):
# self._snd_id = sender
# self._rcv_host = receiver_host
# self._rcv_port = receiver_port
# self._command = command
#
# @property
# def command(self):
# return self._command
#
# @property
# def sender(self):
# return self._snd_id
#
# @property
# def receiver_host(self):
# return self._receiver_host
#
# @property
# def receiver_port(self):
# return self._receiver_port
#
# class ClientLeaderResponse(ClientResponse):
# def __init__(self, receiver_id, leader_id, response):
# super(ClientLeaderResponse, self).__init__(receiver_id, leader_id, response)
. Output only the next line. | return ClientLeaderResponse(message.sender, self._server._name, True) |
Given the following code snippet before the placeholder: <|code_start|>
class State(object):
def __init__(self, timeout=1.0):
self._timeout = timeout
def set_server(self, server):
self._server = server
def on_message(self, message):
"""This method is called when a message is received,
and calls one of the other corrosponding methods
that this state reacts to.
"""
_type = message.type
# Convert to follower if heigher term received
if(message.term > self._server._currentTerm):
self._server._currentTerm = message.term
if not issubclass(type(self), Follower):
follower = Follower(timeout=self._timeout)
follower.set_server(self._server)
return follower.on_message(message)
# Is the messages.term < ours? If so we need to tell
# them this so they don't get left behind.
elif(message.term < self._server._currentTerm):
self._send_response_message(message, yes=False)
return self, None
<|code_end|>
, predict the next line using imports from the current file:
import random
from ..messages.base import BaseMessage
from ..messages.response import ResponseMessage
from .follower import Follower
and context including class names, function names, and sometimes code from other files:
# Path: src/lifeRaft/messages/base.py
# class BaseMessage(object):
# AppendEntries = 0
# RequestVote = 1
# RequestVoteResponse = 2
# Response = 3
#
# def __init__(self, sender, receiver, term, data):
# self._timestamp = int(time.time())
#
# self._sender = sender
# self._receiver = receiver
# self._data = data
# self._term = term
#
# @property
# def receiver(self):
# return self._receiver
#
# @property
# def sender(self):
# return self._sender
#
# @property
# def data(self):
# return self._data
#
# @property
# def timestamp(self):
# return self._timestamp
#
# @property
# def term(self):
# return self._term
#
# @property
# def type(self):
# return self._type
#
# Path: src/lifeRaft/messages/response.py
# class ResponseMessage(BaseMessage):
#
# _type = BaseMessage.Response
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
. Output only the next line. | if(_type == BaseMessage.AppendEntries): |
Predict the next line after this snippet: <|code_start|> """This is called when the leader timeout is reached."""
raise NotImplemented
def on_vote_request(self, message):
"""This is called when there is a vote request."""
return self, None
def on_vote_received(self, message):
"""This is called when this node recieves a vote."""
return self, None
def on_append_entries(self, message):
"""This is called when there is a request to
append an entry to the log.
"""
raise NotImplemented
def on_response_received(self, message):
"""This is called when a response is sent back to the Leader"""
return self, None
def on_client_command(self, message):
"""This is called when there is a client request."""
raise NotImplemented
@property
def timeout(self):
return random.uniform(self._timeout, 2.0 * self._timeout)
def _send_response_message(self, msg, yes=True):
<|code_end|>
using the current file's imports:
import random
from ..messages.base import BaseMessage
from ..messages.response import ResponseMessage
from .follower import Follower
and any relevant context from other files:
# Path: src/lifeRaft/messages/base.py
# class BaseMessage(object):
# AppendEntries = 0
# RequestVote = 1
# RequestVoteResponse = 2
# Response = 3
#
# def __init__(self, sender, receiver, term, data):
# self._timestamp = int(time.time())
#
# self._sender = sender
# self._receiver = receiver
# self._data = data
# self._term = term
#
# @property
# def receiver(self):
# return self._receiver
#
# @property
# def sender(self):
# return self._sender
#
# @property
# def data(self):
# return self._data
#
# @property
# def timestamp(self):
# return self._timestamp
#
# @property
# def term(self):
# return self._term
#
# @property
# def type(self):
# return self._type
#
# Path: src/lifeRaft/messages/response.py
# class ResponseMessage(BaseMessage):
#
# _type = BaseMessage.Response
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
. Output only the next line. | response = ResponseMessage(self._server._name, msg.sender, msg.term, { |
Based on the snippet: <|code_start|>
class TestMemoryBoard( unittest.TestCase ):
def setUp( self ):
self.board = MemoryBoard()
def test_memoryboard_post_message( self ):
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from ..boards.memory_board import MemoryBoard
from ..messages.base import BaseMessage
and context (classes, functions, sometimes code) from other files:
# Path: src/lifeRaft/boards/memory_board.py
# class MemoryBoard(Board):
#
# def __init__(self):
# Board.__init__(self)
# self._board = []
#
# def post_message(self, message):
# self._board.append(message)
#
# self._board = sorted(self._board,
# key=lambda a: a.timestamp, reverse=True)
#
# def get_message(self):
# if(len(self._board) > 0):
# return self._board.pop()
# else:
# return None
#
# Path: src/lifeRaft/messages/base.py
# class BaseMessage(object):
# AppendEntries = 0
# RequestVote = 1
# RequestVoteResponse = 2
# Response = 3
#
# def __init__(self, sender, receiver, term, data):
# self._timestamp = int(time.time())
#
# self._sender = sender
# self._receiver = receiver
# self._data = data
# self._term = term
#
# @property
# def receiver(self):
# return self._receiver
#
# @property
# def sender(self):
# return self._sender
#
# @property
# def data(self):
# return self._data
#
# @property
# def timestamp(self):
# return self._timestamp
#
# @property
# def term(self):
# return self._term
#
# @property
# def type(self):
# return self._type
. Output only the next line. | msg = BaseMessage( 0, 0, 0, 0 ) |
Given the code snippet: <|code_start|>
def on_leader_timeout(self):
''' this event triggers an election '''
self._votes = {}
self._start_election()
return self, None
def on_vote_request(self, message):
return self, None
def on_vote_received(self, message):
if message.sender not in self._votes:
self._votes[message.sender] = message
if(len(self._votes.keys()) > (self._server._total_nodes - 1) / 2):
leader = Leader(timeout=self._timeout)
leader.set_server(self._server)
leader._last_vote = self._last_vote
leader._send_heart_beat()
return leader, None
return self, None
def on_append_entries(self, message):
''' revert to follower, call on_append_entries '''
follower = Follower(timeout=self._timeout)
follower.set_server(self._server)
return follower.on_append_entries(message)
def _start_election(self):
self._server._currentTerm += 1
<|code_end|>
, generate the next line using the imports in this file:
from .voter import Voter
from ..messages.request_vote import RequestVoteMessage
from .leader import Leader
from .follower import Follower
and context (functions, classes, or occasionally code) from other files:
# Path: src/lifeRaft/states/voter.py
# class Voter(State):
#
# def __init__(self, timeout=1.0):
# super(Voter, self).__init__(timeout=timeout)
# self._last_vote = None
#
# def on_vote_request(self, message):
# if(self._last_vote is None and
# message.data["lastLogIndex"] >= self._server._lastLogIndex):
# self._last_vote = message.sender
# self._send_vote_response_message(message)
# else:
# self._send_vote_response_message(message, yes=False)
#
# return self, None
#
# def _send_vote_response_message(self, msg, yes=True):
# voteResponse = RequestVoteResponseMessage(
# self._server._name,
# msg.sender,
# msg.term,
# {"response": yes})
# self._server.send_message_response(voteResponse)
#
# def on_client_message(self, message):
# ''' return the message response for the client '''
# return ClientFollowerResponse(message.sender, self._last_vote)
#
# Path: src/lifeRaft/messages/request_vote.py
# class RequestVoteMessage(BaseMessage):
#
# _type = BaseMessage.RequestVote
#
# def __init__(self, sender, receiver, term, data):
# BaseMessage.__init__(self, sender, receiver, term, data)
. Output only the next line. | election = RequestVoteMessage( |
Predict the next line after this snippet: <|code_start|> if(len(n) > 0):
n[0].post_message(message)
def post_message(self, message):
self._messageBoard.post_message(message)
def on_message(self, message):
with self._messagelock:
try:
state, response = self._state.on_message(message)
self._state = state
except Exception as exc:
print repr(exc)
def run(self):
while self._run:
ts = time.time()
time.sleep(self._state.timeout)
if self._state._last_heartbeat < ts:
with self._messagelock:
state, response = self._state.on_leader_timeout()
self._state = state
def stop(self):
self._run = False
@property
def is_leader(self):
''' a simple leadership interface '''
<|code_end|>
using the current file's imports:
from ..states.leader import Leader
from ..messages.client import ClientMessage
import zmq
import threading
import pickle
import Queue
import time
import math
import time
and any relevant context from other files:
# Path: src/lifeRaft/states/leader.py
# class Leader(Voter):
#
# def __init__(self, timeout=1.0):
# super(Leader, self).__init__(timeout=timeout)
# self._nextIndexes = defaultdict(int)
# self._matchIndex = defaultdict(int)
# self._last_heartbeat = 0
#
# def set_server(self, server):
# self._server = server
#
# for n in self._server._neighbors:
# self._nextIndexes[n._name] = self._server._lastLogIndex + 1
# self._matchIndex[n._name] = 0
#
# def on_append_entries(self, message):
# if message.sender == self._server._name:
# # bcast
# return self, None
# if message.term > self._server._currentTerm:
# ## If we're getting append messages from the future, we're not the leader
# from .follower import Follower
# follower = Follower()
# follower.set_server(self._server)
# return follower, None
# raise RuntimeError
#
# def on_vote_received(self, message):
# ''' Well I find it dandy you're still voting for me '''
# return self, None
#
# def on_response_received(self, message):
# # Was the last AppendEntries good?
# if(not message.data["response"]):
# # No, so lets back up the log for this node
# self._nextIndexes[message.sender] -= 1
#
# # Get the next log entry to send to the client.
# previousIndex = max(0, self._nextIndexes[message.sender] - 1)
# previous = self._server._log[previousIndex]
# current = self._server._log[self._nextIndexes[message.sender]]
#
# # Send the new log to the client and wait for it to respond.
# appendEntry = AppendEntriesMessage(
# self._server._name,
# message.sender,
# self._server._currentTerm,
# {
# "leaderId": self._server._name,
# "prevLogIndex": previousIndex,
# "prevLogTerm": previous["term"],
# "entries": [current],
# "leaderCommit": self._server._commitIndex,
# })
#
# self._send_response_message(appendEntry)
# else:
# # The last append was good so increase their index.
# self._nextIndexes[message.sender] += 1
#
# # Are they caught up?
# if(self._nextIndexes[message.sender] > self._server._lastLogIndex):
# self._nextIndexes[message.sender] = self._server._lastLogIndex
#
# return self, None
#
# @property
# def timeout(self):
# ''' Nyquist rate on avg election timeout; bcast freq >= 3x timeout freq ==> no timeouts '''
# return self._timeout * .75
#
# def on_leader_timeout(self):
# ''' While leader, bcast heartbeat on timeout '''
# self._send_heart_beat()
# return self, None
#
# def _send_heart_beat(self):
# message = AppendEntriesMessage(
# self._server._name,
# None,
# self._server._currentTerm,
# {
# "leaderId": self._server._name,
# "prevLogIndex": self._server._lastLogIndex,
# "prevLogTerm": self._server._lastLogTerm,
# "entries": [],
# "leaderCommit": self._server._commitIndex,
# })
# self._server.send_message(message)
#
# def on_client_message(self, message):
# if message.command == ClientMessage.Leader:
# return ClientLeaderResponse(message.sender, self._server._name, True)
# else:
# #TODO: Raft lol
# raise NotImplemented
#
# Path: src/lifeRaft/messages/client.py
# class ClientMessage(object):
# # Types
# Leader = 0
# Append = 1
#
# def __init__(self, sender, receiver_host, receiver_port, command):
# self._snd_id = sender
# self._rcv_host = receiver_host
# self._rcv_port = receiver_port
# self._command = command
#
# @property
# def command(self):
# return self._command
#
# @property
# def sender(self):
# return self._snd_id
#
# @property
# def receiver_host(self):
# return self._receiver_host
#
# @property
# def receiver_port(self):
# return self._receiver_port
. Output only the next line. | return issubclass(type(self._state), Leader) |
Given the following code snippet before the placeholder: <|code_start|> '''
Simple mock up for creating cluster definitions of remote nodes
(We don't want to actually set up a server)
'''
def __init__(self, name, host='127.0.0.1', port=6666, client_port=5000):
self._name = name
self._host = host
self._port = port
self._client_port = client_port
class ZeroMQClient(object):
def __init__(self, neighbors, client_id='client', timeout=5):
self._client_id = client_id
self._neighbors = neighbors
self._context = zmq.Context()
self._context.setsockopt(zmq.LINGER, 0)
self._timeout = 5
@property
def quorum(self):
return math.ceil(0.5 * len(self._neighbors))
@property
def leader(self):
''' Sends a Leader request to all nodes, establishing leader by quorum '''
votes = {}
sockets = []
for n in self._neighbors:
sock = self._context.socket(zmq.REQ)
sock.connect("tcp://%s:%d" % (n._host, n._client_port))
<|code_end|>
, predict the next line using imports from the current file:
from ..states.leader import Leader
from ..messages.client import ClientMessage
import zmq
import threading
import pickle
import Queue
import time
import math
import time
and context including class names, function names, and sometimes code from other files:
# Path: src/lifeRaft/states/leader.py
# class Leader(Voter):
#
# def __init__(self, timeout=1.0):
# super(Leader, self).__init__(timeout=timeout)
# self._nextIndexes = defaultdict(int)
# self._matchIndex = defaultdict(int)
# self._last_heartbeat = 0
#
# def set_server(self, server):
# self._server = server
#
# for n in self._server._neighbors:
# self._nextIndexes[n._name] = self._server._lastLogIndex + 1
# self._matchIndex[n._name] = 0
#
# def on_append_entries(self, message):
# if message.sender == self._server._name:
# # bcast
# return self, None
# if message.term > self._server._currentTerm:
# ## If we're getting append messages from the future, we're not the leader
# from .follower import Follower
# follower = Follower()
# follower.set_server(self._server)
# return follower, None
# raise RuntimeError
#
# def on_vote_received(self, message):
# ''' Well I find it dandy you're still voting for me '''
# return self, None
#
# def on_response_received(self, message):
# # Was the last AppendEntries good?
# if(not message.data["response"]):
# # No, so lets back up the log for this node
# self._nextIndexes[message.sender] -= 1
#
# # Get the next log entry to send to the client.
# previousIndex = max(0, self._nextIndexes[message.sender] - 1)
# previous = self._server._log[previousIndex]
# current = self._server._log[self._nextIndexes[message.sender]]
#
# # Send the new log to the client and wait for it to respond.
# appendEntry = AppendEntriesMessage(
# self._server._name,
# message.sender,
# self._server._currentTerm,
# {
# "leaderId": self._server._name,
# "prevLogIndex": previousIndex,
# "prevLogTerm": previous["term"],
# "entries": [current],
# "leaderCommit": self._server._commitIndex,
# })
#
# self._send_response_message(appendEntry)
# else:
# # The last append was good so increase their index.
# self._nextIndexes[message.sender] += 1
#
# # Are they caught up?
# if(self._nextIndexes[message.sender] > self._server._lastLogIndex):
# self._nextIndexes[message.sender] = self._server._lastLogIndex
#
# return self, None
#
# @property
# def timeout(self):
# ''' Nyquist rate on avg election timeout; bcast freq >= 3x timeout freq ==> no timeouts '''
# return self._timeout * .75
#
# def on_leader_timeout(self):
# ''' While leader, bcast heartbeat on timeout '''
# self._send_heart_beat()
# return self, None
#
# def _send_heart_beat(self):
# message = AppendEntriesMessage(
# self._server._name,
# None,
# self._server._currentTerm,
# {
# "leaderId": self._server._name,
# "prevLogIndex": self._server._lastLogIndex,
# "prevLogTerm": self._server._lastLogTerm,
# "entries": [],
# "leaderCommit": self._server._commitIndex,
# })
# self._server.send_message(message)
#
# def on_client_message(self, message):
# if message.command == ClientMessage.Leader:
# return ClientLeaderResponse(message.sender, self._server._name, True)
# else:
# #TODO: Raft lol
# raise NotImplemented
#
# Path: src/lifeRaft/messages/client.py
# class ClientMessage(object):
# # Types
# Leader = 0
# Append = 1
#
# def __init__(self, sender, receiver_host, receiver_port, command):
# self._snd_id = sender
# self._rcv_host = receiver_host
# self._rcv_port = receiver_port
# self._command = command
#
# @property
# def command(self):
# return self._command
#
# @property
# def sender(self):
# return self._snd_id
#
# @property
# def receiver_host(self):
# return self._receiver_host
#
# @property
# def receiver_port(self):
# return self._receiver_port
. Output only the next line. | message = ClientMessage(self._client_id, n._host, n._client_port, ClientMessage.Leader) |
Predict the next line for this snippet: <|code_start|> Note that the current server implementation sends
'Content-Encoding' header anyway, mandating client to
decompress the file.
digest: SHA256 digest of the file before compression
If specified, the digest will not be computed again, saving
resources.
logical_size: if ``data`` is gzip-compressed, this parameter
has to be set to decompressed file size.
"""
with _exclusive_lock(self._lock_path('links', name)):
logger.debug('Acquired lock to link for %s.', name)
link_path = self._link_path(name)
if _path_exists(link_path) and _file_version(link_path) > version:
logger.info(
'Tried to store older version of %s (%d < %d), ignoring.',
name,
version,
_file_version(link_path),
)
return _file_version(link_path)
# data is managed by contents now, and shouldn't be used directly
with _InputStreamWrapper(data, size) as contents:
if digest is None or logical_size is None:
contents.save()
if compressed:
# This shouldn't occur if the request came from a proper
# filetracker client, so we don't care if it's slow.
logger.warning('Storing compressed stream without hints.')
with gzip.open(contents.current_path, 'rb') as decompressed:
<|code_end|>
with the help of current file imports:
import contextlib
import email.utils
import errno
import fcntl
import gevent
import gzip
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import bsddb3
import six
from filetracker.utils import file_digest
and context from other files:
# Path: filetracker/utils.py
# def file_digest(source):
# """Calculates SHA256 digest of a file.
#
# Args:
# source: either a file-like object or a path to file
# """
# hash_sha256 = hashlib.sha256()
#
# should_close = False
#
# if isinstance(source, six.string_types):
# should_close = True
# source = open(source, 'rb')
#
# for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):
# hash_sha256.update(chunk)
#
# if should_close:
# source.close()
#
# return hash_sha256.hexdigest()
, which may contain function names, class names, or code. Output only the next line. | digest = file_digest(decompressed) |
Predict the next line after this snippet: <|code_start|>
If ``name`` has a version number, it is ignored.
Raises an (unspecified) exception if file is not found.
"""
raise NotImplementedError
def file_size(self, name):
"""Returns the size of the file.
Raises an (unspecified) exception if file is not found.
"""
raise NotImplementedError
def get_stream(self, name):
"""Retrieves a file as a binary stream.
Returns a pair (binary stream, versioned name).
"""
raise NotImplementedError
def get_file(self, name, filename):
"""Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file.
"""
stream, vname = self.get_stream(name)
<|code_end|>
using the current file's imports:
import collections
import os
import shutil
from filetracker.utils import split_name, mkdir
and any relevant context from other files:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
. Output only the next line. | path, version = split_name(vname) |
Given snippet: <|code_start|> """
raise NotImplementedError
def file_size(self, name):
"""Returns the size of the file.
Raises an (unspecified) exception if file is not found.
"""
raise NotImplementedError
def get_stream(self, name):
"""Retrieves a file as a binary stream.
Returns a pair (binary stream, versioned name).
"""
raise NotImplementedError
def get_file(self, name, filename):
"""Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file.
"""
stream, vname = self.get_stream(name)
path, version = split_name(vname)
dir_path = os.path.dirname(filename)
if dir_path:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import collections
import os
import shutil
from filetracker.utils import split_name, mkdir
and context:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
which might include code, classes, or functions. Output only the next line. | mkdir(dir_path) |
Next line prediction: <|code_start|> try:
os.remove(self.filename)
dir_path = os.path.dirname(self.filename)
rmdirs(dir_path, self.manager.dir)
except OSError:
pass
finally:
fcntl.flock(self.manager.tree_lock_fd, fcntl.LOCK_UN)
os.close(self.fd)
self.fd = -1
def __del__(self):
# The file is unlocked when the a descriptor which was used to lock
# it is closed.
self.close()
def __init__(self, dir):
self.dir = dir
mkdir(dir)
# All mkdirs, opens, rmdirs and unlinks must be guarded by this lock
self.tree_lock_fd = os.open(
os.path.join(dir, 'tree.lock'), os.O_WRONLY | os.O_CREAT, 0o600
)
def __del__(self):
os.close(self.tree_lock_fd)
def lock_for(self, name):
check_name(name)
<|code_end|>
. Use current file imports:
(import fcntl
import os
from filetracker.utils import split_name, check_name, mkdir, rmdirs)
and context including class names, function names, or small code snippets from other files:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def check_name(name, allow_version=True):
# if not isinstance(name, six.string_types):
# raise ValueError("Invalid Filetracker filename: not string: %r" % (name,))
# parts = name.split('/')
# if not parts:
# raise ValueError("Invalid Filetracker filename: empty name")
# if parts[0]:
# raise ValueError("Invalid Filetracker filename: does not start with /")
# if '..' in parts:
# raise ValueError("Invalid Filetracker filename: .. in path")
# if '@' in ''.join(parts[:-1]):
# raise ValueError("Invalid Filetracker filename: @ in path")
# if len(parts[-1].split('@')) > 2:
# raise ValueError("Invalid Filetracker filename: multiple versions")
# if '@' in parts[-1] and not allow_version:
# raise ValueError(
# "Invalid Filetracker filename: version not allowed " "in this API call"
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
#
# def rmdirs(name, root):
# """Removes empty directories from ``name`` upwards, stops at ``root``."""
# while name != root:
# try:
# os.rmdir(name)
# name = os.path.dirname(name)
# except OSError as e:
# if e.errno in (errno.ENOTEMPTY, errno.ENOENT):
# return
# else:
# raise
. Output only the next line. | name, version = split_name(name) |
Continue the code snippet: <|code_start|> fcntl.flock(self.manager.tree_lock_fd, fcntl.LOCK_EX)
try:
os.remove(self.filename)
dir_path = os.path.dirname(self.filename)
rmdirs(dir_path, self.manager.dir)
except OSError:
pass
finally:
fcntl.flock(self.manager.tree_lock_fd, fcntl.LOCK_UN)
os.close(self.fd)
self.fd = -1
def __del__(self):
# The file is unlocked when the a descriptor which was used to lock
# it is closed.
self.close()
def __init__(self, dir):
self.dir = dir
mkdir(dir)
# All mkdirs, opens, rmdirs and unlinks must be guarded by this lock
self.tree_lock_fd = os.open(
os.path.join(dir, 'tree.lock'), os.O_WRONLY | os.O_CREAT, 0o600
)
def __del__(self):
os.close(self.tree_lock_fd)
def lock_for(self, name):
<|code_end|>
. Use current file imports:
import fcntl
import os
from filetracker.utils import split_name, check_name, mkdir, rmdirs
and context (classes, functions, or code) from other files:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def check_name(name, allow_version=True):
# if not isinstance(name, six.string_types):
# raise ValueError("Invalid Filetracker filename: not string: %r" % (name,))
# parts = name.split('/')
# if not parts:
# raise ValueError("Invalid Filetracker filename: empty name")
# if parts[0]:
# raise ValueError("Invalid Filetracker filename: does not start with /")
# if '..' in parts:
# raise ValueError("Invalid Filetracker filename: .. in path")
# if '@' in ''.join(parts[:-1]):
# raise ValueError("Invalid Filetracker filename: @ in path")
# if len(parts[-1].split('@')) > 2:
# raise ValueError("Invalid Filetracker filename: multiple versions")
# if '@' in parts[-1] and not allow_version:
# raise ValueError(
# "Invalid Filetracker filename: version not allowed " "in this API call"
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
#
# def rmdirs(name, root):
# """Removes empty directories from ``name`` upwards, stops at ``root``."""
# while name != root:
# try:
# os.rmdir(name)
# name = os.path.dirname(name)
# except OSError as e:
# if e.errno in (errno.ENOTEMPTY, errno.ENOENT):
# return
# else:
# raise
. Output only the next line. | check_name(name) |
Here is a snippet: <|code_start|> """Unlocks the file and releases any system resources.
If ``delete`` is ``True``, also removes the underlying
lock file or equivalent.
May be called more than once (it's a no-op then).
"""
pass
def lock_for(self, name):
"""Returns a :class:`LockManager.Lock` bound to the passed file.
Locks are not versioned -- there should be a single lock for
all versions of the given name. The argument ``name`` may contain
version specification, but it must be ignored.
"""
raise NotImplementedError
class FcntlLockManager(LockManager):
"""A :class:`LockManager` using ``fcntl.flock``."""
class FcntlLock(LockManager.Lock):
def __init__(self, manager, filename):
self.manager = manager
self.filename = filename
dir = os.path.dirname(filename)
fcntl.flock(manager.tree_lock_fd, fcntl.LOCK_EX)
try:
<|code_end|>
. Write the next line using the current file imports:
import fcntl
import os
from filetracker.utils import split_name, check_name, mkdir, rmdirs
and context from other files:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def check_name(name, allow_version=True):
# if not isinstance(name, six.string_types):
# raise ValueError("Invalid Filetracker filename: not string: %r" % (name,))
# parts = name.split('/')
# if not parts:
# raise ValueError("Invalid Filetracker filename: empty name")
# if parts[0]:
# raise ValueError("Invalid Filetracker filename: does not start with /")
# if '..' in parts:
# raise ValueError("Invalid Filetracker filename: .. in path")
# if '@' in ''.join(parts[:-1]):
# raise ValueError("Invalid Filetracker filename: @ in path")
# if len(parts[-1].split('@')) > 2:
# raise ValueError("Invalid Filetracker filename: multiple versions")
# if '@' in parts[-1] and not allow_version:
# raise ValueError(
# "Invalid Filetracker filename: version not allowed " "in this API call"
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
#
# def rmdirs(name, root):
# """Removes empty directories from ``name`` upwards, stops at ``root``."""
# while name != root:
# try:
# os.rmdir(name)
# name = os.path.dirname(name)
# except OSError as e:
# if e.errno in (errno.ENOTEMPTY, errno.ENOENT):
# return
# else:
# raise
, which may include functions, classes, or code. Output only the next line. | mkdir(dir) |
Next line prediction: <|code_start|> self.filename = filename
dir = os.path.dirname(filename)
fcntl.flock(manager.tree_lock_fd, fcntl.LOCK_EX)
try:
mkdir(dir)
self.fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0o600)
finally:
fcntl.flock(manager.tree_lock_fd, fcntl.LOCK_UN)
# Set mtime so that any future cleanup script may remove lock files
# not used for some specified time.
os.utime(filename, None)
def lock_shared(self):
fcntl.flock(self.fd, fcntl.LOCK_SH)
def lock_exclusive(self):
fcntl.flock(self.fd, fcntl.LOCK_EX)
def unlock(self):
fcntl.flock(self.fd, fcntl.LOCK_UN)
def close(self, delete=False):
if self.fd != -1:
if delete:
fcntl.flock(self.manager.tree_lock_fd, fcntl.LOCK_EX)
try:
os.remove(self.filename)
dir_path = os.path.dirname(self.filename)
<|code_end|>
. Use current file imports:
(import fcntl
import os
from filetracker.utils import split_name, check_name, mkdir, rmdirs)
and context including class names, function names, or small code snippets from other files:
# Path: filetracker/utils.py
# def split_name(name):
# """Splits a (possibly versioned) name into unversioned name and version.
#
# Returns a tuple ``(unversioned_name, version)``, where ``version`` may
# be ``None``.
# """
# s = name.rsplit('@', 1)
# if len(s) == 1:
# return s[0], None
# else:
# try:
# return s[0], int(s[1])
# except ValueError:
# raise ValueError(
# "Invalid Filetracker filename: version must " "be int, not %r" % (s[1],)
# )
#
# def check_name(name, allow_version=True):
# if not isinstance(name, six.string_types):
# raise ValueError("Invalid Filetracker filename: not string: %r" % (name,))
# parts = name.split('/')
# if not parts:
# raise ValueError("Invalid Filetracker filename: empty name")
# if parts[0]:
# raise ValueError("Invalid Filetracker filename: does not start with /")
# if '..' in parts:
# raise ValueError("Invalid Filetracker filename: .. in path")
# if '@' in ''.join(parts[:-1]):
# raise ValueError("Invalid Filetracker filename: @ in path")
# if len(parts[-1].split('@')) > 2:
# raise ValueError("Invalid Filetracker filename: multiple versions")
# if '@' in parts[-1] and not allow_version:
# raise ValueError(
# "Invalid Filetracker filename: version not allowed " "in this API call"
# )
#
# def mkdir(name):
# try:
# os.makedirs(name, 0o700)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise
#
# def rmdirs(name, root):
# """Removes empty directories from ``name`` upwards, stops at ``root``."""
# while name != root:
# try:
# os.rmdir(name)
# name = os.path.dirname(name)
# except OSError as e:
# if e.errno in (errno.ENOTEMPTY, errno.ENOENT):
# return
# else:
# raise
. Output only the next line. | rmdirs(dir_path, self.manager.dir) |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
w.tdays("2017-02-02", "2017-03-02", "")
w.tdays("2017-02-02", "2017-03-02", "Days=Weekdays")
w.tdays("2017-02-02", "2017-03-02", "Days=Alldays")
w.tdays("2017-02-02", "2017-03-02", "TradingCalendar=SHFE")
"""
def download_tdays(w, startdate, enddate, option=''):
"""
下载交易日数据
:param w:
:param startdate:
:param enddate:
:param option:
:return:
"""
<|code_end|>
, determine the next line of code. You have imports:
import pandas as pd
from .utils import asDateTime
and context (class names, function names, or code) available:
# Path: kquant_data/wind/utils.py
# def asDateTime(v, asDate=False):
# """
# 万得中读出来的时间总多5ms,覆写这部分
# w.asDateTime = asDateTime
# w.start()
# :param v:
# :param asDate:
# :return:
# """
# # return datetime(1899, 12, 30, 0, 0, 0, 0) + timedelta(v + 0.005 / 3600 / 24)
# return datetime(1899, 12, 30, 0, 0, 0, 0) + timedelta(v)
. Output only the next line. | w.asDateTime = asDateTime |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
<|code_end|>
, determine the next line of code. You have imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context (class names, function names, or code) available:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'Symbol.csv') |
Given snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'DateTime.csv')
DateTime = get_datetime(path)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
which might include code, classes, or functions. Output only the next line. | df = merge_weight_internal(symbols, DateTime, "000016.SH") |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'DateTime.csv')
<|code_end|>
using the current file's imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and any relevant context from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | DateTime = get_datetime(path) |
Here is a snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'Symbol.csv')
<|code_end|>
. Write the next line using the current file imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
, which may include functions, classes, or code. Output only the next line. | symbols = all_instruments(path) |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'DateTime.csv')
DateTime = get_datetime(path)
df = merge_weight_internal(symbols, DateTime, "000016.SH")
path = os.path.join(__CONFIG_H5_STK_DIR__, "5min_000016.SH", 'weight.h5')
<|code_end|>
, determine the next line of code. You have imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context (class names, function names, or code) available:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | write_dataframe_set_dtype_remove_head(path, df, None, "weight") |
Given the following code snippet before the placeholder: <|code_start|>调用wset函数的部分
下载数据的方法
1.在时间上使用折半可以最少的下载数据,但已经下了一部分,要补下时如果挪了一位,又得全重下
2.在文件上,三个文件一组,三组一样,删中间一个,直到不能删了,退出
"""
def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
"""
板块成份
中信证券一级行业指数:时间好像没有必要,因为日历日也会查询出来
风险警示股票:日期就是查询的日期,股票也是最新名,没有啥用
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
:param w:
:param sector:
:param date:
:return:
"""
param = 'date=%s' % date
if sector:
param += ';sector=%s' % sector
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
<|code_end|>
, predict the next line using imports from the current file:
import os
import pandas as pd
from .utils import asDateTime
and context including class names, function names, and sometimes code from other files:
# Path: kquant_data/wind/utils.py
# def asDateTime(v, asDate=False):
# """
# 万得中读出来的时间总多5ms,覆写这部分
# w.asDateTime = asDateTime
# w.start()
# :param v:
# :param asDate:
# :return:
# """
# # return datetime(1899, 12, 30, 0, 0, 0, 0) + timedelta(v + 0.005 / 3600 / 24)
# return datetime(1899, 12, 30, 0, 0, 0, 0) + timedelta(v)
. Output only the next line. | w.asDateTime = asDateTime |
Based on the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context (classes, functions, sometimes code) from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'Symbol.csv') |
Based on the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'DateTime.csv')
DateTime = get_datetime(path)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context (classes, functions, sometimes code) from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | df = merge_weight_internal(symbols, DateTime, "000016.SH") |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'DateTime.csv')
<|code_end|>
with the help of current file imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
, which may contain function names, class names, or code. Output only the next line. | DateTime = get_datetime(path) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'Symbol.csv')
<|code_end|>
. Use current file imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context (classes, functions, or code) from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
. Output only the next line. | symbols = all_instruments(path) |
Given snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
以前的做法是先生成数据,然后再生成合约
"""
if __name__ == '__main__':
# 时间和合约都已经生成了
# 只要将时间与合约对上即可
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'Symbol.csv')
symbols = all_instruments(path)
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'DateTime.csv')
DateTime = get_datetime(path)
df = merge_weight_internal(symbols, DateTime, "000016.SH")
path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH", 'weight.h5')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__
from kquant_data.processing.merge import merge_weight_internal
from kquant_data.api import get_datetime, all_instruments
from kquant_data.xio.h5 import write_dataframe_set_dtype_remove_head
and context:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# Path: kquant_data/processing/merge.py
# def merge_weight_internal(symbols, DateTime, wind_code):
# """
# 合并一级文件夹
# :param rule:
# :param sector_name:
# :param dataset_name:
# :return:
# """
# tic()
# path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
# df = load_index_weight(path)
# print("数据加载完成")
# # 与行业不同,行业是全部有数据,它是有一部分有数据,所以直接用fillna会出错,需要先填充
# df.fillna(-1, inplace=True)
# toc()
#
# # 原始数据比较简单,但与行业板块数据又不一样
# # 1.每年的约定时间会调整成份股
# # 2.每天的值都不一样
# # 约定nan表示不属于成份,0表示属于成份,但权重为0
# df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
# # -1表示特殊数据,处理下
# df.replace(-1, np.nan, inplace=True)
# print("数据加载完成")
# toc()
#
# return df
#
# Path: kquant_data/api.py
# def get_datetime(path):
# dt = pd.read_csv(path, index_col=0, parse_dates=True)
# dt['date'] = dt.index
# return dt
#
# def all_instruments(path=None, type=None):
# """
# 得到合约列表
# :param type:
# :return:
# """
# if path is None:
# path = os.path.join(__CONFIG_H5_STK_DIR__, "daily", 'Symbol.csv')
#
# df = pd.read_csv(path, dtype={'code': str})
#
# return df
#
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
# """
# 每个单元格的数据类型都一样
# 强行指定类型可以让文件的占用更小
# 表头不保存
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
# if dtype is None:
# f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
# else:
# f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
# f.close()
# return
which might include code, classes, or functions. Output only the next line. | write_dataframe_set_dtype_remove_head(path, df, None, "weight") |
Continue the code snippet: <|code_start|>
def _binsearch_download_constituent(w, dates, path, file_ext, start, end, sector, windcode, field,
two_sides=False, is_indexconstituent=False):
"""
使用折半法下载
缺点,数据都下载过来了,中间很多重复的数据
:param w:
:param dates:
:param path:
:param file_ext:
:param start:
:param end:
:param sector:None表示没有权块信息,那就是权重信息了
:param windcode:
:param field:
:param two_sides:用在一开始数据下载时补两头
:return:
"""
len = end - start
if len < 1:
# 两个相邻,也需要进行操作
return
date = dates[start]
fullpath = os.path.join(path, date + file_ext)
df_start = read_constituent(fullpath)
if df_start is None:
print("下载:%s" % date)
if not is_indexconstituent:
<|code_end|>
. Use current file imports:
import os
import shutil
import pandas as pd
from datetime import datetime
from ..wind.wset import write_constituent, read_constituent, download_sectorconstituent, download_indexconstituent, \
download_futureoir
and context (classes, functions, or code) from other files:
# Path: kquant_data/wind/wset.py
# def write_constituent(path, df):
# df.to_csv(path, encoding='utf-8-sig', date_format='%Y-%m-%d', index=False)
#
# def read_constituent(path):
# """
# 读取板块文件
# :param path:
# :return:
# """
# try:
# df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True)
# except Exception as e:
# return None
# try:
# df['date'] = pd.to_datetime(df['date'])
# except KeyError:
# pass
# return df
#
# def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
# """
# 板块成份
# 中信证券一级行业指数:时间好像没有必要,因为日历日也会查询出来
# 风险警示股票:日期就是查询的日期,股票也是最新名,没有啥用
#
# w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
# w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
# w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
# :param w:
# :param sector:
# :param date:
# :return:
# """
# param = 'date=%s' % date
# if sector:
# param += ';sector=%s' % sector
# if windcode:
# param += ';windcode=%s' % windcode
# if field:
# param += ';field=%s' % field
#
# w.asDateTime = asDateTime
# w_wset_data = w.wset("sectorconstituent", param)
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
# try:
# df['date'] = pd.to_datetime(df['date'])
# except KeyError:
# pass
# return df
#
# def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
# """
# 指数权重
# 如果指定日期不是交易日,会返回时前一个交易日的信息
# :param w:
# :param windcode:
# :param date:
# :return:
# """
# param = 'date=%s' % date
# if windcode:
# param += ';windcode=%s' % windcode
# if field:
# param += ';field=%s' % field
#
# w.asDateTime = asDateTime
# w_wset_data = w.wset("indexconstituent", param)
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
# return df
#
# def download_futureoir(w, startdate, enddate, windcode):
# """
# 品种持仓
# 是将多仓与空仓排名合并到一起,然后按多仓进行统一排名,所以对于空仓要使用时,需要自行重新排名
# 查询的跨度不要太长,一个月或三个月
# :param w:
# :param startdate:
# :param enddate:
# :return:
# """
# w.asDateTime = asDateTime
# w_wset_data = w.wset("futureoir",
# "startdate=%s;enddate=%s;varity=%s;order_by=long;ranks=all;"
# "field=date,ranks,member_name,"
# "long_position,long_position_increase,long_potion_rate,"
# "short_position,short_position_increase,short_position_rate,"
# "vol,vol_increase,vol_rate,settle" % (startdate, enddate, windcode))
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
#
# try:
# df['date'] = pd.to_datetime(df['date'])
# except:
# pass
#
# return df
. Output only the next line. | df_start = download_sectorconstituent(w, date, sector, windcode, field) |
Predict the next line for this snippet: <|code_start|> file_download_constituent(w, df['date_str'], foldpath, '.csv',
sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
dst_path = os.path.join(root_path, "%s_move" % wind_code)
move_constituent(foldpath, dst_path)
def download_index_weight2(w, dates, wind_code, root_path):
for date in dates:
path = os.path.join(root_path, wind_code, date.strftime('%Y-%m-%d.csv'))
df = read_constituent(path)
if df is None:
print("下载权重", path)
df = download_indexconstituent(w, date.strftime('%Y-%m-%d'), wind_code)
write_constituent(path, df)
def download_futureoir_day_range(w, day_range, windcode, root_path):
"""
指定
:param w:
:param day_range:
:param windcode:
:param root_path:
:return:
"""
startdate = day_range[0]
enddate = day_range[-1]
print("长度%d,开始%s,结束%s" % (len(day_range), startdate, enddate))
if len(day_range) > 0:
<|code_end|>
with the help of current file imports:
import os
import shutil
import pandas as pd
from datetime import datetime
from ..wind.wset import write_constituent, read_constituent, download_sectorconstituent, download_indexconstituent, \
download_futureoir
and context from other files:
# Path: kquant_data/wind/wset.py
# def write_constituent(path, df):
# df.to_csv(path, encoding='utf-8-sig', date_format='%Y-%m-%d', index=False)
#
# def read_constituent(path):
# """
# 读取板块文件
# :param path:
# :return:
# """
# try:
# df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True)
# except Exception as e:
# return None
# try:
# df['date'] = pd.to_datetime(df['date'])
# except KeyError:
# pass
# return df
#
# def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
# """
# 板块成份
# 中信证券一级行业指数:时间好像没有必要,因为日历日也会查询出来
# 风险警示股票:日期就是查询的日期,股票也是最新名,没有啥用
#
# w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
# w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
# w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
# :param w:
# :param sector:
# :param date:
# :return:
# """
# param = 'date=%s' % date
# if sector:
# param += ';sector=%s' % sector
# if windcode:
# param += ';windcode=%s' % windcode
# if field:
# param += ';field=%s' % field
#
# w.asDateTime = asDateTime
# w_wset_data = w.wset("sectorconstituent", param)
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
# try:
# df['date'] = pd.to_datetime(df['date'])
# except KeyError:
# pass
# return df
#
# def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
# """
# 指数权重
# 如果指定日期不是交易日,会返回时前一个交易日的信息
# :param w:
# :param windcode:
# :param date:
# :return:
# """
# param = 'date=%s' % date
# if windcode:
# param += ';windcode=%s' % windcode
# if field:
# param += ';field=%s' % field
#
# w.asDateTime = asDateTime
# w_wset_data = w.wset("indexconstituent", param)
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
# return df
#
# def download_futureoir(w, startdate, enddate, windcode):
# """
# 品种持仓
# 是将多仓与空仓排名合并到一起,然后按多仓进行统一排名,所以对于空仓要使用时,需要自行重新排名
# 查询的跨度不要太长,一个月或三个月
# :param w:
# :param startdate:
# :param enddate:
# :return:
# """
# w.asDateTime = asDateTime
# w_wset_data = w.wset("futureoir",
# "startdate=%s;enddate=%s;varity=%s;order_by=long;ranks=all;"
# "field=date,ranks,member_name,"
# "long_position,long_position_increase,long_potion_rate,"
# "short_position,short_position_increase,short_position_rate,"
# "vol,vol_increase,vol_rate,settle" % (startdate, enddate, windcode))
# df = pd.DataFrame(w_wset_data.Data)
# df = df.T
# df.columns = w_wset_data.Fields
#
# try:
# df['date'] = pd.to_datetime(df['date'])
# except:
# pass
#
# return df
, which may contain function names, class names, or code. Output only the next line. | df = download_futureoir(w, startdate=startdate, enddate=enddate, windcode=windcode) |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
大智慧数据的处理
"""
dzh_h5_type = np.dtype([
('time', np.uint64),
('pre_day', np.float64),
('pre_close', np.float64),
('split', np.float64),
('purchase', np.float64),
('purchase_price', np.float64),
('dividend', np.float64),
('dr_pre_close', np.float64),
('dr_factor', np.float64),
('backward_factor', np.float64),
('forward_factor', np.float64),
])
def dividend_to_h5(input_path, data):
<|code_end|>
using the current file's imports:
import urllib
import urllib.request
import numpy as np
from struct import *
from ..xio.h5 import write_dataframe_set_struct_keep_head
and any relevant context from other files:
# Path: kquant_data/xio/h5.py
# def write_dataframe_set_struct_keep_head(path, data, dtype, dateset_name):
# """
# 保存DataFrame数据
# 保留表头
# 可以用来存K线,除权除息等信息
# :param path:
# :param data:
# :param dtype:
# :param dateset_name:
# :return:
# """
# f = h5py.File(path, 'w')
#
# r = data.to_records(index=False)
# d = np.array(r, dtype=dtype)
#
# f.create_dataset(dateset_name, data=d, compression="gzip", compression_opts=6)
# f.close()
# return
. Output only the next line. | write_dataframe_set_struct_keep_head(input_path, data, dzh_h5_type, 'Dividend') |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
同一品种的下载流程
"""
if __name__ == '__main__':
# 很多品种的2016-04-21没有数据,如TA.CZC\ZC.CZC\SR.CZC\OI.CZC\MA.CZC\RM.CZC\FG.CZC\CF.CZC\WH.CZC,都郑商所
# w = None
w.start()
# 最新的一天由于没有数据,会导致参与下载,如果中间某一天缺失,会导致大量下载数据,需要排除
date_str = (datetime.today() - timedelta(days=0)).strftime('%Y-%m-%d')
# date_str = datetime.today().strftime('%Y-%m-%d')
# IF一类的起始时间是从2010年开始,不能再从2009年开始遍历了
<|code_end|>
. Use current file imports:
from WindPy import w
from datetime import datetime,timedelta
from kquant_data.wind.tdays import read_tdays
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__
from kquant_data.wind_resume.wset import resume_download_futureoir
from kquant_data.future.symbol import get_actvie_products_wind,get_all_products_wind
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
import os
import numpy as np
import pandas as pd
and context (classes, functions, or code) from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/config.py
# __CONFIG_TDAYS_SHFE_FILE__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'tdays', 'SHFE.csv')
#
# Path: kquant_data/wind_resume/wset.py
# def resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days):
# dir_path = os.path.join(root_path, windcode)
# os.makedirs(dir_path, exist_ok=True)
#
# # 从第一个有数据的部分开始第一条,这个需要根据自己的情况进行处理
# if adjust_trading_days:
# for dirpath, dirnames, filenames in os.walk(dir_path):
# for filename in filenames:
# trading_days = trading_days[filename[:10]:]
# break
#
# # 分成list套list,只要时间超长就分新的一组
# last_date = pd.Timestamp(1900, 1, 1).tz_localize(None)
# day_ranges = []
# day_range = []
# for day in trading_days['date']:
# path = os.path.join(root_path, windcode, day.strftime('%Y-%m-%d.csv'))
# if os.path.exists(path):
# continue
# else:
# if (day - last_date).days > 20 or len(day_range) >= 30:
# day_ranges.append(day_range)
# day_range = []
# last_date = day
# day_range.append(day)
# # 将最后一个添加进去
# day_ranges.append(day_range)
#
# for day_range in day_ranges:
# if len(day_range) > 0:
# ret = download_futureoir_day_range(w, day_range, windcode, root_path)
# if ret == 0:
# print("下载数据为空,有可能才上市没多久,需要再下载")
# break
#
# print('处理完毕!可能有部分数据由于超时没有下载成功,可再运行一次脚本')
#
# Path: kquant_data/future/symbol.py
# def get_actvie_products_wind():
# _lst = get_actvie_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# def get_all_products_wind():
# _lst = get_all_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__) |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
同一品种的下载流程
"""
if __name__ == '__main__':
# 很多品种的2016-04-21没有数据,如TA.CZC\ZC.CZC\SR.CZC\OI.CZC\MA.CZC\RM.CZC\FG.CZC\CF.CZC\WH.CZC,都郑商所
# w = None
w.start()
# 最新的一天由于没有数据,会导致参与下载,如果中间某一天缺失,会导致大量下载数据,需要排除
date_str = (datetime.today() - timedelta(days=0)).strftime('%Y-%m-%d')
# date_str = datetime.today().strftime('%Y-%m-%d')
# IF一类的起始时间是从2010年开始,不能再从2009年开始遍历了
<|code_end|>
, generate the next line using the imports in this file:
from WindPy import w
from datetime import datetime,timedelta
from kquant_data.wind.tdays import read_tdays
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__
from kquant_data.wind_resume.wset import resume_download_futureoir
from kquant_data.future.symbol import get_actvie_products_wind,get_all_products_wind
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
import os
import numpy as np
import pandas as pd
and context (functions, classes, or occasionally code) from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/config.py
# __CONFIG_TDAYS_SHFE_FILE__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'tdays', 'SHFE.csv')
#
# Path: kquant_data/wind_resume/wset.py
# def resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days):
# dir_path = os.path.join(root_path, windcode)
# os.makedirs(dir_path, exist_ok=True)
#
# # 从第一个有数据的部分开始第一条,这个需要根据自己的情况进行处理
# if adjust_trading_days:
# for dirpath, dirnames, filenames in os.walk(dir_path):
# for filename in filenames:
# trading_days = trading_days[filename[:10]:]
# break
#
# # 分成list套list,只要时间超长就分新的一组
# last_date = pd.Timestamp(1900, 1, 1).tz_localize(None)
# day_ranges = []
# day_range = []
# for day in trading_days['date']:
# path = os.path.join(root_path, windcode, day.strftime('%Y-%m-%d.csv'))
# if os.path.exists(path):
# continue
# else:
# if (day - last_date).days > 20 or len(day_range) >= 30:
# day_ranges.append(day_range)
# day_range = []
# last_date = day
# day_range.append(day)
# # 将最后一个添加进去
# day_ranges.append(day_range)
#
# for day_range in day_ranges:
# if len(day_range) > 0:
# ret = download_futureoir_day_range(w, day_range, windcode, root_path)
# if ret == 0:
# print("下载数据为空,有可能才上市没多久,需要再下载")
# break
#
# print('处理完毕!可能有部分数据由于超时没有下载成功,可再运行一次脚本')
#
# Path: kquant_data/future/symbol.py
# def get_actvie_products_wind():
# _lst = get_actvie_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# def get_all_products_wind():
# _lst = get_all_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
同一品种的下载流程
"""
if __name__ == '__main__':
# 很多品种的2016-04-21没有数据,如TA.CZC\ZC.CZC\SR.CZC\OI.CZC\MA.CZC\RM.CZC\FG.CZC\CF.CZC\WH.CZC,都郑商所
# w = None
w.start()
# 最新的一天由于没有数据,会导致参与下载,如果中间某一天缺失,会导致大量下载数据,需要排除
date_str = (datetime.today() - timedelta(days=0)).strftime('%Y-%m-%d')
# date_str = datetime.today().strftime('%Y-%m-%d')
# IF一类的起始时间是从2010年开始,不能再从2009年开始遍历了
trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__)
# trading_days = trading_days['2016-01-01':date_str]
trading_days = trading_days['2017-10-01':date_str]
# 下载后存下
windcodes = get_all_products_wind()
root_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir")
for windcode in windcodes:
# windcode = 'AP.CZC'
print('处理', windcode)
<|code_end|>
. Use current file imports:
from WindPy import w
from datetime import datetime,timedelta
from kquant_data.wind.tdays import read_tdays
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__
from kquant_data.wind_resume.wset import resume_download_futureoir
from kquant_data.future.symbol import get_actvie_products_wind,get_all_products_wind
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
import os
import numpy as np
import pandas as pd
and context (classes, functions, or code) from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/config.py
# __CONFIG_TDAYS_SHFE_FILE__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'tdays', 'SHFE.csv')
#
# Path: kquant_data/wind_resume/wset.py
# def resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days):
# dir_path = os.path.join(root_path, windcode)
# os.makedirs(dir_path, exist_ok=True)
#
# # 从第一个有数据的部分开始第一条,这个需要根据自己的情况进行处理
# if adjust_trading_days:
# for dirpath, dirnames, filenames in os.walk(dir_path):
# for filename in filenames:
# trading_days = trading_days[filename[:10]:]
# break
#
# # 分成list套list,只要时间超长就分新的一组
# last_date = pd.Timestamp(1900, 1, 1).tz_localize(None)
# day_ranges = []
# day_range = []
# for day in trading_days['date']:
# path = os.path.join(root_path, windcode, day.strftime('%Y-%m-%d.csv'))
# if os.path.exists(path):
# continue
# else:
# if (day - last_date).days > 20 or len(day_range) >= 30:
# day_ranges.append(day_range)
# day_range = []
# last_date = day
# day_range.append(day)
# # 将最后一个添加进去
# day_ranges.append(day_range)
#
# for day_range in day_ranges:
# if len(day_range) > 0:
# ret = download_futureoir_day_range(w, day_range, windcode, root_path)
# if ret == 0:
# print("下载数据为空,有可能才上市没多久,需要再下载")
# break
#
# print('处理完毕!可能有部分数据由于超时没有下载成功,可再运行一次脚本')
#
# Path: kquant_data/future/symbol.py
# def get_actvie_products_wind():
# _lst = get_actvie_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# def get_all_products_wind():
# _lst = get_all_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days=True) |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
同一品种的下载流程
"""
if __name__ == '__main__':
# 很多品种的2016-04-21没有数据,如TA.CZC\ZC.CZC\SR.CZC\OI.CZC\MA.CZC\RM.CZC\FG.CZC\CF.CZC\WH.CZC,都郑商所
# w = None
w.start()
# 最新的一天由于没有数据,会导致参与下载,如果中间某一天缺失,会导致大量下载数据,需要排除
date_str = (datetime.today() - timedelta(days=0)).strftime('%Y-%m-%d')
# date_str = datetime.today().strftime('%Y-%m-%d')
# IF一类的起始时间是从2010年开始,不能再从2009年开始遍历了
trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__)
# trading_days = trading_days['2016-01-01':date_str]
trading_days = trading_days['2017-10-01':date_str]
# 下载后存下
<|code_end|>
, determine the next line of code. You have imports:
from WindPy import w
from datetime import datetime,timedelta
from kquant_data.wind.tdays import read_tdays
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__
from kquant_data.wind_resume.wset import resume_download_futureoir
from kquant_data.future.symbol import get_actvie_products_wind,get_all_products_wind
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
import os
import numpy as np
import pandas as pd
and context (class names, function names, or code) available:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/config.py
# __CONFIG_TDAYS_SHFE_FILE__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'tdays', 'SHFE.csv')
#
# Path: kquant_data/wind_resume/wset.py
# def resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days):
# dir_path = os.path.join(root_path, windcode)
# os.makedirs(dir_path, exist_ok=True)
#
# # 从第一个有数据的部分开始第一条,这个需要根据自己的情况进行处理
# if adjust_trading_days:
# for dirpath, dirnames, filenames in os.walk(dir_path):
# for filename in filenames:
# trading_days = trading_days[filename[:10]:]
# break
#
# # 分成list套list,只要时间超长就分新的一组
# last_date = pd.Timestamp(1900, 1, 1).tz_localize(None)
# day_ranges = []
# day_range = []
# for day in trading_days['date']:
# path = os.path.join(root_path, windcode, day.strftime('%Y-%m-%d.csv'))
# if os.path.exists(path):
# continue
# else:
# if (day - last_date).days > 20 or len(day_range) >= 30:
# day_ranges.append(day_range)
# day_range = []
# last_date = day
# day_range.append(day)
# # 将最后一个添加进去
# day_ranges.append(day_range)
#
# for day_range in day_ranges:
# if len(day_range) > 0:
# ret = download_futureoir_day_range(w, day_range, windcode, root_path)
# if ret == 0:
# print("下载数据为空,有可能才上市没多久,需要再下载")
# break
#
# print('处理完毕!可能有部分数据由于超时没有下载成功,可再运行一次脚本')
#
# Path: kquant_data/future/symbol.py
# def get_actvie_products_wind():
# _lst = get_actvie_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# def get_all_products_wind():
# _lst = get_all_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | windcodes = get_all_products_wind() |
Given the following code snippet before the placeholder: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
同一品种的下载流程
"""
if __name__ == '__main__':
# 很多品种的2016-04-21没有数据,如TA.CZC\ZC.CZC\SR.CZC\OI.CZC\MA.CZC\RM.CZC\FG.CZC\CF.CZC\WH.CZC,都郑商所
# w = None
w.start()
# 最新的一天由于没有数据,会导致参与下载,如果中间某一天缺失,会导致大量下载数据,需要排除
date_str = (datetime.today() - timedelta(days=0)).strftime('%Y-%m-%d')
# date_str = datetime.today().strftime('%Y-%m-%d')
# IF一类的起始时间是从2010年开始,不能再从2009年开始遍历了
trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__)
# trading_days = trading_days['2016-01-01':date_str]
trading_days = trading_days['2017-10-01':date_str]
# 下载后存下
windcodes = get_all_products_wind()
<|code_end|>
, predict the next line using imports from the current file:
from WindPy import w
from datetime import datetime,timedelta
from kquant_data.wind.tdays import read_tdays
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__
from kquant_data.wind_resume.wset import resume_download_futureoir
from kquant_data.future.symbol import get_actvie_products_wind,get_all_products_wind
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
import os
import numpy as np
import pandas as pd
and context including class names, function names, and sometimes code from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/config.py
# __CONFIG_TDAYS_SHFE_FILE__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'tdays', 'SHFE.csv')
#
# Path: kquant_data/wind_resume/wset.py
# def resume_download_futureoir(w, trading_days, root_path, windcode, adjust_trading_days):
# dir_path = os.path.join(root_path, windcode)
# os.makedirs(dir_path, exist_ok=True)
#
# # 从第一个有数据的部分开始第一条,这个需要根据自己的情况进行处理
# if adjust_trading_days:
# for dirpath, dirnames, filenames in os.walk(dir_path):
# for filename in filenames:
# trading_days = trading_days[filename[:10]:]
# break
#
# # 分成list套list,只要时间超长就分新的一组
# last_date = pd.Timestamp(1900, 1, 1).tz_localize(None)
# day_ranges = []
# day_range = []
# for day in trading_days['date']:
# path = os.path.join(root_path, windcode, day.strftime('%Y-%m-%d.csv'))
# if os.path.exists(path):
# continue
# else:
# if (day - last_date).days > 20 or len(day_range) >= 30:
# day_ranges.append(day_range)
# day_range = []
# last_date = day
# day_range.append(day)
# # 将最后一个添加进去
# day_ranges.append(day_range)
#
# for day_range in day_ranges:
# if len(day_range) > 0:
# ret = download_futureoir_day_range(w, day_range, windcode, root_path)
# if ret == 0:
# print("下载数据为空,有可能才上市没多久,需要再下载")
# break
#
# print('处理完毕!可能有部分数据由于超时没有下载成功,可再运行一次脚本')
#
# Path: kquant_data/future/symbol.py
# def get_actvie_products_wind():
# _lst = get_actvie_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# def get_all_products_wind():
# _lst = get_all_products()
# __lst = [get_wind_code(x).upper() for x in _lst]
# return __lst
#
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | root_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir") |
Next line prediction: <|code_start|>
def process2(input_path, output_path, folder_name):
for dirpath, dirnames, filenames in os.walk(input_path):
dfs_long = None
dfs_short = None
for filename in filenames:
path = os.path.join(dirpath, filename)
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=['date'])
df.index = df['date']
col_name = filename[:-4]
col_long = df['long_position_increase']
col_long.name = col_name
dfs_long = pd.concat([dfs_long, col_long], axis=1)
col_short = df['short_position_increase']
col_short.name = col_name
dfs_short = pd.concat([dfs_short, col_short], axis=1)
path2 = os.path.join(output_path, '%s_long_position_increase.csv' % folder_name)
dfs_long.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
path2 = os.path.join(output_path, '%s_short_position_increase.csv' % folder_name)
dfs_short.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
if __name__ == '__main__':
member_name = '前二十名合计'
folder_name = 'top20'
<|code_end|>
. Use current file imports:
(import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__)
and context including class names, function names, or small code snippets from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_FUT_DATA_DIR__ = os.path.join(__CONFIG_H5_FUT_DIR__, 'data')
. Output only the next line. | input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir") |
Predict the next line for this snippet: <|code_start|>
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv')
date_index = read_data_dataframe(path)
# dates = list(date_index.index)
download_index_weight(w, trading_days, "000300.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
def download_000016(w, trading_days):
# 万得数据只到2009年4月1号,之前的数据没有
trading_days = trading_days['2009-04-01':date_str]
download_index_weight(w, trading_days, "000016.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
def download_000905(w, trading_days):
# 万得数据只到2009年4月1号,之前的数据没有
trading_days = trading_days['2007-01-31':date_str]
download_index_weight(w, trading_days, "000905.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
if __name__ == '__main__':
w.start()
date_str = datetime.today().strftime('%Y-%m-%d')
date_str = '2018-04-27'
<|code_end|>
with the help of current file imports:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and context from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
, which may contain function names, class names, or code. Output only the next line. | trading_days = read_tdays(__CONFIG_TDAYS_SSE_FILE__) |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
下载指数权重
万得中的中证指数每个月更新一次,用的是上月月末数据
如要实时更新,需要联系客户经理
可能有部分数据出现nan的情况,如纳入成份股时
"""
def download_000300(w, trading_days):
# 4月8号是指数的发布日期
trading_days = trading_days['2005-04-08':date_str]
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv')
<|code_end|>
using the current file's imports:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and any relevant context from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
. Output only the next line. | date_index = read_data_dataframe(path) |
Here is a snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
下载指数权重
万得中的中证指数每个月更新一次,用的是上月月末数据
如要实时更新,需要联系客户经理
可能有部分数据出现nan的情况,如纳入成份股时
"""
def download_000300(w, trading_days):
# 4月8号是指数的发布日期
trading_days = trading_days['2005-04-08':date_str]
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv')
date_index = read_data_dataframe(path)
# dates = list(date_index.index)
<|code_end|>
. Write the next line using the current file imports:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and context from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
, which may include functions, classes, or code. Output only the next line. | download_index_weight(w, trading_days, "000300.SH", __CONFIG_H5_STK_WEIGHT_DIR__) |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
下载指数权重
万得中的中证指数每个月更新一次,用的是上月月末数据
如要实时更新,需要联系客户经理
可能有部分数据出现nan的情况,如纳入成份股时
"""
def download_000300(w, trading_days):
# 4月8号是指数的发布日期
trading_days = trading_days['2005-04-08':date_str]
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
<|code_end|>
, generate the next line using the imports in this file:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and context (functions, classes, or occasionally code) from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
. Output only the next line. | path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv') |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
下载指数权重
万得中的中证指数每个月更新一次,用的是上月月末数据
如要实时更新,需要联系客户经理
可能有部分数据出现nan的情况,如纳入成份股时
"""
def download_000300(w, trading_days):
# 4月8号是指数的发布日期
trading_days = trading_days['2005-04-08':date_str]
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv')
date_index = read_data_dataframe(path)
# dates = list(date_index.index)
<|code_end|>
, determine the next line of code. You have imports:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and context (class names, function names, or code) available:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
. Output only the next line. | download_index_weight(w, trading_days, "000300.SH", __CONFIG_H5_STK_WEIGHT_DIR__) |
Based on the snippet: <|code_start|>
# 下载多天数据,以另一数据做为标准来下载
# 比如交易数据是10月8号,那就得取10月7号,然后再平移到8号,如果7号没有数据那就得9月30号
path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, 'test.csv')
date_index = read_data_dataframe(path)
# dates = list(date_index.index)
download_index_weight(w, trading_days, "000300.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
def download_000016(w, trading_days):
# 万得数据只到2009年4月1号,之前的数据没有
trading_days = trading_days['2009-04-01':date_str]
download_index_weight(w, trading_days, "000016.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
def download_000905(w, trading_days):
# 万得数据只到2009年4月1号,之前的数据没有
trading_days = trading_days['2007-01-31':date_str]
download_index_weight(w, trading_days, "000905.SH", __CONFIG_H5_STK_WEIGHT_DIR__)
if __name__ == '__main__':
w.start()
date_str = datetime.today().strftime('%Y-%m-%d')
date_str = '2018-04-27'
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from WindPy import w
from datetime import datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.xio.csv import read_data_dataframe
from kquant_data.wind_resume.wset import download_index_weight
from kquant_data.config import __CONFIG_H5_STK_FACTOR_DIR__, __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_TDAYS_SSE_FILE__
and context (classes, functions, sometimes code) from other files:
# Path: kquant_data/wind/tdays.py
# def read_tdays(path):
# try:
# df = pd.read_csv(path, parse_dates=True)
# except:
# return None
#
# df['date'] = pd.to_datetime(df['date'])
# df.index = df['date']
# return df
#
# Path: kquant_data/xio/csv.py
# def read_data_dataframe(path, sep=','):
# """
# 读取季报的公告日
# 注意:有些股票多个季度一起发,一般是公司出问题了,特别是600878,四个报告同一天发布
# 年报与一季报很有可能一起发
# :param path:
# :param sep:
# :return:
# """
# try:
# df = pd.read_csv(path, index_col=0, parse_dates=True, encoding='utf-8-sig', sep=sep)
# except (FileNotFoundError, OSError):
# return None
#
# return df
#
# Path: kquant_data/wind_resume/wset.py
# def download_index_weight(w, trading_days, wind_code, root_path):
# """
# 下载指数成份和权重
# :param w:
# :param trading_days:
# :param wind_code:
# :param root_path:
# :return:
# """
# df = trading_days
# df['date_str'] = trading_days['date'].astype(str)
#
# foldpath = os.path.join(root_path, wind_code)
# file_download_constituent(w, df['date_str'], foldpath, '.csv',
# sector=None, windcode=wind_code, field='wind_code,i_weight', is_indexconstituent=True)
#
# dst_path = os.path.join(root_path, "%s_move" % wind_code)
# move_constituent(foldpath, dst_path)
#
# Path: kquant_data/config.py
# __CONFIG_H5_STK_FACTOR_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'factor')
#
# __CONFIG_H5_STK_WEIGHT_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'indexconstituent')
#
# __CONFIG_TDAYS_SSE_FILE__ = os.path.join(__CONFIG_H5_STK_DIR__, 'tdays', 'SSE.csv')
. Output only the next line. | trading_days = read_tdays(__CONFIG_TDAYS_SSE_FILE__) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
兴业证券
利用期权市场进行择时之二:依据期权指标判断市场走势
"""
def main2():
# 获取期权基础信息文件
df_info = get_opt_info('510050.SH.csv')
df_info_call = df_info[df_info['call_or_put'] == 'C']
df_info_put = df_info[df_info['call_or_put'] == 'P']
df_info_call.reset_index(inplace=True)
df_info_put.reset_index(inplace=True)
path = os.path.join(__CONFIG_TDX_STK_DIR__, 'vipdoc', 'ds', 'lday')
wind_codes = list(set(df_info_call['wind_code']))
wind_codes.sort()
df_volume = None
for wind_code in wind_codes:
print(wind_code)
try:
<|code_end|>
. Use current file imports:
import os
import pandas as pd
from kquant_data.api import get_price
from kquant_data.option.info import get_opt_info
from kquant_data.config import __CONFIG_H5_OPT_DIR__, __CONFIG_TDX_STK_DIR__
and context (classes, functions, or code) from other files:
# Path: kquant_data/api.py
# def get_price(symbols, instrument_type, start_date=None, end_date=None, bar_size=86400, fields=None, path=None,
# new_symbols=None):
# """
# 从通达信中取数据,没有除权的可以直接读,但除权的需要拿到除权因子才能用
# 这样的话相当于日线需要转一次,所以可以在本地为日线存一次HDF5
# 通达信中目前只有日线、5分钟、1分钟
# :param symbols:
# :param instrument_type:
# :param start_date:
# :param end_date:
# :param bar_size:
# :param fields:
# :param path:
# :param new_symbols: 50ETF期权需要将数字转成符号,方便使用
# :return:
# """
# # 将str转成list
# if isinstance(symbols, str):
# symbols = [symbols]
# if isinstance(fields, str):
# fields = [fields]
# if new_symbols is None:
# new_symbols = symbols
# if isinstance(new_symbols, str):
# new_symbols = [new_symbols]
#
# _dict = collections.OrderedDict()
# _fields = None
# for idx, symbol in enumerate(symbols):
# code_market = split_by_dot(symbol)
# if len(code_market) == 2:
# code, market = code_market
# else:
# code, market = code_market[0], ''
#
# df = _get_date_from_file(symbol, market, code, bar_size, start_date, end_date, fields, path, instrument_type)
# _fields = df.columns
# _dict[new_symbols[idx]] = df
#
# # 只有一个合约,直接输出单个合约的表
# if len(_dict) == 1:
# return list(_dict.values())[0]
#
# # 只有一个字段时,输出每个合约的列表
# if len(_fields) == 1:
# dfs = None
# for (k, v) in _dict.items():
# if dfs is None:
# dfs = v
# else:
# dfs = pd.merge(dfs, v, left_index=True, right_index=True, how='outer')
# dfs.columns = list(_dict.keys())
# # 需要将不同合约的进行合并
# return dfs
#
# # 三维的,需要转换成Panel
# # 是否可以使用 pan = pan_.transpose(2,1,0),发现不能用,因为没有对齐
# _dict2 = collections.OrderedDict()
# for (k, v) in _dict.items():
# for c in v.columns:
# _df = pd.DataFrame(v[c])
# if c in _dict2:
# _dict2[c] = pd.merge(_dict2[c], _df, left_index=True, right_index=True, how='outer')
# else:
# _dict2[c] = _df
#
# for (k, v) in _dict2.items():
# v.columns = new_symbols
# # v.columns = list(_dict.keys())
#
# return pd.Panel(_dict2)
#
# Path: kquant_data/option/info.py
# def get_opt_info(filename):
# root_path = os.path.join(__CONFIG_H5_OPT_DIR__, 'optioncontractbasicinfo', filename)
# df_info = read_optioncontractbasicinfo(root_path)
# # 排序一下,方便显示,先按月份,然再换名后的月份
# df_info = df_info.sort_values(by=['limit_month', 'limit_month_m', 'call_or_put', 'exercise_price'])
# return df_info
#
# Path: kquant_data/config.py
# __CONFIG_H5_OPT_DIR__ = r'D:\DATA_OPT'
#
# __CONFIG_TDX_STK_DIR__ = r'D:\new_hbzq'
. Output only the next line. | df_volume_ = get_price(wind_code, path=path, instrument_type='option', |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
兴业证券
利用期权市场进行择时之二:依据期权指标判断市场走势
"""
def main2():
# 获取期权基础信息文件
<|code_end|>
, determine the next line of code. You have imports:
import os
import pandas as pd
from kquant_data.api import get_price
from kquant_data.option.info import get_opt_info
from kquant_data.config import __CONFIG_H5_OPT_DIR__, __CONFIG_TDX_STK_DIR__
and context (class names, function names, or code) available:
# Path: kquant_data/api.py
# def get_price(symbols, instrument_type, start_date=None, end_date=None, bar_size=86400, fields=None, path=None,
# new_symbols=None):
# """
# 从通达信中取数据,没有除权的可以直接读,但除权的需要拿到除权因子才能用
# 这样的话相当于日线需要转一次,所以可以在本地为日线存一次HDF5
# 通达信中目前只有日线、5分钟、1分钟
# :param symbols:
# :param instrument_type:
# :param start_date:
# :param end_date:
# :param bar_size:
# :param fields:
# :param path:
# :param new_symbols: 50ETF期权需要将数字转成符号,方便使用
# :return:
# """
# # 将str转成list
# if isinstance(symbols, str):
# symbols = [symbols]
# if isinstance(fields, str):
# fields = [fields]
# if new_symbols is None:
# new_symbols = symbols
# if isinstance(new_symbols, str):
# new_symbols = [new_symbols]
#
# _dict = collections.OrderedDict()
# _fields = None
# for idx, symbol in enumerate(symbols):
# code_market = split_by_dot(symbol)
# if len(code_market) == 2:
# code, market = code_market
# else:
# code, market = code_market[0], ''
#
# df = _get_date_from_file(symbol, market, code, bar_size, start_date, end_date, fields, path, instrument_type)
# _fields = df.columns
# _dict[new_symbols[idx]] = df
#
# # 只有一个合约,直接输出单个合约的表
# if len(_dict) == 1:
# return list(_dict.values())[0]
#
# # 只有一个字段时,输出每个合约的列表
# if len(_fields) == 1:
# dfs = None
# for (k, v) in _dict.items():
# if dfs is None:
# dfs = v
# else:
# dfs = pd.merge(dfs, v, left_index=True, right_index=True, how='outer')
# dfs.columns = list(_dict.keys())
# # 需要将不同合约的进行合并
# return dfs
#
# # 三维的,需要转换成Panel
# # 是否可以使用 pan = pan_.transpose(2,1,0),发现不能用,因为没有对齐
# _dict2 = collections.OrderedDict()
# for (k, v) in _dict.items():
# for c in v.columns:
# _df = pd.DataFrame(v[c])
# if c in _dict2:
# _dict2[c] = pd.merge(_dict2[c], _df, left_index=True, right_index=True, how='outer')
# else:
# _dict2[c] = _df
#
# for (k, v) in _dict2.items():
# v.columns = new_symbols
# # v.columns = list(_dict.keys())
#
# return pd.Panel(_dict2)
#
# Path: kquant_data/option/info.py
# def get_opt_info(filename):
# root_path = os.path.join(__CONFIG_H5_OPT_DIR__, 'optioncontractbasicinfo', filename)
# df_info = read_optioncontractbasicinfo(root_path)
# # 排序一下,方便显示,先按月份,然再换名后的月份
# df_info = df_info.sort_values(by=['limit_month', 'limit_month_m', 'call_or_put', 'exercise_price'])
# return df_info
#
# Path: kquant_data/config.py
# __CONFIG_H5_OPT_DIR__ = r'D:\DATA_OPT'
#
# __CONFIG_TDX_STK_DIR__ = r'D:\new_hbzq'
. Output only the next line. | df_info = get_opt_info('510050.SH.csv') |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
兴业证券
利用期权市场进行择时之二:依据期权指标判断市场走势
"""
def main2():
# 获取期权基础信息文件
df_info = get_opt_info('510050.SH.csv')
df_info_call = df_info[df_info['call_or_put'] == 'C']
df_info_put = df_info[df_info['call_or_put'] == 'P']
df_info_call.reset_index(inplace=True)
df_info_put.reset_index(inplace=True)
<|code_end|>
. Use current file imports:
import os
import pandas as pd
from kquant_data.api import get_price
from kquant_data.option.info import get_opt_info
from kquant_data.config import __CONFIG_H5_OPT_DIR__, __CONFIG_TDX_STK_DIR__
and context (classes, functions, or code) from other files:
# Path: kquant_data/api.py
# def get_price(symbols, instrument_type, start_date=None, end_date=None, bar_size=86400, fields=None, path=None,
# new_symbols=None):
# """
# 从通达信中取数据,没有除权的可以直接读,但除权的需要拿到除权因子才能用
# 这样的话相当于日线需要转一次,所以可以在本地为日线存一次HDF5
# 通达信中目前只有日线、5分钟、1分钟
# :param symbols:
# :param instrument_type:
# :param start_date:
# :param end_date:
# :param bar_size:
# :param fields:
# :param path:
# :param new_symbols: 50ETF期权需要将数字转成符号,方便使用
# :return:
# """
# # 将str转成list
# if isinstance(symbols, str):
# symbols = [symbols]
# if isinstance(fields, str):
# fields = [fields]
# if new_symbols is None:
# new_symbols = symbols
# if isinstance(new_symbols, str):
# new_symbols = [new_symbols]
#
# _dict = collections.OrderedDict()
# _fields = None
# for idx, symbol in enumerate(symbols):
# code_market = split_by_dot(symbol)
# if len(code_market) == 2:
# code, market = code_market
# else:
# code, market = code_market[0], ''
#
# df = _get_date_from_file(symbol, market, code, bar_size, start_date, end_date, fields, path, instrument_type)
# _fields = df.columns
# _dict[new_symbols[idx]] = df
#
# # 只有一个合约,直接输出单个合约的表
# if len(_dict) == 1:
# return list(_dict.values())[0]
#
# # 只有一个字段时,输出每个合约的列表
# if len(_fields) == 1:
# dfs = None
# for (k, v) in _dict.items():
# if dfs is None:
# dfs = v
# else:
# dfs = pd.merge(dfs, v, left_index=True, right_index=True, how='outer')
# dfs.columns = list(_dict.keys())
# # 需要将不同合约的进行合并
# return dfs
#
# # 三维的,需要转换成Panel
# # 是否可以使用 pan = pan_.transpose(2,1,0),发现不能用,因为没有对齐
# _dict2 = collections.OrderedDict()
# for (k, v) in _dict.items():
# for c in v.columns:
# _df = pd.DataFrame(v[c])
# if c in _dict2:
# _dict2[c] = pd.merge(_dict2[c], _df, left_index=True, right_index=True, how='outer')
# else:
# _dict2[c] = _df
#
# for (k, v) in _dict2.items():
# v.columns = new_symbols
# # v.columns = list(_dict.keys())
#
# return pd.Panel(_dict2)
#
# Path: kquant_data/option/info.py
# def get_opt_info(filename):
# root_path = os.path.join(__CONFIG_H5_OPT_DIR__, 'optioncontractbasicinfo', filename)
# df_info = read_optioncontractbasicinfo(root_path)
# # 排序一下,方便显示,先按月份,然再换名后的月份
# df_info = df_info.sort_values(by=['limit_month', 'limit_month_m', 'call_or_put', 'exercise_price'])
# return df_info
#
# Path: kquant_data/config.py
# __CONFIG_H5_OPT_DIR__ = r'D:\DATA_OPT'
#
# __CONFIG_TDX_STK_DIR__ = r'D:\new_hbzq'
. Output only the next line. | path = os.path.join(__CONFIG_TDX_STK_DIR__, 'vipdoc', 'ds', 'lday') |
Given snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
导出通达信的股本变迁
"""
if __name__ == '__main__':
result = GbbqReader().get_df(__CONFIG_TDX_GBBQ_FILE__)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
from kquant_data.config import __CONFIG_TDX_GBBQ_FILE__, __CONFIG_H5_STK_DIR__
from kquant_data.stock.gbbq import GbbqReader
and context:
# Path: kquant_data/config.py
# __CONFIG_TDX_GBBQ_FILE__ = os.path.join(__CONFIG_TDX_STK_DIR__, 'T0002', 'hq_cache', 'gbbq')
#
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
which might include code, classes, or functions. Output only the next line. | path = os.path.join(__CONFIG_H5_STK_DIR__, 'gbbq.csv') |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
将新生成的5分钟数据与老的5分钟数据进行合并
合并出来的数据只用于生成5分钟的单文件数据时使用,其它情况下不使用
"""
def _export_data(rule, _input, output, instruments, i):
t = instruments.iloc[i]
print("%d %s" % (i, t['local_symbol']))
<|code_end|>
using the current file's imports:
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_STK_DIR__, __CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.stock.stock import merge_adjust_factor, bars_to_h5
from kquant_data.processing.utils import filter_dataframe, multiprocessing_convert
from kquant_data.stock.symbol import get_folder_symbols
and any relevant context from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# __CONFIG_H5_STK_DIVIDEND_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'dividend')
#
# Path: kquant_data/stock/stock.py
# def sort_dividend(divs):
# def factor(daily, divs, ndigits):
# def adjust(df, adjust_type=None):
# def merge_adjust_factor(df, div):
# def read_h5_tdx(market, code, bar_size, h5_path, tdx_path, div_path):
# def _export_dividend_from_data(tdx_root, dividend_output, daily_output, data):
# def export_dividend_daily_dzh(dzh_input, tdx_root, dividend_output, daily_output):
# def export_dividend_daily_gbbq(gbbq_input, tdx_root, dividend_output, daily_output):
#
# Path: kquant_data/processing/utils.py
# def filter_dataframe(df, index_name=None, start_date=None, end_date=None, fields=None):
# if index_name is not None:
# df['index_datetime'] = df[index_name].apply(yyyyMMddHHmm_2_datetime)
# df = df.set_index('index_datetime')
# # 过滤时间
# if start_date is not None or end_date is not None:
# df = df[start_date:end_date]
# # 过滤字段
# if fields is not None:
# df = df[fields]
# return df
#
# def multiprocessing_convert(multi, rule, _input, output, instruments, func_convert):
# tic()
#
# if multi:
# pool_size = multiprocessing.cpu_count() - 1
# pool = multiprocessing.Pool(processes=pool_size)
# func = partial(func_convert, rule, _input, output, instruments)
# pool_outputs = pool.map(func, range(len(instruments)))
# print('Pool:', pool_outputs)
# else:
# for i in range(len(instruments)):
# func_convert(rule, _input, output, instruments, i)
#
# toc()
#
# Path: kquant_data/stock/symbol.py
# def get_folder_symbols(folder, sub_folder):
# path = os.path.join(folder, sub_folder, 'sh')
# df_sh = get_symbols_from_path(path, "SSE")
# path = os.path.join(folder, sub_folder, 'sz')
# df_sz = get_symbols_from_path(path, "SZSE")
# df = pd.concat([df_sh, df_sz])
#
# return df
. Output only the next line. | path_new = os.path.join(__CONFIG_H5_STK_DIR__, _input, t['market'], "%s.h5" % t['local_symbol']) |
Predict the next line after this snippet: <|code_start|>
def _export_data(rule, _input, output, instruments, i):
t = instruments.iloc[i]
print("%d %s" % (i, t['local_symbol']))
path_new = os.path.join(__CONFIG_H5_STK_DIR__, _input, t['market'], "%s.h5" % t['local_symbol'])
# 这里不应当出错,因为之前已经导出过数据到
df_new = pd.read_hdf(path_new)
if df_new is None:
return None
df_new = filter_dataframe(df_new, 'DateTime', None, None, None)
path_old = os.path.join(__CONFIG_H5_STK_DIR__, output, t['market'], "%s.h5" % t['local_symbol'])
try:
# 没有以前的数据
df_old = pd.read_hdf(path_old)
if df_old is None:
df = df_new
else:
df_old = filter_dataframe(df_old, 'DateTime', None, None, None)
# 数据合并,不能简单的合并
# 需要保留老的,新的重复的地方忽略
last_ts = df_old.index[-1]
df_new2 = df_new[last_ts:][1:]
df = pd.concat([df_old, df_new2])
except:
df = df_new
# 有可能没有除权文件
<|code_end|>
using the current file's imports:
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_STK_DIR__, __CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.stock.stock import merge_adjust_factor, bars_to_h5
from kquant_data.processing.utils import filter_dataframe, multiprocessing_convert
from kquant_data.stock.symbol import get_folder_symbols
and any relevant context from other files:
# Path: kquant_data/config.py
# __CONFIG_H5_STK_DIR__ = r'D:\DATA_STK'
#
# __CONFIG_H5_STK_DIVIDEND_DIR__ = os.path.join(__CONFIG_H5_STK_DIR__, 'dividend')
#
# Path: kquant_data/stock/stock.py
# def sort_dividend(divs):
# def factor(daily, divs, ndigits):
# def adjust(df, adjust_type=None):
# def merge_adjust_factor(df, div):
# def read_h5_tdx(market, code, bar_size, h5_path, tdx_path, div_path):
# def _export_dividend_from_data(tdx_root, dividend_output, daily_output, data):
# def export_dividend_daily_dzh(dzh_input, tdx_root, dividend_output, daily_output):
# def export_dividend_daily_gbbq(gbbq_input, tdx_root, dividend_output, daily_output):
#
# Path: kquant_data/processing/utils.py
# def filter_dataframe(df, index_name=None, start_date=None, end_date=None, fields=None):
# if index_name is not None:
# df['index_datetime'] = df[index_name].apply(yyyyMMddHHmm_2_datetime)
# df = df.set_index('index_datetime')
# # 过滤时间
# if start_date is not None or end_date is not None:
# df = df[start_date:end_date]
# # 过滤字段
# if fields is not None:
# df = df[fields]
# return df
#
# def multiprocessing_convert(multi, rule, _input, output, instruments, func_convert):
# tic()
#
# if multi:
# pool_size = multiprocessing.cpu_count() - 1
# pool = multiprocessing.Pool(processes=pool_size)
# func = partial(func_convert, rule, _input, output, instruments)
# pool_outputs = pool.map(func, range(len(instruments)))
# print('Pool:', pool_outputs)
# else:
# for i in range(len(instruments)):
# func_convert(rule, _input, output, instruments, i)
#
# toc()
#
# Path: kquant_data/stock/symbol.py
# def get_folder_symbols(folder, sub_folder):
# path = os.path.join(folder, sub_folder, 'sh')
# df_sh = get_symbols_from_path(path, "SSE")
# path = os.path.join(folder, sub_folder, 'sz')
# df_sz = get_symbols_from_path(path, "SZSE")
# df = pd.concat([df_sh, df_sz])
#
# return df
. Output only the next line. | div_path = os.path.join(__CONFIG_H5_STK_DIVIDEND_DIR__, "%s.h5" % t['local_symbol']) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.