| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Given snippet: <|code_start|> pcrbdata_offset = kdbg.get_field_offset("nt", KPCR, "PrcbData.VectorToInterruptObject")
except WindowsError:
pcrbdata_offset = 0
addr_nt_KiStartUnexpectedRange = kdbg.get_symbol_offset("nt!KiStartUnexpectedRange")
addr_nt_KiEndUnexpectedRange = kdbg.get_symbol_offset("nt!KiEndUnexpectedRange")
if pcrbdata_offset == 0:
get_kinterrupt = lambda kdbg, addr, kpcr, i: get_kinterrupt_64(kdbg, addr)
else:
get_kinterrupt = lambda kdbg, addr, kpcr, i: get_kinterrupt_32(kdbg, kpcr, i)
kpcr_addr = kdbg.read_processor_system_data(num_proc, DEBUG_DATA_KPCR_OFFSET)
idt_base = kdbg.read_ptr(kpcr_addr + idt_base_offset)
for i in xrange(0, 0xFF):
idt32 = IDT32()
kdbg.read_virtual_memory_into(idt_base + i * sizeof(IDT32), idt32)
if (idt32.ExtendedOffset == 0 or idt32.Offset == 0):
l_idt.append((None, None))
continue
addr = (idt32.ExtendedOffset << 16) | idt32.Offset
if (addr < addr_nt_KiStartUnexpectedRange or addr > addr_nt_KiEndUnexpectedRange):
l_idt.append((addr, get_kinterrupt(kdbg, addr, kpcr_addr, i)))
else:
addr_kinterrupt = get_kinterrupt(kdbg, addr, kpcr_addr, i)
if addr_kinterrupt is None:
addr = None
l_idt.append((addr, addr_kinterrupt))
return l_idt
if __name__ == '__main__':
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import ctypes
import os
import windows
from windows.generated_def.winstructs import *
from dbginterface import LocalKernelDebugger
and context:
# Path: dbginterface.py
# def LocalKernelDebugger(quiet=True):
# """| Check that all conditions to Local Kernel Debugging are met
# | and return a LKD (subclass of :class:`LocalKernelDebuggerBase`
# """
# if not windows.utils.check_debug():
# raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging on kernel not in DEBUG mode")
# if not windows.utils.check_is_elevated():
# raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging from non-Admin process")
# windows.utils.enable_privilege(SE_DEBUG_NAME, True)
# if windows.system.bitness == 64:
# if windows.current_process.is_wow_64:
# raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging from SysWow64 process (please launch from 64bits python)")
# return LocalKernelDebugger64(quiet)
# return LocalKernelDebugger32(quiet)
which might include code, classes, or functions. Output only the next line. | kdbg = LocalKernelDebugger() |
Based on the snippet: <|code_start|>
def minus_one_error_check(func_name, result, func, args):
if result == -1:
raise Kernel32Error(func_name)
return args
def kernel32_error_check(func_name, result, func, args):
"""raise Kernel32Error if result is 0"""
if not result:
raise Kernel32Error(func_name)
return args
def kernel32_zero_check(func_name, result, func, args):
"""raise Kernel32Error if result is NOT 0"""
if result:
raise Kernel32Error(func_name)
return args
def iphlpapi_error_check(func_name, result, func, args):
"""raise IphlpapiError if result is NOT 0"""
if result:
raise IphlpapiError(func_name, result)
return args
def error_ntstatus(func_name, result, func, args):
if result:
<|code_end|>
, predict the immediate next line with the help of imports:
import ctypes
import functools
import windows.generated_def.winfuncs as winfuncs
from ctypes.wintypes import *
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
from windows.generated_def.ntstatus import NtStatusException
from windows.dbgprint import dbgprint
and context (classes, functions, sometimes code) from other files:
# Path: windows/generated_def/ntstatus.py
# class NtStatusException(Exception):
# ALL_STATUS = {}
# def __init__(self , code):
# try:
# x = self.ALL_STATUS[code]
# except KeyError:
# x = (code, 'UNKNOW_ERROR', 'Error non documented in ntstatus.py')
# self.code = x[0]
# self.name = x[1]
# self.descr = x[2]
#
# return super(NtStatusException, self).__init__(*x)
#
# def __str__(self):
# return "{e.name}(0x{e.code:x}): {e.descr}".format(e=self)
#
# @classmethod
# def register_ntstatus(cls, code, name, descr):
# if code in cls.ALL_STATUS:
# return # Use the first def
# cls.ALL_STATUS[code] = (code, name, descr)
#
# Path: windows/dbgprint.py
# def get_stack_func_name(lvl):
# def do_dbgprint(msg, type=None):
# def do_nothing(*args, **kwargs):
# def parse_option(s):
. Output only the next line. | raise NtStatusException(result & 0xffffffff) |
Predict the next line after this snippet: <|code_start|> default_error_check = staticmethod(kernel32_error_check)
class IphlpapiProxy(ApiProxy):
APIDLL = iphlpapi
default_error_check = staticmethod(iphlpapi_error_check)
class NtdllProxy(ApiProxy):
APIDLL = ntdll
default_error_check = staticmethod(kernel32_zero_check)
class OptionalExport(object):
"""used 'around' a Proxy decorator
Should be used for export that are not available everywhere (ntdll internals | 32/64 bits stuff)
If the export is not found the function will be None
Example:
@OptionalExport(NtdllProxy('NtWow64ReadVirtualMemory64'))
def NtWow64ReadVirtualMemory64(...)
...
"""
def __init__(self, subdecorator):
self.subdecorator = subdecorator
def __call__(self, f):
try:
return self.subdecorator(f)
except ExportNotFound as e:
<|code_end|>
using the current file's imports:
import ctypes
import functools
import windows.generated_def.winfuncs as winfuncs
from ctypes.wintypes import *
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
from windows.generated_def.ntstatus import NtStatusException
from windows.dbgprint import dbgprint
and any relevant context from other files:
# Path: windows/generated_def/ntstatus.py
# class NtStatusException(Exception):
# ALL_STATUS = {}
# def __init__(self , code):
# try:
# x = self.ALL_STATUS[code]
# except KeyError:
# x = (code, 'UNKNOW_ERROR', 'Error non documented in ntstatus.py')
# self.code = x[0]
# self.name = x[1]
# self.descr = x[2]
#
# return super(NtStatusException, self).__init__(*x)
#
# def __str__(self):
# return "{e.name}(0x{e.code:x}): {e.descr}".format(e=self)
#
# @classmethod
# def register_ntstatus(cls, code, name, descr):
# if code in cls.ALL_STATUS:
# return # Use the first def
# cls.ALL_STATUS[code] = (code, name, descr)
#
# Path: windows/dbgprint.py
# def get_stack_func_name(lvl):
# def do_dbgprint(msg, type=None):
# def do_nothing(*args, **kwargs):
# def parse_option(s):
. Output only the next line. | dbgprint("Export <{e.func_name}> not found in <{e.api_name}>".format(e=e), "EXPORTNOTFOUND") |
Here is a snippet: <|code_start|>"""Basic contact management functions.
Contacts are linked to monitors and are used to determine where to send
alerts for monitors.
Contacts are basic name/email/phone sets.
Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
<|code_end|>
. Write the next line using the current file imports:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
, which may include functions, classes, or code. Output only the next line. | async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str], |
Given the code snippet: <|code_start|>"""Basic contact management functions.
Contacts are linked to monitors and are used to determine where to send
alerts for monitors.
Contacts are basic name/email/phone sets.
Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],
phone: Optional[str], active: bool) -> str:
"""Add a contact to the database."""
q = """insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)"""
q_args = (name, email, phone, active)
contact_id = await dbcon.operation(q, q_args)
return contact_id
async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:
"""Update a contacts information in the database.
Data is a dict with name/email/phone/active values that
will be updated.
"""
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | async def _run(cur: Cursor) -> None: |
Using the snippet: <|code_start|>Contacts are linked to monitors and are used to determine where to send
alerts for monitors.
Contacts are basic name/email/phone sets.
Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],
phone: Optional[str], active: bool) -> str:
"""Add a contact to the database."""
q = """insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)"""
q_args = (name, email, phone, active)
contact_id = await dbcon.operation(q, q_args)
return contact_id
async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:
"""Update a contacts information in the database.
Data is a dict with name/email/phone/active values that
will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'email', 'phone', 'active']:
<|code_end|>
, determine the next line of code. You have imports:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (class names, function names, or code) available:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | raise errors.IrisettError('invalid contact key %s' % key) |
Given the code snippet: <|code_start|> return contact_group_id
async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:
"""Update a contact groups information in the database.
Data is a dict with name/active values that will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contact_groups set %s=%%s where id=%%s""" % key
q_args = (value, contact_group_id)
await cur.execute(q, q_args)
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
await dbcon.transact(_run)
async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:
"""Remove a contact group from the database."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
q = """delete from contact_groups where id=%s"""
await dbcon.operation(q, (contact_group_id,))
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | async def get_all_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]: |
Continue the code snippet: <|code_start|>Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],
phone: Optional[str], active: bool) -> str:
"""Add a contact to the database."""
q = """insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)"""
q_args = (name, email, phone, active)
contact_id = await dbcon.operation(q, q_args)
return contact_id
async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:
"""Update a contacts information in the database.
Data is a dict with name/email/phone/active values that
will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'email', 'phone', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contacts set %s=%%s where id=%%s""" % key
q_args = (value, contact_id)
await cur.execute(q, q_args)
<|code_end|>
. Use current file imports:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (classes, functions, or code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await contact_exists(dbcon, contact_id): |
Given the code snippet: <|code_start|> left join contact_groups on contact_groups.id=monitor_group_contact_groups.contact_group_id
left join contact_group_contacts on contact_group_contacts.contact_group_id=contact_groups.id
left join contacts on contacts.id=contact_group_contacts.contact_id
where monitor_group_active_monitors.active_monitor_id=%s
and contact_groups.active=true
and contacts.active=true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def get_contact_dict_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Dict[str, set]:
"""Get all contact addresses/numbers for a specific active monitor.
Return: Dict[str, Set(str)] for 'email' and 'phone'.
"""
ret = {
'email': set(),
'phone': set(),
} # type: Dict[str, set]
contacts = await get_all_contacts_for_active_monitor(dbcon, monitor_id)
for contact in contacts:
if contact.email:
ret['email'].add(contact.email)
if contact.phone:
ret['phone'].add(contact.phone)
return ret
async def add_contact_to_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:
"""Connect a contact and an active monitor."""
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await active_monitor_exists(dbcon, monitor_id): |
Using the snippet: <|code_start|>async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:
"""Remove a contact from the database."""
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
q = """delete from contacts where id=%s"""
await dbcon.operation(q, (contact_id,))
async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:
"""Add a contact group to the database."""
q = """insert into contact_groups (name, active) values (%s, %s)"""
q_args = (name, active)
contact_group_id = await dbcon.operation(q, q_args)
return contact_group_id
async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:
"""Update a contact groups information in the database.
Data is a dict with name/active values that will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contact_groups set %s=%%s where id=%%s""" % key
q_args = (value, contact_group_id)
await cur.execute(q, q_args)
<|code_end|>
, determine the next line of code. You have imports:
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (class names, function names, or code) available:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await contact_group_exists(dbcon, contact_group_id): |
Based on the snippet: <|code_start|>
CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
data = {
'messages': [],
} # type: Dict[str, List]
for recipient in recipients:
data['messages'].append({
'source': 'python',
'from': sender,
'body': msg[:140],
'to': recipient,
'schedule': ''
})
try:
async with aiohttp.ClientSession(headers={'Content-Type': 'application/json'},
auth=aiohttp.BasicAuth(username, api_key)) as session:
async with session.post(CLICKSEND_URL, data=json.dumps(data), timeout=30) as resp:
if resp.status != 200:
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import Dict, Any, Iterable, Optional, List
from irisett import (
log
)
import aiohttp
import json
import jinja2
and context (classes, functions, sometimes code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.msg('Error sending clicksend sms notification: http status %s' % (str(resp.status)), |
Predict the next line after this snippet: <|code_start|>
async def send_slack_notification(url: str, attachments: List[Dict]):
data = {
'attachments': attachments
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(url, data=json.dumps(data), timeout=30) as resp:
if resp.status != 200:
<|code_end|>
using the current file's imports:
from typing import List, Dict, Optional, Any
from irisett import (
log
)
import aiohttp
import json
import jinja2
and any relevant context from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.msg('Error sending slack notification: http status %s' % (str(resp.status)), |
Given the code snippet: <|code_start|>
# noinspection PyMethodMayBeStatic
class NotificationManager:
def __init__(self, config: Any, *, loop: asyncio.AbstractEventLoop=None) -> None:
self.loop = loop or asyncio.get_event_loop()
if not config:
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | log.msg('Missing config section, no alert notification will be sent', 'NOTIFICATIONS') |
Given the code snippet: <|code_start|>
# noinspection PyMethodMayBeStatic
class NotificationManager:
def __init__(self, config: Any, *, loop: asyncio.AbstractEventLoop=None) -> None:
self.loop = loop or asyncio.get_event_loop()
if not config:
log.msg('Missing config section, no alert notification will be sent', 'NOTIFICATIONS')
self.http_settings = None
self.email_settings = None
self.sms_settings = None
self.slack_settings = None
else:
self.http_settings = http.parse_settings(config)
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | self.email_settings = email.parse_settings(config) |
Given snippet: <|code_start|>
# noinspection PyMethodMayBeStatic
class NotificationManager:
def __init__(self, config: Any, *, loop: asyncio.AbstractEventLoop=None) -> None:
self.loop = loop or asyncio.get_event_loop()
if not config:
log.msg('Missing config section, no alert notification will be sent', 'NOTIFICATIONS')
self.http_settings = None
self.email_settings = None
self.sms_settings = None
self.slack_settings = None
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and context:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
which might include code, classes, or functions. Output only the next line. | self.http_settings = http.parse_settings(config) |
Predict the next line after this snippet: <|code_start|>
# noinspection PyMethodMayBeStatic
class NotificationManager:
def __init__(self, config: Any, *, loop: asyncio.AbstractEventLoop=None) -> None:
self.loop = loop or asyncio.get_event_loop()
if not config:
log.msg('Missing config section, no alert notification will be sent', 'NOTIFICATIONS')
self.http_settings = None
self.email_settings = None
self.sms_settings = None
self.slack_settings = None
else:
self.http_settings = http.parse_settings(config)
self.email_settings = email.parse_settings(config)
<|code_end|>
using the current file's imports:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and any relevant context from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | self.sms_settings = sms.parse_settings(config) |
Given the code snippet: <|code_start|> self.slack_settings = None
else:
self.http_settings = http.parse_settings(config)
self.email_settings = email.parse_settings(config)
self.sms_settings = sms.parse_settings(config)
self.slack_settings = slack.parse_settings(config)
async def send_notification(self, recipient_dict: Dict[str, Any], tmpl_args: Dict[str, Any]) -> bool:
email_recipients = list(recipient_dict['email'])
sms_recipients = list(recipient_dict['phone'])
if email_recipients and self.email_settings:
await email.send_alert_notification(self.loop, self.email_settings, email_recipients, tmpl_args)
if sms_recipients and self.sms_settings:
await sms.send_alert_notification(self.sms_settings, sms_recipients, tmpl_args)
if self.http_settings:
await http.send_alert_notification(self.http_settings, email_recipients, sms_recipients, tmpl_args)
if self.slack_settings:
await slack.send_alert_notification(self.slack_settings, tmpl_args)
return True
async def send_email(self, recipients: Iterable[str], subject: str, body: str):
if not self.email_settings:
return
await email.send_email(self.loop, self.email_settings['sender'], recipients, subject, body,
self.email_settings['server'])
async def send_sms(self, recipients: Iterable[str], msg: str):
if not self.sms_settings:
return
if self.sms_settings['provider'] == 'clicksend':
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | await clicksend.send_sms(recipients, msg, self.sms_settings['username'], self.sms_settings['api-key'], |
Here is a snippet: <|code_start|>
# noinspection PyMethodMayBeStatic
class NotificationManager:
def __init__(self, config: Any, *, loop: asyncio.AbstractEventLoop=None) -> None:
self.loop = loop or asyncio.get_event_loop()
if not config:
log.msg('Missing config section, no alert notification will be sent', 'NOTIFICATIONS')
self.http_settings = None
self.email_settings = None
self.sms_settings = None
self.slack_settings = None
else:
self.http_settings = http.parse_settings(config)
self.email_settings = email.parse_settings(config)
self.sms_settings = sms.parse_settings(config)
<|code_end|>
. Write the next line using the current file imports:
from typing import Dict, Iterable, Any, List
from irisett import (
log,
)
from irisett.notify import (
email,
http,
sms,
clicksend,
slack,
)
import asyncio
and context from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/email.py
# async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
# subject: str, body: str, server: str='localhost') -> None:
# async def send_alert_notification(
# loop: asyncio.AbstractEventLoop, settings: Dict[str, Any],
# recipients: Iterable[str], tmpl_args: Dict[str, Any]) -> None:
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/http.py
# async def send_http_notification(url: str, in_data: Any):
# async def send_alert_notification(settings: Dict[str, Any], email_recipients: Iterable[str],
# sms_recipients: Iterable[str], tmpl_args: Dict[str, Any]):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/sms.py
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
# async def send_alert_notification(settings: Dict[str, Any], recipients: Iterable[str], tmpl_args: Dict[str, Any]):
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
#
# Path: irisett/notify/slack.py
# async def send_slack_notification(url: str, attachments: List[Dict]):
# async def send_alert_notification(settings: Dict, tmpl_args: Dict):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
, which may include functions, classes, or code. Output only the next line. | self.slack_settings = slack.parse_settings(config) |
Given the code snippet: <|code_start|> raise MonitorFailedError(std_data)
text, perf = parse_plugin_output(std_data)
if proc.returncode not in [STATUS_OK, STATUS_WARNING]:
raise MonitorFailedError(text)
return text, perf
def parse_plugin_output(output: Union[str, bytes]) -> Tuple[str, List[str]]:
"""Parse nagios output.
Splits the data into a text string and performance data.
"""
output = decode_plugin_output(output)
if '|' not in output:
text = output
perf = [] # type: List[str]
else:
text, _perf = output.split('|', 1)
perf = _perf.split('|')
text = text.strip()
return text, perf
def decode_plugin_output(output: Union[str, bytes]) -> str:
"""Decode nagios output from latin-1."""
try:
if type(output) == bytes:
output = cast(bytes, output)
output = output.decode('latin-1', 'replace')
except Exception as e:
<|code_end|>
, generate the next line using the imports in this file:
from typing import List, Tuple, Union, cast
from irisett import log
import asyncio.subprocess
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.debug('nagios.encode_monitor_output: error: %s' % str(e)) |
Using the snippet: <|code_start|>
async def send_http_notification(url: str, in_data: Any):
out_data = json.dumps(in_data)
try:
async with aiohttp.ClientSession() as session:
async with session.post(url, data=out_data, timeout=10) as resp:
if resp.status != 200:
<|code_end|>
, determine the next line of code. You have imports:
from typing import Any, Optional, Dict, Iterable
from irisett import (
log
)
import aiohttp
import json
and context (class names, function names, or code) available:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.msg('Error sending http notification: http status %s' % (str(resp.status)), |
Next line prediction: <|code_start|>"""Websocket proxy for irisett events.
Setup a websocket listener that sends irisett events over the websocket
as they arrive.
"""
class WSEventProxy:
def __init__(self, request: web.Request) -> None:
self.request = request
self.ws = web.WebSocketResponse()
self.running = False
self.client_started = False
self.listener = None # type: Optional[event.EventListener]
async def run(self) -> None:
await self.ws.prepare(self.request)
self.running = True
<|code_end|>
. Use current file imports:
(from typing import Any, Optional
from aiohttp import web
from irisett import (
event,
)
import asyncio
import aiohttp
import json)
and context including class names, function names, or small code snippets from other files:
# Path: irisett/event.py
# class EventListener:
# class EventTracer:
# def __init__(self, tracer: 'EventTracer', callback: Callable, *,
# event_filter: Optional[List[str]] = None,
# active_monitor_filter: Optional[List[Union[str, int]]] = None) -> None:
# def set_event_filter(self, filter: Optional[List]) -> None:
# def set_active_monitor_filter(self, filter: Optional[List]) -> None:
# def _parse_active_monitor_filter(filter: Optional[List]) -> Any:
# def _parse_filter_list(filter: Optional[List]) -> Any:
# def wants_event(self, event_name: str, args: Dict) -> bool:
# def __init__(self) -> None:
# def listen(self, callback: Callable, *,
# event_filter: Optional[List[str]] = None,
# active_monitor_filter: Optional[List[Union[str, int]]] = None) -> EventListener:
# def stop_listening(self, listener: EventListener) -> None:
# def running(self, event_name: str, **kwargs: Any) -> None:
. Output only the next line. | self.listener = event.listen(self._handle_events) |
Predict the next line for this snippet: <|code_start|> If a list of filter arguments are passed in convert it to a set
for increased lookup speed and reduced size.
"""
ret = None
if filter:
ret = set(filter)
return ret
def wants_event(self, event_name: str, args: Dict) -> bool:
"""Check if an event matches a listeners filters.
If it does not, the listener will not receive the event.
"""
ret = True
if self.event_filter and event_name not in self.event_filter:
ret = False
elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \
and args['monitor'].id not in self.active_monitor_filter:
ret = False
return ret
class EventTracer:
"""The main event tracer class.
Creates listeners and receives events. When an event is received it
is sent to all listeners (that matches the events filters.
"""
def __init__(self) -> None:
self.listeners = set() # type: Set[EventListener]
<|code_end|>
with the help of current file imports:
from typing import Callable, Dict, Optional, List, Set, Union, Any
from irisett import (
stats,
log,
)
import time
import asyncio
and context from other files:
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
, which may contain function names, class names, or code. Output only the next line. | stats.set('num_listeners', 0, 'EVENT') |
Continue the code snippet: <|code_start|> active_monitor_filter can be a list of active monitor ids that must match.
"""
stats.inc('num_listeners', 'EVENT')
listener = EventListener(self, callback, event_filter=event_filter, active_monitor_filter=active_monitor_filter)
self.listeners.add(listener)
return listener
def stop_listening(self, listener: EventListener) -> None:
"""Remove a callback from the listener list."""
if listener in self.listeners:
stats.dec('num_listeners', 'EVENT')
self.listeners.remove(listener)
def running(self, event_name: str, **kwargs: Any) -> None:
"""An event is running.
Listener callbacks will be called with:
callback(listener-dict, event-name, timestamp, arg-dict)
"""
stats.inc('events_fired', 'EVENT')
if not self.listeners:
return
timestamp = time.time()
for listener in self.listeners:
if not listener.wants_event(event_name, kwargs):
continue
try:
t = listener.callback(listener, event_name, timestamp, kwargs)
asyncio.ensure_future(t)
except Exception as e:
<|code_end|>
. Use current file imports:
from typing import Callable, Dict, Optional, List, Set, Union, Any
from irisett import (
stats,
log,
)
import time
import asyncio
and context (classes, functions, or code) from other files:
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.msg('Failed to run event listener callback: %s' % str(e)) |
Using the snippet: <|code_start|>"""Webapi middleware helpers.
Middleware for common actions, authentication etc.
"""
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
stats.inc('num_calls', 'WEBAPI')
<|code_end|>
, determine the next line of code. You have imports:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webapi import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and context (class names, function names, or code) available:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webapi/errors.py
# class WebAPIError(Exception):
# class InvalidData(WebAPIError):
# class PermissionDenied(WebAPIError):
# class NotFound(WebAPIError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | log.msg('Received request: %s' % request, 'WEBAPI') |
Continue the code snippet: <|code_start|>"""Webapi middleware helpers.
Middleware for common actions, authentication etc.
"""
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
<|code_end|>
. Use current file imports:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webapi import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and context (classes, functions, or code) from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webapi/errors.py
# class WebAPIError(Exception):
# class InvalidData(WebAPIError):
# class PermissionDenied(WebAPIError):
# class NotFound(WebAPIError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | stats.inc('num_calls', 'WEBAPI') |
Predict the next line after this snippet: <|code_start|>
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
stats.inc('num_calls', 'WEBAPI')
log.msg('Received request: %s' % request, 'WEBAPI')
return await handler(request)
return middleware_handler
async def basic_auth_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Authentication.
Uses HTTP basic auth to check that requests are including the required
username and password.
"""
async def middleware_handler(request: web.Request) -> web.Response:
ok = False
auth_token = request.headers.get('Authorization')
if auth_token and auth_token.startswith('Basic '):
auth_token = auth_token[6:]
try:
auth_bytes = base64.b64decode(auth_token) # type: Optional[bytes]
except binascii.Error:
auth_bytes = None
if auth_bytes:
<|code_end|>
using the current file's imports:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webapi import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and any relevant context from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webapi/errors.py
# class WebAPIError(Exception):
# class InvalidData(WebAPIError):
# class PermissionDenied(WebAPIError):
# class NotFound(WebAPIError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | auth_str = auth_bytes.decode('utf-8', errors='ignore') |
Given the following code snippet before the placeholder: <|code_start|> return await handler(request)
return middleware_handler
# noinspection PyUnusedLocal
async def error_handler_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Error handling middle.
Catch errors raised in web views and try to return a corresponding
HTTP error code.
"""
async def middleware_handler(request: web.Request) -> web.Response:
errcode = None
errmsg = None
ret = None
try:
ret = await handler(request)
except errors.NotFound as e:
errcode = 404
errmsg = str(e) or 'not found'
except errors.PermissionDenied as e:
errcode = 401
errmsg = str(e) or 'permission denied'
except errors.InvalidData as e:
errcode = 400
errmsg = str(e) or 'invalid data'
except errors.WebAPIError as e:
errcode = 400
errmsg = str(e) or 'api error'
<|code_end|>
, predict the next line using imports from the current file:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webapi import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and context including class names, function names, and sometimes code from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webapi/errors.py
# class WebAPIError(Exception):
# class InvalidData(WebAPIError):
# class PermissionDenied(WebAPIError):
# class NotFound(WebAPIError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | except IrisettError as e: |
Using the snippet: <|code_start|>"""Send notification emails."""
charset.add_charset('utf-8', charset.SHORTEST, charset.QP) # type: ignore
# noinspection PyPep8
# noinspection PyPep8
async def send_email(loop: asyncio.AbstractEventLoop, mail_from: str, mail_to: Union[Iterable, str],
subject: str, body: str, server: str='localhost') -> None:
"""Send an email to one or more recipients.
Only supports plain text emails with a single message body.
No attachments etc.
"""
if type(mail_to) == str:
mail_to = [mail_to]
smtp = aiosmtplib.SMTP(hostname=server, port=25, loop=loop)
try:
await smtp.connect()
for rcpt in mail_to:
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = mail_from
msg['To'] = rcpt
await smtp.send_message(msg)
await smtp.quit()
except aiosmtplib.errors.SMTPException as e:
<|code_end|>
, determine the next line of code. You have imports:
from typing import Optional, Dict, Any, Union, Iterable
from email import charset
from email.mime.text import MIMEText
from irisett import (
log,
)
import aiosmtplib
import jinja2
import asyncio
and context (class names, function names, or code) available:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
. Output only the next line. | log.msg('Error sending smtp notification: %s' % (str(e)), 'NOTIFICATIONS') |
Given the code snippet: <|code_start|>"""Monitor groups.
Monitor groups are used to group monitors into.. groups. They can be used
as a cosmetic feature, but also to connect multiple monitors to contacts
without setting the contact(s) for each monitor.
"""
<|code_end|>
, generate the next line using the imports in this file:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | async def create_monitor_group(dbcon: DBConnection, parent_id: Optional[int], name: str): |
Given the code snippet: <|code_start|>"""Monitor groups.
Monitor groups are used to group monitors into.. groups. They can be used
as a cosmetic feature, but also to connect multiple monitors to contacts
without setting the contact(s) for each monitor.
"""
async def create_monitor_group(dbcon: DBConnection, parent_id: Optional[int], name: str):
"""Add a monitor group to the database."""
if not name:
raise errors.InvalidArguments('missing monitor group name')
if parent_id:
if not await monitor_group_exists(dbcon, parent_id):
raise errors.InvalidArguments('parent monitor group does not exist')
q = """insert into monitor_groups (parent_id, name) values (%s, %s)"""
q_args = (parent_id, name) # type: Any
else:
q = """insert into monitor_groups (name) values (%s)"""
q_args = (name,)
group_id = await dbcon.operation(q, q_args)
return group_id
async def update_monitor_group(dbcon: DBConnection, monitor_group_id: int, data: Dict[str, Any]):
"""Update a monitor group in the database.
Data is a dict with parent_id/name values that will be updated.
"""
<|code_end|>
, generate the next line using the imports in this file:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | async def _run(cur: Cursor) -> None: |
Predict the next line for this snippet: <|code_start|>"""Monitor groups.
Monitor groups are used to group monitors into.. groups. They can be used
as a cosmetic feature, but also to connect multiple monitors to contacts
without setting the contact(s) for each monitor.
"""
async def create_monitor_group(dbcon: DBConnection, parent_id: Optional[int], name: str):
"""Add a monitor group to the database."""
if not name:
<|code_end|>
with the help of current file imports:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
, which may contain function names, class names, or code. Output only the next line. | raise errors.InvalidArguments('missing monitor group name') |
Given the following code snippet before the placeholder: <|code_start|> if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """delete from monitor_group_contacts where monitor_group_id=%s and contact_id=%s"""
q_args = (monitor_group_id, contact_id)
await dbcon.operation(q, q_args)
async def add_contact_group_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, contact_group_id: int) -> None:
"""Connect a monitor_group and a contact group."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """replace into monitor_group_contact_groups (monitor_group_id, contact_group_id) values (%s, %s)"""
q_args = (monitor_group_id, contact_group_id)
await dbcon.operation(q, q_args)
async def delete_contact_group_from_monitor_group(
dbcon: DBConnection, monitor_group_id: int, contact_group_id: int) -> None:
"""Remove a contact group from a monitor group."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """delete from monitor_group_contact_groups where monitor_group_id=%s and contact_group_id=%s"""
q_args = (monitor_group_id, contact_group_id)
await dbcon.operation(q, q_args)
<|code_end|>
, predict the next line using imports from the current file:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context including class names, function names, and sometimes code from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | async def get_all_monitor_groups(dbcon: DBConnection) -> Iterable[object_models.MonitorGroup]: |
Using the snippet: <|code_start|>"""Monitor groups.
Monitor groups are used to group monitors into.. groups. They can be used
as a cosmetic feature, but also to connect multiple monitors to contacts
without setting the contact(s) for each monitor.
"""
async def create_monitor_group(dbcon: DBConnection, parent_id: Optional[int], name: str):
"""Add a monitor group to the database."""
if not name:
raise errors.InvalidArguments('missing monitor group name')
if parent_id:
<|code_end|>
, determine the next line of code. You have imports:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (class names, function names, or code) available:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await monitor_group_exists(dbcon, parent_id): |
Given the code snippet: <|code_start|> q = """delete from object_metadata where object_type="monitor_group" and object_id=%s"""
await cur.execute(q, (monitor_group_id,))
await dbcon.transact(_run)
async def add_active_monitor_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, monitor_id: int) -> None:
"""Connect a monitor_group and an active monitor."""
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """replace into monitor_group_active_monitors (monitor_group_id, active_monitor_id) values (%s, %s)"""
q_args = (monitor_group_id, monitor_id)
await dbcon.operation(q, q_args)
async def delete_active_monitor_from_monitor_group(dbcon: DBConnection, monitor_group_id: int, monitor_id: int) -> None:
"""Remove an active monitor from a monitor group."""
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """delete from monitor_group_active_monitors where monitor_group_id=%s and active_monitor_id=%s"""
q_args = (monitor_group_id, monitor_id)
await dbcon.operation(q, q_args)
async def add_contact_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, contact_id: int) -> None:
"""Connect a monitor_group and a contact."""
<|code_end|>
, generate the next line using the imports in this file:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (functions, classes, or occasionally code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await contact_exists(dbcon, contact_id): |
Based on the snippet: <|code_start|> if key not in ['parent_id', 'name']:
raise errors.IrisettError('invalid monitor_group key %s' % key)
if key == 'parent_id' and value:
if monitor_group_id == int(value):
raise errors.InvalidArguments('monitor group can\'t be its own parent')
if not await monitor_group_exists(dbcon, value):
raise errors.InvalidArguments('parent monitor group does not exist')
q = """update monitor_groups set %s=%%s where id=%%s""" % key
q_args = (value, monitor_group_id)
await cur.execute(q, q_args)
await dbcon.transact(_run)
async def delete_monitor_group(dbcon: DBConnection, monitor_group_id: int) -> None:
"""Remove a monitor_group from the database."""
async def _run(cur: Cursor) -> None:
q = """delete from monitor_groups where id=%s"""
await cur.execute(q, (monitor_group_id,))
q = """delete from monitor_group_active_monitors where monitor_group_id=%s"""
await cur.execute(q, (monitor_group_id,))
q = """delete from object_metadata where object_type="monitor_group" and object_id=%s"""
await cur.execute(q, (monitor_group_id,))
await dbcon.transact(_run)
async def add_active_monitor_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, monitor_id: int) -> None:
"""Connect a monitor_group and an active monitor."""
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and context (classes, functions, sometimes code) from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await active_monitor_exists(dbcon, monitor_id): |
Predict the next line after this snippet: <|code_start|> raise errors.InvalidArguments('monitor_group does not exist')
q = """delete from monitor_group_active_monitors where monitor_group_id=%s and active_monitor_id=%s"""
q_args = (monitor_group_id, monitor_id)
await dbcon.operation(q, q_args)
async def add_contact_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, contact_id: int) -> None:
"""Connect a monitor_group and a contact."""
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """replace into monitor_group_contacts (monitor_group_id, contact_id) values (%s, %s)"""
q_args = (monitor_group_id, contact_id)
await dbcon.operation(q, q_args)
async def delete_contact_from_monitor_group(dbcon: DBConnection, monitor_group_id: int, contact_id: int) -> None:
"""Remove a contact from a monitor group."""
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
if not await monitor_group_exists(dbcon, monitor_group_id):
raise errors.InvalidArguments('monitor_group does not exist')
q = """delete from monitor_group_contacts where monitor_group_id=%s and contact_id=%s"""
q_args = (monitor_group_id, contact_id)
await dbcon.operation(q, q_args)
async def add_contact_group_to_monitor_group(dbcon: DBConnection, monitor_group_id: int, contact_group_id: int) -> None:
"""Connect a monitor_group and a contact group."""
<|code_end|>
using the current file's imports:
from typing import Optional, Dict, Any, Iterable
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
monitor_group_exists,
contact_exists,
active_monitor_exists,
contact_group_exists,
)
and any relevant context from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
#
# Path: irisett/object_exists.py
# async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
# """Check if a monitor group id exists."""
# q = """select count(id) from monitor_groups where id=%s"""
# return await _object_exists(dbcon, q, (monitor_group_id,))
#
# async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from contacts where id=%s"""
# return await _object_exists(dbcon, q, (contact_id,))
#
# async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
# """Check if a contact id exists."""
# q = """select count(id) from active_monitors where id=%s"""
# return await _object_exists(dbcon, q, (active_monitor_id,))
#
# async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
# """Check if a contact group id exists."""
# q = """select count(id) from contact_groups where id=%s"""
# return await _object_exists(dbcon, q, (contact_group_id,))
. Output only the next line. | if not await contact_group_exists(dbcon, contact_group_id): |
Using the snippet: <|code_start|>
def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
provider = config.get('sms-provider')
if not provider:
<|code_end|>
, determine the next line of code. You have imports:
from typing import Dict, Any, Optional, Iterable
from irisett import (
log,
)
from irisett.notify import (
clicksend,
)
and context (class names, function names, or code) available:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | log.msg('No SMS provider specified, no sms notifications will be sent', 'NOTIFICATIONS') |
Given the following code snippet before the placeholder: <|code_start|>
def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
provider = config.get('sms-provider')
if not provider:
log.msg('No SMS provider specified, no sms notifications will be sent', 'NOTIFICATIONS')
return None
if provider not in ['clicksend']:
log.msg('Unknown SMS provider specified, no sms notifications will be sent', 'NOTIFICATIONS')
return None
ret = None
if provider == 'clicksend':
<|code_end|>
, predict the next line using imports from the current file:
from typing import Dict, Any, Optional, Iterable
from irisett import (
log,
)
from irisett.notify import (
clicksend,
)
and context including class names, function names, and sometimes code from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/notify/clicksend.py
# CLICKSEND_URL = 'https://rest.clicksend.com/v3/sms/send'
# async def send_sms(recipients: Iterable[str], msg: str, username: str, api_key: str, sender: str):
# def parse_settings(config: Any) -> Optional[Dict[str, Any]]:
. Output only the next line. | ret = clicksend.parse_settings(config) |
Given the following code snippet before the placeholder: <|code_start|>"""Object metadata management.
Metadata can be applied to any object type/id pair.
Metadata is managed using metadicts, ie. key/value pairs
of (short) data that are attached to an object.
"""
<|code_end|>
, predict the next line using imports from the current file:
from typing import Dict, Iterable, Optional, Tuple
from irisett.sql import DBConnection, Cursor
from irisett import object_models
and context including class names, function names, and sometimes code from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
. Output only the next line. | async def get_metadata(dbcon: DBConnection, object_type: str, object_id: int) -> Dict[str, str]: |
Predict the next line for this snippet: <|code_start|>"""Object metadata management.
Metadata can be applied to any object type/id pair.
Metadata is managed using metadicts, ie. key/value pairs
of (short) data that are attached to an object.
"""
async def get_metadata(dbcon: DBConnection, object_type: str, object_id: int) -> Dict[str, str]:
"""Return a dict of metadata for an object."""
q = """select `key`, value from object_metadata where object_type=%s and object_id=%s"""
q_args = (object_type, object_id)
rows = await dbcon.fetch_all(q, q_args)
metadict = {}
for key, value in rows:
metadict[key] = value
return metadict
async def add_metadata(dbcon: DBConnection, object_type: str, object_id: int, metadict: Dict[str, str]):
"""Add metadata to an object.
Metadict is a dictionary of key value pairs to add.
"""
<|code_end|>
with the help of current file imports:
from typing import Dict, Iterable, Optional, Tuple
from irisett.sql import DBConnection, Cursor
from irisett import object_models
and context from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
, which may contain function names, class names, or code. Output only the next line. | async def _run(cur: Cursor) -> None: |
Here is a snippet: <|code_start|> q_args = (object_type, object_id, str(key), str(value))
await cur.execute(q, q_args)
await dbcon.transact(_run)
async def delete_metadata(dbcon: DBConnection, object_type: str, object_id: int,
keys: Optional[Iterable[str]] = None):
"""Delete metadata for an object.
If keys is given, only delete the specified keys, otherwise delete all
metadata for the object.
"""
async def _run(cur: Cursor) -> None:
if keys:
# noinspection PyTypeChecker
for key in keys:
q = """delete from object_metadata where object_type=%s and object_id=%s and `key`=%s"""
q_args = (object_type, object_id, key) # type: Tuple
await cur.execute(q, q_args)
else:
q = """delete from object_metadata where object_type=%s and object_id=%s"""
q_args = (object_type, object_id)
await cur.execute(q, q_args)
await dbcon.transact(_run)
async def get_metadata_for_object(
<|code_end|>
. Write the next line using the current file imports:
from typing import Dict, Iterable, Optional, Tuple
from irisett.sql import DBConnection, Cursor
from irisett import object_models
and context from other files:
# Path: irisett/sql.py
# class DBConnection:
# def __init__(self, host: str, user: str, passwd: str, dbname: str, loop: asyncio.AbstractEventLoop=None) -> None:
# async def initialize(self, *, only_init_tables: bool=False):
# async def close(self) -> None:
# async def _create_db(self) -> None:
# async def _init_db(self, only_init_tables: bool) -> None:
# async def _check_db_exists(self) -> bool:
# async def _check_db_initialized(self) -> bool:
# async def _upgrade_db(self) -> None:
# async def _get_db_version(self) -> int:
# async def _set_db_version(self, version: int):
# async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_row(self, query: str, args: Optional[Iterable]=None) -> List:
# async def fetch_single(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:
# async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:
# async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
#
# Path: irisett/object_models.py
# def insert_values(object: Any) -> Tuple:
# def list_asdict(in_list: Iterable[Any]) -> List[Any]:
# def insert_filter(attribute: Any, value: Any) -> bool:
# class Contact:
# class ContactGroup:
# class ActiveMonitor:
# class ActiveMonitorArg:
# class ActiveMonitorAlert:
# class ActiveMonitorDef:
# class ActiveMonitorDefArg:
# class ObjectMetadata:
# class ObjectBindata:
# class MonitorGroup:
, which may include functions, classes, or code. Output only the next line. | dbcon: DBConnection, object_type: str, object_id: int) -> Iterable[object_models.ObjectMetadata]: |
Continue the code snippet: <|code_start|>"""Basic logging functionality.
Supports logging to stdout, syslog and file.
"""
# Yes yes, globals are ugly.
logger = None
def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
rotate_length: int=1000000, max_rotated_files: int=250) -> None:
global logger
level = logging.INFO
if debug_logging:
level = logging.DEBUG
if logtype not in ['stdout', 'syslog', 'file']:
<|code_end|>
. Use current file imports:
from typing import Optional, Any, cast
from irisett import errors
import os
import os.path
import logging
import logging.handlers
and context (classes, functions, or code) from other files:
# Path: irisett/errors.py
# class IrisettError(Exception):
# class InvalidArguments(IrisettError):
# def __str__(self) -> str:
. Output only the next line. | raise errors.IrisettError('invalid logtype name %s' % logtype) |
Next line prediction: <|code_start|>"""Webmgmt middleware helpers.
Middleware for common actions, authentication etc.
"""
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
stats.inc('num_calls', 'WEBMGMT')
<|code_end|>
. Use current file imports:
(from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webmgmt import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii)
and context including class names, function names, or small code snippets from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webmgmt/errors.py
# class WebMgmtError(Exception):
# class InvalidData(WebMgmtError):
# class PermissionDenied(WebMgmtError):
# class NotFound(WebMgmtError):
# class MissingLogin(WebMgmtError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | log.msg('Received request: %s' % request, 'WEBMGMT') |
Here is a snippet: <|code_start|>"""Webmgmt middleware helpers.
Middleware for common actions, authentication etc.
"""
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
<|code_end|>
. Write the next line using the current file imports:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webmgmt import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and context from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webmgmt/errors.py
# class WebMgmtError(Exception):
# class InvalidData(WebMgmtError):
# class PermissionDenied(WebMgmtError):
# class NotFound(WebMgmtError):
# class MissingLogin(WebMgmtError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
, which may include functions, classes, or code. Output only the next line. | stats.inc('num_calls', 'WEBMGMT') |
Next line prediction: <|code_start|>
# noinspection PyUnusedLocal
async def logging_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Basic logging and accounting."""
async def middleware_handler(request: web.Request) -> web.Response:
stats.inc('num_calls', 'WEBMGMT')
log.msg('Received request: %s' % request, 'WEBMGMT')
return await handler(request)
return middleware_handler
async def basic_auth_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Authentication.
Uses HTTP basic auth to check that requests are including the required
username and password.
"""
async def middleware_handler(request: web.Request) -> web.Response:
ok = False
auth_token = request.headers.get('Authorization')
if auth_token and auth_token.startswith('Basic '):
auth_token = auth_token[6:]
try:
auth_bytes = base64.b64decode(auth_token) # type: Optional[bytes]
except binascii.Error:
auth_bytes = None
if auth_bytes:
<|code_end|>
. Use current file imports:
(from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webmgmt import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii)
and context including class names, function names, or small code snippets from other files:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webmgmt/errors.py
# class WebMgmtError(Exception):
# class InvalidData(WebMgmtError):
# class PermissionDenied(WebMgmtError):
# class NotFound(WebMgmtError):
# class MissingLogin(WebMgmtError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
. Output only the next line. | auth_str = auth_bytes.decode('utf-8', errors='ignore') |
Given snippet: <|code_start|># noinspection PyUnusedLocal
async def error_handler_middleware_factory(app: web.Application, handler: Any) -> Callable:
"""Error handling middle.
Catch errors raised in web views and try to return a corresponding
HTTP error code.
"""
async def middleware_handler(request: web.Request) -> web.Response:
errcode = None
errmsg = None
ret = None
headers = {}
try:
ret = await handler(request)
except errors.NotFound as e:
errcode = 404
errmsg = str(e) or 'not found'
except errors.PermissionDenied as e:
errcode = 401
errmsg = str(e) or 'permission denied'
except errors.MissingLogin as e:
errcode = 401
errmsg = str(e) or 'permission denied'
headers['WWW-Authenticate'] = 'Basic realm="Restricted"'
except errors.InvalidData as e:
errcode = 400
errmsg = str(e) or 'invalid data'
except errors.WebMgmtError as e:
errcode = 400
errmsg = str(e) or 'web error'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Optional, Callable, Any
from aiohttp import web
from irisett import (
log,
stats,
)
from irisett.webmgmt import (
errors,
)
from irisett.errors import IrisettError
import base64
import binascii
and context:
# Path: irisett/log.py
# def configure_logging(logtype: str, logfilename: Optional[str]=None, debug_logging: bool=False,
# rotate_length: int=1000000, max_rotated_files: int=250) -> None:
# def msg(logmsg: str, section: Optional[str]=None) -> None:
# def debug(logmsg: str, section: Optional[str]=None) -> None:
# def log_msg(self, logmsg: str) -> None:
# def log_debug(self, logmsg: str) -> None:
# class LoggingMixin:
#
# Path: irisett/stats.py
# def get_section(section: Optional[str]) -> Dict[str, float]:
# def set(var: str, value: float, section: Optional[str] = None) -> None:
# def inc(var: str, section: Optional[str] = None) -> None:
# def dec(var: str, section: Optional[str] = None) -> None:
# def get_stats() -> Dict[str, float]:
#
# Path: irisett/webmgmt/errors.py
# class WebMgmtError(Exception):
# class InvalidData(WebMgmtError):
# class PermissionDenied(WebMgmtError):
# class NotFound(WebMgmtError):
# class MissingLogin(WebMgmtError):
#
# Path: irisett/errors.py
# class IrisettError(Exception):
# def __str__(self) -> str:
# if len(self.args) == 1:
# ret = self.args[0]
# else:
# ret = str(self.__class__.__name__)
# return ret
which might include code, classes, or functions. Output only the next line. | except IrisettError as e: |
Given snippet: <|code_start|>"""A set of functions used to validate HTTP input data.
These functions are primarily used to valid that arguments sent in http
requests are what they are supposed to be.
"""
def require_str(value: Any, convert: bool=False, allow_none: bool=False) -> Any:
"""Make sure a value is a str.
Used when dealing with http input data.
"""
if value is None and allow_none:
return value
if type(value) != str:
if not convert:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Union, Any, Dict, List, cast, Optional, SupportsInt
from irisett.webapi.errors import InvalidData
and context:
# Path: irisett/webapi/errors.py
# class InvalidData(WebAPIError):
# pass
which might include code, classes, or functions. Output only the next line. | raise InvalidData('value was %s(%s), expected str' % (type(value), value)) |
Predict the next line for this snippet: <|code_start|> file. [default: settings.yaml]
--mongo-host=<hostname> Host for mongo database [default: localhost]
--mongo-port=<port> Port for mongo database [default: 27017]
"""
args_schema = schema.Schema({
'--mongo-port': schema.And(schema.Use(int), lambda n: 1 <= n <= 65535, error='Invalid mongo port'),
# '--delay': schema.And(schema.Use(float), lambda n: n > 0, error='Invalid delay, must be number > 0'),
object: object,
})
def merge_arguments(default_args, cfg_args, cmd_args):
"""Given the default arguments, the arguments from the config file, and the command-line arguments,
merge the arguments in order of increasing precedence (default, config, cmd)
NOTE: The way it is determined whether a command-line argument was passed was by checking that
its value is equal to the default argument. As such this will fail if a command-line argument
is explicitly passed that is the same as the default argument - the config file will take
precedence in this case.
"""
result = {**default_args, **cfg_args}
for key, val in cmd_args.items():
if val != default_args.get(key):
result[key] = val
elif key in cfg_args:
<|code_end|>
with the help of current file imports:
import importlib
import os
import sys
import schema
import yaml
from docopt import docopt
from yahoo_groups_backup.logging import eprint
from docopt import parse_defaults
and context from other files:
# Path: yahoo_groups_backup/logging.py
# def eprint(*args, **kwargs):
# print(*args, **kwargs, file=sys.stderr)
, which may contain function names, class names, or code. Output only the next line. | eprint("Using '%s' from config file" % (key,)) |
Here is a snippet: <|code_start|>
__all__ = ["CompressibleForcing", "IncompressibleForcing", "EadyForcing", "CompressibleEadyForcing", "ShallowWaterForcing"]
class Forcing(object, metaclass=ABCMeta):
"""
Base class for forcing terms for Gusto.
:arg state: x :class:`.State` object.
:arg euler_poincare: if True then the momentum equation is in Euler
Poincare form and we need to add 0.5*grad(u^2) to the forcing term.
If False then this term is not added.
:arg linear: if True then we are solving a linear equation so nonlinear
terms (namely the Euler Poincare term) should not be added.
:arg extra_terms: extra terms to add to the u component of the forcing
term - these will be multiplied by the appropriate test function.
"""
def __init__(self, state, euler_poincare=False, linear=False, extra_terms=None, moisture=None):
self.state = state
if linear:
self.euler_poincare = False
<|code_end|>
. Write the next line using the current file imports:
from abc import ABCMeta, abstractmethod
from firedrake import (Function, split, TrialFunction, TestFunction,
FacetNormal, inner, dx, cross, div, jump, avg, dS_v,
LinearVariationalProblem, LinearVariationalSolver,
dot, dS, Constant, as_vector, SpatialCoordinate)
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
and context from other files:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
, which may include functions, classes, or code. Output only the next line. | logger.warning('Setting euler_poincare to False because you have set linear=True') |
Predict the next line after this snippet: <|code_start|> if self.extruded:
L += self.gravity_term()
if self.coriolis:
L += self.coriolis_term()
if self.euler_poincare:
L += self.euler_poincare_term()
if self.topography:
L += self.topography_term()
if self.extra_terms is not None:
L += inner(self.test, self.extra_terms)*dx
# scale L
L = self.scaling * L
# sponge term has a separate scaling factor as it is always implicit
if self.sponge:
L -= self.impl*self.state.timestepping.dt*self.sponge_term()
# hydrostatic term has no scaling factor
if self.hydrostatic:
L += (2*self.impl-1)*self.hydrostatic_term()
return L
def _build_forcing_solvers(self):
a = self.mass_term()
L = self.forcing_term()
bcs = None if len(self.state.bcs) == 0 else self.state.bcs
u_forcing_problem = LinearVariationalProblem(
a, L, self.uF, bcs=bcs
)
solver_parameters = {}
<|code_end|>
using the current file's imports:
from abc import ABCMeta, abstractmethod
from firedrake import (Function, split, TrialFunction, TestFunction,
FacetNormal, inner, dx, cross, div, jump, avg, dS_v,
LinearVariationalProblem, LinearVariationalSolver,
dot, dS, Constant, as_vector, SpatialCoordinate)
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
and any relevant context from other files:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
. Output only the next line. | if logger.isEnabledFor(DEBUG): |
Continue the code snippet: <|code_start|> raise ValueError('Your coordinates do not appear to match the coordinates of a DoF')
if field is None or new_value is None:
return point_indices
@pytest.mark.parametrize("geometry", ["1D", "2D"])
def test_gaussian_elimination(geometry, mesh):
cell = mesh.ufl_cell().cellname()
DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
DG1 = FunctionSpace(mesh, DG1_elt)
vec_DG1 = VectorFunctionSpace(mesh, DG1_elt)
act_coords = Function(vec_DG1)
eff_coords = Function(vec_DG1)
field_init = Function(DG1)
field_true = Function(DG1)
field_final = Function(DG1)
# We now include things for the num of exterior values, which may be removed
DG0 = FunctionSpace(mesh, "DG", 0)
num_ext = Function(DG0)
num_ext.dat.data[0] = 1.0
# Get initial and true conditions
field_init, field_true, act_coords, eff_coords = setup_values(geometry, field_init,
field_true, act_coords,
eff_coords)
<|code_end|>
. Use current file imports:
from firedrake import (IntervalMesh, FunctionSpace, Function, RectangleMesh,
VectorFunctionSpace, FiniteElement, SpatialCoordinate)
from gusto import kernels
import numpy as np
import pytest
and context (classes, functions, or code) from other files:
# Path: gusto/kernels.py
# class GaussianElimination(object):
# class Average(object):
# class AverageWeightings(object):
# class PhysicsRecoveryTop():
# class PhysicsRecoveryBottom():
# def __init__(self, DG1):
# def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
# def __init__(self, V):
# def apply(self, v_out, weighting, v_in):
# def __init__(self, V):
# def apply(self, w):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
. Output only the next line. | kernel = kernels.GaussianElimination(DG1) |
Here is a snippet: <|code_start|> if field is not None and new_value is not None:
field.dat.data[i] = new_value
break
if not point_found:
raise ValueError('Your coordinates do not appear to match the coordinates of a DoF')
if field is None or new_value is None:
return point_index
@pytest.mark.parametrize("boundary", ["top", "bottom"])
def test_physics_recovery_kernels(boundary):
m = IntervalMesh(3, 3)
mesh = ExtrudedMesh(m, layers=3, layer_height=1.0)
cell = m.ufl_cell().cellname()
hori_elt = FiniteElement("DG", cell, 0)
vert_elt = FiniteElement("CG", interval, 1)
theta_elt = TensorProductElement(hori_elt, vert_elt)
Vt = FunctionSpace(mesh, theta_elt)
Vt_brok = FunctionSpace(mesh, BrokenElement(theta_elt))
initial_field = Function(Vt)
true_field = Function(Vt_brok)
new_field = Function(Vt_brok)
initial_field, true_field, boundary_index = setup_values(boundary, initial_field, true_field)
<|code_end|>
. Write the next line using the current file imports:
from firedrake import (IntervalMesh, Function, BrokenElement, VectorElement,
FunctionSpace, FiniteElement, ExtrudedMesh,
interval, TensorProductElement, SpatialCoordinate)
from gusto import kernels
import numpy as np
import pytest
and context from other files:
# Path: gusto/kernels.py
# class GaussianElimination(object):
# class Average(object):
# class AverageWeightings(object):
# class PhysicsRecoveryTop():
# class PhysicsRecoveryBottom():
# def __init__(self, DG1):
# def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
# def __init__(self, V):
# def apply(self, v_out, weighting, v_in):
# def __init__(self, V):
# def apply(self, w):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
, which may include functions, classes, or code. Output only the next line. | kernel = kernels.PhysicsRecoveryTop() if boundary == "top" else kernels.PhysicsRecoveryBottom() |
Based on the snippet: <|code_start|> self.on_sphere = (mesh._base_mesh.geometric_dimension() == 3 and mesh._base_mesh.topological_dimension() == 2)
except AttributeError:
self.on_sphere = (mesh.geometric_dimension() == 3 and mesh.topological_dimension() == 2)
# build the vertical normal and define perp for 2d geometries
dim = mesh.topological_dimension()
if self.on_sphere:
x = SpatialCoordinate(mesh)
R = sqrt(inner(x, x))
self.k = interpolate(x/R, mesh.coordinates.function_space())
if dim == 2:
outward_normals = CellNormal(mesh)
self.perp = lambda u: cross(outward_normals, u)
else:
kvec = [0.0]*dim
kvec[dim-1] = 1.0
self.k = Constant(kvec)
if dim == 2:
self.perp = lambda u: as_vector([-u[1], u[0]])
# project test function for hydrostatic case
if self.hydrostatic:
self.h_project = lambda u: u - self.k*inner(u, self.k)
else:
self.h_project = lambda u: u
# Constant to hold current time
self.t = Constant(0.0)
# setup logger
<|code_end|>
, predict the immediate next line with the help of imports:
from os import path, makedirs
from netCDF4 import Dataset
from gusto.diagnostics import Diagnostics, Perturbation, SteadyStateError
from firedrake import (FiniteElement, TensorProductElement, HDiv, DirichletBC,
FunctionSpace, MixedFunctionSpace, VectorFunctionSpace,
interval, Function, Mesh, functionspaceimpl,
File, SpatialCoordinate, sqrt, Constant, inner,
op2, DumbCheckpoint, FILE_CREATE, FILE_READ, interpolate,
CellNormal, cross, as_vector)
from gusto.configuration import logger, set_log_handler
import itertools
import sys
import time
import numpy as np
and context (classes, functions, sometimes code) from other files:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
. Output only the next line. | logger.setLevel(output.log_level) |
Using the snippet: <|code_start|> except AttributeError:
self.on_sphere = (mesh.geometric_dimension() == 3 and mesh.topological_dimension() == 2)
# build the vertical normal and define perp for 2d geometries
dim = mesh.topological_dimension()
if self.on_sphere:
x = SpatialCoordinate(mesh)
R = sqrt(inner(x, x))
self.k = interpolate(x/R, mesh.coordinates.function_space())
if dim == 2:
outward_normals = CellNormal(mesh)
self.perp = lambda u: cross(outward_normals, u)
else:
kvec = [0.0]*dim
kvec[dim-1] = 1.0
self.k = Constant(kvec)
if dim == 2:
self.perp = lambda u: as_vector([-u[1], u[0]])
# project test function for hydrostatic case
if self.hydrostatic:
self.h_project = lambda u: u - self.k*inner(u, self.k)
else:
self.h_project = lambda u: u
# Constant to hold current time
self.t = Constant(0.0)
# setup logger
logger.setLevel(output.log_level)
<|code_end|>
, determine the next line of code. You have imports:
from os import path, makedirs
from netCDF4 import Dataset
from gusto.diagnostics import Diagnostics, Perturbation, SteadyStateError
from firedrake import (FiniteElement, TensorProductElement, HDiv, DirichletBC,
FunctionSpace, MixedFunctionSpace, VectorFunctionSpace,
interval, Function, Mesh, functionspaceimpl,
File, SpatialCoordinate, sqrt, Constant, inner,
op2, DumbCheckpoint, FILE_CREATE, FILE_READ, interpolate,
CellNormal, cross, as_vector)
from gusto.configuration import logger, set_log_handler
import itertools
import sys
import time
import numpy as np
and context (class names, function names, or code) available:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
. Output only the next line. | set_log_handler(mesh.comm) |
Based on the snippet: <|code_start|> """
An object that 'recovers' a low order field (e.g. in DG0)
into a higher order field (e.g. in CG1).
The code is essentially that of the Firedrake Projector
object, using the "average" method, and could possibly
be replaced by it if it comes into the master branch.
:arg v: the :class:`ufl.Expr` or
:class:`.Function` to project.
:arg v_out: :class:`.Function` to put the result in.
"""
def __init__(self, v, v_out):
if not isinstance(v, (ufl.core.expr.Expr, function.Function)):
raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(v))
# Check shape values
if v.ufl_shape != v_out.ufl_shape:
raise RuntimeError('Shape mismatch between source %s and target function spaces %s in project' % (v.ufl_shape, v_out.ufl_shape))
self._same_fspace = (isinstance(v, function.Function) and v.function_space() == v_out.function_space())
self.v = v
self.v_out = v_out
self.V = v_out.function_space()
# Check the number of local dofs
if self.v_out.function_space().finat_element.space_dimension() != self.v.function_space().finat_element.space_dimension():
raise RuntimeError("Number of local dofs for each field must be equal.")
<|code_end|>
, predict the immediate next line with the help of imports:
from enum import Enum
from firedrake import (BrokenElement, Constant, DirichletBC, FiniteElement,
Function, FunctionSpace, Interpolator, Projector,
SpatialCoordinate, TensorProductElement,
VectorFunctionSpace, as_vector, function, interval)
from firedrake.utils import cached_property
from gusto import kernels
import ufl
and context (classes, functions, sometimes code) from other files:
# Path: gusto/kernels.py
# class GaussianElimination(object):
# class Average(object):
# class AverageWeightings(object):
# class PhysicsRecoveryTop():
# class PhysicsRecoveryBottom():
# def __init__(self, DG1):
# def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
# def __init__(self, V):
# def apply(self, v_out, weighting, v_in):
# def __init__(self, V):
# def apply(self, w):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
. Output only the next line. | self.average_kernel = kernels.Average(self.V) |
Here is a snippet: <|code_start|>
__all__ = ["IncompressibleSolver", "ShallowWaterSolver", "CompressibleSolver"]
class TimesteppingSolver(object, metaclass=ABCMeta):
"""
Base class for timestepping linear solvers for Gusto.
This is a dummy base class.
:arg state: :class:`.State` object.
:arg solver_parameters (optional): solver parameters
:arg overwrite_solver_parameters: boolean, if True use only the
solver_parameters that have been passed in, if False then update
the default solver parameters with the solver_parameters passed in.
"""
def __init__(self, state, solver_parameters=None,
overwrite_solver_parameters=False):
self.state = state
if solver_parameters is not None:
if not overwrite_solver_parameters:
p = flatten_parameters(self.solver_parameters)
p.update(flatten_parameters(solver_parameters))
solver_parameters = p
self.solver_parameters = solver_parameters
<|code_end|>
. Write the next line using the current file imports:
from firedrake import (split, LinearVariationalProblem, Constant,
LinearVariationalSolver, TestFunctions, TrialFunctions,
TestFunction, TrialFunction, lhs, rhs, FacetNormal,
div, dx, jump, avg, dS_v, dS_h, ds_v, ds_t, ds_b, ds_tb, inner,
dot, grad, Function, VectorSpaceBasis, BrokenElement,
FunctionSpace, MixedFunctionSpace, DirichletBC)
from firedrake.petsc import flatten_parameters
from firedrake.parloops import par_loop, READ, INC
from pyop2.profiling import timed_function, timed_region
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
and context from other files:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
, which may include functions, classes, or code. Output only the next line. | if logger.isEnabledFor(DEBUG): |
Given the code snippet: <|code_start|>
__all__ = ["IncompressibleSolver", "ShallowWaterSolver", "CompressibleSolver"]
class TimesteppingSolver(object, metaclass=ABCMeta):
"""
Base class for timestepping linear solvers for Gusto.
This is a dummy base class.
:arg state: :class:`.State` object.
:arg solver_parameters (optional): solver parameters
:arg overwrite_solver_parameters: boolean, if True use only the
solver_parameters that have been passed in, if False then update
the default solver parameters with the solver_parameters passed in.
"""
def __init__(self, state, solver_parameters=None,
overwrite_solver_parameters=False):
self.state = state
if solver_parameters is not None:
if not overwrite_solver_parameters:
p = flatten_parameters(self.solver_parameters)
p.update(flatten_parameters(solver_parameters))
solver_parameters = p
self.solver_parameters = solver_parameters
<|code_end|>
, generate the next line using the imports in this file:
from firedrake import (split, LinearVariationalProblem, Constant,
LinearVariationalSolver, TestFunctions, TrialFunctions,
TestFunction, TrialFunction, lhs, rhs, FacetNormal,
div, dx, jump, avg, dS_v, dS_h, ds_v, ds_t, ds_b, ds_tb, inner,
dot, grad, Function, VectorSpaceBasis, BrokenElement,
FunctionSpace, MixedFunctionSpace, DirichletBC)
from firedrake.petsc import flatten_parameters
from firedrake.parloops import par_loop, READ, INC
from pyop2.profiling import timed_function, timed_region
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: gusto/configuration.py
# def set_log_handler(comm):
# def __init__(self, **kwargs):
# def __setattr__(self, name, value):
# def name(self):
# class Configuration(object):
# class TimesteppingParameters(Configuration):
# class OutputParameters(Configuration):
# class CompressibleParameters(Configuration):
# class ShallowWaterParameters(Configuration):
# class EadyParameters(Configuration):
# class CompressibleEadyParameters(CompressibleParameters, EadyParameters):
# class AdvectionOptions(Configuration, metaclass=ABCMeta):
# class EmbeddedDGOptions(AdvectionOptions):
# class RecoveredOptions(AdvectionOptions):
# class SUPGOptions(AdvectionOptions):
# N = 0.01 # Brunt-Vaisala frequency (1/s)
# T_0 = 273.15 # ref. temperature
# H = None # mean depth
# H = None
# L = None
# N = sqrt(EadyParameters.Nsq)
. Output only the next line. | if logger.isEnabledFor(DEBUG): |
Continue the code snippet: <|code_start|> num_points = len(coord_field.dat.data[:])
point_found = False
for i in range(num_points):
# Do the coordinates at the ith point match our desired coords?
if np.allclose(coord_field.dat.data[i], coords, rtol=1e-14):
point_found = True
point_index = i
if field is not None and new_value is not None:
field.dat.data[i] = new_value
break
if not point_found:
raise ValueError('Your coordinates do not appear to match the coordinates of a DoF')
if field is None or new_value is None:
return point_index
@pytest.mark.parametrize("geometry", ["1D", "2D"])
def test_average(geometry, mesh):
vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)
# We will fill DG_field with values, and average them to CG_field
weights = Function(vec_CG1)
true_values = Function(vec_CG1)
true_values = setup_values(geometry, true_values)
<|code_end|>
. Use current file imports:
from firedrake import (IntervalMesh, Function, RectangleMesh,
VectorFunctionSpace, SpatialCoordinate)
from gusto import kernels
import numpy as np
import pytest
and context (classes, functions, or code) from other files:
# Path: gusto/kernels.py
# class GaussianElimination(object):
# class Average(object):
# class AverageWeightings(object):
# class PhysicsRecoveryTop():
# class PhysicsRecoveryBottom():
# def __init__(self, DG1):
# def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
# def __init__(self, V):
# def apply(self, v_out, weighting, v_in):
# def __init__(self, V):
# def apply(self, w):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
. Output only the next line. | kernel = kernels.AverageWeightings(vec_CG1) |
Next line prediction: <|code_start|> if field is not None and new_value is not None:
field.dat.data[i] = new_value
break
if not point_found:
raise ValueError('Your coordinates do not appear to match the coordinates of a DoF')
if field is None or new_value is None:
return point_index
@pytest.mark.parametrize("geometry", ["1D", "2D"])
def test_average(geometry, mesh):
cell = mesh.ufl_cell().cellname()
DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
vec_DG1 = VectorFunctionSpace(mesh, DG1_elt)
vec_DG0 = VectorFunctionSpace(mesh, "DG", 0)
vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)
# We will fill DG1_field with values, and average them to CG_field
# First need to put the values into DG0 and then interpolate
DG0_field = Function(vec_DG0)
DG1_field = Function(vec_DG1)
CG_field = Function(vec_CG1)
weights = Function(vec_CG1)
DG0_field, weights, true_values, CG_index = setup_values(geometry, DG0_field, weights)
DG1_field.interpolate(DG0_field)
<|code_end|>
. Use current file imports:
(from firedrake import (IntervalMesh, Function, RectangleMesh, SpatialCoordinate,
VectorFunctionSpace, FiniteElement)
from gusto import kernels
import numpy as np
import pytest)
and context including class names, function names, or small code snippets from other files:
# Path: gusto/kernels.py
# class GaussianElimination(object):
# class Average(object):
# class AverageWeightings(object):
# class PhysicsRecoveryTop():
# class PhysicsRecoveryBottom():
# def __init__(self, DG1):
# def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
# def __init__(self, V):
# def apply(self, v_out, weighting, v_in):
# def __init__(self, V):
# def apply(self, w):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
# def __init__(self):
# def apply(self, v_DG1, v_CG1):
. Output only the next line. | kernel = kernels.Average(vec_CG1) |
Predict the next line after this snippet: <|code_start|>
class Timer(Stat):
def __init__(self):
self.count = 0
<|code_end|>
using the current file's imports:
import contextlib
from time import time
from .meter import Meter
from .stats import Stat
from .histogram import Histogram
and any relevant context from other files:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/stats.py
# class Stat(object):
#
# @abc.abstractmethod
# def get_values(self):
# raise NotImplementedError()
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
. Output only the next line. | self.meter = Meter() |
Predict the next line for this snippet: <|code_start|>
class Timer(Stat):
def __init__(self):
self.count = 0
self.meter = Meter()
<|code_end|>
with the help of current file imports:
import contextlib
from time import time
from .meter import Meter
from .stats import Stat
from .histogram import Histogram
and context from other files:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/stats.py
# class Stat(object):
#
# @abc.abstractmethod
# def get_values(self):
# raise NotImplementedError()
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
, which may contain function names, class names, or code. Output only the next line. | self.histogram = Histogram() |
Here is a snippet: <|code_start|>
app = Flask(__name__)
registry = DistributedRegistry()
registry.connect()
timer = registry.timer('my.timer')
@app.route('/')
def hello():
with timer.time():
return 'finished'
if __name__ == '__main__':
def _report(_registry):
while True:
sleep(100)
<|code_end|>
. Write the next line using the current file imports:
from time import sleep
from flask import Flask
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
and context from other files:
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
, which may include functions, classes, or code. Output only the next line. | RegistryAggregator(_report).start() |
Predict the next line for this snippet: <|code_start|>from __future__ import print_function
def is_within(delta):
class _(object):
def of(self, expected_value):
def _check(value):
return abs(value - expected_value) < delta
return arg.passes_test(_check)
return _()
is_float = arg.passes_test(lambda x: isinstance(x, float))
<|code_end|>
with the help of current file imports:
from datetime import timedelta
from time import sleep
from fudge.inspector import arg
from tapes.reporting.statsd import StatsdReporter
from tests.local.base import StatsTest
import fudge
and context from other files:
# Path: tapes/reporting/statsd.py
# class StatsdReporter(ScheduledReporter):
# """Reporter for StatsD."""
# def __init__(self, interval, host='localhost', port=8125, prefix=None, registry=None):
# """
# :param interval: a timedelta, how often metrics are reported
# :param host: the statsd host
# :param port: the statsd port
# :param prefix: the statsd prefix to use
# :param registry: the registry to report on, defaults to the global one
# """
# super(StatsdReporter, self).__init__(interval, registry)
# self.statsd_client = statsd.StatsClient(host, port, prefix)
#
# def _report_meter(self, name, meter):
# stats = meter.get_values()
# self.statsd_client.gauge('{}.total'.format(name), stats['count'])
# self.statsd_client.timing('{}.m1_rate'.format(name), stats['m1'])
# self.statsd_client.timing('{}.m5_rate'.format(name), stats['m5'])
# self.statsd_client.timing('{}.m15_rate'.format(name), stats['m15'])
#
# def _report_timer(self, name, timer):
# stats = timer.get_values()
# self.statsd_client.gauge('{}.total'.format(name), stats['count'])
# self.statsd_client.timing('{}.m1_rate'.format(name), stats['m1'])
# self.statsd_client.timing('{}.m5_rate'.format(name), stats['m5'])
# self.statsd_client.timing('{}.m15_rate'.format(name), stats['m15'])
# # statsd wants millis, this is in seconds internally
# self.statsd_client.timing('{}.min'.format(name), stats['min'] * 1000)
# self.statsd_client.timing('{}.max'.format(name), stats['max'] * 1000)
# self.statsd_client.timing('{}.mean'.format(name), stats['mean'] * 1000)
# self.statsd_client.timing('{}.stddev'.format(name), stats['stddev'] * 1000)
# self.statsd_client.timing('{}.q50'.format(name), stats['q50'] * 1000)
# self.statsd_client.timing('{}.q75'.format(name), stats['q75'] * 1000)
# self.statsd_client.timing('{}.q98'.format(name), stats['q98'] * 1000)
# self.statsd_client.timing('{}.q99'.format(name), stats['q99'] * 1000)
# self.statsd_client.timing('{}.q999'.format(name), stats['q999'] * 1000)
#
# def _report_gauge(self, name, gauge):
# stats = gauge.get_values()
# self.statsd_client.gauge('{}.value'.format(name), stats['value'])
#
# def _report_counter(self, name, counter):
# stats = counter.get_values()
# self.statsd_client.incr('{}.value'.format(name), stats['value'])
#
# def _report_histogram(self, name, histogram):
# stats = histogram.get_values()
# self.statsd_client.gauge('{}.total'.format(name), stats['count'])
# self.statsd_client.timing('{}.min'.format(name), stats['min'])
# self.statsd_client.timing('{}.max'.format(name), stats['max'])
# self.statsd_client.timing('{}.mean'.format(name), stats['mean'])
# self.statsd_client.timing('{}.stddev'.format(name), stats['stddev'])
# self.statsd_client.timing('{}.q50'.format(name), stats['q50'])
# self.statsd_client.timing('{}.q75'.format(name), stats['q75'])
# self.statsd_client.timing('{}.q98'.format(name), stats['q98'])
# self.statsd_client.timing('{}.q99'.format(name), stats['q99'])
# self.statsd_client.timing('{}.q999'.format(name), stats['q999'])
#
# def _talk_this_way(self, name, thing):
# if isinstance(thing, Meter):
# self._report_meter(name, thing)
# elif isinstance(thing, Timer):
# self._report_timer(name, thing)
# elif isinstance(thing, Gauge):
# self._report_gauge(name, thing)
# elif isinstance(thing, Counter):
# self._report_counter(name, thing)
# elif isinstance(thing, Histogram):
# self._report_histogram(name, thing)
# else:
# raise ValueError('No clue what a {} is'.format(type(thing)))
#
# def _walk_this_way(self, stats, curr_name=''):
# try:
# for k, v in stats.items():
# self._walk_this_way(v, '{}.{}'.format(curr_name, k) if curr_name else k)
# except AttributeError:
# self._talk_this_way(curr_name, stats)
#
# def report(self):
# if reporting_logger.isEnabledFor(logging.DEBUG):
# reporting_logger.debug('Reporting to StatsD %s', self.registry.get_stats())
# self._walk_this_way(self.registry.stats, '')
#
# Path: tests/local/base.py
# class StatsTest(unittest.TestCase):
# def setUp(self):
# self.registry = Registry()
, which may contain function names, class names, or code. Output only the next line. | class StatsdReportingTestCase(StatsTest): |
Using the snippet: <|code_start|>from __future__ import division
_INTERVAL = 5.0
class Meter(Stat):
def __init__(self):
self.last_tick = time()
self.count = 0
<|code_end|>
, determine the next line of code. You have imports:
from time import time
from .average import EWMA
from .stats import Stat
and context (class names, function names, or code) available:
# Path: tapes/local/average.py
# class EWMA(object):
# @classmethod
# def one(cls):
# return EWMA(_M1_ALPHA, _INTERVAL)
#
# @classmethod
# def five(cls):
# return EWMA(_M5_ALPHA, _INTERVAL)
#
# @classmethod
# def fifteen(cls):
# return EWMA(_M15_ALPHA, _INTERVAL)
#
# def __init__(self, alpha, interval):
# self.alpha = alpha
# self.interval = interval
# self.rate = 0.0
# self.uncounted = 0
#
# def get_rate(self):
# return self.rate
#
# def update(self, n):
# self.uncounted += n
#
# def _tick_uninitialized(self):
# count = self.uncounted
# self.uncounted = 0
# instant_rate = count / self.interval
# self.rate = instant_rate
# self.tick = self._tick_initialized
#
# def _tick_initialized(self):
# count = self.uncounted
# self.uncounted = 0
# instant_rate = count / self.interval
# self.rate += (self.alpha * (instant_rate - self.rate))
#
# tick = _tick_uninitialized
#
# Path: tapes/local/stats.py
# class Stat(object):
#
# @abc.abstractmethod
# def get_values(self):
# raise NotImplementedError()
. Output only the next line. | self.m1 = EWMA.one() |
Continue the code snippet: <|code_start|>
class CounterProxy(MetricsProxy):
def __init__(self, socket, name):
super(CounterProxy, self).__init__(socket)
self.name = name
def increment(self, n=1):
<|code_end|>
. Use current file imports:
from .proxy import MetricsProxy
from .message import Message
and context (classes, functions, or code) from other files:
# Path: tapes/distributed/proxy.py
# class MetricsProxy(object):
# def __init__(self, socket):
# self.socket = socket
#
# def send(self, message):
# distributed_logger.debug('Sending message %s', message)
# self.socket.send_pyobj(message)
#
# Path: tapes/distributed/message.py
. Output only the next line. | self.send(Message('counter', self.name, n)) |
Given the code snippet: <|code_start|>
@abc.abstractmethod
def timer(self, name):
raise NotImplementedError()
@abc.abstractmethod
def gauge(self, name, producer):
raise NotImplementedError()
@abc.abstractmethod
def counter(self, name):
raise NotImplementedError()
@abc.abstractmethod
def histogram(self, name):
raise NotImplementedError()
class Registry(BaseRegistry):
"""Factory and storage location for all metrics stuff.
Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
"""
def meter(self, name):
"""Creates or gets an existing meter.
:param name: The name
:return: The created or existing meter for the given name
"""
<|code_end|>
, generate the next line using the imports in this file:
import functools
import abc
import six
from addict import Dict
from .local.meter import Meter
from .local.counter import Counter
from .local.gauge import Gauge
from .local.histogram import Histogram
from .local.timer import Timer
and context (functions, classes, or occasionally code) from other files:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/counter.py
# class Counter(Stat):
# def __init__(self):
# self.count = 0
#
# def get_values(self):
# return {
# 'value': self.count
# }
#
# def increment(self, n=1):
# self.count += n
#
# def decrement(self, n=1):
# self.count -= n
#
# Path: tapes/local/gauge.py
# class Gauge(Stat):
# def __init__(self, producer):
# self.producer = producer
#
# def get_values(self):
# return {
# 'value': self.producer()
# }
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
#
# Path: tapes/local/timer.py
# class Timer(Stat):
# def __init__(self):
# self.count = 0
# self.meter = Meter()
# self.histogram = Histogram()
# super(Timer, self).__init__()
#
# @contextlib.contextmanager
# def time(self):
# start_time = time()
# try:
# yield
# finally:
# self.update(time() - start_time)
#
# def update(self, value):
# self.meter.mark()
# self.histogram.update(value)
#
# def get_values(self):
# values = self.meter.get_values()
# values.update(self.histogram.get_values())
# return values
. Output only the next line. | return self._get_or_add_stat(name, Meter) |
Based on the snippet: <|code_start|> def meter(self, name):
"""Creates or gets an existing meter.
:param name: The name
:return: The created or existing meter for the given name
"""
return self._get_or_add_stat(name, Meter)
def timer(self, name):
"""Creates or gets an existing timer.
:param name: The name
:return: The created or existing timer for the given name
"""
return self._get_or_add_stat(name, Timer)
def gauge(self, name, producer):
"""Creates or gets an existing gauge.
:param name: The name
:return: The created or existing gauge for the given name
"""
return self._get_or_add_stat(name, functools.partial(Gauge, producer))
def counter(self, name):
"""Creates or gets an existing counter.
:param name: The name
:return: The created or existing counter for the given name
"""
<|code_end|>
, predict the immediate next line with the help of imports:
import functools
import abc
import six
from addict import Dict
from .local.meter import Meter
from .local.counter import Counter
from .local.gauge import Gauge
from .local.histogram import Histogram
from .local.timer import Timer
and context (classes, functions, sometimes code) from other files:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/counter.py
# class Counter(Stat):
# def __init__(self):
# self.count = 0
#
# def get_values(self):
# return {
# 'value': self.count
# }
#
# def increment(self, n=1):
# self.count += n
#
# def decrement(self, n=1):
# self.count -= n
#
# Path: tapes/local/gauge.py
# class Gauge(Stat):
# def __init__(self, producer):
# self.producer = producer
#
# def get_values(self):
# return {
# 'value': self.producer()
# }
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
#
# Path: tapes/local/timer.py
# class Timer(Stat):
# def __init__(self):
# self.count = 0
# self.meter = Meter()
# self.histogram = Histogram()
# super(Timer, self).__init__()
#
# @contextlib.contextmanager
# def time(self):
# start_time = time()
# try:
# yield
# finally:
# self.update(time() - start_time)
#
# def update(self, value):
# self.meter.mark()
# self.histogram.update(value)
#
# def get_values(self):
# values = self.meter.get_values()
# values.update(self.histogram.get_values())
# return values
. Output only the next line. | return self._get_or_add_stat(name, Counter) |
Here is a snippet: <|code_start|>
class Registry(BaseRegistry):
"""Factory and storage location for all metrics stuff.
Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
"""
def meter(self, name):
"""Creates or gets an existing meter.
:param name: The name
:return: The created or existing meter for the given name
"""
return self._get_or_add_stat(name, Meter)
def timer(self, name):
"""Creates or gets an existing timer.
:param name: The name
:return: The created or existing timer for the given name
"""
return self._get_or_add_stat(name, Timer)
def gauge(self, name, producer):
"""Creates or gets an existing gauge.
:param name: The name
:return: The created or existing gauge for the given name
"""
<|code_end|>
. Write the next line using the current file imports:
import functools
import abc
import six
from addict import Dict
from .local.meter import Meter
from .local.counter import Counter
from .local.gauge import Gauge
from .local.histogram import Histogram
from .local.timer import Timer
and context from other files:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/counter.py
# class Counter(Stat):
# def __init__(self):
# self.count = 0
#
# def get_values(self):
# return {
# 'value': self.count
# }
#
# def increment(self, n=1):
# self.count += n
#
# def decrement(self, n=1):
# self.count -= n
#
# Path: tapes/local/gauge.py
# class Gauge(Stat):
# def __init__(self, producer):
# self.producer = producer
#
# def get_values(self):
# return {
# 'value': self.producer()
# }
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
#
# Path: tapes/local/timer.py
# class Timer(Stat):
# def __init__(self):
# self.count = 0
# self.meter = Meter()
# self.histogram = Histogram()
# super(Timer, self).__init__()
#
# @contextlib.contextmanager
# def time(self):
# start_time = time()
# try:
# yield
# finally:
# self.update(time() - start_time)
#
# def update(self, value):
# self.meter.mark()
# self.histogram.update(value)
#
# def get_values(self):
# values = self.meter.get_values()
# values.update(self.histogram.get_values())
# return values
, which may include functions, classes, or code. Output only the next line. | return self._get_or_add_stat(name, functools.partial(Gauge, producer)) |
Given snippet: <|code_start|> def timer(self, name):
"""Creates or gets an existing timer.
:param name: The name
:return: The created or existing timer for the given name
"""
return self._get_or_add_stat(name, Timer)
def gauge(self, name, producer):
"""Creates or gets an existing gauge.
:param name: The name
:return: The created or existing gauge for the given name
"""
return self._get_or_add_stat(name, functools.partial(Gauge, producer))
def counter(self, name):
"""Creates or gets an existing counter.
:param name: The name
:return: The created or existing counter for the given name
"""
return self._get_or_add_stat(name, Counter)
def histogram(self, name):
"""Creates or gets an existing histogram.
:param name: The name
:return: The created or existing histogram for the given name
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import functools
import abc
import six
from addict import Dict
from .local.meter import Meter
from .local.counter import Counter
from .local.gauge import Gauge
from .local.histogram import Histogram
from .local.timer import Timer
and context:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/counter.py
# class Counter(Stat):
# def __init__(self):
# self.count = 0
#
# def get_values(self):
# return {
# 'value': self.count
# }
#
# def increment(self, n=1):
# self.count += n
#
# def decrement(self, n=1):
# self.count -= n
#
# Path: tapes/local/gauge.py
# class Gauge(Stat):
# def __init__(self, producer):
# self.producer = producer
#
# def get_values(self):
# return {
# 'value': self.producer()
# }
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
#
# Path: tapes/local/timer.py
# class Timer(Stat):
# def __init__(self):
# self.count = 0
# self.meter = Meter()
# self.histogram = Histogram()
# super(Timer, self).__init__()
#
# @contextlib.contextmanager
# def time(self):
# start_time = time()
# try:
# yield
# finally:
# self.update(time() - start_time)
#
# def update(self, value):
# self.meter.mark()
# self.histogram.update(value)
#
# def get_values(self):
# values = self.meter.get_values()
# values.update(self.histogram.get_values())
# return values
which might include code, classes, or functions. Output only the next line. | return self._get_or_add_stat(name, Histogram) |
Using the snippet: <|code_start|>
@abc.abstractmethod
def counter(self, name):
raise NotImplementedError()
@abc.abstractmethod
def histogram(self, name):
raise NotImplementedError()
class Registry(BaseRegistry):
"""Factory and storage location for all metrics stuff.
Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
"""
def meter(self, name):
"""Creates or gets an existing meter.
:param name: The name
:return: The created or existing meter for the given name
"""
return self._get_or_add_stat(name, Meter)
def timer(self, name):
"""Creates or gets an existing timer.
:param name: The name
:return: The created or existing timer for the given name
"""
<|code_end|>
, determine the next line of code. You have imports:
import functools
import abc
import six
from addict import Dict
from .local.meter import Meter
from .local.counter import Counter
from .local.gauge import Gauge
from .local.histogram import Histogram
from .local.timer import Timer
and context (class names, function names, or code) available:
# Path: tapes/local/meter.py
# class Meter(Stat):
# def __init__(self):
# self.last_tick = time()
# self.count = 0
# self.m1 = EWMA.one()
# self.m5 = EWMA.five()
# self.m15 = EWMA.fifteen()
#
# def mark(self, n=1):
# self._tick_if_needed()
# self.count += n
# self.m1.update(n)
# self.m5.update(n)
# self.m15.update(n)
#
# def _get_one_minute(self):
# self._tick_if_needed()
# return self.m1.get_rate()
#
# def _get_five_minute(self):
# self._tick_if_needed()
# return self.m5.get_rate()
#
# def _get_fifteen_minute(self):
# self._tick_if_needed()
# return self.m15.get_rate()
#
# def _tick_if_needed(self):
# old_tick = self.last_tick
# new_tick = time()
# age = new_tick - old_tick
# if age > _INTERVAL:
# new_interval_start_tick = new_tick - age % _INTERVAL
# self.last_tick = new_interval_start_tick
# required_ticks = int(age // _INTERVAL)
# for _ in range(required_ticks):
# self.m1.tick()
# self.m5.tick()
# self.m15.tick()
#
# def get_values(self):
# return {
# 'count': self.count,
# 'm1': self._get_one_minute(),
# 'm5': self._get_five_minute(),
# 'm15': self._get_fifteen_minute(),
# }
#
# Path: tapes/local/counter.py
# class Counter(Stat):
# def __init__(self):
# self.count = 0
#
# def get_values(self):
# return {
# 'value': self.count
# }
#
# def increment(self, n=1):
# self.count += n
#
# def decrement(self, n=1):
# self.count -= n
#
# Path: tapes/local/gauge.py
# class Gauge(Stat):
# def __init__(self, producer):
# self.producer = producer
#
# def get_values(self):
# return {
# 'value': self.producer()
# }
#
# Path: tapes/local/histogram.py
# class Histogram(Stat):
# def __init__(self):
# self.count = 0
# self.reservoir = ExponentiallyDecayingReservoir()
#
# def update(self, value):
# self.count += 1
# self.reservoir.update(value)
#
# def get_values(self):
# snapshot = self.reservoir.get_snapshot()
# return {
# 'count': self.count,
# 'min': snapshot.get_min(),
# 'max': snapshot.get_max(),
# 'mean': snapshot.get_mean(),
# 'stddev': snapshot.get_sd(),
# 'q50': snapshot.get_quantile(0.5),
# 'q75': snapshot.get_quantile(0.75),
# 'q95': snapshot.get_quantile(0.95),
# 'q98': snapshot.get_quantile(0.98),
# 'q99': snapshot.get_quantile(0.99),
# 'q999': snapshot.get_quantile(0.999),
# }
#
# Path: tapes/local/timer.py
# class Timer(Stat):
# def __init__(self):
# self.count = 0
# self.meter = Meter()
# self.histogram = Histogram()
# super(Timer, self).__init__()
#
# @contextlib.contextmanager
# def time(self):
# start_time = time()
# try:
# yield
# finally:
# self.update(time() - start_time)
#
# def update(self, value):
# self.meter.mark()
# self.histogram.update(value)
#
# def get_values(self):
# values = self.meter.get_values()
# values.update(self.histogram.get_values())
# return values
. Output only the next line. | return self._get_or_add_stat(name, Timer) |
Given snippet: <|code_start|>from __future__ import division
class MeterProxy(MetricsProxy):
def __init__(self, socket, name):
super(MeterProxy, self).__init__(socket)
self.name = name
def mark(self, n=1):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .proxy import MetricsProxy
from .message import Message
and context:
# Path: tapes/distributed/proxy.py
# class MetricsProxy(object):
# def __init__(self, socket):
# self.socket = socket
#
# def send(self, message):
# distributed_logger.debug('Sending message %s', message)
# self.socket.send_pyobj(message)
#
# Path: tapes/distributed/message.py
which might include code, classes, or functions. Output only the next line. | self.send(Message('meter', self.name, n)) |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import print_function
class HTTPReportingTestCase(StatsTest):
def test_http_reporter_serves_stats_as_json(self):
counter = self.registry.counter('some.path')
counter.increment(42)
sock, http_port = bind_unused_port()
sock.close()
<|code_end|>
, predict the next line using imports from the current file:
import requests
from tornado.testing import bind_unused_port
from tapes.reporting.http import HTTPReporter
from tests.local.base import StatsTest
and context including class names, function names, and sometimes code from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tests/local/base.py
# class StatsTest(unittest.TestCase):
# def setUp(self):
# self.registry = Registry()
. Output only the next line. | reporter = HTTPReporter(http_port, self.registry) |
Here is a snippet: <|code_start|>
class TornadoConsoleReportingTestCase(testing.AsyncTestCase):
@testing.gen_test
def test_tornado_stream_reporting_writes_to_stream(self):
<|code_end|>
. Write the next line using the current file imports:
from datetime import timedelta
from six import StringIO
from tornado import gen, testing
from tapes.registry import Registry
from tapes.reporting.tornado.stream import TornadoStreamReporter
import json
import os
and context from other files:
# Path: tapes/registry.py
# class Registry(BaseRegistry):
# """Factory and storage location for all metrics stuff.
#
# Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
# """
#
# def meter(self, name):
# """Creates or gets an existing meter.
#
# :param name: The name
# :return: The created or existing meter for the given name
# """
# return self._get_or_add_stat(name, Meter)
#
# def timer(self, name):
# """Creates or gets an existing timer.
#
# :param name: The name
# :return: The created or existing timer for the given name
# """
# return self._get_or_add_stat(name, Timer)
#
# def gauge(self, name, producer):
# """Creates or gets an existing gauge.
#
# :param name: The name
# :return: The created or existing gauge for the given name
# """
# return self._get_or_add_stat(name, functools.partial(Gauge, producer))
#
# def counter(self, name):
# """Creates or gets an existing counter.
#
# :param name: The name
# :return: The created or existing counter for the given name
# """
# return self._get_or_add_stat(name, Counter)
#
# def histogram(self, name):
# """Creates or gets an existing histogram.
#
# :param name: The name
# :return: The created or existing histogram for the given name
# """
# return self._get_or_add_stat(name, Histogram)
#
# def get_stats(self):
# """Retrieves the current values of the metrics associated with this registry, formatted as a dict.
#
# The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
# use it as either a regular dict or via attributes, e.g.,
#
# >>> import tapes
# >>> registry = tapes.Registry()
# >>> timer = registry.timer('my.timer')
# >>> stats = registry.get_stats()
# >>> print(stats['my']['timer']['count'])
# 0
# >>> print(stats.my.timer.count)
# 0
#
# :return: The values of the metrics associated with this registry
# """
# def _get_value(stats):
# try:
# return Dict((k, _get_value(v)) for k, v in stats.items())
# except AttributeError:
# return Dict(stats.get_values())
#
# return _get_value(self.stats)
#
# Path: tapes/reporting/tornado/stream.py
# class TornadoStreamReporter(TornadoScheduledReporter):
# """Writes JSON serialized metrics to a stream using an ``IOLoop`` for scheduling"""
# def __init__(self, interval, stream=sys.stdout, registry=None, io_loop=None):
# """
# :param interval: a timedelta
# :param stream: the stream to write to, defaults to stdout
# :param registry: the registry to report from, defaults to the global one
# :param io_loop: the IOLoop to use, defaults to ``IOLoop.current()``
# """
# super(TornadoStreamReporter, self).__init__(interval, registry, io_loop)
# self.stream = stream
#
# def report(self):
# json.dump(self.registry.get_stats(), self.stream)
# self.stream.write(os.linesep)
, which may include functions, classes, or code. Output only the next line. | registry = Registry() |
Using the snippet: <|code_start|>
class TornadoConsoleReportingTestCase(testing.AsyncTestCase):
@testing.gen_test
def test_tornado_stream_reporting_writes_to_stream(self):
registry = Registry()
counter = registry.counter('some.tornado.path')
counter.increment(66)
s = StringIO()
<|code_end|>
, determine the next line of code. You have imports:
from datetime import timedelta
from six import StringIO
from tornado import gen, testing
from tapes.registry import Registry
from tapes.reporting.tornado.stream import TornadoStreamReporter
import json
import os
and context (class names, function names, or code) available:
# Path: tapes/registry.py
# class Registry(BaseRegistry):
# """Factory and storage location for all metrics stuff.
#
# Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
# """
#
# def meter(self, name):
# """Creates or gets an existing meter.
#
# :param name: The name
# :return: The created or existing meter for the given name
# """
# return self._get_or_add_stat(name, Meter)
#
# def timer(self, name):
# """Creates or gets an existing timer.
#
# :param name: The name
# :return: The created or existing timer for the given name
# """
# return self._get_or_add_stat(name, Timer)
#
# def gauge(self, name, producer):
# """Creates or gets an existing gauge.
#
# :param name: The name
# :return: The created or existing gauge for the given name
# """
# return self._get_or_add_stat(name, functools.partial(Gauge, producer))
#
# def counter(self, name):
# """Creates or gets an existing counter.
#
# :param name: The name
# :return: The created or existing counter for the given name
# """
# return self._get_or_add_stat(name, Counter)
#
# def histogram(self, name):
# """Creates or gets an existing histogram.
#
# :param name: The name
# :return: The created or existing histogram for the given name
# """
# return self._get_or_add_stat(name, Histogram)
#
# def get_stats(self):
# """Retrieves the current values of the metrics associated with this registry, formatted as a dict.
#
# The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
# use it as either a regular dict or via attributes, e.g.,
#
# >>> import tapes
# >>> registry = tapes.Registry()
# >>> timer = registry.timer('my.timer')
# >>> stats = registry.get_stats()
# >>> print(stats['my']['timer']['count'])
# 0
# >>> print(stats.my.timer.count)
# 0
#
# :return: The values of the metrics associated with this registry
# """
# def _get_value(stats):
# try:
# return Dict((k, _get_value(v)) for k, v in stats.items())
# except AttributeError:
# return Dict(stats.get_values())
#
# return _get_value(self.stats)
#
# Path: tapes/reporting/tornado/stream.py
# class TornadoStreamReporter(TornadoScheduledReporter):
# """Writes JSON serialized metrics to a stream using an ``IOLoop`` for scheduling"""
# def __init__(self, interval, stream=sys.stdout, registry=None, io_loop=None):
# """
# :param interval: a timedelta
# :param stream: the stream to write to, defaults to stdout
# :param registry: the registry to report from, defaults to the global one
# :param io_loop: the IOLoop to use, defaults to ``IOLoop.current()``
# """
# super(TornadoStreamReporter, self).__init__(interval, registry, io_loop)
# self.stream = stream
#
# def report(self):
# json.dump(self.registry.get_stats(), self.stream)
# self.stream.write(os.linesep)
. Output only the next line. | reporter = TornadoStreamReporter(timedelta(milliseconds=100), stream=s, registry=registry) |
Predict the next line after this snippet: <|code_start|>
registry = DistributedRegistry()
class TimedHandler(web.RequestHandler):
timer = registry.timer('my.timer')
@gen.coroutine
def get(self):
with TimedHandler.timer.time():
self.write('finished')
if __name__ == "__main__":
application = web.Application([
(r"/", TimedHandler),
])
<|code_end|>
using the current file's imports:
from tornado import ioloop, web, httpserver, gen
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
and any relevant context from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
. Output only the next line. | RegistryAggregator(HTTPReporter(8889)).start() |
Predict the next line for this snippet: <|code_start|>
registry = DistributedRegistry()
class TimedHandler(web.RequestHandler):
timer = registry.timer('my.timer')
@gen.coroutine
def get(self):
with TimedHandler.timer.time():
self.write('finished')
if __name__ == "__main__":
application = web.Application([
(r"/", TimedHandler),
])
<|code_end|>
with the help of current file imports:
from tornado import ioloop, web, httpserver, gen
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
and context from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
, which may contain function names, class names, or code. Output only the next line. | RegistryAggregator(HTTPReporter(8889)).start() |
Predict the next line for this snippet: <|code_start|>
class HistogramProxy(MetricsProxy):
def __init__(self, socket, name):
super(HistogramProxy, self).__init__(socket)
self.name = name
def update(self, value):
<|code_end|>
with the help of current file imports:
from .proxy import MetricsProxy
from .message import Message
and context from other files:
# Path: tapes/distributed/proxy.py
# class MetricsProxy(object):
# def __init__(self, socket):
# self.socket = socket
#
# def send(self, message):
# distributed_logger.debug('Sending message %s', message)
# self.socket.send_pyobj(message)
#
# Path: tapes/distributed/message.py
, which may contain function names, class names, or code. Output only the next line. | self.send(Message('histogram', self.name, value)) |
Using the snippet: <|code_start|>
class ExponentiallyDecayingReservoirTestCase(unittest.TestCase):
@fudge.patch('tapes.reservoir.time', 'tapes.reservoir.random')
def test_update_always_applies_weight_to_values(self, time, random):
(time
.expects_call().returns(1.0)
.next_call().returns(10).next_call().returns(10)
.next_call().returns(20).next_call().returns(20)
.next_call().returns(30).next_call().returns(30)
.next_call().returns(40).next_call().returns(40)
.next_call().returns(50).next_call().returns(50)
.next_call().returns(60).next_call().returns(60))
(random
.expects_call().returns(0.1)
.next_call().returns(0.2)
.next_call().returns(0.3)
.next_call().returns(0.4)
.next_call().returns(0.01)
.next_call().returns(0.01))
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import fudge
from tapes.reservoir import ExponentiallyDecayingReservoir
and context (class names, function names, or code) available:
# Path: tapes/reservoir.py
# class ExponentiallyDecayingReservoir(object):
# def __init__(self, size=_DEFAULT_SIZE, alpha=_DEFAULT_ALPHA):
# self.size = size
# self.alpha = alpha
# self.start_time = time()
# self.next_scale_time = self.start_time + _RESCALE_THRESHOLD
# self.values = SortedDict()
#
# def _rescale_if_needed(self):
# now = time()
# if now > self.next_scale_time:
# self.next_scale_time = now + _RESCALE_THRESHOLD
# old_start_time = self.start_time
# self.start_time = now
# scaling_factor = exp(-self.alpha * (self.start_time - old_start_time))
#
# self.values = SortedDict(
# (priority * scaling_factor, _WeightedSample(sample.value, sample.weight * scaling_factor))
# for priority, sample in self.values.items()
# )
#
# def update(self, value):
# self._rescale_if_needed()
# timestamp = time()
# item_weight = exp(self.alpha * (timestamp - self.start_time))
# sample = _WeightedSample(value, item_weight)
# priority = item_weight / random()
#
# if len(self.values) < self.size:
# self.values[priority] = sample
# else:
# first_key = next(self.values.iterkeys())
# if first_key < priority:
# self.values.setdefault(priority, sample)
# del self.values[first_key]
#
# def get_snapshot(self):
# return _WeightedSnapshot(self.values.values())
. Output only the next line. | reservoir = ExponentiallyDecayingReservoir(size=3) |
Here is a snippet: <|code_start|>
def test_distributed_registry_logs_stuff():
sock, http_port = bind_unused_port()
sock.close()
<|code_end|>
. Write the next line using the current file imports:
from time import sleep
from tornado.testing import bind_unused_port
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
import requests
and context from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
, which may include functions, classes, or code. Output only the next line. | aggregator = RegistryAggregator(HTTPReporter(http_port)) |
Continue the code snippet: <|code_start|>
def test_distributed_registry_logs_stuff():
sock, http_port = bind_unused_port()
sock.close()
<|code_end|>
. Use current file imports:
from time import sleep
from tornado.testing import bind_unused_port
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
import requests
and context (classes, functions, or code) from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
. Output only the next line. | aggregator = RegistryAggregator(HTTPReporter(http_port)) |
Given snippet: <|code_start|>
class TornadoStatsdReportingTestCase(testing.AsyncTestCase):
@testing.gen_test
@fudge.patch('statsd.StatsClient')
def test_tornado_statsd_reporter_works(self, StatsClient):
(StatsClient.expects_call().with_args('localhost', 8125, None)
.returns_fake()
.expects('incr').with_args('some.path.value', 22))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import timedelta
from tornado import gen, testing
from tapes.registry import Registry
from tapes.reporting.tornado.statsd import TornadoStatsdReporter
import fudge
and context:
# Path: tapes/registry.py
# class Registry(BaseRegistry):
# """Factory and storage location for all metrics stuff.
#
# Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
# """
#
# def meter(self, name):
# """Creates or gets an existing meter.
#
# :param name: The name
# :return: The created or existing meter for the given name
# """
# return self._get_or_add_stat(name, Meter)
#
# def timer(self, name):
# """Creates or gets an existing timer.
#
# :param name: The name
# :return: The created or existing timer for the given name
# """
# return self._get_or_add_stat(name, Timer)
#
# def gauge(self, name, producer):
# """Creates or gets an existing gauge.
#
# :param name: The name
# :return: The created or existing gauge for the given name
# """
# return self._get_or_add_stat(name, functools.partial(Gauge, producer))
#
# def counter(self, name):
# """Creates or gets an existing counter.
#
# :param name: The name
# :return: The created or existing counter for the given name
# """
# return self._get_or_add_stat(name, Counter)
#
# def histogram(self, name):
# """Creates or gets an existing histogram.
#
# :param name: The name
# :return: The created or existing histogram for the given name
# """
# return self._get_or_add_stat(name, Histogram)
#
# def get_stats(self):
# """Retrieves the current values of the metrics associated with this registry, formatted as a dict.
#
# The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
# use it as either a regular dict or via attributes, e.g.,
#
# >>> import tapes
# >>> registry = tapes.Registry()
# >>> timer = registry.timer('my.timer')
# >>> stats = registry.get_stats()
# >>> print(stats['my']['timer']['count'])
# 0
# >>> print(stats.my.timer.count)
# 0
#
# :return: The values of the metrics associated with this registry
# """
# def _get_value(stats):
# try:
# return Dict((k, _get_value(v)) for k, v in stats.items())
# except AttributeError:
# return Dict(stats.get_values())
#
# return _get_value(self.stats)
#
# Path: tapes/reporting/tornado/statsd.py
# class TornadoStatsdReporter(StatsdReporter, TornadoScheduledReporter):
# """Reports to StatsD using an IOLoop for scheduling"""
# @gen.coroutine
# def report(self):
# super(TornadoStatsdReporter, self).report()
which might include code, classes, or functions. Output only the next line. | registry = Registry() |
Predict the next line for this snippet: <|code_start|>
class TornadoStatsdReportingTestCase(testing.AsyncTestCase):
@testing.gen_test
@fudge.patch('statsd.StatsClient')
def test_tornado_statsd_reporter_works(self, StatsClient):
(StatsClient.expects_call().with_args('localhost', 8125, None)
.returns_fake()
.expects('incr').with_args('some.path.value', 22))
registry = Registry()
counter = registry.counter('some.path')
counter.increment(5)
counter.increment(20)
counter.decrement(3)
<|code_end|>
with the help of current file imports:
from datetime import timedelta
from tornado import gen, testing
from tapes.registry import Registry
from tapes.reporting.tornado.statsd import TornadoStatsdReporter
import fudge
and context from other files:
# Path: tapes/registry.py
# class Registry(BaseRegistry):
# """Factory and storage location for all metrics stuff.
#
# Use producer methods to create metrics. Metrics are hierarchical, the names are split on '.'.
# """
#
# def meter(self, name):
# """Creates or gets an existing meter.
#
# :param name: The name
# :return: The created or existing meter for the given name
# """
# return self._get_or_add_stat(name, Meter)
#
# def timer(self, name):
# """Creates or gets an existing timer.
#
# :param name: The name
# :return: The created or existing timer for the given name
# """
# return self._get_or_add_stat(name, Timer)
#
# def gauge(self, name, producer):
# """Creates or gets an existing gauge.
#
# :param name: The name
# :return: The created or existing gauge for the given name
# """
# return self._get_or_add_stat(name, functools.partial(Gauge, producer))
#
# def counter(self, name):
# """Creates or gets an existing counter.
#
# :param name: The name
# :return: The created or existing counter for the given name
# """
# return self._get_or_add_stat(name, Counter)
#
# def histogram(self, name):
# """Creates or gets an existing histogram.
#
# :param name: The name
# :return: The created or existing histogram for the given name
# """
# return self._get_or_add_stat(name, Histogram)
#
# def get_stats(self):
# """Retrieves the current values of the metrics associated with this registry, formatted as a dict.
#
# The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
# use it as either a regular dict or via attributes, e.g.,
#
# >>> import tapes
# >>> registry = tapes.Registry()
# >>> timer = registry.timer('my.timer')
# >>> stats = registry.get_stats()
# >>> print(stats['my']['timer']['count'])
# 0
# >>> print(stats.my.timer.count)
# 0
#
# :return: The values of the metrics associated with this registry
# """
# def _get_value(stats):
# try:
# return Dict((k, _get_value(v)) for k, v in stats.items())
# except AttributeError:
# return Dict(stats.get_values())
#
# return _get_value(self.stats)
#
# Path: tapes/reporting/tornado/statsd.py
# class TornadoStatsdReporter(StatsdReporter, TornadoScheduledReporter):
# """Reports to StatsD using an IOLoop for scheduling"""
# @gen.coroutine
# def report(self):
# super(TornadoStatsdReporter, self).report()
, which may contain function names, class names, or code. Output only the next line. | reporter = TornadoStatsdReporter(timedelta(milliseconds=500), registry=registry) |
Given the following code snippet before the placeholder: <|code_start|>
registry = DistributedRegistry()
registry.connect()
timer = registry.timer('my.timer')
class TimedHandler(web.RequestHandler):
@gen.coroutine
def get(self):
with timer.time():
self.write('finished')
if __name__ == "__main__":
application = web.Application([
(r"/", TimedHandler),
])
<|code_end|>
, predict the next line using imports from the current file:
from tornado import ioloop, web, gen
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
and context including class names, function names, and sometimes code from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
. Output only the next line. | RegistryAggregator(HTTPReporter(8889)).start() |
Predict the next line after this snippet: <|code_start|>
registry = DistributedRegistry()
registry.connect()
timer = registry.timer('my.timer')
class TimedHandler(web.RequestHandler):
@gen.coroutine
def get(self):
with timer.time():
self.write('finished')
if __name__ == "__main__":
application = web.Application([
(r"/", TimedHandler),
])
<|code_end|>
using the current file's imports:
from tornado import ioloop, web, gen
from tapes.reporting.http import HTTPReporter
from tapes.distributed.registry import DistributedRegistry, RegistryAggregator
and any relevant context from other files:
# Path: tapes/reporting/http.py
# class HTTPReporter(Reporter):
# """Exposes metrics via HTTP.
#
# For web applications, you should almost certainly just use your existing framework's capabilities. This is for
# applications that don't have HTTP easily available.
# """
# def __init__(self, port, registry=None):
# """
# :param port: Port to listen on
# :param registry: The registry to report from, defaults to the global one
# """
# super(HTTPReporter, self).__init__(registry)
# self.port = port
# self.thread = None
# self.httpd = None
#
# def start(self):
# class _RequestHandler(BaseHTTPRequestHandler):
# def do_GET(inner_self):
# inner_self.send_response(200)
# response_string = json.dumps(self.registry.get_stats())
# inner_self.send_header('Content-Type', 'application/json')
# inner_self.send_header('Content-Length', len(response_string))
# inner_self.end_headers()
# inner_self.wfile.write(response_string.encode('utf-8'))
#
# server_address = '', self.port
#
# self.httpd = HTTPServer(server_address, _RequestHandler)
# self.thread = Thread(target=self.httpd.serve_forever)
# self.thread.start()
#
# def stop(self):
# self.httpd.shutdown()
# self.thread.join()
#
# Path: tapes/distributed/registry.py
# class DistributedRegistry(BaseRegistry):
# """A registry proxy that pushes metrics data to a ``RegistryAggregator``."""
# def __init__(self, socket_addr=_DEFAULT_IPC):
# """
# :param socket_addr: the 0MQ IPC socket address; has to be the same as corresponding aggregator's
# """
# super(DistributedRegistry, self).__init__()
# self.stats = dict()
# self.socket_addr = socket_addr
# self.zmq_context = None
# self.socket = None
#
# def meter(self, name):
# return self._get_or_add_stat(name, functools.partial(MeterProxy, self.socket, name))
#
# def timer(self, name):
# return self._get_or_add_stat(name, functools.partial(TimerProxy, self.socket, name))
#
# def gauge(self, name, producer):
# raise NotImplementedError('Gauge is unavailable in distributed mode')
#
# def counter(self, name):
# return self._get_or_add_stat(name, functools.partial(CounterProxy, self.socket, name))
#
# def histogram(self, name):
# return self._get_or_add_stat(name, functools.partial(HistogramProxy, self.socket, name))
#
# def connect(self):
# """Connects to the 0MQ socket and starts publishing."""
# distributed_logger.info('Connecting registry proxy to ZMQ socket %s', self.socket_addr)
# self.zmq_context = zmq.Context()
# sock = self.zmq_context.socket(zmq.PUB)
# sock.set_hwm(0)
# sock.setsockopt(zmq.LINGER, 0)
# sock.connect(self.socket_addr)
# distributed_logger.info('Connected registry proxy to ZMQ socket %s', self.socket_addr)
#
# def _reset_socket(values):
# for value in values:
# try:
# _reset_socket(value.values())
# except AttributeError:
# value.socket = sock
#
# distributed_logger.debug('Resetting socket on metrics proxies')
# _reset_socket(self.stats.values())
# self.socket = sock
# distributed_logger.debug('Reset socket on metrics proxies')
#
# def close(self):
# distributed_logger.info('Shutting down metrics proxy')
# self.socket.send_pyobj(Message('shutdown', 'noname', -1))
# self.socket.disconnect(self.socket_addr)
# self.socket.close()
# self.zmq_context.destroy()
# distributed_logger.info('Metrics proxy shutdown complete')
#
# class RegistryAggregator(object):
# """Aggregates multiple registry proxies and reports on the unified metrics."""
# def __init__(self, reporter, socket_addr=_DEFAULT_IPC):
# """Constructs a metrics registry aggregator.
#
# The ``registry`` field on the ``reporter`` argument will be reset to an implementation instance prior to
# calling ``start()``. Any previously set registry is not guaranteed to be used.
#
# :param reporter: the reporter to use
# :param socket_addr: the 0MQ socket address; has to be the same as corresponding proxies'
# """
# super(RegistryAggregator, self).__init__()
# self.socket_addr = socket_addr
# self.reporter = reporter
# self.process = None
#
# def start(self, fork=True):
# """Starts the registry aggregator.
#
# :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
# """
# if not fork:
# distributed_logger.info('Starting metrics aggregator, not forking')
# _registry_aggregator(self.reporter, self.socket_addr)
# else:
# distributed_logger.info('Starting metrics aggregator, forking')
# p = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
# p.start()
# distributed_logger.info('Started metrics aggregator as PID %s', p.pid)
# self.process = p
#
# def stop(self):
# """Terminates the forked process.
#
# Only valid if started as a fork, because... well you wouldn't get here otherwise.
# :return:
# """
# distributed_logger.info('Stopping metrics aggregator')
# self.process.terminate()
# self.process.join()
# distributed_logger.info('Stopped metrics aggregator')
. Output only the next line. | RegistryAggregator(HTTPReporter(8889)).start() |
Based on the snippet: <|code_start|>
class Histogram(Stat):
def __init__(self):
self.count = 0
<|code_end|>
, predict the immediate next line with the help of imports:
from tapes.reservoir import ExponentiallyDecayingReservoir
from .stats import Stat
and context (classes, functions, sometimes code) from other files:
# Path: tapes/reservoir.py
# class ExponentiallyDecayingReservoir(object):
# def __init__(self, size=_DEFAULT_SIZE, alpha=_DEFAULT_ALPHA):
# self.size = size
# self.alpha = alpha
# self.start_time = time()
# self.next_scale_time = self.start_time + _RESCALE_THRESHOLD
# self.values = SortedDict()
#
# def _rescale_if_needed(self):
# now = time()
# if now > self.next_scale_time:
# self.next_scale_time = now + _RESCALE_THRESHOLD
# old_start_time = self.start_time
# self.start_time = now
# scaling_factor = exp(-self.alpha * (self.start_time - old_start_time))
#
# self.values = SortedDict(
# (priority * scaling_factor, _WeightedSample(sample.value, sample.weight * scaling_factor))
# for priority, sample in self.values.items()
# )
#
# def update(self, value):
# self._rescale_if_needed()
# timestamp = time()
# item_weight = exp(self.alpha * (timestamp - self.start_time))
# sample = _WeightedSample(value, item_weight)
# priority = item_weight / random()
#
# if len(self.values) < self.size:
# self.values[priority] = sample
# else:
# first_key = next(self.values.iterkeys())
# if first_key < priority:
# self.values.setdefault(priority, sample)
# del self.values[first_key]
#
# def get_snapshot(self):
# return _WeightedSnapshot(self.values.values())
#
# Path: tapes/local/stats.py
# class Stat(object):
#
# @abc.abstractmethod
# def get_values(self):
# raise NotImplementedError()
. Output only the next line. | self.reservoir = ExponentiallyDecayingReservoir() |
Using the snippet: <|code_start|>from __future__ import print_function
class StreamReportingTestCase(StatsTest):
def test_threaded_stream_reporter_prints_stats_with_intervals(self):
counter = self.registry.counter(
'some.path'
)
counter.increment(42)
s = StringIO()
<|code_end|>
, determine the next line of code. You have imports:
from datetime import timedelta
from time import sleep
from six import StringIO
from six.moves import map
from tapes.reporting.stream import ThreadedStreamReporter
from tests.local.base import StatsTest
import json
import os
and context (class names, function names, or code) available:
# Path: tapes/reporting/stream.py
# class ThreadedStreamReporter(ScheduledReporter):
# """Dumps JSON serialized metrics to a stream with an interval"""
# def __init__(self, interval, stream=sys.stdout, registry=None):
# """
# :param interval: a timedelta
# :param stream: the stream to write to, defaults to stdout
# :param registry: the registry to report from, defaults to the global one
# """
# super(ThreadedStreamReporter, self).__init__(interval, registry)
# self.stream = stream
#
# def report(self):
# stats = self.registry.get_stats()
# json.dump(stats, self.stream)
# self.stream.write(os.linesep)
#
# Path: tests/local/base.py
# class StatsTest(unittest.TestCase):
# def setUp(self):
# self.registry = Registry()
. Output only the next line. | reporter = ThreadedStreamReporter(timedelta(milliseconds=100), stream=s, registry=self.registry) |
Given the code snippet: <|code_start|>
class TimerProxy(MetricsProxy):
def __init__(self, socket, name):
super(TimerProxy, self).__init__(socket)
self.name = name
@contextlib.contextmanager
def time(self):
start_time = time()
try:
yield
finally:
end_time = time()
<|code_end|>
, generate the next line using the imports in this file:
import contextlib
from time import time
from .proxy import MetricsProxy
from .message import Message
and context (functions, classes, or occasionally code) from other files:
# Path: tapes/distributed/proxy.py
# class MetricsProxy(object):
# def __init__(self, socket):
# self.socket = socket
#
# def send(self, message):
# distributed_logger.debug('Sending message %s', message)
# self.socket.send_pyobj(message)
#
# Path: tapes/distributed/message.py
. Output only the next line. | self.send(Message('timer', self.name, end_time - start_time)) |
Given snippet: <|code_start|>parser.add_argument('data_dir', help='dir containing the input data.')
parser.add_argument('out_dir', help='dir to write results to.')
args = parser.parse_args()
params = load_config(args.json)
topicDict = params.get('outDir').format('topicDict.dict')
opinionDict = params.get('outDir').format('opinionDict.dict')
c_perspectives = get_corpus(params)
perspectives = [p.name for p in c_perspectives.perspectives]
logger.info('Perspectives found: {}'.format('; '.join(perspectives)))
input_dirs = [args.data_dir for p in perspectives]
corpus = CPTCorpus(input=input_dirs, topicDict=topicDict,
opinionDict=opinionDict, testSplit=100, file_dict=None,
topicLines=params.get('topicLines'),
opinionLines=params.get('opinionLines'))
# Update perspective names (default name is directory name, which is currently
# the same for all perspectives)
for p, name in zip(corpus.perspectives, perspectives):
p.name = name
logger.info(str(corpus))
nTopics = params.get('nTopics')
# get estimates of phi_topic and phi_opinions
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import argparse
import pandas as pd
import os
import sys
from CPTCorpus import CPTCorpus
from cptm.utils.experiment import get_sampler, load_config, get_corpus, \
thetaFileName
and context:
# Path: cptm/utils/experiment.py
# def get_sampler(params, corpus, nTopics=None, initialize=True):
# if nTopics is None:
# nTopics = params.get('nTopics')
# out_dir = params.get('outDir')
# nIter = params.get('nIter')
# alpha = 50.0/nTopics
# beta = params.get('beta')
# beta_o = params.get('beta_o')
# logger.info('creating Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
# 'beta: {}, beta_o: {})'.format(nTopics, nIter, alpha, beta,
# beta_o))
# sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
# alpha=alpha, beta=beta, beta_o=beta_o,
# out_dir=out_dir.format(nTopics),
# initialize=initialize)
# return sampler
#
# def load_config(fName):
# with open(fName) as f:
# config = json.load(f)
#
# logger.debug('configuration of experiment: ')
# params = ['{}: {}'.format(p, v) for p, v in config.iteritems()]
# for p in params:
# logger.debug(p)
#
# params = {}
# params['inputData'] = config.get('inputData')
# params['outDir'] = config.get('outDir', '/{}')
# params['testSplit'] = config.get('testSplit', 20)
# params['minFreq'] = config.get('minFreq')
# params['removeTopTF'] = config.get('removeTopTF')
# params['removeTopDF'] = config.get('removeTopDF')
# params['nIter'] = config.get('nIter', 200)
# params['beta'] = config.get('beta', 0.02)
# params['beta_o'] = config.get('beta_o', 0.02)
# params['expNumTopics'] = config.get('expNumTopics', range(20, 201, 20))
# params['nTopics'] = config.get('nTopics')
# params['nProcesses'] = config.get('nProcesses', None)
# params['topicLines'] = config.get('topicLines', [0])
# params['opinionLines'] = config.get('opinionLines', [1])
# params['sampleEstimateStart'] = config.get('sampleEstimateStart')
# params['sampleEstimateEnd'] = config.get('sampleEstimateEnd')
#
# return params
#
# def get_corpus(params):
# out_dir = params.get('outDir')
# files = glob(params.get('inputData'))
#
# if not os.path.isfile(out_dir.format('corpus.json')):
# corpus = CPTCorpus(files,
# testSplit=params.get('testSplit'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'))
# minFreq = params.get('minFreq')
# removeTopTF = params.get('removeTopTF')
# removeTopDF = params.get('removeTopDF')
# if (not minFreq is None) or (not removeTopTF is None) or \
# (not removeTopDF is None):
# corpus.filter_dictionaries(minFreq=minFreq,
# removeTopTF=removeTopTF,
# removeTopDF=removeTopDF)
# corpus.save_dictionaries(directory=out_dir.format(''))
# corpus.save(out_dir.format('corpus.json'))
# else:
# corpus = CPTCorpus.load(file_name=out_dir.format('corpus.json'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'),
# topicDict=out_dir.format('topicDict.dict'),
# opinionDict=out_dir.format('opinionDict.dict'))
# return corpus
#
# def thetaFileName(params):
# nTopics = params.get('nTopics')
# return os.path.join(params.get('outDir').format(''),
# 'theta_{}.csv'.format(nTopics))
which might include code, classes, or functions. Output only the next line. | s = get_sampler(params, corpus, nTopics=nTopics, initialize=False) |
Predict the next line for this snippet: <|code_start|>documents.
The corpus is not divided in perspectives.
Used to estimate the likelihood of party manifestos given opinions for the
different perspectives (party manifestos come from the manifesto project).
Before this script can be run, a cptm corpus should be created. Use the
manifestoproject2cptm_input.py script to create a corpus that can be used
as input.
Usage: python experiment_manifesto.py <experiment.json> <input dir>
<output dir>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.DEBUG)
logging.getLogger('CPT_Gibbs').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
parser.add_argument('data_dir', help='dir containing the input data.')
parser.add_argument('out_dir', help='dir to write results to.')
args = parser.parse_args()
<|code_end|>
with the help of current file imports:
import logging
import argparse
import pandas as pd
import os
import sys
from CPTCorpus import CPTCorpus
from cptm.utils.experiment import get_sampler, load_config, get_corpus, \
thetaFileName
and context from other files:
# Path: cptm/utils/experiment.py
# def get_sampler(params, corpus, nTopics=None, initialize=True):
# if nTopics is None:
# nTopics = params.get('nTopics')
# out_dir = params.get('outDir')
# nIter = params.get('nIter')
# alpha = 50.0/nTopics
# beta = params.get('beta')
# beta_o = params.get('beta_o')
# logger.info('creating Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
# 'beta: {}, beta_o: {})'.format(nTopics, nIter, alpha, beta,
# beta_o))
# sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
# alpha=alpha, beta=beta, beta_o=beta_o,
# out_dir=out_dir.format(nTopics),
# initialize=initialize)
# return sampler
#
# def load_config(fName):
# with open(fName) as f:
# config = json.load(f)
#
# logger.debug('configuration of experiment: ')
# params = ['{}: {}'.format(p, v) for p, v in config.iteritems()]
# for p in params:
# logger.debug(p)
#
# params = {}
# params['inputData'] = config.get('inputData')
# params['outDir'] = config.get('outDir', '/{}')
# params['testSplit'] = config.get('testSplit', 20)
# params['minFreq'] = config.get('minFreq')
# params['removeTopTF'] = config.get('removeTopTF')
# params['removeTopDF'] = config.get('removeTopDF')
# params['nIter'] = config.get('nIter', 200)
# params['beta'] = config.get('beta', 0.02)
# params['beta_o'] = config.get('beta_o', 0.02)
# params['expNumTopics'] = config.get('expNumTopics', range(20, 201, 20))
# params['nTopics'] = config.get('nTopics')
# params['nProcesses'] = config.get('nProcesses', None)
# params['topicLines'] = config.get('topicLines', [0])
# params['opinionLines'] = config.get('opinionLines', [1])
# params['sampleEstimateStart'] = config.get('sampleEstimateStart')
# params['sampleEstimateEnd'] = config.get('sampleEstimateEnd')
#
# return params
#
# def get_corpus(params):
# out_dir = params.get('outDir')
# files = glob(params.get('inputData'))
#
# if not os.path.isfile(out_dir.format('corpus.json')):
# corpus = CPTCorpus(files,
# testSplit=params.get('testSplit'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'))
# minFreq = params.get('minFreq')
# removeTopTF = params.get('removeTopTF')
# removeTopDF = params.get('removeTopDF')
# if (not minFreq is None) or (not removeTopTF is None) or \
# (not removeTopDF is None):
# corpus.filter_dictionaries(minFreq=minFreq,
# removeTopTF=removeTopTF,
# removeTopDF=removeTopDF)
# corpus.save_dictionaries(directory=out_dir.format(''))
# corpus.save(out_dir.format('corpus.json'))
# else:
# corpus = CPTCorpus.load(file_name=out_dir.format('corpus.json'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'),
# topicDict=out_dir.format('topicDict.dict'),
# opinionDict=out_dir.format('opinionDict.dict'))
# return corpus
#
# def thetaFileName(params):
# nTopics = params.get('nTopics')
# return os.path.join(params.get('outDir').format(''),
# 'theta_{}.csv'.format(nTopics))
, which may contain function names, class names, or code. Output only the next line. | params = load_config(args.json) |
Here is a snippet: <|code_start|>different perspectives (party manifestos come from the manifesto project).
Before this script can be run, a cptm corpus should be created. Use the
manifestoproject2cptm_input.py script to create a corpus that can be used
as input.
Usage: python experiment_manifesto.py <experiment.json> <input dir>
<output dir>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.DEBUG)
logging.getLogger('CPT_Gibbs').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
parser.add_argument('data_dir', help='dir containing the input data.')
parser.add_argument('out_dir', help='dir to write results to.')
args = parser.parse_args()
params = load_config(args.json)
topicDict = params.get('outDir').format('topicDict.dict')
opinionDict = params.get('outDir').format('opinionDict.dict')
<|code_end|>
. Write the next line using the current file imports:
import logging
import argparse
import pandas as pd
import os
import sys
from CPTCorpus import CPTCorpus
from cptm.utils.experiment import get_sampler, load_config, get_corpus, \
thetaFileName
and context from other files:
# Path: cptm/utils/experiment.py
# def get_sampler(params, corpus, nTopics=None, initialize=True):
# if nTopics is None:
# nTopics = params.get('nTopics')
# out_dir = params.get('outDir')
# nIter = params.get('nIter')
# alpha = 50.0/nTopics
# beta = params.get('beta')
# beta_o = params.get('beta_o')
# logger.info('creating Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
# 'beta: {}, beta_o: {})'.format(nTopics, nIter, alpha, beta,
# beta_o))
# sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
# alpha=alpha, beta=beta, beta_o=beta_o,
# out_dir=out_dir.format(nTopics),
# initialize=initialize)
# return sampler
#
# def load_config(fName):
# with open(fName) as f:
# config = json.load(f)
#
# logger.debug('configuration of experiment: ')
# params = ['{}: {}'.format(p, v) for p, v in config.iteritems()]
# for p in params:
# logger.debug(p)
#
# params = {}
# params['inputData'] = config.get('inputData')
# params['outDir'] = config.get('outDir', '/{}')
# params['testSplit'] = config.get('testSplit', 20)
# params['minFreq'] = config.get('minFreq')
# params['removeTopTF'] = config.get('removeTopTF')
# params['removeTopDF'] = config.get('removeTopDF')
# params['nIter'] = config.get('nIter', 200)
# params['beta'] = config.get('beta', 0.02)
# params['beta_o'] = config.get('beta_o', 0.02)
# params['expNumTopics'] = config.get('expNumTopics', range(20, 201, 20))
# params['nTopics'] = config.get('nTopics')
# params['nProcesses'] = config.get('nProcesses', None)
# params['topicLines'] = config.get('topicLines', [0])
# params['opinionLines'] = config.get('opinionLines', [1])
# params['sampleEstimateStart'] = config.get('sampleEstimateStart')
# params['sampleEstimateEnd'] = config.get('sampleEstimateEnd')
#
# return params
#
# def get_corpus(params):
# out_dir = params.get('outDir')
# files = glob(params.get('inputData'))
#
# if not os.path.isfile(out_dir.format('corpus.json')):
# corpus = CPTCorpus(files,
# testSplit=params.get('testSplit'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'))
# minFreq = params.get('minFreq')
# removeTopTF = params.get('removeTopTF')
# removeTopDF = params.get('removeTopDF')
# if (not minFreq is None) or (not removeTopTF is None) or \
# (not removeTopDF is None):
# corpus.filter_dictionaries(minFreq=minFreq,
# removeTopTF=removeTopTF,
# removeTopDF=removeTopDF)
# corpus.save_dictionaries(directory=out_dir.format(''))
# corpus.save(out_dir.format('corpus.json'))
# else:
# corpus = CPTCorpus.load(file_name=out_dir.format('corpus.json'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'),
# topicDict=out_dir.format('topicDict.dict'),
# opinionDict=out_dir.format('opinionDict.dict'))
# return corpus
#
# def thetaFileName(params):
# nTopics = params.get('nTopics')
# return os.path.join(params.get('outDir').format(''),
# 'theta_{}.csv'.format(nTopics))
, which may include functions, classes, or code. Output only the next line. | c_perspectives = get_corpus(params) |
Next line prediction: <|code_start|> opinionDict=opinionDict, testSplit=100, file_dict=None,
topicLines=params.get('topicLines'),
opinionLines=params.get('opinionLines'))
# Update perspective names (default name is directory name, which is currently
# the same for all perspectives)
for p, name in zip(corpus.perspectives, perspectives):
p.name = name
logger.info(str(corpus))
nTopics = params.get('nTopics')
# get estimates of phi_topic and phi_opinions
s = get_sampler(params, corpus, nTopics=nTopics, initialize=False)
s.estimate_parameters(start=params.get('sampleEstimateStart'),
end=params.get('sampleEstimateEnd'))
phi_topic = s.topics
phi_opinion = s.opinions
params['outDir'] = args.out_dir
corpus.save(os.path.join(params.get('outDir'), 'corpus.json'))
sampler = get_sampler(params, corpus, nTopics=nTopics, initialize=False)
r, t = sampler.opinion_word_perplexity_per_document(phi_topic=phi_topic,
phi_opinion=phi_opinion)
r.to_csv(os.path.join(params['outDir'],
'opinion_word_perplexity_{}.csv'.format(nTopics)),
encoding='utf8')
<|code_end|>
. Use current file imports:
(import logging
import argparse
import pandas as pd
import os
import sys
from CPTCorpus import CPTCorpus
from cptm.utils.experiment import get_sampler, load_config, get_corpus, \
thetaFileName)
and context including class names, function names, or small code snippets from other files:
# Path: cptm/utils/experiment.py
# def get_sampler(params, corpus, nTopics=None, initialize=True):
# if nTopics is None:
# nTopics = params.get('nTopics')
# out_dir = params.get('outDir')
# nIter = params.get('nIter')
# alpha = 50.0/nTopics
# beta = params.get('beta')
# beta_o = params.get('beta_o')
# logger.info('creating Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
# 'beta: {}, beta_o: {})'.format(nTopics, nIter, alpha, beta,
# beta_o))
# sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
# alpha=alpha, beta=beta, beta_o=beta_o,
# out_dir=out_dir.format(nTopics),
# initialize=initialize)
# return sampler
#
# def load_config(fName):
# with open(fName) as f:
# config = json.load(f)
#
# logger.debug('configuration of experiment: ')
# params = ['{}: {}'.format(p, v) for p, v in config.iteritems()]
# for p in params:
# logger.debug(p)
#
# params = {}
# params['inputData'] = config.get('inputData')
# params['outDir'] = config.get('outDir', '/{}')
# params['testSplit'] = config.get('testSplit', 20)
# params['minFreq'] = config.get('minFreq')
# params['removeTopTF'] = config.get('removeTopTF')
# params['removeTopDF'] = config.get('removeTopDF')
# params['nIter'] = config.get('nIter', 200)
# params['beta'] = config.get('beta', 0.02)
# params['beta_o'] = config.get('beta_o', 0.02)
# params['expNumTopics'] = config.get('expNumTopics', range(20, 201, 20))
# params['nTopics'] = config.get('nTopics')
# params['nProcesses'] = config.get('nProcesses', None)
# params['topicLines'] = config.get('topicLines', [0])
# params['opinionLines'] = config.get('opinionLines', [1])
# params['sampleEstimateStart'] = config.get('sampleEstimateStart')
# params['sampleEstimateEnd'] = config.get('sampleEstimateEnd')
#
# return params
#
# def get_corpus(params):
# out_dir = params.get('outDir')
# files = glob(params.get('inputData'))
#
# if not os.path.isfile(out_dir.format('corpus.json')):
# corpus = CPTCorpus(files,
# testSplit=params.get('testSplit'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'))
# minFreq = params.get('minFreq')
# removeTopTF = params.get('removeTopTF')
# removeTopDF = params.get('removeTopDF')
# if (not minFreq is None) or (not removeTopTF is None) or \
# (not removeTopDF is None):
# corpus.filter_dictionaries(minFreq=minFreq,
# removeTopTF=removeTopTF,
# removeTopDF=removeTopDF)
# corpus.save_dictionaries(directory=out_dir.format(''))
# corpus.save(out_dir.format('corpus.json'))
# else:
# corpus = CPTCorpus.load(file_name=out_dir.format('corpus.json'),
# topicLines=params.get('topicLines'),
# opinionLines=params.get('opinionLines'),
# topicDict=out_dir.format('topicDict.dict'),
# opinionDict=out_dir.format('opinionDict.dict'))
# return corpus
#
# def thetaFileName(params):
# nTopics = params.get('nTopics')
# return os.path.join(params.get('outDir').format(''),
# 'theta_{}.csv'.format(nTopics))
. Output only the next line. | t.to_csv(thetaFileName(params), encoding='utf8') |
Based on the snippet: <|code_start|>
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
<|code_end|>
, predict the immediate next line with the help of imports:
import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and context (classes, functions, sometimes code) from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p = Perspective('', pos_topic_words(), pos_opinion_words()) |
Next line prediction: <|code_start|>
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
n = 0
for pos, lemma in pos_and_lemmas(text, frogclient):
n += 1
if pos in word_types():
<|code_end|>
. Use current file imports:
(import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas)
and context including class names, function names, or small code snippets from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p.add(pos, remove_trailing_digits(lemma)) |
Next line prediction: <|code_start|>
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
<|code_end|>
. Use current file imports:
(import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas)
and context including class names, function names, or small code snippets from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p = Perspective('', pos_topic_words(), pos_opinion_words()) |
Next line prediction: <|code_start|>
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
<|code_end|>
. Use current file imports:
(import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas)
and context including class names, function names, or small code snippets from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p = Perspective('', pos_topic_words(), pos_opinion_words()) |
Given the following code snippet before the placeholder: <|code_start|>
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
n = 0
for pos, lemma in pos_and_lemmas(text, frogclient):
n += 1
<|code_end|>
, predict the next line using imports from the current file:
import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and context including class names, function names, and sometimes code from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | if pos in word_types(): |
Predict the next line after this snippet: <|code_start|>"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
<|code_end|>
using the current file's imports:
import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and any relevant context from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | frogclient = get_frogclient() |
Continue the code snippet: <|code_start|><dir out>
"""
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
number_of_words = []
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
n = 0
<|code_end|>
. Use current file imports:
import pandas as pd
import logging
import sys
import argparse
import numpy as np
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and context (classes, functions, or code) from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | for pos, lemma in pos_and_lemmas(text, frogclient): |
Using the snippet: <|code_start|>
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
<|code_end|>
, determine the next line of code. You have imports:
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and context (class names, function names, or code) available:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p = Perspective('', pos_topic_words(), pos_opinion_words()) |
Given the code snippet: <|code_start|>
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
p = Perspective('', pos_topic_words(), pos_opinion_words())
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
<|code_end|>
, generate the next line using the imports in this file:
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
and context (functions, classes, or occasionally code) from other files:
# Path: cptm/utils/inputgeneration.py
# class Perspective():
# def __init__(self, name, posTopic, posOpinion):
# """Initialize inputgeneration Perspective.
#
# Parameters:
# name : str
# The perspective name. Used as directory name to store the data.
# posTopic : list of strings
# List of strings specifying the pos-tags for topic words.
# posOpinion : list of strings
# List of strings specifying the pos-tags for opinion words.
# """
# self.name = name
# self.wordTypes = posTopic + posOpinion
# self.posTopic = posTopic
# self.posOpinion = posOpinion
# self.words = {}
# for w in self.wordTypes:
# self.words[w] = []
#
# def __str__(self):
# len_topic_words, len_opinion_words = self.word_lengths()
# return 'Perspective: {} - {} topic words; {} opinion words'.format(
# self.name, len_topic_words, len_opinion_words)
#
# def add(self, tag, word):
# self.words[tag].append(word)
#
# def write2file(self, out_dir, file_name):
# # create dir (if not exists)
# directory = os.path.join(out_dir, self.name)
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# # write words to file
# out_file = os.path.join(directory, file_name)
# logger.debug('Writing file {} for perspective {}'.format(out_file,
# self.name))
# with codecs.open(out_file, 'wb', 'utf8') as f:
# for w in self.wordTypes:
# f.write(u'{}\n'.format(' '.join(self.words[w])))
#
# def word_lengths(self):
# len_topic_words = sum([len(self.words[w])
# for w in self.posTopic])
# len_opinion_words = sum([len(self.words[w])
# for w in self.posOpinion])
# return len_topic_words, len_opinion_words
#
# def remove_trailing_digits(word):
# """Convert words like d66 to d.
#
# In the folia files from politicalmashup, words such as d66 have been
# extracted as two words (d and 66) and only d ended up in the data input
# files. The folia files were probably created with an old version of frog,
# because currenly, words like these are parsed correctly.
#
# This function can be used when parsing and lemmatizing new text to match
# the vocabulary used in the old folia files.
# """
# regex = re.compile('^(.+?)(\d+)$', flags=re.UNICODE)
# m = regex.match(word)
# if m:
# return m.group(1)
# return word
#
# Path: cptm/utils/dutchdata.py
# def pos_topic_words():
# return ['N']
#
# def pos_opinion_words():
# return ['ADJ', 'BW', 'WW']
#
# def word_types():
# return pos_topic_words() + pos_opinion_words()
#
# Path: cptm/utils/frog.py
# def get_frogclient(port=8020):
# try:
# frogclient = FrogClient('localhost', port)
# return frogclient
# except:
# logger.error('Cannot connect to the Frog server. '
# 'Is it running at port {}?'.format(port))
# logger.info('Start the Frog server with "docker run -p '
# '127.0.0.1:{}:{} -t -i proycon/lamachine frog '
# '-S {}"'.format(port, port, port))
# sys.exit(1)
#
# def pos_and_lemmas(text, frogclient):
# # add timeout functionality (so frog won't keep parsing faulty text
# # forever)
# signal.signal(signal.SIGALRM, timeout)
# signal.alarm(300)
#
# regex = re.compile(r'\(.*\)')
#
# try:
# for data in frogclient.process(text):
# word, lemma, morph, ext_pos = data[:4]
# if ext_pos: # ext_pos can be None
# pos = regex.sub('', ext_pos)
# yield pos, lemma
# except Exception, e:
# raise e
. Output only the next line. | p.add(pos, remove_trailing_digits(lemma)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.