id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
163,006 | from pathlib import Path
import nox
ROOT = Path(__file__).parent.parent.parent
def generate_parser(session: nox.Session, *, grammar: Path, to: Path, check: bool) -> None:
"""Generate a standalone lark parser to the given location.
Optionally check if there is a git diff.
"""
output = session.run(
'python',
'-m',
'lark.tools.standalone',
str(grammar),
silent=True,
env={'PYTHONHASHSEED': '0'},
)
assert isinstance(output, str)
to.write_text(output)
if check:
# Note: we can't check if there is a diff for the standalone parsers
# because of https://github.com/lark-parser/lark/issues/1194
# try:
# session.run('git', 'diff', '--quiet', str(to.relative_to(ROOT)), silent=True, external=True)
# except CommandFailed:
# print(
# f'There is a diff for the generated {to.relative_to(ROOT)} parser; You need to run `nox -r -s lark` & commit the changes'
# )
# raise
...
def generate_parsers(session: nox.Session, *, check: bool) -> None:
generate_parser(
session,
grammar=ROOT.joinpath('grammars/schema.lark'),
to=ROOT.joinpath('src/prisma/_vendor/lark_schema_parser.py'),
check=check,
)
generate_parser(
session,
grammar=ROOT.joinpath('grammars/schema_scan.lark'),
to=ROOT.joinpath('src/prisma/_vendor/lark_schema_scan_parser.py'),
check=check,
) | null |
163,007 | from __future__ import annotations
import nox
def generate(
session: nox.Session,
*,
schema: str | None = 'tests/data/schema.prisma',
clean: bool = True,
) -> None:
if clean:
session.run('python', '-m', 'prisma_cleanup')
if schema:
args = (f'--schema={schema}',)
else:
args = ()
session.run('prisma', 'generate', *args) | null |
163,008 | from prisma import Prisma
async def order(client: Prisma) -> None:
# case: valid
await client.post.find_first(
order={
'desc': 'asc',
},
)
await client.post.find_first(
order={
'title': 'asc',
},
)
await client.post.find_first(
order=[
{'desc': 'asc'},
{'title': 'asc'},
],
)
# case: one field allowed
await client.post.find_first(
order={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "order" of type "PostOrderByInput | List[PostOrderByInput] | None" in function "find_first"
'desc': 'asc',
'title': 'asc',
},
)
await client.post.find_first(
order=[
{ # E: Argument of type "list[dict[str, str] | _Post_title_OrderByInput]" cannot be assigned to parameter "order" of type "PostOrderByInput | List[PostOrderByInput] | None" in function "find_first"
'desc': 'asc',
'title': 'asc',
},
{'title': 'asc'},
],
) | null |
163,009 | from prisma import validate, types
class Foo:
def validator() -> None:
# case: return type instance of type passed
validated = validate(types.PostCreateInput, {})
reveal_type(validated) # T: PostCreateInput
# case: non-typeddict type
# these are allowed as we cannot type the TypeVar properly due to a mypy limitation
validate(Foo, {}) | null |
163,010 | from prisma.models import Profile
async def order() -> None:
# case: valid
await Profile.prisma().group_by(
['country'],
order={
'country': 'desc',
},
)
await Profile.prisma().group_by(
['country', 'city'],
order={
'country': 'desc',
},
)
# case: limitation
# this should error but it is not possible to both resolve the Mapping key type
# from the TypeVar and limit the number of fields allowed to 1. I would rather
# error if a non-grouped field is ordered by instead of if more than 1 field is ordered by
# as I expect the first case to be a more common error
await Profile.prisma().group_by(
['country', 'city'],
order={
'country': 'desc',
'city': 'asc',
},
)
# case: can only order by grouped fields
await Profile.prisma().group_by(
['city'],
order={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "order" of type "Mapping[ProfileScalarFieldKeysT@group_by, SortOrder] | List[Mapping[ProfileScalarFieldKeysT@group_by, SortOrder]] | None" in function "group_by"
'country': 'desc',
},
)
# case: invalid sort order
await Profile.prisma().group_by(
['country'],
order={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "order" of type "Mapping[ProfileScalarFieldKeysT@group_by, SortOrder] | List[Mapping[ProfileScalarFieldKeysT@group_by, SortOrder]] | None" in function "group_by"
'country': 'foo',
},
) | null |
163,011 | from prisma.models import Types, User
class MyUser(User):
def hello(self):
async def create() -> None:
# case: valid
user = await User.prisma().create(
data={
'name': 'Robert',
},
)
reveal_type(user) # T: User
user = await User.prisma().create(
data={
'name': 'Robert',
'profile': {
'create': {
'bio': 'hello',
},
},
},
)
reveal_type(user) # T: User
type_ = await Types.prisma().create(data={})
reveal_type(type_.bool) # T: bool
reveal_type(type_.string) # T: str
reveal_type(type_.bigint) # T: int
reveal_type(type_.integer) # T: int
reveal_type(type_.float_) # T: float
# case: subclassing
user2 = await MyUser.prisma().create(
data={
'name': 'Robert',
},
)
reveal_type(user2) # T: MyUser
reveal_type(user2.hello) # T: str
# case: invalid field
await User.prisma().create(
data={
'foo': 'Robert', # E: Argument of type "dict[str, str]" cannot be assigned to parameter "data" of type "UserCreateInput" in function "create"
},
)
# case: invalid nested field
await User.prisma().create(
data={
'name': 'Robert',
'profile': {
'create': {
'foo': 'bar', # E: Argument of type "dict[str, str | dict[str, dict[str, str]]]" cannot be assigned to parameter "data" of type "UserCreateInput" in function "create"
},
},
},
) | null |
163,012 | from prisma import Prisma
async def select(client: Prisma) -> None:
# case: None
total = await client.post.count(select=None)
reveal_type(total) # T: int
# case: empty
count = await client.post.count(select={})
reveal_type(count) # T: PostCountAggregateOutput
# case: valid fields
count = await client.post.count(
select={
'_all': True,
'views': True,
'created_at': True,
'desc': False,
},
)
reveal_type(count) # T: PostCountAggregateOutput
# case: invalid field
await client.post.count( # E: No overloads for "count" match the provided arguments
select={
'foo': True, # E: Argument of type "dict[str, bool]" cannot be assigned to parameter "select" of type "PostCountAggregateInput" in function "count"
},
)
# case: invalid type
await client.post.count( # E: No overloads for "count" match the provided arguments
select={
'author_id': 1, # E: Argument of type "dict[str, int]" cannot be assigned to parameter "select" of type "PostCountAggregateInput" in function "count"
},
) | null |
163,013 | from prisma import Prisma
from prisma.models import User
async def order_by(client: Prisma, user: User) -> None:
# case: 1-M valid
await client.user.find_unique(
where={
'id': user.id,
},
include={
'posts': {
'order_by': {
'published': 'asc',
},
},
},
)
# case: 1-M invalid field
await client.user.find_unique(
where={
'id': user.id,
},
include={ # E: Argument of type "dict[str, dict[str, dict[str, str]]]" cannot be assigned to parameter "include" of type "UserInclude | None" in function "find_unique"
'posts': {
'order_by': {
'name': 'asc',
},
},
},
)
# case: 1-M invalid value
await client.user.find_unique(
where={
'id': user.id,
},
include={ # E: Argument of type "dict[str, dict[str, dict[str, str]]]" cannot be assigned to parameter "include" of type "UserInclude | None" in function "find_unique"
'posts': {
'order_by': {
'published': 'foo',
},
},
},
) | null |
163,014 | from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'bigint': 237283,
},
)
await client.types.find_first(
where={
'bigint': {
'not': 173283,
},
},
)
await client.types.find_first(
where={
'bigint': {
'not': {
'not': {
'not': {
'not': {
'equals': 1,
},
},
},
},
},
},
)
await client.types.find_first(
where={
'bigint': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 3,
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': 'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'equals': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str | int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'in': ['1', 2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int | str]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'not_in': [2, '3'],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'lt': [2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, tuple[Unknown, ...]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'lte': tuple(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'gt': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bigint': {
'gte': client,
},
},
)
await client.types.find_first(
where={
'bigint': {
'not': {
'equals': 5,
},
},
},
) | null |
163,015 | from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'bigint': 290521015266836500,
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {
'set': 540521015266836500,
},
},
)
# case: multiplying
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {
'multiply': 10,
},
},
)
# case: dividing
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {
'divide': 2,
},
},
)
# case: adding
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {
'increment': 15,
},
},
)
# case: subtracting
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {
'decrement': 15,
},
},
)
# case: invalid field
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'foo': 15,
},
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'bigint': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': 'a',
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'increment': client,
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'multiply': set(),
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': set(),
},
},
)
# case: too many arguments
await client.types.update(
where={
'id': 1,
},
data={
'bigint': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': 5,
'multiply': 2,
},
},
)
# case: too few arguments
await client.types.update(
where={
'id': 1,
},
data={
'bigint': {}, # E: Argument of type "dict[str, dict[Any, Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
) | null |
163,016 | from typing import Dict
from prisma import Json, Prisma
def raw() -> None:
# case: valid
Json(None)
Json(True)
Json(False)
Json(1.3723)
Json(56)
Json('hello world')
Json(['hello world'])
Json(['foo', 'bar', 'baz'])
Json({'foo': 10})
Json({'foo': {'bar': {'baz': 1}}})
# case: no other arguments
# TODO: these error messages are weird...
Json(
'bar', # E: Argument of type "Literal['bar']" cannot be assigned to parameter "object" of type "ReadableBuffer" in function "__new__"
'foo',
)
Json( # E: No overloads for "__new__" match the provided arguments
'foo',
item=1,
)
# case: invalid recursive type
Json(
{ # E: Argument of type "dict[str, dict[str, dict[str, type[Json]]]]" cannot be assigned to parameter "data" of type "Serializable" in function "__init__"
'foo': {
'bar': {
'baz': Json,
},
},
},
) | null |
163,017 | from typing import Dict
from prisma import Json, Prisma
def keys() -> None:
# case: valid
Json.keys(item=None)
Json.keys(item=True)
Json.keys(item=False)
Json.keys(item=1.3723)
Json.keys(item=56)
Json.keys(item='hello world')
Json.keys(item=['hello world'])
Json.keys(item=['foo', 'bar', 'baz'])
Json.keys(item={'foo': 10})
Json.keys(item={'foo': {'bar': {'baz': 1}}})
# case: unpacking inferred
kwargs1 = {'hello': 'world'}
Json.keys(**kwargs1)
# case: unpacking explicit
kwargs2: Dict[str, str] = {'hello': 'world'}
Json.keys(**kwargs2)
# case: invalid recursive type
Json.keys(
item={ # E: Argument of type "dict[str, dict[str, dict[str, type[Json]]]]" cannot be assigned to parameter "item" of type "Serializable" in function "keys"
'foo': {
'bar': {
'baz': Json,
},
},
},
)
# case: invalid key types
Json.keys(item={}) | null |
163,018 | from typing import Dict
from prisma import Json, Prisma
async def allowed_operations(client: Prisma) -> None:
model = await client.types.create(data={'json_obj': Json('foo')})
obj = model.json_obj
assert obj is not None
# case: dict is expected
assert obj['foo'] is True
# case: list is expected
assert obj[0] == 'foo'
assert obj[1:3] == [1, 2]
# case: string is expected
assert obj[0] == 'f'
assert obj[1:3] == 'er' | null |
163,019 | from typing import Dict
from prisma import Json, Prisma
async def narrowing_types(client: Prisma) -> None:
model = await client.types.create(data={'json_obj': Json('foo')})
obj = model.json_obj
assert obj is not None
reveal_type(obj) # T: Json
# case: dict
if isinstance(obj, dict):
reveal_type(obj) # T: <subclass of Json and dict>
obj.update(name='foo')
# case: list
elif isinstance(obj, list):
reveal_type(obj) # T: <subclass of Json and list>
obj.append('foo') | null |
163,020 | from typing import Dict
from prisma import Json, Prisma
async def client_api(client: Prisma) -> None:
# case: cannot pass Json to string field
# TODO: this should error
await client.types.create(
data={
'string': Json('wow'),
},
)
# case: narrowing type
model = await client.types.create(data={'json_obj': Json('1')})
assert isinstance(model.json_obj, int)
number = model.json_obj + 1
reveal_type(number) # T: int | null |
163,021 | from prisma import Prisma, Base64
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'bytes': Base64.encode(b'foo'),
},
)
await client.types.find_first(
where={
'bytes': {
'equals': Base64.encode(b'a'),
},
},
)
await client.types.find_first(
where={
'bytes': {
'not': Base64.encode(b'a'),
},
},
)
await client.types.find_first(
where={
'bytes': {
'not': {
'equals': Base64.encode(b'a'),
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bytes': b'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bytes': b'foo',
},
) | null |
163,022 | from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'integer': 237283,
},
)
await client.types.find_first(
where={
'integer': {
'not': 173283,
},
},
)
await client.types.find_first(
where={
'integer': {
'not': {
'not': {
'not': {
'not': {
'equals': 1,
},
},
},
},
},
},
)
await client.types.find_first(
where={
'integer': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 3,
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': 'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'equals': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str | int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'in': ['1', 2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int | str]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'not_in': [2, '3'],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'lt': [2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, tuple[Unknown, ...]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'lte': tuple(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'gt': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'integer': {
'gte': client,
},
},
)
await client.types.find_first(
where={
'integer': {
'not': {
'equals': 5,
},
},
},
) | null |
163,023 | from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'integer': 290521015266836500,
},
)
await client.types.update(
where={
'id': 1,
},
data={
'integer': {
'set': 540521015266836500,
},
},
)
# case: multiplying
await client.types.update(
where={
'id': 1,
},
data={
'integer': {
'multiply': 10,
},
},
)
# case: dividing
await client.types.update(
where={
'id': 1,
},
data={
'integer': {
'divide': 2,
},
},
)
# case: adding
await client.types.update(
where={
'id': 1,
},
data={
'integer': {
'increment': 15,
},
},
)
# case: subtracting
await client.types.update(
where={
'id': 1,
},
data={
'integer': {
'decrement': 15,
},
},
)
# case: invalid field
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'foo': 15,
},
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'integer': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': 'a',
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'increment': client,
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'multiply': set(),
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': set(),
},
},
)
# case: too many arguments
await client.types.update(
where={
'id': 1,
},
data={
'integer': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': 5,
'multiply': 2,
},
},
)
# case: too few arguments
await client.types.update(
where={
'id': 1,
},
data={
'integer': {}, # E: Argument of type "dict[str, dict[Any, Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
) | null |
163,024 | from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'bool': True,
},
)
await client.types.find_first(
where={
'bool': {
'not': True,
},
},
)
await client.types.find_first(
where={
'bool': {
'not': {
'not': {
'not': {
'not': {
'equals': True,
},
},
},
},
},
},
)
await client.types.find_first(
where={
'bool': {
'equals': False,
'not': {
'equals': True,
'not': {
'equals': False,
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bool': 'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bool': {
'equals': '1',
},
},
)
# case: invalid field
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, bool]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bool': {
'foo': True,
},
},
) | null |
163,025 | from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'bool': True,
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bool': False,
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'bool': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'bool': { # E: Argument of type "dict[str, dict[str, bool]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': True,
},
},
) | null |
163,026 | from prisma import Prisma
from prisma.enums import Role
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'role': Role.USER,
},
) | null |
163,027 | from prisma import Prisma
from prisma.enums import Role
def use_str_enum_as_str():
# case: StrEnum is compatible with str typing
_test_string: str = Role.USER | null |
163,028 | from prisma import Prisma
from prisma.enums import Role
def raise_error_on_invalid_type():
_test_int: int = Role.USER # E: Expression of type "Literal[Role.USER]" cannot be assigned to declared type "int" | null |
163,029 | from datetime import datetime
from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'datetime': datetime.now(),
},
)
await client.types.find_first(
where={
'datetime': {
'not': datetime.now(),
},
},
)
await client.types.find_first(
where={
'datetime': {
'not': {
'not': {
'not': {
'not': {
'equals': datetime.now(),
},
},
},
},
},
},
)
await client.types.find_first(
where={
'datetime': {
'equals': datetime.now(),
'in': [datetime.now(), datetime.utcnow()],
'not_in': [datetime.now(), datetime.utcnow()],
'lt': datetime.now(),
'lte': datetime.now(),
'gt': datetime.now(),
'gte': datetime.now(),
'not': {
'equals': datetime.now(),
'in': [datetime.now(), datetime.utcnow()],
'not_in': [datetime.now(), datetime.utcnow()],
'lt': datetime.now(),
'lte': datetime.now(),
'gt': datetime.now(),
'gte': datetime.now(),
'not': {
'equals': datetime.now(),
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': 'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'equals': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str | int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'in': ['1', 2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int | str]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'not_in': [2, '3'],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'lt': [2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, tuple[Unknown, ...]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'lte': tuple(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'gt': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'datetime': {
'gte': client,
},
},
) | null |
163,030 | from datetime import datetime
from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'datetime': datetime.now(),
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'datetime': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'datetime': { # E: Argument of type "dict[str, dict[str, bool]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': True,
},
},
) | null |
163,031 | from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'string': 'foo',
},
)
await client.types.find_first(
where={
'string': {
'not': 'foo',
},
},
)
await client.types.find_first(
where={
'string': {
'not': {
'not': {
'not': {
'not': {
'equals': 'wow',
},
},
},
},
},
},
)
await client.types.find_first(
where={
'string': {
'equals': 'foo',
'in': ['bar', 'baz'],
'not_in': ['foo', 'thing'],
'lt': 'prisma',
'lte': 'prisma 2',
'gt': 'python',
'gte': 'wow',
'contains': 'foo',
'startswith': 'bar',
'endswith': 'baz',
'mode': 'insensitive',
'not': {
'equals': 'foo',
'in': ['one', 'two'],
'not_in': ['three', 'four'],
'lt': 'five',
'lte': 'six',
'gt': 'seven',
'gte': 'eight',
'contains': 'foo',
'startswith': 'bar',
'endswith': 'baz',
'mode': 'default',
'not': {
'equals': 'ten',
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, int]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': 1,
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'equals': 1,
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str | int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'in': ['1', 2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int | str]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'not_in': [2, '3'],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'lt': [2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, tuple[Unknown, ...]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'lte': tuple(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[Unknown]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'gt': list(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'gte': client,
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'contains': 1,
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'startswith': 1,
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'endswith': 1,
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'string': {
'mode': 'foo',
},
},
) | null |
163,032 | from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'string': 'foo',
},
)
await client.types.update(
where={
'id': 1,
},
data={
'string': '\n'.join('foo,three'.split(',')),
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'string': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'string': { # E: Argument of type "dict[str, dict[str, bool]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': True,
},
},
) | null |
163,033 | from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'float_': 237283,
},
)
await client.types.find_first(
where={
'float_': {
'not': 173283,
},
},
)
await client.types.find_first(
where={
'float_': {
'not': {
'not': {
'not': {
'not': {
'equals': 1,
},
},
},
},
},
},
)
await client.types.find_first(
where={
'float_': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 2,
'in': [2, 3, 4],
'not_in': [5, 6, 7],
'lt': 3,
'lte': 2,
'gt': 1,
'gte': 2,
'not': {
'equals': 3,
},
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': 'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'equals': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str | int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'in': ['1', 2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int | str]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'not_in': [2, '3'],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[int]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'lt': [2],
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, tuple[Unknown, ...]]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'lte': tuple(),
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'gt': '1',
},
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'float_': {
'gte': client,
},
},
)
await client.types.find_first(
where={
'float_': {
'not': {
'equals': 5,
},
},
},
) | null |
163,034 | from prisma import Prisma
async def updating(client: Prisma) -> None:
# case: setting
await client.types.update(
where={
'id': 1,
},
data={
'float_': 290521015266836500,
},
)
await client.types.update(
where={
'id': 1,
},
data={
'float_': {
'set': 540521015266836500,
},
},
)
# case: multiplying
await client.types.update(
where={
'id': 1,
},
data={
'float_': {
'multiply': 10,
},
},
)
# case: dividing
await client.types.update(
where={
'id': 1,
},
data={
'float_': {
'divide': 2,
},
},
)
# case: adding
await client.types.update(
where={
'id': 1,
},
data={
'float_': {
'increment': 15,
},
},
)
# case: subtracting
await client.types.update(
where={
'id': 1,
},
data={
'float_': {
'decrement': 15,
},
},
)
# case: invalid field
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'foo': 15,
},
},
)
# case: invalid types
await client.types.update(
where={
'id': 1,
},
data={
'float_': [], # E: Argument of type "dict[str, list[Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
)
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'decrement': 'a',
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, Prisma]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'increment': client,
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'multiply': set(),
},
},
)
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, set[Unknown]]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': set(),
},
},
)
# case: too many arguments
await client.types.update(
where={
'id': 1,
},
data={
'float_': { # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
'divide': 5,
'multiply': 2,
},
},
)
# case: too few arguments
await client.types.update(
where={
'id': 1,
},
data={
'float_': {}, # E: Argument of type "dict[str, dict[Any, Any]]" cannot be assigned to parameter "data" of type "TypesUpdateInput" in function "update"
},
) | null |
163,035 | from decimal import Decimal
from prisma import Prisma
async def filtering(client: Prisma) -> None:
# case: valid filter fields
await client.types.find_first(
where={
'decimal': Decimal('1'),
},
)
await client.types.find_first(
where={
'decimal': {
'equals': Decimal(1),
},
},
)
await client.types.find_first(
where={
'decimal': {
'not': Decimal('1.2345'),
},
},
)
await client.types.find_first(
where={
'decimal': {
'not': {
'equals': Decimal('1'),
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'decimal': b'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'decimal': b'foo',
},
) | null |
163,036 | from datetime import datetime
from prisma import Prisma, Base64, Json
from prisma.enums import Role
async def filtering(client: Prisma) -> None:
# case: multiple arguments not allowed
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, str | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'equals': None,
'has': 'a',
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, Base64 | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'bytes': {
'equals': None,
'has': Base64.encode(b'foo'),
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, datetime | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'dates': {
'equals': None,
'has': datetime.utcnow(),
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, bool | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'bools': {
'equals': None,
'has': True,
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, int | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'ints': {
'equals': None,
'has': 2,
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, float | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'floats': {
'equals': None,
'has': 10.4,
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, int | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'bigints': {
'equals': None,
'has': 237263876387263823,
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, Json | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'json_objects': {
'equals': None,
'has': Json('foo'),
},
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, Role | None]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'roles': {
'equals': None,
'has': Role.ADMIN,
},
},
)
# NOTE: after this we just test one type for simplicity's sake and it is
# incredibly unlikely for there to be any deviance in behaviour between types
# case: invalid equals
await client.lists.find_first(
where={ # E: Argument of type "dict[str, int]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': 1,
},
)
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'equals': 'foo',
},
},
)
# case: invalid has
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, list[str]]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'has': ['foo'],
},
},
)
# case: invalid has_every
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'has_every': 'foo',
},
},
)
# case: invalid has_some
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, str]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'has_some': 'bar',
},
},
)
# case: invalid is_empty
await client.lists.find_first(
where={ # E: Argument of type "dict[str, dict[str, int]]" cannot be assigned to parameter "where" of type "ListsWhereInput | None" in function "find_first"
'strings': {
'is_empty': 1,
},
},
) | null |
163,037 | from datetime import datetime
from prisma import Prisma, Base64, Json
from prisma.enums import Role
async def updating(client: Prisma) -> None:
# case: invalid set
await client.lists.update(
where={
'id': '',
},
data={
'strings': 'foo', # E: Argument of type "dict[str, str]" cannot be assigned to parameter "data" of type "ListsUpdateInput" in function "update"
},
)
await client.lists.update(
where={
'id': '',
},
data={
'strings': { # E: Argument of type "dict[str, dict[str, tuple[Literal['foo'], Literal['bar']]]]" cannot be assigned to parameter "data" of type "ListsUpdateInput" in function "update"
'set': ('foo', 'bar'),
},
},
)
# case: invalid push
await client.lists.update(
where={
'id': '',
},
data={
'strings': { # E: Argument of type "dict[str, dict[str, tuple[Literal['foo'], Literal['bar']]]]" cannot be assigned to parameter "data" of type "ListsUpdateInput" in function "update"
'push': ('foo', 'bar'),
},
},
) | null |
163,038 | from datetime import datetime
from prisma import Prisma, Base64, Json
from prisma.enums import Role
async def models(client: Prisma) -> None:
model = await client.lists.find_first()
assert model is not None
reveal_type(model.ints) # T: List[int]
reveal_type(model.roles) # T: List[Role]
reveal_type(model.bytes) # T: List[Base64]
reveal_type(model.dates) # T: List[datetime]
reveal_type(model.bools) # T: List[bool]
reveal_type(model.floats) # T: List[float]
reveal_type(model.bigints) # T: List[int]
reveal_type(model.strings) # T: List[str]
reveal_type(model.json_objects) # T: List[Json] | null |
163,039 | from prisma import Prisma
async def nested_create(client: Prisma) -> None:
# TODO: test invalid cases
# case: valid nested create one-one
await client.post.create(
data={
'title': '',
'published': False,
'author': {
'create': {
'name': 'Robert',
},
},
},
)
await client.post.create(
data={
'title': '',
'published': False,
'author': {
'connect': {'id': 'a'},
},
},
)
# case: valid nested create one-many
await client.post.create(
data={
'title': '',
'published': False,
'categories': {
'create': {
'name': 'Category',
},
},
},
)
await client.post.create(
data={
'title': '',
'published': False,
'categories': {
'create': [
{
'name': 'Category',
},
{
'name': 'Category 2',
},
],
},
},
)
await client.post.create(
data={
'title': '',
'published': False,
'categories': {
'connect': {'id': 1},
},
},
)
await client.post.create(
data={
'title': '',
'published': False,
'categories': {
'connect': [
{
'id': 1,
},
{
'id': 2,
},
],
},
},
) | null |
163,040 | from prisma import Prisma
The provided code snippet includes necessary dependencies for implementing the `one_to_one_relation` function. Write a Python function `async def one_to_one_relation(client: Prisma) -> None` to solve the following problem:
Ensure relational filters are strongly typed with pyright
Here is the function:
async def one_to_one_relation(client: Prisma) -> None:
"""Ensure relational filters are strongly typed with pyright"""
user = await client.user.find_first(
where={
'profile': {
'is': {
'bio': {'contains': 'scotland'},
},
},
}
)
assert user is not None
reveal_type(user) # T: User
reveal_type(user.id) # T: str
reveal_type(user.profile) # T: Profile | None
await client.user.find_first(
where={ # E: Argument of type "dict[str, dict[str, dict[str, str]]]" cannot be assigned to parameter "where" of type "UserWhereInput | None" in function "find_first"
'profile': {
'is': {
'an_invalid_value': 'foo',
},
},
},
) | Ensure relational filters are strongly typed with pyright |
163,041 | import argparse
import time
import typing
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import higher
from support.omniglot_loaders import OmniglotNShot
def train(db, net, device, meta_opt, epoch, log):
net.train()
n_train_iter = db.x_train.shape[0] // db.batchsz
for batch_idx in range(n_train_iter):
start_time = time.time()
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = db.next()
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
# Initialize the inner optimizer to adapt the parameters to
# the support set.
n_inner_iter = 5
inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
qry_losses = []
qry_accs = []
meta_opt.zero_grad()
for i in range(task_num):
with higher.innerloop_ctx(
net, inner_opt, copy_initial_weights=False
) as (fnet, diffopt):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
# higher is able to automatically keep copies of
# your network's parameters as they are being updated.
for _ in range(n_inner_iter):
spt_logits = fnet(x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
diffopt.step(spt_loss)
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(x_qry[i])
qry_loss = F.cross_entropy(qry_logits, y_qry[i])
qry_losses.append(qry_loss.detach())
qry_acc = (qry_logits.argmax(
dim=1) == y_qry[i]).sum().item() / querysz
qry_accs.append(qry_acc)
# Update the model's meta-parameters to optimize the query
# losses across all of the tasks sampled in this batch.
# This unrolls through the gradient steps.
qry_loss.backward()
meta_opt.step()
qry_losses = sum(qry_losses) / task_num
qry_accs = 100. * sum(qry_accs) / task_num
i = epoch + float(batch_idx) / n_train_iter
iter_time = time.time() - start_time
if batch_idx % 4 == 0:
print(
f'[Epoch {i:.2f}] Train Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f} | Time: {iter_time:.2f}'
)
log.append({
'epoch': i,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'train',
'time': time.time(),
}) | null |
163,042 | import argparse
import time
import typing
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('bmh')
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import higher
from support.omniglot_loaders import OmniglotNShot
def plot(log):
# Generally you should pull your plotting code out of your training
# script but we are doing it here for brevity.
df = pd.DataFrame(log)
fig, ax = plt.subplots(figsize=(6, 4))
train_df = df[df['mode'] == 'train']
test_df = df[df['mode'] == 'test']
ax.plot(train_df['epoch'], train_df['acc'], label='Train')
ax.plot(test_df['epoch'], test_df['acc'], label='Test')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.set_ylim(70, 100)
fig.legend(ncol=2, loc='lower right')
fig.tight_layout()
fname = 'maml-accs.png'
print(f'--- Plotting accuracy to {fname}')
fig.savefig(fname)
plt.close(fig) | null |
163,043 | import argparse
import typing
import torch
from torch import nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import higher
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(model, device, train_loader, optimizer, epoch, mode, log_interval=10)` to solve the following problem:
The training loop that optimizes the likelihood of a differentable model. Our model in this example internally unrolls gradient descent over an energy function in a differentiable way using higher and we can use the outputs of this model just as we use the outputs of any other differentiable model to optimize a loss function by taking gradient steps.
Here is the function:
def train(model, device, train_loader, optimizer, epoch, mode, log_interval=10):
"""The training loop that optimizes the likelihood of a differentable model.
Our model in this example internally unrolls gradient descent
over an energy function in a differentiable way using higher
and we can use the outputs of this model just as we use the
outputs of any other differentiable model to optimize
a loss function by taking gradient steps.
"""
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if mode == 'ICNN':
# In the ICNN, the fce2 weights are required to be non-negative
# to maintain convexity of the energy function.
# Here we just project negative weights back onto the
# non-negative orthant.
fce2W = model.Enet.state_dict()['fce2.weight']
fce2W[fce2W < 0] = 0.
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item())) | The training loop that optimizes the likelihood of a differentable model. Our model in this example internally unrolls gradient descent over an energy function in a differentiable way using higher and we can use the outputs of this model just as we use the outputs of any other differentiable model to optimize a loss function by taking gradient steps. |
163,044 | import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import os
import os.path
import errno
def find_classes(root_dir):
retour = []
for (root, dirs, files) in os.walk(root_dir):
for f in files:
if (f.endswith("png")):
r = root.split('/')
lr = len(r)
retour.append((f, r[lr - 2] + "/" + r[lr - 1], root))
print("== Found %d items " % len(retour))
return retour | null |
163,045 | import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import os
import os.path
import errno
def index_classes(items):
idx = {}
for i in items:
if i[1] not in idx:
idx[i[1]] = len(idx)
print("== Found %d classes" % len(idx))
return idx | null |
163,046 | import torch as _torch
import typing as _typing
def _copy_tensor(
t: _torch.Tensor,
safe_copy: bool,
device: _typing.Optional[_torch.device] = None
) -> _torch.Tensor:
if safe_copy:
t = t.clone().detach().requires_grad_(t.requires_grad)
else:
t = t.detach().requires_grad_(t.requires_grad)
t = t if device is None else t.to(device)
return t
def _recursive_map(
target: _typing.Union[list, tuple, dict, set, _T],
map_fn: _typing.Callable[[_T], _U],
) -> _typing.Union[list, tuple, dict, set, _U]:
if isinstance(target, list):
return type(target)(
[_recursive_map(x, map_fn) for x in target]
)
elif isinstance(target, tuple):
return type(target)(
[_recursive_map(x, map_fn) for x in target]
)
elif isinstance(target, dict):
return type(target)(
{k: _recursive_map(v, map_fn)
for k, v in target.items()}
)
elif isinstance(target, set):
return type(target)(
{_recursive_map(x, map_fn)
for x in target}
)
else:
return map_fn(target)
def _recursive_copy_and_cast(
target: _typing.Union[list, tuple, dict, set, _torch.Tensor],
device: _typing.Optional[_torch.device]
) -> _torch.Tensor:
def map_fn(x):
if _torch.is_tensor(x):
return _copy_tensor(x, True, device=device)
else:
return x
return _recursive_map(target, map_fn) | null |
163,047 | import abc as _abc
from collections import OrderedDict as _OrderedDict
from contextlib import contextmanager as _contextmanager
import typing as _typing
import weakref as _weakref
import warnings as _warnings
import torch as _torch
from . import utils as _utils
class _MonkeyPatchBase(_abc.ABC, _torch.nn.Module):
def __init__(self) -> None:
self._param_mapping: _typing.List[int] = []
self._being_modified_internally: bool = True
self._track_higher_grads: bool = True
def forward(self):
raise NotImplementedError(
"The monkey-patching logic has failed to override self.forward "
"on the new module, or you tried calling forward on a patched "
"version of a module which doesn't have forward (e.g. ModuleList)."
)
def _expand_params(
self, params: _typing.List[_torch.Tensor]
) -> _typing.List[_torch.Tensor]:
expanded = []
for index in self._param_mapping:
expanded.append(params[index])
return expanded
def init_fast_params(self):
if not self.track_higher_grads:
raise Exception(
"Cannot get initial parameters when not tracking higher "
"gradients."
)
return self._fast_params[0]
def fast_params(self):
return None if self._fast_params is None else self._fast_params[-1]
def fast_params(self, value):
value = list(value)
if self._fast_params is None:
self._fast_params = []
if self.track_higher_grads:
self._fast_params.append(value)
else:
self._fast_params[0] = value
def track_higher_grads(self):
return self._track_higher_grads
def track_higher_grads(self, value):
if not isinstance(value, bool):
raise ValueError(
"Expected boolean argument. Got: {}.".format(type(value))
)
self._track_higher_grads = value
def buffer_sync(
module: _torch.nn.Module,
fmodule: _MonkeyPatchBase,
device: _typing.Optional[_torch.device] = None
) -> None:
r"""One off sync (copy) of buffers in ``fmodule`` with those from ``module``.
"""
for key, value in module._buffers.items():
if not _torch.is_tensor(value):
fmodule._buffers[key] = value
elif device is None:
fmodule._buffers[key] = value.clone().detach()
else:
fmodule._buffers[key] = value.clone().detach().to(device)
for name, child in module._modules.items():
if name in fmodule._modules:
buffer_sync(child, fmodule._modules[name], device)
else:
raise KeyError(
"Did not find expected submodule "
"{} of monkey-patched module {}.".format(name, fmodule)
)
def make_functional(
module: _torch.nn.Module,
encapsulator: _EncapsulatorType = None
) -> _MonkeyPatchBase:
r"""Returns a stateless version of an ``nn.Module`` instance."""
params_box = [None]
_, fmodule, MonkeyPatched = _make_functional(module, params_box, 0)
top_name = "Functional" + MonkeyPatched._wrapped_name
MonkeyPatched.__name__ = MonkeyPatched.__qualname__ = top_name
MonkeyPatched.boxed_forward = MonkeyPatched.forward
param_mapping = _utils._get_param_mapping(module, [], [])
setattr(fmodule, "_param_mapping", param_mapping)
def _refill_params_box(self, params):
if params is not None:
self.fast_params = params # update view on latest fast params
elif self.fast_params is None:
raise ValueError(
"params keyword must be provided if patched module not "
"tracking its own fast parameters"
)
# Copy fast parameters into params_box for use in boxed_forward
params_box[0] = self._expand_params(self.fast_params)
def _patched_forward(self, *args, params=None, **kwargs):
self._refill_params_box(params)
output = self.boxed_forward(*args, **kwargs)
# Clean up
params_box[0] = None
return output
def _update_params(self, params):
self.fast_params = params
params = self._expand_params(params)
_update_patched_params(self, [params], 0)
setattr(MonkeyPatched, "forward", _patched_forward)
setattr(MonkeyPatched, "parameters", _patched_parameters)
setattr(MonkeyPatched, "update_params", _update_params)
setattr(MonkeyPatched, "_refill_params_box", _refill_params_box)
if encapsulator is not None:
encapsulator(fmodule, module)
return fmodule
The provided code snippet includes necessary dependencies for implementing the `monkeypatch` function. Write a Python function `def monkeypatch( module: _torch.nn.Module, device: _typing.Optional[_torch.device] = None, copy_initial_weights: bool = True, track_higher_grads: bool = True ) -> _MonkeyPatchBase` to solve the following problem:
r"""Create a monkey-patched stateless version of a module. This function produces a monkey-patched version of a module, and returns a copy of its parameters for use as fast weights. Where the original module or any of its submodules have state (e.g. batch norm), this will be copied too, but further updates (e.g. during inner loop training) will cause these to diverge without changing the state of the original module. Args: module: a ``torch.nn.Module`` subclass instance. device (optional): a device to cast the fast weights and state to. copy_initial_weights: if True, the weights of the patched module are copied to form the initial weights of the patched module, and thus are not part of the gradient tape when unrolling the patched module. If this is set to False, the actual module weights will be the initial weights of the patched module. This is useful when doing MAML, for example. track_higher_grads: if True, during unrolled optimization the graph be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows ``monkeypatch`` to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: ``fmodule``: a "stateless" version of the original module, for which calls to forward take the additional kwarg-only parameter ``params``, which should be a list of torch tensors requiring gradients, ideally provided by this function (see below) or by an update step from one of the optimizers in ``higher.optim``.
Here is the function:
def monkeypatch(
module: _torch.nn.Module,
device: _typing.Optional[_torch.device] = None,
copy_initial_weights: bool = True,
track_higher_grads: bool = True
) -> _MonkeyPatchBase:
r"""Create a monkey-patched stateless version of a module.
This function produces a monkey-patched version of a module, and returns a
copy of its parameters for use as fast weights. Where the original module
or any of its submodules have state (e.g. batch norm), this will be copied
too, but further updates (e.g. during inner loop training) will cause these
to diverge without changing the state of the original module.
Args:
module: a ``torch.nn.Module`` subclass instance.
device (optional): a device to cast the fast weights and state to.
copy_initial_weights: if True, the weights of the patched module are
copied to form the initial weights of the patched module, and thus
are not part of the gradient tape when unrolling the patched module.
If this is set to False, the actual module weights will be the
initial weights of the patched module. This is useful when doing
MAML, for example.
track_higher_grads: if True, during unrolled optimization the graph be
retained, and the fast weights will bear grad funcs, so as to permit
backpropagation through the optimization process. Setting this to
False allows ``monkeypatch`` to be used in "test mode", without
potentially tracking higher order gradients. This can be useful when
running the training loop at test time, e.g. in k-shot learning
experiments, without incurring a significant memory overhead.
Returns:
``fmodule``: a "stateless" version of the original module, for which calls
to forward take the additional kwarg-only parameter ``params``, which
should be a list of torch tensors requiring gradients, ideally
provided by this function (see below) or by an update step from one
of the optimizers in ``higher.optim``.
"""
def encapsulator(
fmodule: _MonkeyPatchBase, module: _torch.nn.Module
) -> None:
if copy_initial_weights:
params = _utils.get_func_params(module, device=device)
else:
params = [
p.clone() if device is None else p.clone().to(device)
for p in module.parameters()
]
buffer_sync(module, fmodule, device)
fmodule.update_params(params)
fmodule = make_functional(module, encapsulator=encapsulator)
fmodule.track_higher_grads = track_higher_grads
return fmodule | r"""Create a monkey-patched stateless version of a module. This function produces a monkey-patched version of a module, and returns a copy of its parameters for use as fast weights. Where the original module or any of its submodules have state (e.g. batch norm), this will be copied too, but further updates (e.g. during inner loop training) will cause these to diverge without changing the state of the original module. Args: module: a ``torch.nn.Module`` subclass instance. device (optional): a device to cast the fast weights and state to. copy_initial_weights: if True, the weights of the patched module are copied to form the initial weights of the patched module, and thus are not part of the gradient tape when unrolling the patched module. If this is set to False, the actual module weights will be the initial weights of the patched module. This is useful when doing MAML, for example. track_higher_grads: if True, during unrolled optimization the graph be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows ``monkeypatch`` to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: ``fmodule``: a "stateless" version of the original module, for which calls to forward take the additional kwarg-only parameter ``params``, which should be a list of torch tensors requiring gradients, ideally provided by this function (see below) or by an update step from one of the optimizers in ``higher.optim``. |
163,048 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
def _get_mask_closure(mask: _torch.Tensor) -> _GradClosureType:
def closure(grad: _torch.Tensor) -> _torch.Tensor:
grad = _torch.where(mask, _torch.zeros_like(grad), grad)
if grad.requires_grad:
grad.register_hook(_get_mask_closure(mask))
return grad
return closure
def _maybe_mask(tensor: _torch.Tensor, mask: _torch.Tensor) -> None:
if tensor.requires_grad:
tensor.register_hook(_get_mask_closure(mask)) | null |
163,049 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
# Maps an optimizer hyperparameter name to a list of per-group override
# values; a length-1 list applies the same override to every parameter group.
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
class DifferentiableOptimizer(_abc.ABC):
    """Base class for differentiable re-implementations of torch optimizers.

    An instance copies the hyperparameters and state of an existing
    ``torch.optim.Optimizer`` and exposes a :meth:`step` method whose
    parameter updates stay on the autograd graph, so an outer-loop loss can
    be backpropagated through the inner optimization steps. Subclasses
    implement the optimizer-specific update rule in :meth:`_update`.
    """
    def __init__(
        self,
        other: _torch.optim.Optimizer,
        reference_params: _typing.Iterable[_torch.Tensor],
        fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
        device: _typing.Optional[_torch.device] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        track_higher_grads: bool = True,
        **kwargs
    ) -> None:
        r"""Initialize the optimizer with the state of an existing optimizer.
        Args:
            other: an existing optimizer instance.
            reference_params: an iterable over the parameters of the original
                model.
            fmodel (optional): a patched stateless module with a view on
                weights.
            device (optional): the device to cast state tensors to.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides the
                corresponding setting in the ``i``\ th parameter group. This permits
                the passing of tensors requiring gradient to differentiable
                optimizers for use as optimizer settings.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. If this keyword argument is provided when calling the
                step method, its value will override the default specified here.
            track_higher_grads: if True, during unrolled optimization the graph
                will be retained, and the fast weights will bear grad funcs, so
                as to permit backpropagation through the optimization process.
                Setting this to False allows the differentiable optimizer to be
                used in "test mode", without potentially tracking higher order
                gradients. This can be useful when running the training loop at
                test time, e.g. in k-shot learning experiments, without
                incurring a significant memory overhead.
        """
        reference_params = list(reference_params)
        # Copy param groups and set up structures for copy state
        self.param_groups = _copy.deepcopy(other.param_groups)
        # For each param group, the indices of its params within
        # ``reference_params`` (and hence within the flat fast-weight list).
        self._group_to_param_list: _typing.List[_typing.List[int]] = []
        # Per-group optimizer state, keyed by a param's position within its
        # group rather than by the parameter tensor itself (fast weights are
        # replaced on every step, so tensor identity cannot be the key).
        self.state: _StateType = [
            _collections.defaultdict(dict)
            for _ in range(len(self.param_groups))
        ]
        # Deal with override
        if override is not None:
            self._apply_override(override)
        self._grad_callback = grad_callback
        # Copy and cast state
        zipped = zip(self.param_groups, other.param_groups)
        for group_idx, (group, orig_group) in enumerate(zipped):
            local_list = []
            for p_idx, p in enumerate(orig_group['params']):
                if p in other.state:
                    # Deep-copy (and optionally cast to ``device``) the
                    # original optimizer's state for this parameter.
                    self.state[group_idx][p_idx] = {
                        k: _utils._recursive_copy_and_cast(v, device)
                        for k, v in other.state[p].items()
                    }
                index = _utils._find_param_in_list(p, reference_params)
                if index is None:
                    raise ValueError(
                        "Could not find parameter {} in reference parameters.".
                        format(str(p))
                    )
                local_list.append(index)
            # Params are refilled with the current fast weights on every
            # ``step`` call, so clear the copied references now.
            group['params'] = [None] * len(group['params'])
            self._group_to_param_list.append(local_list)
        self._fmodel = fmodel
        self._track_higher_grads = track_higher_grads
    def _apply_override(self, override: _OverrideType) -> None:
        # Install override values for optimizer hyperparameters: a singleton
        # list applies to all groups, otherwise one value per group.
        for k, v in override.items():
            # Sanity check
            if (len(v) != 1) and (len(v) != len(self.param_groups)):
                raise ValueError(
                    "Mismatch between the number of override tensors for "
                    "optimizer parameter {} and the number of "
                    "parameter groups.".format(k)
                )
            for group_idx, group in enumerate(self.param_groups):
                group[k] = v[0] if len(v) == 1 else v[group_idx]
    def step(
        self,
        loss: _torch.Tensor,
        params: _typing.Optional[_typing.Iterable[_torch.Tensor]] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        **kwargs
    ) -> _typing.Iterable[_torch.Tensor]:
        r"""Perform a model update.
        This would be used by replacing the normal sequence::
            opt.zero_grad()
            loss.backward()
            opt.step()
        with::
            diffopt.step(loss)
        Args:
            loss: the loss tensor.
            params (optional): the parameters with regard to which we measure
                the loss. These must be provided if the differentiable optimizer
                did not receive a patched model with a view over its own fast
                weights at initialisation. If there is such a model, and params
                are provided, they will overwrite the params of the encapsulated
                model.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides
                the corresponding setting in the ``i``\ th parameter group. This
                permits the passing of tensors requiring gradient to
                differentiable optimizers for use as optimizer settings. Setting
                override here has highest precedence, i.e. it will override any
                tensors provided as override during the creation of the
                differentiable optimizer, where there is name clash.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. This callback overrides the default provided when
                constructing the differentiable optimizer.
        Returns:
            The updated parameters, which will individually have ``grad_fn``\ s
            of their own. If the optimizer has an encapsulated patched model,
            its view over its own fast weights will be updated with these
            params.
        """
        # Deal with override
        if override is not None:
            self._apply_override(override)
        if self._fmodel is None or self._fmodel.fast_params is None:
            if params is None:
                raise ValueError(
                    "params kwarg must be passed to step if the differentiable "
                    "optimizer doesn't have a view on a patched model with "
                    "params."
                )
        else:
            # Default to the patched model's current fast weights.
            params = self._fmodel.fast_params if params is None else params
        params = list(params)
        # This allows us to gracefully deal with cases where params are frozen.
        grad_targets = [
            p if p.requires_grad else _torch.tensor([], requires_grad=True)
            for p in params
        ]
        all_grads = _torch.autograd.grad(
            loss,
            grad_targets,
            create_graph=self._track_higher_grads,
            allow_unused=True # the dummy targets for frozen params are unused
        )
        if grad_callback is not None:
            all_grads = grad_callback(all_grads)
        elif self._grad_callback is not None:
            all_grads = self._grad_callback(all_grads)
        # Regroup the flat gradient list to match the parameter groups, and
        # point each group at the current fast weights.
        grouped_grads = []
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            grads = []
            for i, index in enumerate(mapping):
                group['params'][i] = params[index]
                grads.append(all_grads[index])
            grouped_grads.append(grads)
        # Subclass-specific update rule writes new fast weights into
        # ``self.param_groups``.
        self._update(grouped_grads)
        new_params = params[:]
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            for p, index in zip(group['params'], mapping):
                if self._track_higher_grads:
                    new_params[index] = p
                else:
                    # "Test mode": truncate the graph to cap memory usage.
                    new_params[index] = p.detach().requires_grad_()
        if self._fmodel is not None:
            self._fmodel.update_params(new_params)
        return new_params
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        # Overridden by each concrete optimizer with its update rule;
        # receives gradients grouped to mirror ``self.param_groups``.
        pass
# Dispatch table from torch optimizer types to their differentiable
# counterparts; custom optimizers are added at runtime via
# ``higher.register_optim``.
_opt_mapping: _OptMappingType = {
    _torch.optim.Adadelta: DifferentiableAdadelta,
    _torch.optim.Adagrad: DifferentiableAdagrad,
    _torch.optim.Adam: DifferentiableAdam,
    _torch.optim.AdamW: DifferentiableAdamW,
    _torch.optim.Adamax: DifferentiableAdamax,
    _torch.optim.ASGD: DifferentiableASGD,
    _torch.optim.RMSprop: DifferentiableRMSprop,
    _torch.optim.Rprop: DifferentiableRprop,
    _torch.optim.SGD: DifferentiableSGD,
}
The provided code snippet includes necessary dependencies for implementing the `get_diff_optim` function. Write a Python function `def get_diff_optim( opt: _torch.optim.Optimizer, reference_params: _typing.Iterable[_torch.Tensor], fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None, device: _typing.Optional[_torch.device] = None, override: _typing.Optional[_OverrideType] = None, track_higher_grads: bool = True, **kwargs ) -> DifferentiableOptimizer` to solve the following problem:
r"""Construct/initialize a differentiable version of an existing optimizer. Args: opt: an existing optimizer, assumed to be an instance of ``torch.optim.Optimizer``, of a supported type which is either defined in ``torch.optim``, or a custom implemantation which has been added to higher at runtime by using ``higher.register_optim``. We assume this optimizer tracks the parameters (or some subset thereof) of a single ``torch.nn.Module`` instance, with support for parameter groups. reference_params: the parameters of the module tracked by ``opt``, as returned by ``module.parameters()``. fmodel (optional): a patched version of the ``module`` tracked by ``opt``. It is assumed this patched instance has a view on its latest fast weights through ``fmodel.parameters()``. If provided, it is not necessary to pass the fast weights explicitly to the differentiable optimizer's ``step`` function via the keyword arg ``params``. If not provided, the fast weights to update must be provided to ``step``. device (optional): the device to cast the optimizer state to when creating the differentiable optimizer. If not provided, the same device as used for the parameters tracked by ``opt`` will be used. override (optional): a dictionary mapping optimizer settings (i.e. those which would be passed to the optimizer constructor or provided within parameter groups) to either singleton lists of override values, or to a list of override values of length equal to the number of parameter groups. If a single override is provided for a keyword, it is used for all parameter groups. If a list is provided, the ``i``\ th element of the list overrides the corresponding setting in the ``i``\ th parameter group. This permits the passing of tensors requiring gradient to differentiable optimizers for use as optimizer settings. 
track_higher_grads: if True, during unrolled optimization the graph will be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows the returned differentiable optimizer to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: An initialized ``DifferentiableOptimizer`` instance of the right subtype.
Here is the function:
def get_diff_optim(
    opt: _torch.optim.Optimizer,
    reference_params: _typing.Iterable[_torch.Tensor],
    fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
    device: _typing.Optional[_torch.device] = None,
    override: _typing.Optional[_OverrideType] = None,
    track_higher_grads: bool = True,
    **kwargs
) -> DifferentiableOptimizer:
    r"""Build a differentiable counterpart of an existing optimizer.

    Args:
        opt: an existing optimizer of a supported type, i.e. one defined in
            ``torch.optim`` or a custom implementation registered with higher
            at runtime via ``higher.register_optim``. It is assumed to track
            the parameters (or some subset thereof) of a single
            ``torch.nn.Module`` instance, with support for parameter groups.
        reference_params: the parameters of the module tracked by ``opt``, as
            returned by ``module.parameters()``.
        fmodel (optional): a patched version of the module tracked by ``opt``,
            assumed to hold a view on its latest fast weights through
            ``fmodel.parameters()``. If provided, the fast weights need not be
            passed explicitly to the differentiable optimizer's ``step``
            method; otherwise they must be supplied to ``step`` each call.
        device (optional): the device to cast the copied optimizer state to.
            If omitted, the device of the tracked parameters is kept.
        override (optional): a dictionary mapping optimizer settings to either
            a singleton list of an override value (applied to every parameter
            group) or a list with one override value per parameter group. This
            permits passing tensors requiring gradient to differentiable
            optimizers for use as optimizer settings.
        track_higher_grads: if True, the unrolled optimization graph is
            retained and the fast weights bear grad funcs, so the optimization
            process itself can be backpropagated through. Set to False for
            "test mode", which avoids the memory overhead of higher-order
            gradient tracking.

    Returns:
        An initialized ``DifferentiableOptimizer`` instance of the right
        subtype.

    Raises:
        ValueError: if ``type(opt)`` has no registered differentiable
            counterpart.
    """
    # Exact-type dispatch: subclasses of supported optimizers are not matched.
    diff_cls = _opt_mapping.get(type(opt))
    if diff_cls is None:
        raise ValueError(
            "Optimizer type {} not supported by higher yet.".format(type(opt))
        )
    return diff_cls(
        opt,
        reference_params,
        fmodel=fmodel,
        device=device,
        override=override,
        track_higher_grads=track_higher_grads,
        **kwargs
    )
track_higher_grads: if True, during unrolled optimization the graph will be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows the returned differentiable optimizer to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: An initialized ``DifferentiableOptimizer`` instance of the right subtype.
163,050 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
# Maps an optimizer hyperparameter name to a list of per-group override
# values; a length-1 list applies the same override to every parameter group.
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
class DifferentiableOptimizer(_abc.ABC):
    """Base class for differentiable re-implementations of torch optimizers.

    An instance copies the hyperparameters and state of an existing
    ``torch.optim.Optimizer`` and exposes a :meth:`step` method whose
    parameter updates stay on the autograd graph, so an outer-loop loss can
    be backpropagated through the inner optimization steps. Subclasses
    implement the optimizer-specific update rule in :meth:`_update`.
    """
    def __init__(
        self,
        other: _torch.optim.Optimizer,
        reference_params: _typing.Iterable[_torch.Tensor],
        fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
        device: _typing.Optional[_torch.device] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        track_higher_grads: bool = True,
        **kwargs
    ) -> None:
        r"""Initialize the optimizer with the state of an existing optimizer.
        Args:
            other: an existing optimizer instance.
            reference_params: an iterable over the parameters of the original
                model.
            fmodel (optional): a patched stateless module with a view on
                weights.
            device (optional): the device to cast state tensors to.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides the
                corresponding setting in the ``i``\ th parameter group. This permits
                the passing of tensors requiring gradient to differentiable
                optimizers for use as optimizer settings.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. If this keyword argument is provided when calling the
                step method, its value will override the default specified here.
            track_higher_grads: if True, during unrolled optimization the graph
                will be retained, and the fast weights will bear grad funcs, so
                as to permit backpropagation through the optimization process.
                Setting this to False allows the differentiable optimizer to be
                used in "test mode", without potentially tracking higher order
                gradients. This can be useful when running the training loop at
                test time, e.g. in k-shot learning experiments, without
                incurring a significant memory overhead.
        """
        reference_params = list(reference_params)
        # Copy param groups and set up structures for copy state
        self.param_groups = _copy.deepcopy(other.param_groups)
        # For each param group, the indices of its params within
        # ``reference_params`` (and hence within the flat fast-weight list).
        self._group_to_param_list: _typing.List[_typing.List[int]] = []
        # Per-group optimizer state, keyed by a param's position within its
        # group rather than by the parameter tensor itself (fast weights are
        # replaced on every step, so tensor identity cannot be the key).
        self.state: _StateType = [
            _collections.defaultdict(dict)
            for _ in range(len(self.param_groups))
        ]
        # Deal with override
        if override is not None:
            self._apply_override(override)
        self._grad_callback = grad_callback
        # Copy and cast state
        zipped = zip(self.param_groups, other.param_groups)
        for group_idx, (group, orig_group) in enumerate(zipped):
            local_list = []
            for p_idx, p in enumerate(orig_group['params']):
                if p in other.state:
                    # Deep-copy (and optionally cast to ``device``) the
                    # original optimizer's state for this parameter.
                    self.state[group_idx][p_idx] = {
                        k: _utils._recursive_copy_and_cast(v, device)
                        for k, v in other.state[p].items()
                    }
                index = _utils._find_param_in_list(p, reference_params)
                if index is None:
                    raise ValueError(
                        "Could not find parameter {} in reference parameters.".
                        format(str(p))
                    )
                local_list.append(index)
            # Params are refilled with the current fast weights on every
            # ``step`` call, so clear the copied references now.
            group['params'] = [None] * len(group['params'])
            self._group_to_param_list.append(local_list)
        self._fmodel = fmodel
        self._track_higher_grads = track_higher_grads
    def _apply_override(self, override: _OverrideType) -> None:
        # Install override values for optimizer hyperparameters: a singleton
        # list applies to all groups, otherwise one value per group.
        for k, v in override.items():
            # Sanity check
            if (len(v) != 1) and (len(v) != len(self.param_groups)):
                raise ValueError(
                    "Mismatch between the number of override tensors for "
                    "optimizer parameter {} and the number of "
                    "parameter groups.".format(k)
                )
            for group_idx, group in enumerate(self.param_groups):
                group[k] = v[0] if len(v) == 1 else v[group_idx]
    def step(
        self,
        loss: _torch.Tensor,
        params: _typing.Optional[_typing.Iterable[_torch.Tensor]] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        **kwargs
    ) -> _typing.Iterable[_torch.Tensor]:
        r"""Perform a model update.
        This would be used by replacing the normal sequence::
            opt.zero_grad()
            loss.backward()
            opt.step()
        with::
            diffopt.step(loss)
        Args:
            loss: the loss tensor.
            params (optional): the parameters with regard to which we measure
                the loss. These must be provided if the differentiable optimizer
                did not receive a patched model with a view over its own fast
                weights at initialisation. If there is such a model, and params
                are provided, they will overwrite the params of the encapsulated
                model.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides
                the corresponding setting in the ``i``\ th parameter group. This
                permits the passing of tensors requiring gradient to
                differentiable optimizers for use as optimizer settings. Setting
                override here has highest precedence, i.e. it will override any
                tensors provided as override during the creation of the
                differentiable optimizer, where there is name clash.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. This callback overrides the default provided when
                constructing the differentiable optimizer.
        Returns:
            The updated parameters, which will individually have ``grad_fn``\ s
            of their own. If the optimizer has an encapsulated patched model,
            its view over its own fast weights will be updated with these
            params.
        """
        # Deal with override
        if override is not None:
            self._apply_override(override)
        if self._fmodel is None or self._fmodel.fast_params is None:
            if params is None:
                raise ValueError(
                    "params kwarg must be passed to step if the differentiable "
                    "optimizer doesn't have a view on a patched model with "
                    "params."
                )
        else:
            # Default to the patched model's current fast weights.
            params = self._fmodel.fast_params if params is None else params
        params = list(params)
        # This allows us to gracefully deal with cases where params are frozen.
        grad_targets = [
            p if p.requires_grad else _torch.tensor([], requires_grad=True)
            for p in params
        ]
        all_grads = _torch.autograd.grad(
            loss,
            grad_targets,
            create_graph=self._track_higher_grads,
            allow_unused=True # the dummy targets for frozen params are unused
        )
        if grad_callback is not None:
            all_grads = grad_callback(all_grads)
        elif self._grad_callback is not None:
            all_grads = self._grad_callback(all_grads)
        # Regroup the flat gradient list to match the parameter groups, and
        # point each group at the current fast weights.
        grouped_grads = []
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            grads = []
            for i, index in enumerate(mapping):
                group['params'][i] = params[index]
                grads.append(all_grads[index])
            grouped_grads.append(grads)
        # Subclass-specific update rule writes new fast weights into
        # ``self.param_groups``.
        self._update(grouped_grads)
        new_params = params[:]
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            for p, index in zip(group['params'], mapping):
                if self._track_higher_grads:
                    new_params[index] = p
                else:
                    # "Test mode": truncate the graph to cap memory usage.
                    new_params[index] = p.detach().requires_grad_()
        if self._fmodel is not None:
            self._fmodel.update_params(new_params)
        return new_params
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        # Overridden by each concrete optimizer with its update rule;
        # receives gradients grouped to mirror ``self.param_groups``.
        pass
# Dispatch table from torch optimizer types to their differentiable
# counterparts; custom optimizers are added at runtime via
# ``higher.register_optim``.
_opt_mapping: _OptMappingType = {
    _torch.optim.Adadelta: DifferentiableAdadelta,
    _torch.optim.Adagrad: DifferentiableAdagrad,
    _torch.optim.Adam: DifferentiableAdam,
    _torch.optim.AdamW: DifferentiableAdamW,
    _torch.optim.Adamax: DifferentiableAdamax,
    _torch.optim.ASGD: DifferentiableASGD,
    _torch.optim.RMSprop: DifferentiableRMSprop,
    _torch.optim.Rprop: DifferentiableRprop,
    _torch.optim.SGD: DifferentiableSGD,
}
The provided code snippet includes necessary dependencies for implementing the `create_diff_optim` function. Write a Python function `def create_diff_optim( opt_type: _typing.Type[_torch.optim.Optimizer], opt_kwargs: _typing.Optional[_typing.Dict[str, _typing.Any]] = None, params: _typing.Optional[_typing.List[_torch.Tensor]] = None, fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None, device: _typing.Optional[_torch.device] = None, override: _typing.Optional[_OverrideType] = None, track_higher_grads: bool = True, **kwargs ) -> DifferentiableOptimizer` to solve the following problem:
r"""Construct a differentiable version of an new optimizer. Args: opt_type: the type (constructor) for a torch.optim.Optimizer subtype from amongst the types supported by the library, or registered with it a runtime. opt_kwargs: a dictionary of keywords to be passed to the optimizer constructor. params (optional): a list of (fast) weights which the differentiable optimizer will update. These must be provided if fmodel is not provided. If both, these will be used in lieu. These will only be used for shape inference when initializing the optimizer. This argument can also take the same format as parameter groups, i.e. an iterable over dictionaries which contain the 'params' key with fast weights as value, and group-specific hyperparameters. fmodel (optional): a patched version of the ``module`` tracked by ``opt``. It is assumed this patched instance has a view on its latest fast weights through ``fmodel.parameters()``. If provided, it is not necessary to pass the fast weights explicitly to the differentiable optimizer's ``step`` function via the keyword arg ``params``. If not provided, the fast weights to update must be provided to ``step``. device (optional): the device to cast the optimizer state to when creating the differentiable optimizer. If not provided, the same device as used for the parameters tracked by ``opt`` will be used. override (optional): a dictionary mapping optimizer settings (i.e. those which would be passed to the optimizer constructor or provided within parameter groups) to either singleton lists of override values, or to a list of override values of length equal to the number of parameter groups. If a single override is provided for a keyword, it is used for all parameter groups. If a list is provided, the ``i``\ th element of the list overrides the corresponding setting in the ``i``\ th parameter group. This permits the passing of tensors requiring gradient to differentiable optimizers for use as optimizer settings. 
track_higher_grads: if True, during unrolled optimization the graph will be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows the returned differentiable optimizer to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: An initialized ``DifferentiableOptimizer`` instance of the right subtype.
Here is the function:
def create_diff_optim(
    opt_type: _typing.Type[_torch.optim.Optimizer],
    opt_kwargs: _typing.Optional[_typing.Dict[str, _typing.Any]] = None,
    params: _typing.Optional[_typing.List[_torch.Tensor]] = None,
    fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
    device: _typing.Optional[_torch.device] = None,
    override: _typing.Optional[_OverrideType] = None,
    track_higher_grads: bool = True,
    **kwargs
) -> DifferentiableOptimizer:
    r"""Construct a fresh optimizer of ``opt_type`` in differentiable form.

    Unlike :func:`get_diff_optim`, no pre-existing optimizer instance is
    required: dummy zero tensors shaped like ``params`` (or like the
    parameters of ``fmodel``) are used to initialize the wrapped optimizer.

    Args:
        opt_type: the type (constructor) of a supported
            ``torch.optim.Optimizer`` subclass.
        opt_kwargs: a dictionary of keyword arguments forwarded to the
            optimizer constructor.
        params (optional): a list of (fast) weights which the differentiable
            optimizer will update, used here only for shape inference. This
            argument can also take the parameter-group format, i.e. an
            iterable of dictionaries containing a 'params' key plus
            group-specific hyperparameters. Must be provided if ``fmodel`` is
            not; if both are given, ``params`` takes precedence.
        fmodel (optional): a patched module with a view on its latest fast
            weights through ``fmodel.parameters()``. If provided, the fast
            weights need not be passed explicitly to ``step``.
        device (optional): the device to cast the optimizer state to when
            creating the differentiable optimizer.
        override (optional): a dictionary mapping optimizer settings to either
            a singleton list of an override value (applied to every parameter
            group) or a list with one override value per parameter group. This
            permits passing tensors requiring gradient to differentiable
            optimizers for use as optimizer settings.
        track_higher_grads: if True, the unrolled optimization graph is
            retained so the optimization process can be backpropagated
            through; set to False for memory-cheap "test mode".

    Returns:
        An initialized ``DifferentiableOptimizer`` instance of the right
        subtype.

    Raises:
        ValueError: if ``opt_type`` is unsupported, or if neither ``params``
            nor ``fmodel`` is provided.
    """
    if opt_type not in _opt_mapping:
        raise ValueError(
            "Optimizer type {} not supported by higher yet.".format(opt_type)
        )
    if params is not None:
        params = list(params)
        if isinstance(params[0], dict):
            # Parameter-group format: keep each group's hyperparameters but
            # replace its params entry with zero dummies for shape inference.
            dummy = [
                {
                    k: _torch.zeros_like(v, requires_grad=True)
                    if k == "params" else v
                    for k, v in group.items()
                } for group in params
            ]
        else:
            dummy = [
                _torch.zeros_like(p, requires_grad=True) for p in params
            ]
    elif fmodel is not None:
        dummy = [
            _torch.zeros_like(p, requires_grad=True)
            for p in fmodel.parameters()
        ]
    else:
        raise ValueError("Must specify one of fmodel or params in kwargs.")
    opt_kwargs = opt_kwargs if opt_kwargs is not None else {}
    # The throwaway optimizer only exists so its hyperparameters and state
    # structure can be copied by the differentiable wrapper.
    opt = opt_type(dummy, **opt_kwargs)
    return _opt_mapping[opt_type](
        opt,
        dummy,
        fmodel=fmodel,
        device=device,
        override=override,
        track_higher_grads=track_higher_grads,
        **kwargs
    )
track_higher_grads: if True, during unrolled optimization the graph will be retained, and the fast weights will bear grad funcs, so as to permit backpropagation through the optimization process. Setting this to False allows the returned differentiable optimizer to be used in "test mode", without potentially tracking higher order gradients. This can be useful when running the training loop at test time, e.g. in k-shot learning experiments, without incurring a significant memory overhead. Returns: An initialized ``DifferentiableOptimizer`` instance of the right subtype.
163,051 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
class DifferentiableOptimizer(_abc.ABC):
    """Abstract base class for differentiable versions of torch optimizers.

    Subclasses implement ``_update`` with the (differentiable) update rule of
    a specific optimizer.  Instances mirror the parameter groups and state of
    an existing ``torch.optim.Optimizer`` so that unrolled optimization steps
    can be backpropagated through.
    """
    def __init__(
        self,
        other: _torch.optim.Optimizer,
        reference_params: _typing.Iterable[_torch.Tensor],
        fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
        device: _typing.Optional[_torch.device] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        track_higher_grads: bool = True,
        **kwargs  # extra kwargs are accepted (for subclass forwarding) but ignored here
    ) -> None:
        r"""Initialize the optimizer with the state of an existing optimizer.
        Args:
            other: an existing optimizer instance.
            reference_params: an iterable over the parameters of the original
                model.
            fmodel (optional): a patched stateless module with a view on
                weights.
            device (optional): the device to cast state tensors to.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides the
                corresponding setting in the ``i``\ th parameter group. This permits
                the passing of tensors requiring gradient to differentiable
                optimizers for use as optimizer settings.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. If this keyword argument is provided when calling the
                step method, its value will override the default specified here.
            track_higher_grads: if True, during unrolled optimization the graph
                be retained, and the fast weights will bear grad funcs, so as to
                permit backpropagation through the optimization process. Setting
                this to False allows the differentiable optimizer to be used in
                "test mode", without potentially tracking higher order
                gradients. This can be useful when running the training loop at
                test time, e.g. in k-shot learning experiments, without
                incurring a significant memory overhead.
        """
        reference_params = list(reference_params)
        # Copy param groups and set up structures for copy state
        self.param_groups = _copy.deepcopy(other.param_groups)
        self._group_to_param_list: _typing.List[_typing.List[int]] = []
        # One defaultdict of per-parameter state per parameter group,
        # keyed by the parameter's index within the group.
        self.state: _StateType = [
            _collections.defaultdict(dict)
            for _ in range(len(self.param_groups))
        ]
        # Deal with override
        if override is not None:
            self._apply_override(override)
        self._grad_callback = grad_callback
        # Copy and cast state
        zipped = zip(self.param_groups, other.param_groups)
        for group_idx, (group, orig_group) in enumerate(zipped):
            local_list = []
            for p_idx, p in enumerate(orig_group['params']):
                if p in other.state:
                    # Detach/cast copies so mutating our state cannot
                    # affect the wrapped optimizer's state.
                    self.state[group_idx][p_idx] = {
                        k: _utils._recursive_copy_and_cast(v, device)
                        for k, v in other.state[p].items()
                    }
                index = _utils._find_param_in_list(p, reference_params)
                if index is None:
                    raise ValueError(
                        "Could not find parameter {} in reference parameters.".
                        format(str(p))
                    )
                local_list.append(index)
            # Placeholders: the actual fast weights are slotted in on each
            # call to step() using _group_to_param_list indices.
            group['params'] = [None] * len(group['params'])
            self._group_to_param_list.append(local_list)
        self._fmodel = fmodel
        self._track_higher_grads = track_higher_grads

    def _apply_override(self, override: _OverrideType) -> None:
        # Install per-group hyperparameter overrides; a singleton list is
        # broadcast to every parameter group.
        for k, v in override.items():
            # Sanity check
            if (len(v) != 1) and (len(v) != len(self.param_groups)):
                raise ValueError(
                    "Mismatch between the number of override tensors for "
                    "optimizer parameter {} and the number of "
                    "parameter groups.".format(k)
                )
            for group_idx, group in enumerate(self.param_groups):
                group[k] = v[0] if len(v) == 1 else v[group_idx]

    def step(
        self,
        loss: _torch.Tensor,
        params: _typing.Optional[_typing.Iterable[_torch.Tensor]] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        **kwargs  # extra kwargs are accepted but unused by the base class
    ) -> _typing.Iterable[_torch.Tensor]:
        r"""Perform a model update.
        This would be used by replacing the normal sequence::
            opt.zero_grad()
            loss.backward()
            opt.step()
        with::
            diffopt.step(loss)
        Args:
            loss: the loss tensor.
            params (optional): the parameters with regard to which we measure
                the loss. These must be provided if the differentiable optimizer
                did not receive a patched model with a view over its own fast
                weights at initialisation. If there is such a model, and params
                are provided, they will overwrite the params of the encapsulated
                model.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides
                the corresponding setting in the ``i``\ th parameter group. This
                permits the passing of tensors requiring gradient to
                differentiable optimizers for use as optimizer settings. Setting
                override here has highest precedence, i.e. it will override any
                tensors provided as override during the creation of the
                differentiable optimizer, where there is name clash.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. This callback overrides the default provided when
                constructing the differentiable optimizer.
        Returns:
            The updated parameters, which will individually have ``grad_fn``\ s
            of their own. If the optimizer has an encapsulated patched model,
            its view over its own fast weights will be updated with these
            params.
        """
        # Deal with override
        if override is not None:
            self._apply_override(override)
        if self._fmodel is None or self._fmodel.fast_params is None:
            if params is None:
                raise ValueError(
                    "params kwarg must be passed to step if the differentiable "
                    "optimizer doesn't have a view on a patched model with "
                    "params."
                )
        else:
            params = self._fmodel.fast_params if params is None else params
        params = list(params)
        # This allows us to gracefully deal with cases where params are frozen.
        grad_targets = [
            p if p.requires_grad else _torch.tensor([], requires_grad=True)
            for p in params
        ]
        all_grads = _torch.autograd.grad(
            loss,
            grad_targets,
            create_graph=self._track_higher_grads,
            allow_unused=True  # frozen params were swapped for dummies above, so they yield None grads
        )
        # An explicit callback passed to step() takes precedence over the
        # one supplied at construction time.
        if grad_callback is not None:
            all_grads = grad_callback(all_grads)
        elif self._grad_callback is not None:
            all_grads = self._grad_callback(all_grads)
        # Regroup flat grads/params according to the original param groups.
        grouped_grads = []
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            grads = []
            for i, index in enumerate(mapping):
                group['params'][i] = params[index]
                grads.append(all_grads[index])
            grouped_grads.append(grads)
        # Delegate the actual (in-group) update rule to the subclass.
        self._update(grouped_grads)
        new_params = params[:]
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            for p, index in zip(group['params'], mapping):
                if self._track_higher_grads:
                    new_params[index] = p
                else:
                    # Sever the graph in "test mode" to avoid the memory
                    # overhead of tracking higher-order gradients.
                    new_params[index] = p.detach().requires_grad_()
        if self._fmodel is not None:
            self._fmodel.update_params(new_params)
        return new_params

    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        # Subclass hook: apply one differentiable optimizer update given the
        # gradients grouped per parameter group.  Base implementation is a no-op.
        pass
# Registry mapping each supported torch optimizer class to the
# DifferentiableOptimizer subclass mirroring its update rule.
# Extended at runtime via register_optim().
_opt_mapping: _OptMappingType = {
    _torch.optim.Adadelta: DifferentiableAdadelta,
    _torch.optim.Adagrad: DifferentiableAdagrad,
    _torch.optim.Adam: DifferentiableAdam,
    _torch.optim.AdamW: DifferentiableAdamW,
    _torch.optim.Adamax: DifferentiableAdamax,
    _torch.optim.ASGD: DifferentiableASGD,
    _torch.optim.RMSprop: DifferentiableRMSprop,
    _torch.optim.Rprop: DifferentiableRprop,
    _torch.optim.SGD: DifferentiableSGD,
}
The provided code snippet includes necessary dependencies for implementing the `register_optim` function. Write a Python function `def register_optim( optim_type: _torch.optim.Optimizer, diff_optim_type: _typing.Type[DifferentiableOptimizer] ) -> None` to solve the following problem:
r"""Registers a new optimizer type for use with higher functions. Args: optim_type: the type of a new optimizer, assumed to be an instance of ``torch.optim.Optimizer``. diff_optim_type: the type of a new differentiable optimizer, assumed to be an instance of ``higher.optim.DifferentiableOptimizer`` with functionally equivalent logic to ``optim_type``.
Here is the function:
def register_optim(
    optim_type: _typing.Type[_torch.optim.Optimizer],
    diff_optim_type: _typing.Type[DifferentiableOptimizer]
) -> None:
    r"""Registers a new optimizer type for use with higher functions.
    Args:
        optim_type: the type of a new optimizer, assumed to be an instance of
            ``torch.optim.Optimizer``.
        diff_optim_type: the type of a new differentiable optimizer, assumed to
            be an instance of ``higher.optim.DifferentiableOptimizer`` with
            functionally equivalent logic to ``optim_type``.
    """
    # Mutates the module-level registry consulted when constructing
    # differentiable optimizers from torch optimizer instances.
    _opt_mapping[optim_type] = diff_optim_type
163,052 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
The provided code snippet includes necessary dependencies for implementing the `get_trainable_opt_params` function. Write a Python function `def get_trainable_opt_params( opt: _torch.optim.Optimizer, device: _typing.Optional[_torch.device] = None ) -> _OverrideType` to solve the following problem:
r"""Get an override dictionary from an optimizer instance. Args: opt: the optimizer to obtain an override dictionary from. device (optional): the device to cast the learnable tensors to. Returns: A dictionary of the format expected for the override kwarg of differentiable optimizers. It is initialized with trainable tensors with as values those float and int hyperparameters found in the optimizer's parameter groups (or structures containing these). Heuristically, hyperparameters containing mixtures of differentiable and non-differentiable types will be ignored (and must be manually specified when constructing an override dict).
Here is the function:
def get_trainable_opt_params(
    opt: _torch.optim.Optimizer, device: _typing.Optional[_torch.device] = None
) -> _OverrideType:
    r"""Get an override dictionary from an optimizer instance.
    Args:
        opt: the optimizer to obtain an override dictionary from.
        device (optional): the device to cast the learnable tensors to.
    Returns:
        A dictionary of the format expected for the override kwarg of
        differentiable optimizers. It is initialized with trainable tensors
        holding as values the float and int hyperparameters found in the
        optimizer's parameter groups (or structures containing these).
        Heuristically, hyperparameters containing mixtures of differentiable
        and non-differentiable types will be ignored (and must be manually
        specified when constructing an override dict).
    """
    override: _OverrideType = _collections.defaultdict(list)

    def _to_learnable(
        x: _typing.Union[_torch.Tensor, int, float]
    ) -> _torch.Tensor:
        # Lift a plain number (or tensor) into a fresh leaf tensor that
        # requires grad, so it can be meta-learned.
        if isinstance(x, _torch.Tensor):
            return x.clone().detach().requires_grad_()
        return _torch.tensor(float(x), device=device, requires_grad=True)

    for group in opt.param_groups:
        for name, value in group.items():
            # The actual model parameters tracked by the optimizer are not
            # hyperparameters; skip them.
            if name == "params":
                continue
            # Only lift hyperparameters whose flattened leaves are all plain
            # ints/floats; anything else must be overridden manually.
            leaves = _utils.flatten(value)
            if all(isinstance(leaf, (int, float)) for leaf in leaves):
                override[name].append(
                    _utils._recursive_map(value, _to_learnable)
                )
    return override
163,053 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
def _recursive_apply(
    replacement: _typing.Union[list, tuple, dict, set, _torch.Tensor],
    target: _typing.Union[_torch.Tensor, int, float]
) -> _typing.Union[_torch.Tensor, int, float]:
    """Recursively write ``replacement`` values into ``target``.

    Used when copying learned hyperparameters back into an optimizer's
    parameter groups: containers are walked in parallel, tensor targets are
    updated in place (via ``.data``), and scalar targets are rebuilt from a
    tensor replacement with ``type(target)(replacement.item())``.

    Raises:
        ValueError: when the replacement/target structures cannot be matched.
    """
    if not isinstance(replacement, type(target)):
        # Type mismatch: the only permitted case is a tensor replacement for
        # a non-container scalar target, which is converted via .item().
        if (
            isinstance(replacement, _torch.Tensor) and
            not _utils._is_container(target)
        ):
            return type(target)(replacement.item())
        raise ValueError(
            "Expected an non-container type for target, but got {} with value "
            "{}".format(type(target), target)
        )
    elif (
        isinstance(replacement, _torch.Tensor) and
        isinstance(target, _torch.Tensor)
    ):
        # Tensor-to-tensor: mutate the target in place so existing references
        # (e.g. inside param_groups) observe the new values.
        replacement = replacement.to(target.device)
        target.data = replacement.data
        return target
    # Container cases: recurse pairwise over matching elements.
    # NOTE(review): zip() silently truncates if the structures differ in
    # length, and the dict branch pairs values by insertion order, not by
    # key — presumably both structures always line up; verify at call sites.
    if isinstance(target, list):
        return type(target)(
            [_recursive_apply(r, t) for r, t in zip(replacement, target)]
        )
    elif isinstance(target, tuple):
        return type(target)(
            [_recursive_apply(r, t) for r, t in zip(replacement, target)]
        )
    elif isinstance(replacement, dict) and isinstance(target, dict):
        return type(target)(
            {k: _recursive_apply(r, t)
             for (_, r), (k, t) in zip(replacement.items(), target.items())}
        )
    elif isinstance(target, set):
        return type(target)(
            {_recursive_apply(r, t)
             for r, t in zip(replacement, target)}
        )
    else:
        raise ValueError(
            "Couldn't apply replacement of type {} to target of type "
            "{}".format(type(replacement), type(target))
        )
The provided code snippet includes necessary dependencies for implementing the `apply_trainable_opt_params` function. Write a Python function `def apply_trainable_opt_params( opt: _torch.optim.Optimizer, override: _OverrideType ) -> None` to solve the following problem:
r"""Apply learned hyperparameters back to original optimizer. Args: opt: the original optimizer. The hyperparameters in its parameter groups will be modified in place. override: dictionary of the format used for the override kwarg of differentiable optimizers.
Here is the function:
def apply_trainable_opt_params(
    opt: _torch.optim.Optimizer, override: _OverrideType
) -> None:
    r"""Apply learned hyperparameters back to original optimizer.
    Args:
        opt: the original optimizer. The hyperparameters in its parameter groups
            will be modified in place.
        override: dictionary of the format used for the override kwarg of
            differentiable optimizers.
    """
    for k, v in override.items():
        # Sanity check: a singleton list broadcasts to all groups, otherwise
        # there must be exactly one override per parameter group.
        if (len(v) != 1) and (len(v) != len(opt.param_groups)):
            raise ValueError(
                "Mismatch between the number of override tensors for "
                "optimizer parameter {} and the number of "
                "parameter groups.".format(k)
            )
        for group_idx, group in enumerate(opt.param_groups):
            # BUG FIX: was `len(v) is 1` — identity comparison on an int is
            # implementation-dependent (and a SyntaxWarning on CPython 3.8+);
            # use equality, matching DifferentiableOptimizer._apply_override.
            replacement = v[0] if len(v) == 1 else v[group_idx]
            group[k] = _recursive_apply(replacement, group[k])
163,054 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
def _add(
    tensor: _torch.Tensor,
    a1: _typing.Union[float, int, _torch.Tensor],
    a2: _typing.Optional[_torch.Tensor] = None
) -> _torch.Tensor:
    """Out-of-place analogue of ``Tensor.add_``.

    With two args returns ``tensor + a1``; with three returns
    ``tensor + a1 * a2`` (``a1`` acting as the scaling coefficient).
    """
    if a2 is None:
        scale, term = 1., a1
    else:
        scale, term = a1, a2
    return tensor + (scale * term)
163,055 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
def _addcdiv(
    tensor: _torch.Tensor,
    a1: _typing.Union[float, int, _torch.Tensor],
    a2: _torch.Tensor,
    a3: _typing.Optional[_torch.Tensor] = None
) -> _torch.Tensor:
    """Out-of-place analogue of ``Tensor.addcdiv_``.

    With three args returns ``tensor + a1 / a2``; with four returns
    ``tensor + a1 * (a2 / a3)`` (``a1`` acting as the scaling coefficient).
    """
    if a3 is None:
        scale, numerator, denominator = 1., a1, a2
    else:
        scale, numerator, denominator = a1, a2, a3
    return tensor + scale * (numerator / denominator)
163,056 | import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch
from . import utils as _utils
def _addcmul(
    tensor: _torch.Tensor,
    a1: _typing.Union[float, int, _torch.Tensor],
    a2: _torch.Tensor,
    a3: _typing.Optional[_torch.Tensor] = None
) -> _torch.Tensor:
    """Out-of-place analogue of ``Tensor.addcmul_``.

    With three args returns ``tensor + a1 * a2``; with four returns
    ``tensor + a1 * a2 * a3`` (``a1`` acting as the scaling coefficient).
    """
    if a3 is None:
        scale, lhs, rhs = 1., a1, a2
    else:
        scale, lhs, rhs = a1, a2, a3
    return tensor + (scale * lhs * rhs)
163,057 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def construct_hyper_param(parser):
    """Register all CLI options on *parser*, parse them, derive dependent
    settings (BERT variant, casing, RNG seeds), and return the args namespace.

    NOTE(review): several boolean-looking options (--infer_loop, --trained,
    --fine_tune, --no_pretraining, --EG) use ``default=...`` without
    ``action='store_true'``, so any value passed on the command line arrives
    as a *string* (always truthy) — confirm callers only rely on the defaults.
    """
    parser.add_argument("--do_train", default=False, action='store_true')
    parser.add_argument('--do_infer', default=False, action='store_true')
    parser.add_argument('--infer_loop', default=False)
    parser.add_argument("--trained", default=False)
    parser.add_argument('--fine_tune',
                        default=True,
                        help="If present, BERT is trained.")
    parser.add_argument('--tepoch', default=20, type=int)
    parser.add_argument('--test_epoch', default=1, type=int)
    parser.add_argument("--bS", default=8, type=int,
                        help="Batch size")
    parser.add_argument("--accumulate_gradients", default=1, type=int,
                        help="The number of accumulation of backpropagation to effectivly increase the batch size.")
    parser.add_argument("--model_type", default='Seq2SQL_v1', type=str,
                        help="Type of model.")
    parser.add_argument("--data_dir", type=str, help="Path of data.")
    parser.add_argument("--output_dir", type=str, help="Path of output.")
    parser.add_argument("--output_name", type=str, help="Name of output.")
    parser.add_argument("--run_name", type=str, help="Name of running.")
    # 1.2 BERT Parameters
    parser.add_argument("--vocab_file",
                        default='vocab.txt', type=str,
                        help="The vocabulary file that the BERT model was trained on.")
    parser.add_argument("--max_seq_length",
                        default=512, type=int,  # Set based on maximum length of input tokens.
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--num_target_layers",
                        default=4, type=int,
                        help="The Number of final layers of BERT to be used in downstream task.")
    parser.add_argument('--lr_bert', default=1e-5, type=float, help='BERT model learning rate.')
    parser.add_argument('--lr_amr', default=1e-4, type=float, help='BERT model learning rate.')
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--no_pretraining', default=True, help='Use BERT pretrained model')
    parser.add_argument("--bert_type_abb", default='uS', type=str,
                        help="Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS")
    parser.add_argument("--bert_name", default="ckpt", type=str)
    parser.add_argument("--bert_path", type=str)
    # 1.3 Seq-to-SQL module parameters
    parser.add_argument('--lS', default=2, type=int, help="The number of LSTM layers.")
    parser.add_argument('--dr', default=0.3, type=float, help="Dropout rate.")
    parser.add_argument('--lr', default=1e-3, type=float, help="Learning rate.")
    parser.add_argument("--hS", default=100, type=int, help="The dimension of hidden vector in the seq-to-SQL module.")
    # 1.4 Execution-guided decoding beam-size. It is used only in test.py
    parser.add_argument('--EG',
                        default=True,
                        help="If present, Execution guided decoding is used in test.")
    parser.add_argument('--beam_size',
                        type=int,
                        default=4,
                        help="The size of beam for smart decoding")
    args = parser.parse_args()
    # Resolve the abbreviation into the full BERT checkpoint name.
    map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',
                         'uL': 'uncased_L-24_H-1024_A-16',
                         'cS': 'cased_L-12_H-768_A-12',
                         'cL': 'cased_L-24_H-1024_A-16',
                         'mcS': 'multi_cased_L-12_H-768_A-12'}
    args.bert_type = map_bert_type_abb[args.bert_type_abb]
    print(f"saved air {os.path.join('./'+str(args.bert_name), str(1)+'_model_bert_best.pt')}")
    # Decide whether to use lower_case.
    # Cased/multilingual variants keep original casing; uncased ones lowercase.
    if args.bert_type_abb == 'cS' or args.bert_type_abb == 'cL' or args.bert_type_abb == 'mcS':
        args.do_lower_case = False
    else:
        args.do_lower_case = True
    # Seeds for random number generation
    # NOTE(review): `seed` comes from a star import (presumably the project's
    # args module or numpy.random) — confirm which RNG it actually seeds.
    seed(args.seed)
    python_random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    # Toy-mode knobs are hard-coded off here rather than exposed as CLI flags.
    args.toy_model = False
    args.toy_size = 1000
    return args
163,058 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Prints a notice only when the directory was absent beforehand, matching
    the original behaviour.
    """
    if not os.path.exists(path):
        # exist_ok=True closes the TOCTOU race where another process creates
        # the directory between the existence check and makedirs(); it also
        # creates intermediate parents, as before.
        os.makedirs(path, exist_ok=True)
        print("make new folder ", path)
163,059 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def get_opt(model, model_bert, fine_tune):
    """Build Adam optimizers for the decoder and (optionally) BERT.

    Args:
        model: the seq-to-SQL decoder; always optimized with ``args.lr``.
        model_bert: the BERT encoder; optimized with ``args.lr_bert`` only
            when *fine_tune* is truthy, otherwise left frozen.
        fine_tune: whether to create an optimizer for ``model_bert``.

    Returns:
        ``(opt, opt_bert)`` where ``opt_bert`` is ``None`` when not
        fine-tuning.

    Note: reads learning rates from the module-level ``args`` namespace.
    """
    # The original code built an identical `opt` in both branches; construct
    # it once.  Only parameters with requires_grad=True are optimized.
    opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=args.lr, weight_decay=0)
    if fine_tune:
        opt_bert = torch.optim.Adam(filter(lambda p: p.requires_grad, model_bert.parameters()),
                                    lr=args.lr_bert, weight_decay=0)
    else:
        opt_bert = None
    return opt, opt_bert
163,060 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def get_bert(BERT_PT_PATH, bert_type, do_lower_case, no_pretraining):
    """Load the HuggingFace config/tokenizer/model and move the model to
    the module-level ``device``.

    NOTE(review): the checkpoint is always read from the module-level
    ``args.bert_path``; the ``BERT_PT_PATH``, ``bert_type`` and
    ``no_pretraining`` parameters are accepted but unused — presumably kept
    for signature compatibility with an older loader.
    """
    bert_config = AutoConfig.from_pretrained(args.bert_path)
    tokenizer = AutoTokenizer.from_pretrained(args.bert_path, do_lower_case=do_lower_case)
    model_bert = AutoModel.from_pretrained(args.bert_path)
    # Embedding matrix must match the tokenizer's vocabulary size.
    model_bert.resize_token_embeddings(len(tokenizer))
    model_bert.to(device)
    print(f"BERT-type: {model_bert.config._name_or_path}")
    return model_bert, tokenizer, bert_config
def get_models(args, BERT_PT_PATH, trained=False, path_model_bert=None, path_model_amr=None, path_model=None):
    """Construct the Seq2SQL decoder plus its BERT encoder, optionally
    restoring trained weights from checkpoints.

    Args:
        args: parsed hyperparameter namespace (mutated: ``args.iS`` is set).
        BERT_PT_PATH: forwarded to get_bert (which currently ignores it).
        trained: when True, load weights from the checkpoint paths below.
        path_model_bert: checkpoint holding the 'model_bert' state dict.
        path_model_amr: only printed for logging; not loaded here.
        path_model: checkpoint holding the decoder 'model' state dict.

    Returns:
        ``(model, model_bert, tokenizer, bert_config)``.
    """
    # some constants
    agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
    # cond_ops = ['=', '>', '<', 'OP'] # do not know why 'OP' required. Hence,
    cond_ops = [">", "<", "==", "!=", "LIKE", "DESC"] # do not know why 'OP' required. Hence,
    dep_ops = ['null', 'scol', 'agg', 'wcol', 'val', 'op']
    print(f"EG: {args.EG}")
    print(f"Batch_size = {args.bS * args.accumulate_gradients}")
    print(f"BERT parameters:")
    print(f"learning rate: {args.lr_bert}")
    print(f"Fine-tune BERT: {args.fine_tune}")
    # Get BERT
    model_bert, tokenizer, bert_config = get_bert(BERT_PT_PATH, args.bert_type, args.do_lower_case,
                                                  args.no_pretraining)
    # Decoder input width: concatenation of the last N BERT layers.
    args.iS = bert_config.hidden_size * args.num_target_layers  # Seq-to-SQL input vector dimenstion
    # Get Seq-to-SQL
    n_cond_ops = len(cond_ops)
    n_agg_ops = len(agg_ops)
    print(f"Seq-to-SQL: the number of final BERT layers to be used: {args.num_target_layers}")
    print(f"Seq-to-SQL: the size of hidden dimension = {args.hS}")
    print(f"Seq-to-SQL: LSTM encoding layer size = {args.lS}")
    print(f"Seq-to-SQL: dropout rate = {args.dr}")
    print(f"Seq-to-SQL: learning rate = {args.lr}")
    model = Seq2SQL_v1(args.iS, args.hS, args.lS, args.dr, n_cond_ops, n_agg_ops)
    model = model.to(device)
    if trained:
        assert path_model_bert is not None
        assert path_model is not None
        print(".......")
        print("loading from ", path_model_bert, " and ", path_model, " and ", path_model_amr)
        print(".......")
        # The original code branched on torch.cuda.is_available() here, but
        # both branches were identical (map_location='cpu'); load once onto
        # CPU and let .to(device) move the weights to the active device.
        res = torch.load(path_model_bert, map_location='cpu')
        model_bert.load_state_dict(res['model_bert'])
        model_bert.to(device)
        res = torch.load(path_model, map_location='cpu')
        model.load_state_dict(res['model'])
        model.to(device)
    return model, model_bert, tokenizer, bert_config
163,061 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def get_data(path_wikisql, args):
    """Load the WikiSQL train/dev splits and wrap them in data loaders.

    Returns ``(train_data, train_table, dev_data, dev_table, train_loader,
    dev_loader)``; word-to-index tables and header tokenization are skipped.
    """
    loaded = load_wikisql(path_wikisql, args.toy_model, args.toy_size,
                          no_w2i=True, no_hs_tok=True)
    train_data, train_table, dev_data, dev_table = loaded[:4]
    # Only the training loader is shuffled.
    train_loader, dev_loader = get_loader_wikisql(train_data, dev_data,
                                                  args.bS, shuffle_train=True)
    return train_data, train_table, dev_data, dev_table, train_loader, dev_loader
163,062 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def train(train_loader, train_table, model, model_bert, opt, bert_config, tokenizer,
          max_seq_length, num_target_layers, accumulate_gradients=1, check_grad=True,
          st_pos=0, opt_bert=None, path_db=None, dset_name='train', logger=None, epochid=0, epoch_end=0):
    """Run one training epoch of the Seq2SQL model over ``train_loader``.

    Returns ``(acc, aux_out)`` where ``acc`` is
    ``[ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv,
    acc_lx, acc_x]`` and ``aux_out`` is always 1.

    NOTE(review): ``amr_loss``, ``check_grad`` and ``path_db`` are unused;
    ``cnt_x`` is never incremented, so ``acc_x`` (execution accuracy) is
    always 0 here — execution accuracy is presumably only computed at test
    time. ``logger`` must not be None (``logger.info`` is called per batch).
    """
    model.train()
    model_bert.train()
    amr_loss = 0
    ave_loss = 0
    cnt = 0 # count the # of examples
    cnt_sc = 0 # count the # of correct predictions of select column
    cnt_sa = 0 # of selectd aggregation
    cnt_wn = 0 # of where number
    cnt_wc = 0 # of where column
    cnt_wo = 0 # of where operator
    cnt_wv = 0 # of where-value
    cnt_wvi = 0 # of where-value index (on question tokens)
    cnt_lx = 0 # of logical form acc
    cnt_x = 0 # of execution acc
    start_time = time.time()
    for iB, t in enumerate(train_loader):
        sys.stdout.flush()
        cnt += len(t)
        # Skip ahead until the cumulative example count reaches st_pos
        # (resume support).  NOTE(review): this compares examples seen, not a
        # batch index — confirm that is the intended resume semantics.
        if cnt < st_pos:
            continue
        # Get fields
        # nlu : natural language utterance
        # nlu_t: tokenized nlu
        # sql_i: canonical form of SQL query
        # sql_q: full SQL query text. Not used.
        # sql_t: tokenized SQL query
        # tb : table
        # hs_t : tokenized headers. Not used.
        nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds = get_fields(t, train_table, no_hs_t=True, no_sql_t=True)
        g_sc, g_sa, g_wn, g_wc, g_wo, g_wv = get_g(sql_i)
        # get ground truth where-value index under CoreNLP tokenization scheme. It's done already on trainset.
        g_wvi_corenlp = get_g_wvi_corenlp(t)
        # wemb_n: natural language embedding
        # wemb_h: header embedding
        # l_n: token lengths of each question
        # l_hpu: header token lengths
        # l_hs: the number of columns (headers) of the tables.
        wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx = get_wemb_bert(
            bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
            num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
        # Re-map ground-truth where-value spans from CoreNLP tokens to
        # WordPiece token indices.
        g_wvi = get_g_wvi_bert_from_g_wvi_corenlp(t_to_tt_idx, g_wvi_corenlp)
        # Per-question token-level knowledge labels; values >= 5 are clamped
        # to 0, missing entries get an all-zero vector padded to max length.
        knowledge = []
        for k in t:
            if "bertindex_knowledge" in k:
                know = [0 if x >= 5 else x for x in k["bertindex_knowledge"]]
                knowledge.append(know)
            else:
                knowledge.append(max(l_n)*[0])
        # Same scheme for per-header knowledge labels.
        knowledge_header = []
        for k in t:
            if "header_knowledge" in k:
                know_h = [0 if x >= 5 else x for x in k["header_knowledge"]]
                knowledge_header.append(know_h)
            else:
                knowledge_header.append(max(l_hs) * [0])
        # score
        # Teacher forcing: ground-truth clause labels are fed to the decoder.
        s_sc, s_sa, s_wn, s_wc, s_wo, s_wv = model(
            wemb_n, l_n, wemb_h, l_hpu, l_hs,
            g_sc=g_sc, g_sa=g_sa, g_wn=g_wn, g_wc=g_wc, g_wvi=g_wvi,
            knowledge=knowledge, knowledge_header=knowledge_header)
        # Calculate loss & step
        loss = Loss_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi)
        loss_all = loss
        # Calculate gradient
        # Gradient-accumulation schedule: zero grads at the start of each
        # accumulation window, step at its end, accumulate in between.
        if iB % accumulate_gradients == 0: # mode
            # at start, perform zero_grad
            opt.zero_grad()
            if opt_bert:
                opt_bert.zero_grad()
            loss_all.backward()
            if accumulate_gradients == 1:
                opt.step()
                if opt_bert:
                    opt_bert.step()
        elif iB % accumulate_gradients == (accumulate_gradients - 1):
            # at the final, take step with accumulated graident
            loss_all.backward()
            opt.step()
            if opt_bert:
                opt_bert.step()
        else:
            # at intermediate stage, just accumulates the gradients
            loss_all.backward()
        # Prediction
        pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, )
        pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
        # Sort pr_wc:
        # Sort pr_wc when training the model as pr_wo and pr_wvi are predicted using ground-truth where-column (g_wc)
        # In case of 'dev' or 'test', it is not necessary as the ground-truth is not used during inference.
        pr_wc_sorted = sort_pr_wc(pr_wc, g_wc)
        pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc_sorted, pr_wo, pr_wv_str, nlu)
        # Calculate per-example correctness lists for each SQL clause.
        cnt_sc1_list, cnt_sa1_list, cnt_wn1_list, \
        cnt_wc1_list, cnt_wo1_list, \
        cnt_wvi1_list, cnt_wv1_list = get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi,
                                                      pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                                                      sql_i, pr_sql_i,
                                                      mode='train')
        # lx stands for logical form accuracy
        cnt_lx1_list = get_cnt_lx_list(cnt_sc1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list,
                                       cnt_wo1_list, cnt_wv1_list)
        # statistics
        ave_loss += loss.item()
        print(iB, "/", len(train_loader), "\tUsed time:", time.time() - start_time, "\tloss:", loss.item())
        logger.info('{TRAIN} [epoch=%d/%d] [batch=%d/%d] used time: %.4f, loss: %.4f' % (
            epochid, epoch_end, iB, len(train_loader), time.time() - start_time, loss.item()))
        # count
        cnt_sc += sum(cnt_sc1_list)
        cnt_sa += sum(cnt_sa1_list)
        cnt_wn += sum(cnt_wn1_list)
        cnt_wc += sum(cnt_wc1_list)
        cnt_wo += sum(cnt_wo1_list)
        cnt_wvi += sum(cnt_wvi1_list)
        cnt_wv += sum(cnt_wv1_list)
        cnt_lx += sum(cnt_lx1_list)
    # Epoch-level averages over all examples seen.
    ave_loss /= cnt
    acc_sc = cnt_sc / cnt
    acc_sa = cnt_sa / cnt
    acc_wn = cnt_wn / cnt
    acc_wc = cnt_wc / cnt
    acc_wo = cnt_wo / cnt
    acc_wvi = cnt_wvi / cnt
    acc_wv = cnt_wv / cnt
    acc_lx = cnt_lx / cnt
    acc_x = cnt_x / cnt
    acc = [ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x]
    aux_out = 1
    return acc, aux_out
163,063 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def tokenize_corenlp(client, nlu1):
    """Tokenize *nlu1* with a CoreNLP client, returning the original
    (surface) text of every token, in order across all sentences.

    Expects ``client.annotate(nlu1)`` to yield sentences that are themselves
    iterables of tokens carrying an ``originalText`` attribute.
    """
    tokens = []
    for sentence in client.annotate(nlu1):
        tokens.extend(tok.originalText for tok in sentence)
    return tokens
163,064 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def tokenize_corenlp_direct_version(client, nlu1):
    """Tokenize via the protobuf-style CoreNLP API (`.sentence` / `.token` fields)."""
    tokens = []
    for sent in client.annotate(nlu1).sentence:
        tokens.extend(tok.originalText for tok in sent.token)
    return tokens
163,065 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def print_result(epoch, acc, dname, epoch_end=0, logger=None):
    """Print (and optionally log) per-component accuracies for one evaluation.

    :param epoch: current epoch index
    :param acc: 10-tuple (ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo,
                acc_wvi, acc_wv, acc_lx, acc_x)
    :param dname: dataset name used as the report header
    :param epoch_end: total number of epochs (for the "epoch/epoch_end" display)
    :param logger: optional logging.Logger

    Fix: the original called `logger.info` unconditionally, so the default
    `logger=None` raised AttributeError; logging is now skipped when no
    logger is supplied.
    """
    ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x = acc

    print(f'{dname} results ------------')
    print(
        f" Epoch: {epoch}, ave loss: {ave_loss}, acc_sc: {acc_sc:.3f}, acc_sa: {acc_sa:.3f}, acc_wn: {acc_wn:.3f}, \
        acc_wc: {acc_wc:.3f}, acc_wo: {acc_wo:.3f}, acc_wvi: {acc_wvi:.3f}, acc_wv: {acc_wv:.3f}, acc_lx: {acc_lx:.3f}, acc_x: {acc_x:.3f}"
    )
    if logger is not None:
        logger.info('{%s} [epoch=%d/%d] ave loss: %.4f, acc sc: %.3f, acc sa: %.3f, '
                    'acc wc: %.3f, acc wo: %.3f, acc wv: %.3f, acc lx: %.3f' % (
                        dname, epoch, epoch_end, ave_loss, acc_sc, acc_sa,
                        acc_wc, acc_wo, acc_wv, acc_lx))
163,066 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def infer_get_data(path_wikisql, mode, args):
    """Load `<mode>_tok.json` examples plus `table.json` for inference.

    :param path_wikisql: directory containing the data files
    :param mode: split name; the example file is `<mode>_tok.json`
    :param args: namespace providing the batch size `args.bS`
    :return: (dev_table, data_dev, dev_loader) where dev_table maps
             tablename -> table dict and dev_loader batches the raw example
             dicts without collation, in file order.

    Fix: both input files were opened without being closed; they are now
    read inside `with` blocks.
    """
    path_sql = os.path.join(path_wikisql, mode + '_tok.json')
    path_table = os.path.join(path_wikisql, 'table.json')
    print("load path_sql: ", path_sql)
    print("load path_table:", path_table)

    with open(path_sql) as f:
        data_dev = [json.loads(line.strip()) for line in f]

    dev_table = {}
    with open(path_table) as f:
        for line in f:
            t1 = json.loads(line.strip())
            t1['id'] = t1['tablename']
            dev_table[t1['id']] = t1

    dev_loader = torch.utils.data.DataLoader(
        batch_size=args.bS,
        dataset=data_dev,
        shuffle=False,
        num_workers=4,
        collate_fn=lambda x: x  # now dictionary values are not merged!
    )

    return dev_table, data_dev, dev_loader
163,067 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def infer_test(data_loader, data_table, model, model_bert, bert_config, tokenizer,
               max_seq_length,
               num_target_layers, detail=False, st_pos=0, cnt_tot=1, EG=False, beam_size=4,
               path_db=None, dset_name='test'):
    """Run inference over a data loader and collect predicted SQL per example.

    :return: list of {"query": pred_sql_dict, "table_id": ..., "nlu": ...}
             dicts, for the official evaluation script.

    `detail`, `st_pos`, `cnt_tot`, `EG` and `beam_size` are accepted for
    interface compatibility but unused in this code path.

    Fix: removed the stray `wfin = open("sxron.json", 'w')` — the handle was
    never written to or closed (a pure resource leak).
    NOTE(review): `engine` is also never used here; it is kept because
    constructing DBEngine opens the split's database file — confirm whether
    that side effect is relied upon.
    """
    model.eval()
    model_bert.eval()

    engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
    results = []
    for iB, t in tqdm.tqdm(list(enumerate(data_loader))):
        # Get fields
        nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds = get_fields(t, data_table, no_hs_t=True, no_sql_t=True)

        wemb_n, wemb_h, l_n, l_hpu, l_hs, \
        nlu_tt, t_to_tt_idx, tt_to_t_idx \
            = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
                            num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)

        # Per-token question knowledge; label values >= 5 are zeroed out.
        knowledge = []
        for k in t:
            if "bertindex_knowledge" in k:
                know = [0 if x >= 5 else x for x in k["bertindex_knowledge"]]
                knowledge.append(know)
            else:
                knowledge.append(max(l_n) * [0])

        # Per-header knowledge, same zeroing rule.
        knowledge_header = []
        for k in t:
            if "header_knowledge" in k:
                know_h = [0 if x >= 5 else x for x in k["header_knowledge"]]
                knowledge_header.append(know_h)
            else:
                knowledge_header.append(max(l_hs) * [0])

        # Drop the last header of every example and re-select the flattened
        # header embeddings accordingly.
        l_hs_new = []
        l_hpu_new = []
        select_idx = []
        sum_l_h = 0
        for l_h in l_hs:
            l_hs_new.append(l_h - 1)
            l_hpu_new += l_hpu[sum_l_h: sum_l_h + l_h - 1]
            select_idx += range(sum_l_h, sum_l_h + l_h - 1, 1)
            sum_l_h += l_h
        wemb_h_new = torch.index_select(wemb_h, 0, torch.tensor(select_idx).to(device))

        # Score all heads, then decode the predictions into SQL structures.
        s_sc, s_sa, s_wn, s_wc, s_wo, s_wv = model(
            wemb_n, l_n, wemb_h_new, l_hpu_new, l_hs_new,
            knowledge=knowledge,
            knowledge_header=knowledge_header)

        pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, )
        pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
        pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu)
        pr_sql_q = generate_sql_q(pr_sql_i, tb)

        # Saving for the official evaluation later.
        for b, pr_sql_i1 in enumerate(pr_sql_i):
            results1 = {}
            results1["query"] = pr_sql_i1
            results1["table_id"] = tb[b]["id"]
            results1["nlu"] = nlu[b]
            results.append(results1)
    return results
163,068 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def infer_print_result(epoch, acc, dname):
    """Write a one-shot accuracy report for inference to ./result.txt.

    :param epoch: epoch index shown in the report
    :param acc: 14-tuple (ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo,
                acc_wvi, acc_wv, acc_h, acc_d, acc_ah, acc_ad, acc_lx, acc_x)
    :param dname: dataset name used as the report header

    Fix: the output file is now opened with a context manager; the original
    never closed the handle, so the report could remain unflushed.
    """
    ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_h, acc_d, acc_ah, acc_ad, acc_lx, acc_x = acc
    with open("./result.txt", 'w') as f1:
        print(f'{dname} results ------------', file=f1)
        print(
            f" Epoch: {epoch}, ave loss: {ave_loss}, acc_sc: {acc_sc:.3f}, acc_sa: {acc_sa:.3f}, acc_wn: {acc_wn:.3f}, \
        acc_wc: {acc_wc:.3f}, acc_wo: {acc_wo:.3f}, acc_wvi: {acc_wvi:.3f}, acc_wv: {acc_wv:.3f}, acc_h: {acc_h:.3f}, \
        acc_d: {acc_d:.3f}, acc_ah: {acc_ah:.3f}, acc_ad: {acc_ad:.3f}, acc_lx: {acc_lx:.3f}, acc_x: {acc_x:.3f}"
            , file=f1)
163,069 | import os
import argparse
import logging
import pathlib
import tqdm
import random as python_random
from transformers import AutoModel, AutoConfig, AutoTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from sqlova.model.nl2sql.wikisql_models import *
from sqlova.args import *
def convert_string(pr_wvi, nlu, nlu_tt):
    """Map predicted where-value token spans back to substrings of the raw question.

    :param pr_wvi: per-example list of [start, end] indices into the
                   WordPiece tokens ``nlu_tt``
    :param nlu: raw question strings
    :param nlu_tt: WordPiece tokens of each question
    :return: per-example list of recovered value strings

    The happy path builds ``conv_dict``, mapping each WordPiece index to a
    [start, end) character span of the raw string, by walking both sequences
    in lockstep; if the alignment ever fails (``convflag`` False) it falls
    back to naively joining the WordPieces.
    NOTE(review): alignment compares lowercased single characters, so it
    presumably assumes the tokenizer lowercases — confirm against the
    tokenizer configuration.
    """
    convs = []
    for b, nlu1 in enumerate(nlu):
        conv_dict = {}
        nlu_tt1 = nlu_tt[b]
        idx = 0  # character cursor into the raw question string
        convflag = True
        for i, ntok in enumerate(nlu_tt1):
            if idx >= len(nlu1):
                # Ran past the end of the raw string: alignment failed.
                convflag = False
                break

            if ntok.startswith('##'):
                ntok = ntok.replace('##', '')

            tok = nlu1[idx: idx + 1].lower()
            if ntok == tok:
                # Single-character piece matches the current character.
                conv_dict[i] = [idx, idx + 1]
                idx += 1
            elif ntok == '#':
                # Bare '#' piece consumes no characters (empty span).
                conv_dict[i] = [idx, idx]
            elif ntok == '[UNK]':
                # Unknown piece: consume characters until the next piece's
                # first character is found (unless the next piece is also UNK).
                conv_dict[i] = [idx, idx + 1]
                j = i + 1
                idx += 1
                if idx < len(nlu1) and j < len(nlu_tt1) and nlu_tt1[j] != '[UNK]':
                    while idx < len(nlu1):
                        val = nlu1[idx: idx + 1].lower()
                        if nlu_tt1[j].startswith(val):
                            break
                        idx += 1
                    conv_dict[i][1] = idx
            elif tok in ntok:
                # Multi-character piece: accumulate raw characters until the
                # whole piece has been matched.
                startid = idx
                idx += 1
                while idx < len(nlu1):
                    tok += nlu1[idx: idx + 1].lower()
                    if ntok == tok:
                        conv_dict[i] = [startid, idx + 1]
                        break
                    idx += 1
                idx += 1
            else:
                convflag = False

        conv = []
        if convflag:
            # Slice value substrings out of the raw question via the alignment.
            for pr_wvi1 in pr_wvi[b]:
                s1, e1 = conv_dict[pr_wvi1[0]]
                s2, e2 = conv_dict[pr_wvi1[1]]
                newidx = pr_wvi1[1]
                # Skip empty '#' spans at the end of the predicted span.
                while newidx + 1 < len(nlu_tt1) and s2 == e2 and nlu_tt1[newidx] == '#':
                    newidx += 1
                    s2, e2 = conv_dict[newidx]
                # Extend over a trailing '##' continuation piece, if any.
                if newidx + 1 < len(nlu_tt1) and nlu_tt1[newidx + 1].startswith('##'):
                    s2, e2 = conv_dict[newidx + 1]

                phrase = nlu1[s1: e2]
                conv.append(phrase)
        else:
            # Fallback: join the WordPieces directly, dropping '##' markers.
            for pr_wvi1 in pr_wvi[b]:
                phrase = "".join(nlu_tt1[pr_wvi1[0]: pr_wvi1[1] + 1]).replace('##', '')
                conv.append(phrase)
        convs.append(conv)
    return convs
163,070 | from sqlova.args import *
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
def Loss_sa(s_sa, g_sa):
    """Cross-entropy loss for the aggregation head.

    A 3x class-weighted variant for non-NONE aggregations was tried
    previously and is intentionally disabled.
    """
    return F.cross_entropy(s_sa, torch.tensor(g_sa).to(device))
The provided code snippet includes necessary dependencies for implementing the `Loss_sw_se_agg` function. Write a Python function `def Loss_sw_se_agg(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi)` to solve the following problem:
:param s_wv: score [ B, n_conds, T, score] :param g_wn: [ B ] :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] :return:
Here is the function:
def Loss_sw_se_agg(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi):
    """
    Aggregation-only training loss.

    Of all the sub-losses, only the select-aggregation term is active; the
    other heads (select-column, where-number/column/operator/value) are
    intentionally disabled in this variant.

    :param s_wv: score [ B, n_conds, T, score]
    :param g_wn: [ B ]
    :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
    :return: scalar loss from the aggregation head only
    """
    return Loss_sa(s_sa, g_sa)
163,071 | from sqlova.args import *
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
def Loss_sc(s_sc, g_sc):
    """Cross-entropy loss for the select-column head."""
    return F.cross_entropy(s_sc, torch.tensor(g_sc).to(device))
def Loss_sa(s_sa, g_sa):
    """Cross-entropy loss for the select-aggregation head (unweighted; a
    class-weighted variant was previously tried and left disabled)."""
    target = torch.tensor(g_sa).to(device)
    return F.cross_entropy(s_sa, target)
def Loss_wn(s_wn, g_wn):
    """Cross-entropy loss for the where-clause-count head."""
    return F.cross_entropy(s_wn, torch.tensor(g_wn).to(device))
def Loss_wc(s_wc, g_wc):
    """Multi-label BCE loss for the where-column head.

    Builds a {0,1} target matrix marking the gold where-columns, applies a
    sigmoid to the scores and compares with binary cross-entropy.

    Fix: `F.sigmoid` is deprecated in PyTorch; `torch.sigmoid` is the
    documented, numerically identical replacement.
    """
    # Construct index matrix: im[b, c] == 1 iff column c is a gold where-column.
    bS, max_h_len = s_wc.shape
    im = torch.zeros([bS, max_h_len]).to(device)
    for b, g_wc1 in enumerate(g_wc):
        for g_wc11 in g_wc1:
            im[b, g_wc11] = 1.0
    # Construct prob.
    p = torch.sigmoid(s_wc)
    loss = F.binary_cross_entropy(p, im)
    return loss
def Loss_wo(s_wo, g_wn, g_wo):
    """Cross-entropy loss for the where-operator head, summed over batch
    entries that actually have where clauses (g_wn[b] > 0)."""
    total = 0
    for b, n_cond in enumerate(g_wn):
        if n_cond == 0:
            continue
        total += F.cross_entropy(s_wo[b][:n_cond], torch.tensor(g_wo[b]).to(device))
    return total
def Loss_wv_se(s_wv, g_wn, g_wvi):
    """
    Start/end pointer loss for where-values.

    s_wv: [bS, 4, mL, 2], 4 stands for maximum # of conditions; the last
          dim holds start & end logits.
    g_wvi: [ [1, 3, 2], [4, 3] ] style per-example (start, end) index pairs,
           one per condition. Examples with no where clause are skipped.
    """
    total = 0
    for b, g_wvi1 in enumerate(g_wvi):
        n_cond = g_wn[b]
        if n_cond == 0:
            continue
        spans = torch.tensor(g_wvi1).to(device)
        starts = spans[:, 0]
        ends = spans[:, 1]
        # Loss from the start positions, then the end positions.
        total += F.cross_entropy(s_wv[b, :n_cond, :, 0], starts)
        total += F.cross_entropy(s_wv[b, :n_cond, :, 1], ends)
    return total
The provided code snippet includes necessary dependencies for implementing the `Loss_sw_se` function. Write a Python function `def Loss_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi)` to solve the following problem:
:param s_wv: score [ B, n_conds, T, score] :param g_wn: [ B ] :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] :return:
Here is the function:
def Loss_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi):
    """
    Full training loss: the sum of every head's loss.

    :param s_wv: score [ B, n_conds, T, score]
    :param g_wn: [ B ]
    :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
    :return: scalar total loss
    """
    return (Loss_sc(s_sc, g_sc)
            + Loss_sa(s_sa, g_sa)
            + Loss_wn(s_wn, g_wn)
            + Loss_wc(s_wc, g_wc)
            + Loss_wo(s_wo, g_wn, g_wo)
            + Loss_wv_se(s_wv, g_wn, g_wvi))
163,072 | from sqlova.args import *
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
The provided code snippet includes necessary dependencies for implementing the `Loss_s2s` function. Write a Python function `def Loss_s2s(score, g_pnt_idxs)` to solve the following problem:
score = [B, T, max_seq_length]
Here is the function:
def Loss_s2s(score, g_pnt_idxs):
    """
    score = [B, T, max_seq_length]

    Teacher-forced pointer loss for the WHERE-string decoder: position t is
    trained to predict the gold pointer at t+1 (one-step shift).
    """
    total = 0
    for b, gold in enumerate(g_pnt_idxs):
        steps = len(gold) - 1
        total += F.cross_entropy(score[b, :steps], torch.tensor(gold[1:]).to(device))  # +1 shift.
    return total
163,074 | import os, json
import random as python_random
from matplotlib.pylab import *
The provided code snippet includes necessary dependencies for implementing the `ensure_dir` function. Write a Python function `def ensure_dir(my_path)` to solve the following problem:
Generate directory if not exists
Here is the function:
def ensure_dir(my_path):
    """Create directory `my_path` (including parents) if it does not exist.

    Fix: uses `exist_ok=True` so the original check-then-create sequence
    (`os.path.exists` followed by `os.makedirs`) cannot raise
    FileExistsError when two processes create the directory concurrently.
    """
    os.makedirs(my_path, exist_ok=True)
163,075 | import os, json
import random as python_random
from matplotlib.pylab import *
def topk_multi_dim(tensor, n_topk=1, batch_exist=True):
    """Return multi-dimensional indices of the top-k entries of a tensor.

    With batch_exist=True the first dimension is treated as a batch axis and
    the result is [B][k][ndim]; otherwise the whole tensor is one item and
    the result is [k][ndim].
    """
    def _topk_coords(t, flat_idx_to_np):
        # Flatten, take top-k, then unravel each flat index back into
        # per-dimension coordinates.
        flat = t.reshape(-1)
        _, flat_idx = flat.topk(k=n_topk)
        coords_per_dim = unravel_index(flat_idx_to_np(flat_idx), t.shape)
        return [[dim_coords[i_beam] for dim_coords in coords_per_dim]
                for i_beam in range(n_topk)]

    if batch_exist:
        return [_topk_coords(t1, lambda ix: ix.cpu().numpy()) for t1 in tensor]
    return _topk_coords(tensor, lambda ix: ix.numpy())
163,076 | import os, json
import random as python_random
from matplotlib.pylab import *
import random
random.seed(33)
def load_jsonl(path_file, toy_data=False, toy_size=4, shuffle=False, seed=1):
    """Read a JSON-lines file into a list of objects.

    With toy_data and no shuffle, reading stops after toy_size records.
    With toy_data and shuffle, the whole file is read first, shuffled with
    the fixed seed, and then truncated to toy_size.
    """
    records = []
    with open(path_file, "r", encoding="utf-8") as fh:
        for line_no, raw in enumerate(fh):
            if toy_data and line_no >= toy_size and (not shuffle):
                break
            records.append(json.loads(raw.strip()))

    if shuffle and toy_data:
        print(
            f"If the toy-data is used, the whole data loaded first and then shuffled before get the first {toy_size} data")
        python_random.Random(seed).shuffle(records)  # fixed seed for reproducibility
        records = records[:toy_size]

    return records
163,077 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False):
    """Load one WikiSQL-style split plus the shared table file.

    Each example's list-valued 'sel'/'agg' are unwrapped to scalars,
    zero-filled struct placeholders are attached, and 'query' aliases the
    (mutated) 'sql' dict. Returns (data, table) where table maps
    tablename -> table dict. `no_hs_tok` is accepted for interface
    compatibility and unused here.
    """
    if aug:
        mode = f"aug.{mode}"
        print('Augmented data is loaded!')

    path_sql = os.path.join(path_wikisql, mode + '_tok.json')
    path_table = os.path.join(path_wikisql, 'table.json')
    print("load path_sql: ", path_sql)
    print("load path_table:", path_table)

    data = []
    with open(path_sql) as fh:
        for idx, line in enumerate(fh):
            if toy_model and idx >= toy_size:
                break
            rec = json.loads(line.strip())
            rec['sql']['sel'] = rec['sql']['sel'][0]
            rec['sql']['agg'] = rec['sql']['agg'][0]
            rec['struct_question'] = [0] * len(rec['question_tok'])
            rec['struct_label'] = [0] * len(rec['question_tok'])
            rec['query'] = rec['sql']
            data.append(rec)

    table = {}
    with open(path_table) as fh:
        for idx, line in enumerate(fh):
            # NB: strict '>' (one extra table) kept exactly as in the original.
            if toy_model and idx > toy_size:
                break
            rec = json.loads(line.strip())
            rec['id'] = rec['tablename']
            table[rec['id']] = rec

    return data, table
def load_w2i_wemb(path_wikisql, bert=False):
    """Load the pre-made embedding subset: (word->index dict, embedding matrix).

    The `bert` flag selects the BERT-vocabulary variant of both files.
    """
    suffix = '_bert' if bert else ''
    with open(os.path.join(path_wikisql, f'w2i{suffix}.json'), 'r') as f_w2i:
        w2i = json.load(f_w2i)
    wemb = load(os.path.join(path_wikisql, f'wemb{suffix}.npy'), )
    return w2i, wemb
def load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False):
    """Load the train and dev splits, plus (unless no_w2i) the word
    embeddings, returning
    (train_data, train_table, dev_data, dev_table, w2i, wemb)."""
    train_data, train_table = load_wikisql_data(
        path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size,
        no_hs_tok=no_hs_tok, aug=aug)
    dev_data, dev_table = load_wikisql_data(
        path_wikisql, mode='dev', toy_model=toy_model, toy_size=toy_size,
        no_hs_tok=no_hs_tok)

    if no_w2i:
        w2i = wemb = None
    else:
        w2i, wemb = load_w2i_wemb(path_wikisql, bert)

    return train_data, train_table, dev_data, dev_table, w2i, wemb
163,078 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_loader_wikisql(data_train, data_dev, bS, shuffle_train=True, shuffle_dev=False):
    """Wrap train/dev example lists in DataLoaders whose identity collate
    keeps each batch as a raw list of dicts."""
    def _loader(dataset, shuffle):
        return torch.utils.data.DataLoader(
            batch_size=bS,
            dataset=dataset,
            shuffle=shuffle,
            num_workers=4,
            collate_fn=lambda x: x  # now dictionary values are not merged!
        )

    return _loader(data_train, shuffle_train), _loader(data_dev, shuffle_dev)
163,079 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_fields_1(t1, tables, no_hs_t=False, no_sql_t=False):
    """Unpack one example dict plus its table into the field tuple the
    models consume.

    Header tokens are omitted ([]) when no_hs_t is set and query tokens
    (None) when no_sql_t is set; 'query' falls back to the 'sql' dict when
    the example carries no separate query.
    Returns (nlu, nlu_t, tid, sql_i, sql_q, sql_t, tb, hs_t, hs).
    """
    tb1 = tables[t1['table_id']]
    sql_t1 = None if no_sql_t else t1['query_tok']
    hs_t1 = [] if no_hs_t else tb1['header_tok']
    return (t1['question'], t1['question_tok'], t1['table_id'], t1['sql'],
            t1.get('query', t1['sql']), sql_t1, tb1, hs_t1, tb1['header'])
def get_fields(t1s, tables, no_hs_t=False, no_sql_t=False):
    """Batch version of get_fields_1: unpack every example into parallel lists.

    Fix: the original branched on no_hs_t with two byte-identical calls to
    get_fields_1; the dead branch is removed.
    Returns (nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs) — note that
    tid is collected internally but, as before, not returned.
    """
    nlu, nlu_t, tid, sql_i, sql_q, sql_t, tb, hs_t, hs = [], [], [], [], [], [], [], [], []
    for t1 in t1s:
        nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1 = get_fields_1(t1, tables, no_hs_t, no_sql_t)
        nlu.append(nlu1)
        nlu_t.append(nlu_t1)
        tid.append(tid1)
        sql_i.append(sql_i1)
        sql_q.append(sql_q1)
        sql_t.append(sql_t1)
        tb.append(tb1)
        hs_t.append(hs_t1)
        hs.append(hs1)
    return nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs
163,080 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def words_to_idx(words, w2i, no_BE=False):
    """
    Map a batch of token lists to padded index tensors.

    Input: [ ['I', 'am', 'hero'],
             ['You', 'are', 'geneus'] ]
    Output: (w2i_l, l) where
      w2i_l = [B, max_seq_len] long tensor, zero-padded when a word is not
              available (treated as <UNK>),
      l     = [B] true sequence lengths.
    """
    bS = len(words)
    lengths = torch.zeros(bS, dtype=torch.long).to(device)  # per-sequence lengths
    per_seq_idxs = []
    for b, words1 in enumerate(words):
        idxs1, len1 = word_to_idx1(words1, w2i, no_BE)
        per_seq_idxs.append(idxs1)
        lengths[b] = len1

    # Pack the ragged index lists into one zero-padded tensor.
    padded = torch.zeros([bS, int(max(lengths))], dtype=torch.long).to(device)
    for b in range(bS):
        padded[b, :lengths[b]] = torch.LongTensor(per_seq_idxs[b]).to(device)

    return padded, lengths
The provided code snippet includes necessary dependencies for implementing the `hs_to_idx` function. Write a Python function `def hs_to_idx(hs_t, w2i, no_BE=False)` to solve the following problem:
Zero-padded when a word is not available (treated as <UNK>). Treat each "header tokens" list as if it were an NL-utterance token sequence.
Here is the function:
def hs_to_idx(hs_t, w2i, no_BE=False):
    """Index table headers by treating each header's token list as its own
    pseudo-utterance.

    Unknown words are zero-padded (treated as <UNK>).
    Returns (w2i_hpu, l_hpu, l_hs): the flattened per-header index tensor
    and lengths, plus the number of headers in each example.
    """
    header_utterances = []  # one entry per header, across the whole batch
    l_hs = []
    for headers_for_example in hs_t:
        header_utterances.extend(headers_for_example)
        l_hs.append(len(headers_for_example))

    w2i_hpu, l_hpu = words_to_idx(header_utterances, w2i, no_BE=no_BE)
    return w2i_hpu, l_hpu, l_hs
163,081 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False):
    # NOTE(review): the visible body only binds a local `device` and falls
    # through (returning None), yet callers such as encode_hpu unpack three
    # return values from it — the real LSTM-encoding body appears to have
    # been lost during extraction. TODO: restore/verify against upstream.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs):
    """Encode header pseudo-utterances and re-pack them per example.

    Returns wenc_hs of shape [B_NLq, max(l_hs), hidden], where row i holds
    the last-step encodings of example i's headers.
    """
    wenc_hpu, hout, cout = encode(lstm,
                                  wemb_hpu,
                                  l_hpu,
                                  return_hidden=True,
                                  hc0=None,
                                  last_only=True)
    wenc_hpu = wenc_hpu.squeeze(1)
    hS = wenc_hpu.size(-1)

    wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS).to(device)

    # Re-pack flat header encodings according to batch:
    # ret = [B_NLq, max_len_headers_all, dim_lstm]
    offset = 0
    for row, n_headers in enumerate(l_hs):
        wenc_hs[row, :n_headers] = wenc_hpu[offset:offset + n_headers]
        offset += n_headers

    return wenc_hs
163,082 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_wc1(conds):
    """Extract the column index (element 0) from each [wc, wo, wv] condition."""
    return [cond[0] for cond in conds]
def get_wo1(conds):
    """Extract the operator (element 1) from each [wc, wo, wv] condition."""
    return [cond[1] for cond in conds]
def get_wv1(conds):
    """Extract the value (element 3) from each condition.

    NOTE: conditions in this codebase carry four elements; index 3 — not
    index 2 — holds the value here.
    """
    return [cond[3] for cond in conds]
The provided code snippet includes necessary dependencies for implementing the `get_g` function. Write a Python function `def get_g(sql_i)` to solve the following problem:
for backward compatibility, separated with get_g
Here is the function:
def get_g(sql_i):
    """ for backward compatibility, separated with get_g"""
    g_sc, g_sa, g_wn, g_wc, g_wo, g_wv = [], [], [], [], [], []
    for query in sql_i:
        g_sc.append(query["sel"])
        g_sa.append(query["agg"])
        conds = query['conds']
        if query["agg"] < 0:
            # Negative aggregation codes are treated as malformed gold queries.
            raise EnvironmentError
        g_wn.append(len(conds))
        g_wc.append(get_wc1(conds))
        g_wo.append(get_wo1(conds))
        g_wv.append(get_wv1(conds))
    return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv
163,083 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_g_wvi_corenlp(t):
    """Collect each example's gold where-value index spans (CoreNLP tokens)."""
    return [example['wvi_corenlp'] for example in t]
163,084 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb):
    """Grow the GloVe subset (w2i/wemb) with every header token of every table.

    w2i and wemb are mutated in place by update_w2i_wemb; the updated
    (idx_w2i, n_total) counters are returned. Slash-containing headers such
    as 'state/territory' are kept as single tokens (unlike original SQLNet,
    which split on '/').
    """
    for table_id, table_contents in tables.items():
        # header_tok: e.g. [ ['state/territory'], ['current', 'slogan'], ... ]
        for header_tokens in table_contents['header_tok']:
            for token in header_tokens:
                idx_w2i, n_total = update_w2i_wemb(token, wv, idx_w2i, n_total, w2i, wemb)
    return idx_w2i, n_total
def generate_w2i_wemb(train_data, wv, idx_w2i, n_total, w2i, wemb):
    """Grow the GloVe subset with every question token of the dataset.

    w2i and wemb are mutated in place; the updated (idx_w2i, n_total)
    counters are returned. n_total is additionally bumped once per example,
    mirroring the original bookkeeping.
    """
    for example in train_data:
        # Question tokens only; '?' is already absent from question_tok.
        for word in example['question_tok']:
            idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)
        n_total += 1
    return idx_w2i, n_total
def make_w2i_wemb(args, path_save_w2i_wemb, wv, data_train, data_dev, data_test, table_train, table_dev, table_test):
    """Build and persist the GloVe subset covering all splits and tables.

    Writes w2i.json and wemb.npy into path_save_w2i_wemb and returns
    (w2i, wemb). Embedding vectors are 300-dimensional; slots 0-2 are the
    zero vectors for the <UNK>/<BEG>/<END> placeholders. `args` is accepted
    for interface compatibility and unused.
    """
    w2i = {'<UNK>': 0, '<BEG>': 1, '<END>': 2}  # used when embedding NL queries
    idx_w2i = 2
    n_total = 3
    wemb = [np.zeros(300, dtype=np.float32) for _ in range(3)]

    for split, tables in ((data_train, table_train),
                          (data_dev, table_dev),
                          (data_test, table_test)):
        idx_w2i, n_total = generate_w2i_wemb(split, wv, idx_w2i, n_total, w2i, wemb)
        idx_w2i, n_total = generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb)

    with open(os.path.join(path_save_w2i_wemb, 'w2i.json'), 'w') as f_w2i:
        json.dump(w2i, f_w2i)
    wemb = np.stack(wemb, axis=0)
    np.save(os.path.join(path_save_w2i_wemb, 'wemb.npy'), wemb)

    return w2i, wemb
163,085 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb):
    """Register `word` in the vocabulary subset if the word-vector table knows it.

    Follows SQLNet's bookkeeping (used inside generate_w2i_wemb): a word
    present in wv but not yet in w2i gets the next index, and its vector is
    appended to wemb; w2i/wemb are mutated in place. n_total counts every
    call regardless of whether the word was added.
    Returns the updated (idx_w2i, n_total).
    """
    if (word in wv) and (word not in w2i):
        idx_w2i += 1
        w2i[word] = idx_w2i
        wemb.append(wv[word])
    n_total += 1
    return idx_w2i, n_total
The provided code snippet includes necessary dependencies for implementing the `generate_w2i_wemb_e2k_headers` function. Write a Python function `def generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb)` to solve the following problem:
Generate subset of TAPI from english-to-korean dict of table headers etc.. update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables. To do 1. What should we do with the numeric? Current version do not treat them specially. But this would be modified later so that we can use tags.
Here is the function:
def generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb):
    """Grow the TAPI subset from english-to-korean dicts of table headers.

    Every token list in every table's e2k mapping is folded into the
    vocabulary via update_w2i_wemb (w2i/wemb are mutated in place); n_total
    is additionally bumped once per token list. Numeric tokens receive no
    special treatment yet — tags may replace them later.
    Returns the updated (idx_w2i, n_total).
    """
    for table_name, e2k_dict in e2k_dicts.items():
        for word_tokens in e2k_dict.values():
            for word in word_tokens:
                idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)
            n_total += 1
    return idx_w2i, n_total
163,086 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def tokenize_nlu1(tokenizer, nlu1):
    """Run the (WordPiece) tokenizer over one natural-language utterance."""
    return tokenizer.tokenize(nlu1)
163,087 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def tokenize_hds1(tokenizer, hds1):
    """Tokenize each header string; returns one sub-token list per header.

    Fix: the original built `hds_all_tok` but fell off the end without a
    return statement, so callers always received None.
    """
    hds_all_tok = []
    for hds11 in hds1:
        sub_tok = tokenizer.tokenize(hds11)
        hds_all_tok.append(sub_tok)
    return hds_all_tok
163,088 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def gen_l_hpu(i_hds):
    """
    Flatten per-example header spans into a list of header lengths, treating
    every column as its own pseudo-utterance (batch size becomes
    #columns * #examples).
    i_hds example: [(17, 18), (19, 21), (22, 23), ...] per batch element.
    """
    return [end - start for spans in i_hds for (start, end) in spans]
def generate_inputs_s2s(tokenizer, nlu1_tt, hds1, sql_vocab1):
    """
    Build the BERT input  [CLS] sql_vocab [SEP] question [SEP] headers,
    keeping the SQL vocabulary at a fixed position right after [CLS].

    Returns (tokens, segment_ids, i_sql_vocab, i_nlu, i_hds) where each
    i_* entry is a (start, end) token-index span used later to retrieve the
    corresponding contextual vectors.
    """
    tokens = [tokenizer.cls_token]
    segment_ids = []

    def _append_segmented(phrases, spans):
        # Tokenize each phrase, record its span, and separate phrases with
        # [SEP]; inner separators get segment id 0, the final one gets 1.
        last = len(phrases) - 1
        for k, phrase in enumerate(phrases):
            begin = len(tokens)
            pieces = tokenizer.tokenize(phrase)
            tokens.extend(pieces)
            spans.append((begin, len(tokens)))
            segment_ids.extend([1] * len(pieces))
            if k < last:
                tokens.append(tokenizer.sep_token)
                segment_ids.append(0)
            elif k == last:
                tokens.append(tokenizer.sep_token)
                segment_ids.append(1)
            else:
                raise EnvironmentError

    # SQL vocabulary block (fixed position).
    i_sql_vocab = []
    _append_segmented(sql_vocab1, i_sql_vocab)

    # Question block: tokens are already WordPieces, appended verbatim.
    i_st_nlu = len(tokens)
    segment_ids.append(0)
    for token in nlu1_tt:
        tokens.append(token)
        segment_ids.append(0)
    i_nlu = (i_st_nlu, len(tokens))
    tokens.append(tokenizer.sep_token)
    segment_ids.append(0)

    # Header block.
    i_hds = []
    _append_segmented(hds1, i_hds)

    return tokens, segment_ids, i_sql_vocab, i_nlu, i_hds
# Global compute device shared by the utilities below: CUDA when available,
# otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `get_bert_output_s2s` function. Write a Python function `def get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length)` to solve the following problem:
s2s version. Treat SQL-tokens as pseudo-headers sql_vocab = ("sql select", "sql where", "sql and", "sql equal", "sql greater than", "sql less than") e.g.) Q: What is the name of the player with score greater than 15? H: Name of the player, score Input: [CLS], what, is, ..., [SEP], name, of, the, player, [SEP], score, [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ... Here, input is tokenized further by WordPiece (WP) tokenizer and fed into BERT. INPUT :param model_bert: :param tokenizer: WordPiece toknizer :param nlu: Question :param nlu_t: CoreNLP tokenized nlu. :param hds: Headers :param hs_t: None or 1st-level tokenized headers :param max_seq_length: max input token length OUTPUT tokens: BERT input tokens nlu_tt: WP-tokenized input natural language questions orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token tok_to_orig_index: inverse map.
Here is the function:
def get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length):
    """
    s2s version: treat SQL keywords as pseudo-headers so the decoder can
    point at them like ordinary columns.

    sql_vocab = ("sql select", "sql where", "sql and", "sql equal", "sql greater than", "sql less than")

    e.g.)
        Q: What is the name of the player with score greater than 15?
        H: Name of the player, score
        Input: [CLS], what, is, ...,
               [SEP], name, of, the, player, [SEP], score,
               [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ...

    The input is tokenized further by the WordPiece (WP) tokenizer and fed into BERT.

    INPUT
    :param model_bert: BERT encoder; returns (all-layer hidden states, pooled [CLS] vector).
    :param tokenizer: WordPiece tokenizer.
    :param nlu_t: CoreNLP-tokenized questions, one token list per example.
    :param hds: headers (column names) per example.
    :param sql_vocab: SQL pseudo-header phrases appended after the real headers.
    :param max_seq_length: maximum input length; shorter inputs are zero-padded.

    OUTPUT
        all_encoder_layer: BERT hidden states from every layer.
        pooled_output: output of the [CLS] vector.
        tokens: BERT input tokens per example.
        i_nlu / i_hds / i_sql_vocab: (start, end) index pairs locating the
            question / each header / each SQL-vocab phrase inside `tokens`.
        l_n, l_hpu, l_hs, l_input: question length, header-piece lengths,
            header count, and unpadded input length per example.
        nlu_tt: WP-tokenized questions.
        t_to_tt_idx / tt_to_t_idx: maps from 1st-level (CoreNLP) token index
            to 2nd-level (WordPiece) token index, and the inverse.
    """
    l_n = []
    l_hs = []  # The number of columns (headers) for each example.
    l_input = []
    input_ids = []
    tokens = []
    segment_ids = []
    input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
    i_hds = []
    i_sql_vocab = []
    doc_tokens = []
    nlu_tt = []
    t_to_tt_idx = []
    tt_to_t_idx = []
    for b, nlu_t1 in enumerate(nlu_t):
        hds1 = hds[b]
        l_hs.append(len(hds1))
        # 1. 2nd tokenization using WordPiece
        tt_to_t_idx1 = []  # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
        t_to_tt_idx1 = []  # t_to_tt_idx1[i] = start index of i-th 1st-level token among the sub-tokens.
        nlu_tt1 = []  # nlu_tt1[ t_to_tt_idx1[i] ] is the first sub-token segment of the i-th 1st-level token.
        for (i, token) in enumerate(nlu_t1):
            t_to_tt_idx1.append(
                len(nlu_tt1))  # start position of this original 'white-space' token among the sub-tokens.
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tt_to_t_idx1.append(i)
                nlu_tt1.append(sub_token)  # tokens are further tokenized using the WordPiece tokenizer.
        nlu_tt.append(nlu_tt1)
        tt_to_t_idx.append(tt_to_t_idx1)
        t_to_tt_idx.append(t_to_tt_idx1)
        l_n.append(len(nlu_tt1))
        # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]
        # 2. Generate BERT inputs & indices.
        # Combine hds1 and sql_vocab into one pseudo-header sequence.
        tokens1, segment_ids1, i_sql_vocab1, i_nlu1, i_hds1 = generate_inputs_s2s(tokenizer, nlu_tt1, hds1, sql_vocab)
        input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
        # Input masks
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask1 = [1] * len(input_ids1)
        # 3. Zero-pad up to the sequence length (record the unpadded length first).
        l_input.append( len(input_ids1) )
        while len(input_ids1) < max_seq_length:
            input_ids1.append(0)
            input_mask1.append(0)
            segment_ids1.append(0)
        assert len(input_ids1) == max_seq_length
        assert len(input_mask1) == max_seq_length
        assert len(segment_ids1) == max_seq_length
        input_ids.append(input_ids1)
        tokens.append(tokens1)
        segment_ids.append(segment_ids1)
        input_mask.append(input_mask1)
        i_nlu.append(i_nlu1)
        i_hds.append(i_hds1)
        i_sql_vocab.append(i_sql_vocab1)
    # Convert to tensor
    all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
    all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
    all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
    # 4. Generate BERT output.
    all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)
    # 5. generate l_hpu (number of word pieces per header) from i_hds
    l_hpu = gen_l_hpu(i_hds)
    return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, i_sql_vocab, \
           l_n, l_hpu, l_hs, l_input, \
           nlu_tt, t_to_tt_idx, tt_to_t_idx
163,089 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_bert_output_agg(model_bert, tokenizer, nlu_t, hds, max_seq_length):
    """
    Aggregation-model variant: build BERT inputs from question + headers and
    run the encoder.  The input is tokenized further by the WordPiece (WP)
    tokenizer and fed into BERT.

    INPUT
    :param model_bert: BERT encoder; returns (all-layer hidden states, pooled [CLS] vector).
    :param tokenizer: WordPiece tokenizer.
    :param nlu_t: CoreNLP-tokenized questions, one token list per example.
    :param hds: headers (column names) per example.
    :param max_seq_length: maximum input length; shorter inputs are zero-padded.

    OUTPUT
        all_encoder_layer, pooled_output: BERT layer outputs and [CLS] vector.
        tokens: BERT input tokens per example.
        i_nlu / i_hds: (start, end) spans of the question / each header in `tokens`.
        l_n, l_hpu, l_hs: question length, header-piece lengths, header count.
        nlu_tt, t_to_tt_idx, tt_to_t_idx: WP tokens and index maps between
            1st-level (CoreNLP) and 2nd-level (WordPiece) tokens.
    """
    l_n = []
    l_hs = []  # The number of columns (headers) for each example.
    input_ids = []
    tokens = []
    segment_ids = []
    input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
    i_hds = []
    doc_tokens = []
    nlu_tt = []
    t_to_tt_idx = []
    tt_to_t_idx = []
    for b, nlu_t1 in enumerate(nlu_t):
        hds1 = hds[b]
        l_hs.append(len(hds1))
        # 1. 2nd tokenization using WordPiece
        tt_to_t_idx1 = []  # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
        t_to_tt_idx1 = []  # t_to_tt_idx1[i] = start index of i-th 1st-level token among the sub-tokens.
        nlu_tt1 = []  # nlu_tt1[ t_to_tt_idx1[i] ] is the first sub-token segment of the i-th 1st-level token.
        for (i, token) in enumerate(nlu_t1):
            t_to_tt_idx1.append(
                len(nlu_tt1))  # start position of this original 'white-space' token among the sub-tokens.
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tt_to_t_idx1.append(i)
                nlu_tt1.append(sub_token)  # tokens are further tokenized using the WordPiece tokenizer.
        nlu_tt.append(nlu_tt1)
        tt_to_t_idx.append(tt_to_t_idx1)
        t_to_tt_idx.append(t_to_tt_idx1)
        l_n.append(len(nlu_tt1))
        # [CLS] nlu [SEP]
        # 2. Generate BERT inputs & indices.
        tokens1, segment_ids1, i_nlu1, i_hds1 = generate_inputs_agg(tokenizer, nlu_tt1, hds1)
        input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
        # Input masks
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask1 = [1] * len(input_ids1)
        # 3. Zero-pad up to the sequence length.
        while len(input_ids1) < max_seq_length:
            input_ids1.append(0)
            input_mask1.append(0)
            segment_ids1.append(0)
        assert len(input_ids1) == max_seq_length
        assert len(input_mask1) == max_seq_length
        assert len(segment_ids1) == max_seq_length
        input_ids.append(input_ids1)
        tokens.append(tokens1)
        segment_ids.append(segment_ids1)
        input_mask.append(input_mask1)
        i_nlu.append(i_nlu1)
        i_hds.append(i_hds1)
    # Convert to tensor
    all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
    all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
    all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
    # 4. Generate BERT output.
    all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)
    # 5. generate l_hpu (number of word pieces per header) from i_hds
    l_hpu = gen_l_hpu(i_hds)
    return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \
           l_n, l_hpu, l_hs, \
           nlu_tt, t_to_tt_idx, tt_to_t_idx
def get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n):
    """
    Collect per-token question embeddings from the last `num_out_layers_n`
    BERT layers, concatenated along the feature dimension.

    Positions beyond each example's question span stay zero-padded.
    Returns a tensor of shape [batch, max(l_n), hS * num_out_layers_n].
    """
    batch_size = len(l_n)
    max_q_len = max(l_n)
    wemb_n = torch.zeros([batch_size, max_q_len, hS * num_out_layers_n]).to(device)
    for b, (st_tok, ed_tok) in enumerate(i_nlu):
        n_tok = ed_tok - st_tok
        for out_idx in range(num_out_layers_n):
            # Walk backwards from the top layer.
            layer = num_hidden_layers - 1 - out_idx
            col = out_idx * hS
            wemb_n[b, 0:n_tok, col:col + hS] = all_encoder_layer[layer][b, st_tok:ed_tok, :]
    return wemb_n
#
def get_wemb_bert_agg(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1):
    """
    Aggregation-only variant of get_wemb_bert: encode (question, headers)
    with BERT and slice out only the question-token embeddings; the
    header-embedding slot in the return tuple is None.
    """
    # Contextual vectors for every token plus the span bookkeeping (i_nlu,
    # i_hds) needed to locate the question and the headers.
    (all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,
     l_n, l_hpu, l_hs,
     nlu_tt, t_to_tt_idx, tt_to_t_idx) = get_bert_output_agg(model_bert, tokenizer, nlu_t, hds, max_seq_length)
    wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers,
                        all_encoder_layer, num_out_layers_n)
    # Header embeddings are not needed by the aggregation head.
    return (wemb_n, None, l_n, l_hpu, l_hs,
            nlu_tt, t_to_tt_idx, tt_to_t_idx)
163,090 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):
    """
    Build BERT inputs from question + headers and run the encoder.  The input
    is tokenized further by the WordPiece (WP) tokenizer and fed into BERT.

    A sentinel header "空列" ("empty column") is appended to each example's
    headers so the model can point at a no-column option.

    INPUT
    :param model_bert: HuggingFace-style encoder (BERT / RoBERTa / GRAPPA).
    :param tokenizer: WordPiece tokenizer.
    :param nlu_t: CoreNLP-tokenized questions, one token list per example.
    :param hds: headers (column names) per example; not mutated (deep-copied).
    :param max_seq_length: maximum input length; shorter inputs are zero-padded.

    OUTPUT
        all_encoder_layer, pooled_output: hidden states of all layers and the
            pooled [CLS] vector.
        tokens: BERT input tokens per example.
        i_nlu / i_hds: (start, end) spans of the question / each header in `tokens`.
        l_n, l_hpu, l_hs: question length, header-piece lengths, header count
            (including the sentinel column).
        nlu_tt, t_to_tt_idx, tt_to_t_idx: WP tokens and index maps between
            1st-level (CoreNLP) and 2nd-level (WordPiece) tokens.
    """
    l_n = []
    l_hs = []  # The number of columns (headers) for each example.
    input_ids = []
    tokens = []
    segment_ids = []
    input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
    i_hds = []
    doc_tokens = []
    nlu_tt = []
    t_to_tt_idx = []
    tt_to_t_idx = []
    for b, nlu_t1 in enumerate(nlu_t):
        # Work on a copy so the caller's header list is untouched; append the
        # "空列" ("empty column") sentinel header.
        hds1 = deepcopy(hds[b])
        hds1.append("空列")
        l_hs.append(len(hds1))
        # 1. 2nd tokenization using WordPiece
        tt_to_t_idx1 = []  # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
        t_to_tt_idx1 = []  # t_to_tt_idx1[i] = start index of i-th 1st-level token among the sub-tokens.
        nlu_tt1 = []  # nlu_tt1[ t_to_tt_idx1[i] ] is the first sub-token segment of the i-th 1st-level token.
        for (i, token) in enumerate(nlu_t1):
            t_to_tt_idx1.append(
                len(nlu_tt1))  # start position of this original 'white-space' token among the sub-tokens.
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tt_to_t_idx1.append(i)
                nlu_tt1.append(sub_token)  # tokens are further tokenized using the WordPiece tokenizer.
        nlu_tt.append(nlu_tt1)
        tt_to_t_idx.append(tt_to_t_idx1)  # sub_token index
        t_to_tt_idx.append(t_to_tt_idx1)
        l_n.append(len(nlu_tt1))
        # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]
        # 2. Generate BERT inputs & indices.
        tokens1, segment_ids1, i_nlu1, i_hds1 = generate_inputs(tokenizer, nlu_tt1, hds1)
        input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
        # Input masks
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask1 = [1] * len(input_ids1)
        # Over-long inputs are only reported, not truncated; the asserts
        # below will then fail.
        if len(input_ids1) > max_seq_length:
            print(len(input_ids1), max_seq_length, nlu_tt1)
        # 3. Zero-pad up to the sequence length.
        while len(input_ids1) < max_seq_length:
            input_ids1.append(0)
            input_mask1.append(0)
            segment_ids1.append(0)
        assert len(input_ids1) == max_seq_length
        assert len(input_mask1) == max_seq_length
        assert len(segment_ids1) == max_seq_length
        input_ids.append(input_ids1)
        tokens.append(tokens1)
        segment_ids.append(segment_ids1)
        input_mask.append(input_mask1)
        i_nlu.append(i_nlu1)
        i_hds.append(i_hds1)
    # Convert to tensor
    all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
    all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
    all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
    # 4. Generate BERT output.  RoBERTa-family checkpoints take no
    # token_type_ids argument, hence the branch on the model name.
    if "roberta" in model_bert.config._name_or_path or "ckpt" in model_bert.config._name_or_path or "grappa" in model_bert.config._name_or_path:
        outputs = model_bert(all_input_ids, all_input_mask, output_hidden_states=True)
        all_encoder_layer = outputs.hidden_states
        pooled_output = outputs.pooler_output
    else:
        outputs = model_bert(all_input_ids, all_segment_ids, all_input_mask, output_hidden_states=True)
        all_encoder_layer = outputs.hidden_states
        pooled_output = outputs.pooler_output
    # 5. generate l_hpu (number of word pieces per header) from i_hds
    l_hpu = gen_l_hpu(i_hds)
    return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \
           l_n, l_hpu, l_hs, \
           nlu_tt, t_to_tt_idx, tt_to_t_idx
def get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n):
    """
    Slice the per-token question embeddings out of the BERT layer outputs,
    concatenating the top `num_out_layers_n` layers feature-wise.

    Unused tail positions (past each question's length) remain zero.
    Returns [batch, max(l_n), hS * num_out_layers_n].
    """
    batch_size = len(l_n)
    max_q_len = max(l_n)
    wemb_n = torch.zeros([batch_size, max_q_len, hS * num_out_layers_n]).to(device)
    for b, (st_tok, ed_tok) in enumerate(i_nlu):
        n_tok = ed_tok - st_tok
        for out_idx in range(num_out_layers_n):
            # out_idx = 0 maps to the final layer, then downwards.
            layer = num_hidden_layers - 1 - out_idx
            col = out_idx * hS
            wemb_n[b, 0:n_tok, col:col + hS] = all_encoder_layer[layer][b, st_tok:ed_tok, :]
    return wemb_n
#
def get_wemb_h(i_hds, l_hpu, l_hs, hS, num_hidden_layers, all_encoder_layer, num_out_layers_h):
    """
    Flatten header-token embeddings across the whole batch into one tensor,
    one row per header, as if:
        [ [t1-c1-tok1, t1-c1-tok2, ...],
          [t1-c2-tok1, ...],
          ...
          [t2-c1-tok1, ...] ]
    The top `num_out_layers_h` BERT layers are concatenated feature-wise.
    Returns [sum(l_hs), max(l_hpu), hS * num_out_layers_h].
    """
    wemb_h = torch.zeros([sum(l_hs), max(l_hpu), hS * num_out_layers_h]).to(device)
    row = 0  # running header index across the flattened batch
    for b, spans in enumerate(i_hds):
        for st_tok, ed_tok in spans:
            n_tok = ed_tok - st_tok
            for out_idx in range(num_out_layers_h):
                layer = num_hidden_layers - 1 - out_idx
                col = out_idx * hS
                wemb_h[row, 0:n_tok, col:col + hS] = all_encoder_layer[layer][b, st_tok:ed_tok, :]
            row += 1
    return wemb_h
def get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1):
    """
    Run BERT over (question, headers) and slice out both question-token and
    header-token embeddings.
    Returns (wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx).
    """
    # Contextual vectors for every token plus the span bookkeeping (i_nlu,
    # i_hds) needed to locate the question and each header.
    (all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,
     l_n, l_hpu, l_hs,
     nlu_tt, t_to_tt_idx, tt_to_t_idx) = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length)
    wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers,
                        all_encoder_layer, num_out_layers_n)
    wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, bert_config.hidden_size, bert_config.num_hidden_layers,
                        all_encoder_layer, num_out_layers_h)
    return (wemb_n, wemb_h, l_n, l_hpu, l_hs,
            nlu_tt, t_to_tt_idx, tt_to_t_idx)
163,091 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
# Default tensor device: first CUDA GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `gen_pnt_n` function. Write a Python function `def gen_pnt_n(g_wvi, mL_w, mL_nt)` to solve the following problem:
Generate one-hot idx indicating vectors with their lengths. :param g_wvi: e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] where_val idx in nlu_t. 0 = <BEG>, -1 = <END>. :param mL_w: 4 :param mL_nt: 200 :return:
Here is the function:
def gen_pnt_n(g_wvi, mL_w, mL_nt):
    """
    Generate one-hot index-indicating vectors with their lengths.

    :param g_wvi: e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], ...]
        where-value token indices in nlu_t; first entry = <BEG>, last = <END>.
    :param mL_w: maximum number of where conditions (4).
    :param mL_nt: maximum number of NL tokens (e.g. 200).
    :return: (pnt_n, l_g_wvi)
        pnt_n: [bS, mL_w, mL_g_wvi, mL_nt] one-hot pointer targets.
        l_g_wvi: [bS, mL_w] pointer-sequence length per condition, excluding
            <END>; padded (unused) condition slots get length 1 (<BEG> only).
    """
    bS = len(g_wvi)
    # Longest pointer sequence.  The [0] guards examples with no conditions;
    # -1 because <BEG> is already accounted for.  Clamp to >= 1 so the tensor
    # below always has at least one time step.
    mL_g_wvi = max([max([0] + [len(tok) for tok in gwsi]) for gwsi in g_wvi]) - 1
    if mL_g_wvi < 1:
        mL_g_wvi = 1
    pnt_n = torch.zeros(bS, mL_w, mL_g_wvi, mL_nt).to(device)  # one hot
    l_g_wvi = torch.zeros(bS, mL_w).to(device)
    for b, g_wvi1 in enumerate(g_wvi):
        i_wn = 0  # To prevent error from zero number of conditions.
        for i_wn, g_wvi11 in enumerate(g_wvi1):
            # g_wvi11: [<BEG>, where-cond token positions in NLq, <END>]
            g_wvi11_n1 = g_wvi11[:-1]  # doesn't count <END> idx.
            l_g_wvi[b, i_wn] = len(g_wvi11_n1)
            for t, idx in enumerate(g_wvi11_n1):
                pnt_n[b, i_wn, t, idx] = 1
        # Pad the remaining (unused) condition slots.
        if i_wn < (mL_w - 1):  # maximum number of conditions is mL_w
            pnt_n[b, i_wn + 1:, 0, 1] = 1  # first step points at index 1 ([<BEG>, <END>]-style pad)
            l_g_wvi[b, i_wn + 1:] = 1  # it means there is only <BEG>.
    return pnt_n, l_g_wvi
163,092 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `pred_sc_beam` function. Write a Python function `def pred_sc_beam(s_sc, beam_size)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_sc_beam(s_sc, beam_size):
    """
    Return the top-`beam_size` select-column indices for each example,
    ordered by decreasing score.
    """
    pr_sc_beam = []
    for scores in s_sc:
        _, top_idx = scores.topk(k=beam_size)
        pr_sc_beam.append(top_idx.tolist())
    return pr_sc_beam
163,093 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `pred_wc_old` function. Write a Python function `def pred_wc_old(sql_i, s_wc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_wc_old(sql_i, s_wc):
    """
    For each example, pick as many where-columns as the gold SQL has
    conditions, taking the highest-scoring columns.  Returned indices are
    sorted ascending:  [ pr_wc1_i, pr_wc2_i, ...]
    """
    pr_wc = []
    for sql_i1, scores in zip(sql_i, s_wc):
        n_conds = len(sql_i1['conds'])
        top = argsort(-scores.data.cpu().numpy())[:n_conds]
        top.sort()
        pr_wc.append(list(top))
    return pr_wc
163,094 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `pred_wc_sorted_by_prob` function. Write a Python function `def pred_wc_sorted_by_prob(s_wc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted by prob. All column-indexes are returned here.
Here is the function:
def pred_wc_sorted_by_prob(s_wc):
    """
    Return every column index for each example, ordered by decreasing score.
    (Unlike pred_wc, nothing is truncated and the order is by probability,
    not ascending index.)
    """
    pr_wc = []
    for scores in s_wc:
        ranked = argsort(-scores.data.cpu().numpy())
        pr_wc.append(list(ranked))
    return pr_wc
163,095 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `pred_wvi_se_beam` function. Write a Python function `def pred_wvi_se_beam(max_wn, s_wv, beam_size)` to solve the following problem:
s_wv: [B, 4, mL, 2] - predict best st-idx & ed-idx output: pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed]. prob_wvi_beam = [B, max_wn, n_pairs]
Here is the function:
def pred_wvi_se_beam(max_wn, s_wv, beam_size):
    """
    Beam version of where-value span prediction.

    s_wv: [B, 4, mL, 2] start/end logits per condition slot and token.
    Keeps the top-k start and top-k end token indices, with
    k = ceil(sqrt(beam_size)), and forms every (st, ed) combination, so
    n_pairs = k**2 >= beam_size.

    output:
        pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed].
        prob_wvi_beam = [B, max_wn, n_pairs] joint probability p(st) * p(ed).
    """
    bS = s_wv.shape[0]
    s_wv_st, s_wv_ed = s_wv.split(1, dim=3)  # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1]
    s_wv_st = s_wv_st.squeeze(3)  # [B, 4, mL, 1] -> [B, 4, mL]
    s_wv_ed = s_wv_ed.squeeze(3)
    # Per-token probabilities over the sequence, for scoring the pairs below.
    prob_wv_st = F.softmax(s_wv_st, dim=-1).detach().to('cpu').numpy()
    prob_wv_ed = F.softmax(s_wv_ed, dim=-1).detach().to('cpu').numpy()
    k_logit = int(ceil(sqrt(beam_size)))
    n_pairs = k_logit**2
    assert n_pairs >= beam_size
    values_st, idxs_st = s_wv_st.topk(k_logit)  # [B, 4, mL] -> [B, 4, k_logit]
    values_ed, idxs_ed = s_wv_ed.topk(k_logit)  # [B, 4, mL] -> [B, 4, k_logit]
    # Generate all possible combinations of st, ed indices & their probabilities.
    pr_wvi_beam = []  # [B, max_wn, k_logit**2 [st, ed] pairs]
    prob_wvi_beam = zeros([bS, max_wn, n_pairs])
    for b in range(bS):
        pr_wvi_beam1 = []
        idxs_st1 = idxs_st[b]
        idxs_ed1 = idxs_ed[b]
        for i_wn in range(max_wn):
            idxs_st11 = idxs_st1[i_wn]
            idxs_ed11 = idxs_ed1[i_wn]
            pr_wvi_beam11 = []
            pair_idx = -1
            for i_k in range(k_logit):
                for j_k in range(k_logit):
                    pair_idx += 1
                    st = idxs_st11[i_k].item()
                    ed = idxs_ed11[j_k].item()
                    pr_wvi_beam11.append([st, ed])
                    p1 = prob_wv_st[b, i_wn, st]
                    p2 = prob_wv_ed[b, i_wn, ed]
                    prob_wvi_beam[b, i_wn, pair_idx] = p1*p2
            pr_wvi_beam1.append(pr_wvi_beam11)
        pr_wvi_beam.append(pr_wvi_beam1)
    return pr_wvi_beam, prob_wvi_beam
163,096 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def is_whitespace_g_wvi(c):
    """Return True iff `c` is a plain ASCII space (tabs/newlines do not count)."""
    return c == " "
163,097 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `convert_pr_wvi_to_string` function. Write a Python function `def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu)` to solve the following problem:
- Convert to the string in white-space-separated tokens - Ad-hoc addition.
Here is the function:
def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu):
    """
    Convert predicted where-value index spans (WordPiece positions) into
    token strings.

    Returns (pr_wv_str, pr_wv_str_wp):
        pr_wv_str: spans as whitespace-level tokens (via wp_to_wh_index).
        pr_wv_str_wp: the same spans as raw WordPiece tokens.
    `nlu` is accepted for interface compatibility but unused.
    """
    pr_wv_str_wp = []  # word-piece version
    pr_wv_str = []
    for spans1, wh_toks, wp_toks, wp2wh in zip(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index):
        wp_spans = []
        wh_spans = []
        for st_wp, ed_wp in spans1:
            # Slice the WordPiece tokens (end index inclusive).
            wp_spans.append(wp_toks[st_wp:ed_wp + 1])
            # Map back to whitespace-token indices, then slice those tokens;
            # this recovers full words cut mid-piece (e.g. "(" -> "(ks)").
            st_wh = wp2wh[st_wp]
            ed_wh = wp2wh[ed_wp]
            wh_spans.append(wh_toks[st_wh:ed_wh + 1])
        pr_wv_str_wp.append(wp_spans)
        pr_wv_str.append(wh_spans)
    return pr_wv_str, pr_wv_str_wp
163,098 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def pred_sa(s_sa):
    """Return the argmax aggregation-operator index for each example."""
    return [scores.argmax().item() for scores in s_sa]
def pred_sw_se_agg(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv):
    """
    Aggregation-only decoding: only the select-aggregation head (s_sa) is
    decoded; every other head's slot in the result tuple is None.
    Returns (None, pr_sa, None, None, None, None).
    """
    pr_sa = pred_sa(s_sa)
    return None, pr_sa, None, None, None, None
163,099 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def pred_sc(s_sc):
    """Return the argmax select-column index for each example."""
    return [scores.argmax().item() for scores in s_sc]
def pred_sa(s_sa):
    """Pick the highest-scoring aggregation operator for every example."""
    picks = []
    for row_scores in s_sa:
        picks.append(row_scores.argmax().item())
    return picks
def pred_wn(s_wn):
    """Return the argmax where-condition count for each example."""
    return [scores.argmax().item() for scores in s_wn]
def pred_wc(wn, s_wc):
    """
    Pick the top-`wn[b]` where-columns per example by score.
    Returned column indices are sorted ascending:  [ pr_wc1_i, pr_wc2_i, ...]
    """
    pr_wc = []
    for n_cond, scores in zip(wn, s_wc):
        top = argsort(-scores.data.cpu().numpy())[:n_cond]
        top.sort()
        pr_wc.append(list(top))
    return pr_wc
def pred_wo(wn, s_wo):
    """
    Return the argmax where-operator for the first wn[b] condition slots of
    each example.  s_wo: [B, 4, n_op].
    """
    best_ops = s_wo.argmax(dim=2)  # [B, 4]
    return [list(row.data.cpu().numpy()[:n_cond]) for row, n_cond in zip(best_ops, wn)]
def pred_wvi_se(wn, s_wv):
    """
    Decode the best (start, end) token-index pair for each predicted where
    condition.  s_wv: [B, 4, mL, 2] (last dim = start/end logits).
    """
    s_st, s_ed = s_wv.split(1, dim=3)  # [B, 4, mL, 2] -> 2 x [B, 4, mL, 1]
    best_st = s_st.squeeze(3).argmax(dim=2)  # [B, 4]
    best_ed = s_ed.squeeze(3).argmax(dim=2)  # [B, 4]
    pr_wvi = []
    for b, n_cond in enumerate(wn):
        spans = [[best_st[b][i].item(), best_ed[b][i].item()] for i in range(n_cond)]
        pr_wvi.append(spans)
    return pr_wvi
def pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv):
    """
    Greedy decoding of all six SQL heads.
    Returns (pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi).
    """
    pr_wn = pred_wn(s_wn)  # condition count gates the three where-heads below
    return (
        pred_sc(s_sc),
        pred_sa(s_sa),
        pr_wn,
        pred_wc(pr_wn, s_wc),
        pred_wo(pr_wn, s_wo),
        pred_wvi_se(pr_wn, s_wv),
    )
163,100 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def find_sql_where_op(gt_sql_tokens_part):
    """
    Return the comparison-operator token of a single where-condition.

    gt_sql_tokens_part: SQL tokens between 'WHERE' and the next 'AND' (if any).
    Matches the symbolic tokens only ('EQL', 'LT', 'GT') because the where
    value itself sometimes contains a literal =, < or >.

    Raises:
        ValueError: if no operator token is present.  (The original code
        shadowed its list with the loop variable and raised a confusing
        UnboundLocalError in this case.)
    """
    for op in ('EQL', 'LT', 'GT'):
        if op in gt_sql_tokens_part:
            return op
    raise ValueError('no where-operator token (EQL/LT/GT) found in %r' % (gt_sql_tokens_part,))
def find_sub_list(sl, l):
    """
    Return (start, end) inclusive index pairs for every occurrence of the
    sub-list `sl` inside `l`.
    """
    matches = []
    n = len(sl)
    for start, elem in enumerate(l):
        # Cheap first-element check before the full slice comparison.
        if elem == sl[0] and l[start:start + n] == sl:
            matches.append((start, start + n - 1))
    return matches
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert` function. Write a Python function `def get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t)` to solve the following problem:
Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu.
Here is the function:
def get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):
    """
    Generate SQuAD-style start/end indices of each gold where-value in the
    question, expressed in WordPiece token positions.

    Walks the tokenized gold SQL (sql_t): for each condition between WHERE
    and the next AND it locates the operator token (EQL/LT/GT), takes the
    tokens after the operator as the value string, finds that sub-list in
    the whitespace-tokenized question (nlu_t), and converts the match to
    WordPiece indices via wh_to_wp_index.

    Assumption: the where-value string always appears verbatim in the
    question (`results[0]` below raises IndexError otherwise).
    """
    g_wvi = []
    for b, sql_i1 in enumerate(sql_i):
        nlu1 = nlu[b]
        nlu_t1 = nlu_t[b]
        nlu_wp_t1 = nlu_wp_t[b]
        sql_t1 = sql_t[b]
        wh_to_wp_index1 = wh_to_wp_index[b]
        # Start just after WHERE; with no WHERE clause the loop is skipped.
        st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1)
        g_wvi1 = []
        while st < len(sql_t1):
            # The current condition spans [st, ed); ed is the next AND (or end).
            if 'AND' not in sql_t1[st:]:
                ed = len(sql_t1)
            else:
                ed = sql_t1[st:].index('AND') + st
            sql_wop = find_sql_where_op(sql_t1[st:ed])  # sql where operator
            st_wop = st + sql_t1[st:ed].index(sql_wop)
            wv_str11_t = sql_t1[st_wop + 1:ed]  # value tokens after the operator
            results = find_sub_list(wv_str11_t, nlu_t1)
            st_idx, ed_idx = results[0]  # first occurrence in the question
            st_wp_idx = wh_to_wp_index1[st_idx]
            ed_wp_idx = wh_to_wp_index1[ed_idx]
            g_wvi11 = [st_wp_idx, ed_wp_idx]
            g_wvi1.append(g_wvi11)
            st = ed + 1
        g_wvi.append(g_wvi1)
    return g_wvi
163,101 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_amr_infos(t, l_n, l_hs):
    """
    Build per-example dependency arrays from the annotated examples `t`.

    Each example contributes `head` and `dep` int32 arrays of length
    l_n[b] + l_hs[b]; question positions hold struct_question[j] + l_n[b]
    and struct_label[j], header positions stay zero.  part_masks marks the
    question region with 1s.  Returns (heads, deps, part_masks).
    """
    batch_size = len(l_n)
    # Widest (question + headers) row in the batch; 0 for an empty batch.
    maxlen = max((ln + nh for ln, nh in zip(l_n, l_hs)), default=0)
    part_masks = Variable(torch.Tensor(batch_size, maxlen).zero_(), requires_grad=False)
    heads, deps = [], []
    for b, t1 in enumerate(t):
        n_q = l_n[b]
        assert len(t1['struct_question']) == len(t1['struct_label'])
        assert len(t1['struct_question']) == n_q
        head = np.zeros((n_q + l_hs[b]), dtype=np.int32)
        dep = np.zeros((n_q + l_hs[b]), dtype=np.int32)
        for j, (h, lab) in enumerate(zip(t1['struct_question'], t1['struct_label'])):
            head[j] = h + n_q  # offset by question length — TODO confirm this targets the header region
            dep[j] = lab
            part_masks[b, j] = 1
        heads.append(head)
        deps.append(dep)
    return heads, deps, part_masks
163,102 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert_from_g_wvi_corenlp` function. Write a Python function `def get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp)` to solve the following problem:
Generate SQuAD-style start and end indices of wv in nlu. Indices refer to tokens after WordPiece tokenization. Assumption: where_str is always present in the nlu.
Here is the function:
def get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp):
    """Map CoreNLP-token where-value spans onto WordPiece index spans.

    Each (start, end) span is translated through the per-example
    token -> wordpiece index table. The sentinel span (-100, -100),
    meaning the value is absent from the question, maps to [0, 0].
    """
    g_wvi = []
    for b, spans in enumerate(g_wvi_corenlp):
        tok2wp = wh_to_wp_index[b]
        mapped = []
        for st_idx, ed_idx in spans:
            if st_idx == -100 and ed_idx == -100:
                mapped.append([0, 0])
            else:
                mapped.append([tok2wp[st_idx], tok2wp[ed_idx]])
        g_wvi.append(mapped)
    return g_wvi
163,103 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def find_sql_where_op(gt_sql_tokens_part):
    """Return the where-operator token found in a condition's token slice.

    gt_sql_tokens_part: tokens between 'WHERE' and 'AND' (if 'AND' exists).

    Candidates are checked in the fixed order 'EQL', 'LT', 'GT' (only the
    symbolic names are matched because wv sometimes contains =, < or >).

    Raises:
        ValueError: if no operator token is present. The original code
        shadowed the candidate list with its loop variable and crashed with
        an unbound-name NameError in that case.
    """
    for op in ('EQL', 'LT', 'GT'):
        if op in gt_sql_tokens_part:
            return op
    raise ValueError(f'no where-operator token in {gt_sql_tokens_part!r}')
def find_sub_list(sl, l):
    """Return (start, end) inclusive index pairs of every occurrence of
    sub-list `sl` inside list `l` (adapted from a Stack Overflow answer).

    An empty `sl` yields no matches; the original indexed sl[0]
    unconditionally and raised IndexError in that case.
    """
    if not sl:
        return []
    sll = len(sl)
    return [
        (ind, ind + sll - 1)
        for ind, e in enumerate(l)
        # Cheap first-element filter before the full slice comparison.
        if e == sl[0] and l[ind:ind + sll] == sl
    ]
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert_from_sql_i` function. Write a Python function `def get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t)` to solve the following problem:
Generate SQuAD-style start and end indices of wv in nlu. Indices refer to tokens after WordPiece tokenization. Assumption: where_str is always present in the nlu.
Here is the function:
def get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):
    """Generate SQuAD-style (start, end) WordPiece indices of each where-value.

    For every example, the value tokens are recovered from the tokenized gold
    SQL (the span between each where-operator and the following 'AND' or the
    end of the query), located inside the tokenized question, and mapped to
    WordPiece positions via wh_to_wp_index.
    Assumption: every where-value string is present in the question.
    """
    g_wvi = []
    for b in range(len(sql_i)):
        question_toks = nlu_t[b]
        sql_toks = sql_t[b]
        tok2wp = wh_to_wp_index[b]
        # First token after WHERE; past-the-end if there is no WHERE clause.
        pos = sql_toks.index('WHERE') + 1 if 'WHERE' in sql_toks else len(sql_toks)
        spans = []
        while pos < len(sql_toks):
            tail = sql_toks[pos:]
            end = pos + tail.index('AND') if 'AND' in tail else len(sql_toks)
            cond = sql_toks[pos:end]
            op = find_sql_where_op(cond)  # sql where operator
            op_pos = pos + cond.index(op)
            value_toks = sql_toks[op_pos + 1:end]  # tokens after the operator
            # Take the first occurrence of the value tokens in the question.
            st_idx, ed_idx = find_sub_list(value_toks, question_toks)[0]
            spans.append([tok2wp[st_idx], tok2wp[ed_idx]])
            pos = end + 1
        g_wvi.append(spans)
    return g_wvi
163,104 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_sc(g_sc, pr_sc):
    """Count batch entries whose predicted select-column equals the gold one."""
    return sum(pr_sc[b] == gold for b, gold in enumerate(g_sc))
def get_cnt_sa(g_sa, pr_sa):
    """Count batch entries whose predicted aggregation op matches the gold one."""
    return sum(int(pr_sa[i] == gold) for i, gold in enumerate(g_sa))
def get_cnt_wn(g_wn, pr_wn):
    """Count batch entries with the correct number of where-conditions."""
    correct = 0
    for idx, gold in enumerate(g_wn):
        correct += 1 if pr_wn[idx] == gold else 0
    return correct
def get_cnt_wc(g_wc, pr_wc):
    """Count entries whose predicted where-column multiset matches the gold one.

    Predictions arrive sorted by column index, so the gold columns are sorted
    before comparison; a length mismatch is an automatic miss.
    """
    cnt = 0
    for b, gold_cols in enumerate(g_wc):
        pred_cols = pr_wc[b]
        if len(pred_cols) == len(gold_cols) and list(pred_cols) == sorted(gold_cols):
            cnt += 1
    return cnt
def get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):
    """Count entries whose where-operators all match the gold ones.

    pr_wo is ordered by increasing predicted column index, while gold is not;
    in 'test' mode the gold operators are re-ordered by gold column index to
    align, and in 'train' mode teacher forcing keeps gold order so no sorting
    is needed.

    Raises:
        ValueError: on an unknown mode.
        TypeError: if a prediction entry is not a list.
    """
    cnt = 0
    for b, gold_ops in enumerate(g_wo):
        pred_ops = pr_wo[b]
        if g_wn[b] != len(pred_ops):
            continue
        if mode == 'test':
            order = argsort(array(g_wc[b]))
            gold_aligned = list(array(gold_ops)[order])
        elif mode == 'train':
            gold_aligned = gold_ops  # teacher forcing: already aligned
        else:
            raise ValueError
        if type(pred_ops) != list:
            raise TypeError
        if gold_aligned == pred_ops:
            cnt += 1
    return cnt
def get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode):
    """Count entries whose where-value index spans all match the gold spans.

    Usable only when g_wc was used to find pr_wv. In 'test' mode the gold
    spans are visited in gold-column order (to align with the sorted
    predictions); in 'train' mode teacher forcing keeps gold order.
    """
    cnt = 0
    for b, gold_spans in enumerate(g_wvi):
        pred_spans = pr_wvi[b]
        n_gold = g_wn[b]
        # Alignment between gold and predicted condition order.
        if mode == 'test':
            order = argsort(array(g_wc[b]))
        elif mode == 'train':
            order = list(range(n_gold))
        else:
            raise ValueError
        if n_gold != len(pred_spans):
            continue
        if all(gold_spans[src] == pred_spans[i] for i, src in enumerate(order)):
            cnt += 1
    return cnt
The provided code snippet includes necessary dependencies for implementing the `get_cnt_sw` function. Write a Python function `def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode)` to solve the following problem:
usable only when g_wc was used to find pr_wv
Here is the function:
def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode):
    """Aggregate per-clause correct counts for the full select+where query.

    Usable only when g_wc was used to find pr_wv.
    Returns (cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv).
    """
    return (
        get_cnt_sc(g_sc, pr_sc),
        get_cnt_sa(g_sa, pr_sa),
        get_cnt_wn(g_wn, pr_wn),
        get_cnt_wc(g_wc, pr_wc),
        get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode),
        get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode),
    )
163,105 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_sc_list(g_sc, pr_sc):
    """Per-example 0/1 correctness list for the select column (1 = match)."""
    return [int(pr_sc[b] == gold) for b, gold in enumerate(g_sc)]
The provided code snippet includes necessary dependencies for implementing the `get_cnt_sw_list_agg` function. Write a Python function `def get_cnt_sw_list_agg(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, g_sql_i, pr_sql_i, mode)` to solve the following problem:
usable only when g_wc was used to find pr_wv
Here is the function:
def get_cnt_sw_list_agg(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi,
                        pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                        g_sql_i, pr_sql_i,
                        mode):
    """Aggregation-only variant of the per-clause accuracy lists.

    Usable only when g_wc was used to find pr_wv. Only the aggregation
    operator is evaluated; every other clause slot is reported as None so the
    returned tuple keeps the (sc, sa, wn, wc, wo, wvi, wv) shape.
    """
    cnt_sa = get_cnt_sc_list(g_sa, pr_sa)
    return None, cnt_sa, None, None, None, None, None
163,106 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_sc_list(g_sc, pr_sc):
    """Return a per-example list of 1 (prediction equals gold) / 0 flags."""
    flags = []
    for i, gold in enumerate(g_sc):
        flags.append(1 if pr_sc[i] == gold else 0)
    return flags
def get_cnt_wc_list(g_wc, pr_wc):
    """Per-example 0/1 list for the where-column sets.

    Predictions come sorted by column index, so the gold columns are sorted
    before comparison; a length mismatch scores 0.
    """
    return [
        int(len(pr_wc[b]) == len(gold) and list(pr_wc[b]) == sorted(gold))
        for b, gold in enumerate(g_wc)
    ]
def get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):
    """Per-example 0/1 list for the where-operators.

    Predictions are ordered by increasing predicted column index; gold
    operators are re-ordered by gold column index in 'test' mode, while
    'train' mode (teacher forcing) keeps gold order.

    Raises:
        ValueError: on an unknown mode.
        TypeError: if a prediction entry is not a list.
    """
    flags = []
    for b, gold_ops in enumerate(g_wo):
        pred_ops = pr_wo[b]
        if g_wn[b] != len(pred_ops):
            flags.append(0)
            continue
        if mode == 'test':
            order = argsort(array(g_wc[b]))
            gold_aligned = list(array(gold_ops)[order])
        elif mode == 'train':
            gold_aligned = gold_ops  # teacher forcing: already aligned
        else:
            raise ValueError
        if type(pred_ops) != list:
            raise TypeError
        flags.append(1 if gold_aligned == pred_ops else 0)
    return flags
def get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode):
    """Per-example 0/1 list for where-value index spans.

    Usable only when g_wc was used to find pr_wv. Gold spans are visited in
    gold-column order in 'test' mode (to align with the sorted predictions)
    and in gold order in 'train' mode.
    """
    flags = []
    for b, gold_spans in enumerate(g_wvi):
        pred_spans = pr_wvi[b]
        n_gold = g_wn[b]
        # Alignment between gold and predicted condition order.
        if mode == 'test':
            order = argsort(array(g_wc[b]))
        elif mode == 'train':
            order = list(range(n_gold))
        else:
            raise ValueError
        if n_gold != len(pred_spans):
            flags.append(0)
            continue
        ok = all(gold_spans[src] == pred_spans[i] for i, src in enumerate(order))
        flags.append(1 if ok else 0)
    return flags
def get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode):
    """Per-example 0/1 list comparing where-value STRINGS (case-insensitive).

    Uses the original condition triples in g_sql_i / pr_sql_i rather than
    token spans. Usable only when g_wc was used to find pr_wv.
    """
    flags = []
    for b, gold_cols in enumerate(g_wc):
        n_pred = len(pr_sql_i[b]["conds"])
        n_gold = g_wn[b]
        # Alignment between gold and predicted condition order.
        if mode == 'test':
            order = argsort(array(gold_cols))
        elif mode == 'train':
            order = list(range(n_gold))
        else:
            raise ValueError
        if n_gold != n_pred:
            flags.append(0)
            continue
        ok = all(
            str(g_sql_i[b]["conds"][src][2]).lower()
            == str(pr_sql_i[b]["conds"][i][2]).lower()
            for i, src in enumerate(order)
        )
        flags.append(1 if ok else 0)
    return flags
The provided code snippet includes necessary dependencies for implementing the `get_cnt_sw_list` function. Write a Python function `def get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, g_sql_i, pr_sql_i, mode)` to solve the following problem:
usable only when g_wc was used to find pr_wv
Here is the function:
def get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi,
                    pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                    g_sql_i, pr_sql_i,
                    mode):
    """Per-example 0/1 correctness lists for every SQL clause.

    Usable only when g_wc was used to find pr_wv.
    Returns (cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv);
    cnt_wvi falls back to all zeros when no span predictions are supplied.
    """
    cnt_sc = get_cnt_sc_list(g_sc, pr_sc)
    cnt_sa = get_cnt_sc_list(g_sa, pr_sa)
    cnt_wn = get_cnt_sc_list(g_wn, pr_wn)
    cnt_wc = get_cnt_wc_list(g_wc, pr_wc)
    cnt_wo = get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)
    # Span-level check only when span predictions exist.
    cnt_wvi = get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode) if pr_wvi else [0] * len(cnt_sc)
    # String-level check compares the wv strings from the original data.
    cnt_wv = get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode)
    return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.