max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tests/django_dummy_app/migrations/0002_auto_20211206_0602.py | qcoumes/django-opensearch-dsl | 9 | 6616851 | # Generated by Django 2.2.24 on 2021-12-06 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration for the dummy test app: adds one nullable
    # integer field to Event and re-declares the implicit AutoField primary
    # keys on continent/country/event.

    dependencies = [
        ('django_dummy_app', '0001_initial'),
    ]

    operations = [
        # New nullable integer column on Event.
        migrations.AddField(
            model_name='event',
            name='null_field',
            field=models.IntegerField(default=None, null=True),
        ),
        # The AlterField operations below re-state the auto-created `id`
        # primary keys; presumably no-op re-declarations emitted by
        # makemigrations -- confirm against 0001_initial if in doubt.
        migrations.AlterField(
            model_name='continent',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='country',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='event',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| # Generated by Django 2.2.24 on 2021-12-06 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_dummy_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='event',
name='null_field',
field=models.IntegerField(default=None, null=True),
),
migrations.AlterField(
model_name='continent',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='country',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='event',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| en | 0.880075 | # Generated by Django 2.2.24 on 2021-12-06 06:02 | 1.760552 | 2 |
superhelp/helpers/sorting_reversing_help.py | grantps/superhelp | 27 | 6616852 | <reponame>grantps/superhelp
from ..helpers import any_block_help, filt_block_help
from .. import conf, name_utils
from ..gen_utils import get_nice_str_list, layout_comment as layout
def _get_sorting_or_reversing_comment(block_dets):
"""
Get a comment on any sorting or reversing identified.
:return: string describing type of reversing/sorting or None
:rtype: str
"""
element = block_dets.element
comment = None
func_attr_els = element.xpath('descendant-or-self::Call/func/Attribute')
sort_els = [func_attr_el for func_attr_el in func_attr_els
if func_attr_el.get('attr') == 'sort']
func_name_els = element.xpath('descendant-or-self::Call/func/Name')
sorted_els = [func_name_el for func_name_el in func_name_els
if func_name_el.get('id') == 'sorted']
reversed_els = [func_name_el for func_name_el in func_name_els
if func_name_el.get('id') == 'reversed']
if sort_els:
comment = "has list sorting (`.sort()`)"
if comment and (sorted_els or reversed_els):
comment = ' and ' + comment
if sorted_els and reversed_els:
comment = "uses both the `sorted` and `reversed` functions"
elif sorted_els:
comment = "uses the `sorted` function"
elif reversed_els:
comment = "uses the `reversed` function"
return comment
@any_block_help()
def sorting_reversing_overview(block_dets, *, repeat=False, **_kwargs):
    """
    Provide an overview of sorting and/or reversing. Advise on common
    confusions.

    :param block_dets: details of the code block being advised on
    :param repeat: True when this advice has been given before, in which
     case only the brief summary is produced
    :return: dict mapping message levels to markdown, or None when the
     block does no sorting or reversing
    """
    sorting_or_reversing_comment = _get_sorting_or_reversing_comment(block_dets)
    if not sorting_or_reversing_comment:
        return None
    title = layout("""\
    ### Sorting / reversing
    """)
    if not repeat:
        ## Bug fix: "my_list.`sort()`" rendered with the backtick wrapping
        ## only the method name; now `my_list.sort()` matching the details
        ## text below.
        summary = layout(f"""\
    This block of code {sorting_or_reversing_comment}.
    Sorting and, to a lesser extent, reversing are very common needs in
    programming. Two key points:
    1) reversing is not the same as sorting with `reverse=True`
    2) the list sort method e.g. `my_list.sort()` returns `None`, not the
    sorted list
    """)
        details = (
            ## plain literal (the original used an f-string with no
            ## placeholders)
            layout("""\
    Sorting and, to a lesser extent, reversing are very common needs in
    programming. Two key points:
    1) reversing is not the same as sorting with `reverse=True`
    To illustrate:
    """)
            +
            layout("""\
    word = 'cat'
    word_reversed = reversed(word)
    ## >>> word_reversed
    ## >>> 'tac'
    word_reverse_sorted = sorted(word, reverse=True)
    ## >>> word_reverse_sorted
    ## >>> 'tca'
    ## >>> word_reversed == word_reverse_sorted
    ## >>> False
    """, is_code=True)
            +
            layout("""\
    Using the reversed function does not apply any sorting to the
    sequence being reversed - it merely flips the (possibly) unordered
    sequence the other way.
    2) the list sort method e.g. `my_list.sort()` returns `None`, not
    the sorted list
    `sorted(my_list)` returns a sorted list but `my_list.sort()` is an
    in-place process. It mutates something rather than returning a
    separate thing.
    To illustrate:
    i) `sorted()` returning a result and leaving its input unchanged
    """)
            +
            layout("""\
    fruit = ['banana', 'apple', 'cranberry']
    fruit_sorted = sorted(fruit)
    ## >>> fruit_sorted
    ## >>> ['apple', 'banana', 'cranberry']
    ## fruit itself has not been changed
    ## >>> fruit
    ## >>> ['banana', 'apple', 'cranberry']
    """, is_code=True)
            +
            layout("""\
    ii) `.sort()` returning `None` and changing its input in-place
    """)
            +
            layout("""\
    result_of_fruit_sort = fruit.sort()
    ## >>> result_of_fruit_sort
    ## >>> None
    ## fruit has been changed by the in-place sort method
    ## >>> fruit
    ## >>> ['apple', 'banana', 'cranberry']
    """, is_code=True)
        )
    else:
        summary = layout(f"""\
    This block of code {sorting_or_reversing_comment}.
    """)
        details = summary
    message = {
        conf.Level.BRIEF: title + summary,
        conf.Level.MAIN: title + details,
    }
    return message
ASSIGN_FUNC_ATTRIBUTE_XPATH = 'descendant-or-self::Assign/value/Call/func/Attribute'

@filt_block_help(xpath=ASSIGN_FUNC_ATTRIBUTE_XPATH, warning=True)
def list_sort_as_value(block_dets, *, repeat=False, **_kwargs):
    """
    Warn about assigning a name to the result using .sort() on a list.
    """
    assigned_call_els = block_dets.element.xpath(ASSIGN_FUNC_ATTRIBUTE_XPATH)
    if not assigned_call_els:
        return None
    # Collect names assigned the (None) result of a .sort() call; skip
    # anything whose assigned name can't be resolved.
    names_assigned_to_sort = []
    for attr_el in assigned_call_els:
        if attr_el.get('attr') != 'sort':
            continue
        try:
            name_dets = name_utils.get_assigned_name(attr_el)
        except Exception:
            continue
        names_assigned_to_sort.append(name_dets.name_str)
    if not names_assigned_to_sort:
        return None
    title = layout("""\
    ### Assignment of `None` result from in-place `.sort()` on list
    """)
    if repeat:
        details = ''
    elif len(names_assigned_to_sort) > 1:
        nice_str_list = get_nice_str_list(
            names_assigned_to_sort, quoter='`')
        details = layout(f"""\
    {nice_str_list} are assigned to the results of in-place sort
    operations. This is almost certainly a mistake as the intention is
    probably not to set them each to `None` (the return value of the
    `.sort()` method).
    """)
    else:
        name = names_assigned_to_sort[0]
        details = layout(f"""\
    `{name}` is assigned to the result of an in-place sort operation.
    This is almost certainly a mistake as the intention is probably not
    to set `{name}` to `None` (the return value of the `.sort()`
    method).
    """)
    return {
        conf.Level.BRIEF: title + details,
    }
| from ..helpers import any_block_help, filt_block_help
from .. import conf, name_utils
from ..gen_utils import get_nice_str_list, layout_comment as layout
def _get_sorting_or_reversing_comment(block_dets):
"""
Get a comment on any sorting or reversing identified.
:return: string describing type of reversing/sorting or None
:rtype: str
"""
element = block_dets.element
comment = None
func_attr_els = element.xpath('descendant-or-self::Call/func/Attribute')
sort_els = [func_attr_el for func_attr_el in func_attr_els
if func_attr_el.get('attr') == 'sort']
func_name_els = element.xpath('descendant-or-self::Call/func/Name')
sorted_els = [func_name_el for func_name_el in func_name_els
if func_name_el.get('id') == 'sorted']
reversed_els = [func_name_el for func_name_el in func_name_els
if func_name_el.get('id') == 'reversed']
if sort_els:
comment = "has list sorting (`.sort()`)"
if comment and (sorted_els or reversed_els):
comment = ' and ' + comment
if sorted_els and reversed_els:
comment = "uses both the `sorted` and `reversed` functions"
elif sorted_els:
comment = "uses the `sorted` function"
elif reversed_els:
comment = "uses the `reversed` function"
return comment
@any_block_help()
def sorting_reversing_overview(block_dets, *, repeat=False, **_kwargs):
"""
Provide an overview of sorting and/or reversing. Advise on common
confusions.
"""
sorting_or_reversing_comment = _get_sorting_or_reversing_comment(block_dets)
if not sorting_or_reversing_comment:
return None
title = layout("""\
### Sorting / reversing
""")
if not repeat:
summary = layout(f"""\
This block of code {sorting_or_reversing_comment}.
Sorting and, to a lesser extent, reversing are very common needs in
programming. Two key points:
1) reversing is not the same as sorting with `reverse=True`
2) the list sort method e.g. my_list.`sort()` returns `None`, not the
sorted list
""")
details = (
layout(f"""\
Sorting and, to a lesser extent, reversing are very common needs in
programming. Two key points:
1) reversing is not the same as sorting with `reverse=True`
To illustrate:
""")
+
layout("""\
word = 'cat'
word_reversed = reversed(word)
## >>> word_reversed
## >>> 'tac'
word_reverse_sorted = sorted(word, reverse=True)
## >>> word_reverse_sorted
## >>> 'tca'
## >>> word_reversed == word_reverse_sorted
## >>> False
""", is_code=True)
+
layout("""\
Using the reversed function does not apply any sorting to the
sequence being reversed - it merely flips the (possibly) unordered
sequence the other way.
2) the list sort method e.g. `my_list.sort()` returns `None`, not
the sorted list
`sorted(my_list)` returns a sorted list but `my_list.sort()` is an
in-place process. It mutates something rather than returning a
separate thing.
To illustrate:
i) `sorted()` returning a result and leaving its input unchanged
""")
+
layout("""\
fruit = ['banana', 'apple', 'cranberry']
fruit_sorted = sorted(fruit)
## >>> fruit_sorted
## >>> ['apple', 'banana', 'cranberry']
## fruit itself has not been changed
## >>> fruit
## >>> ['banana', 'apple', 'cranberry']
""", is_code=True)
+
layout("""\
ii) `.sort()` returning `None` and changing its input in-place
""")
+
layout("""\
result_of_fruit_sort = fruit.sort()
## >>> result_of_fruit_sort
## >>> None
## fruit has been changed by the in-place sort method
## >>> fruit
## >>> ['apple', 'banana', 'cranberry']
""", is_code=True)
)
else:
summary = layout(f"""\
This block of code {sorting_or_reversing_comment}.
""")
details = summary
message = {
conf.Level.BRIEF: title + summary,
conf.Level.MAIN: title + details,
}
return message
ASSIGN_FUNC_ATTRIBUTE_XPATH = 'descendant-or-self::Assign/value/Call/func/Attribute'
@filt_block_help(xpath=ASSIGN_FUNC_ATTRIBUTE_XPATH, warning=True)
def list_sort_as_value(block_dets, *, repeat=False, **_kwargs):
"""
Warn about assigning a name to the result using .sort() on a list.
"""
func_attr_els = block_dets.element.xpath(ASSIGN_FUNC_ATTRIBUTE_XPATH)
if not func_attr_els:
return None
names_assigned_to_sort = []
for func_attr_el in func_attr_els:
is_sort = (func_attr_el.get('attr') == 'sort')
if is_sort:
try:
name_dets = name_utils.get_assigned_name(func_attr_el)
except Exception:
continue
names_assigned_to_sort.append(name_dets.name_str)
if not names_assigned_to_sort:
return None
title = layout("""\
### Assignment of `None` result from in-place `.sort()` on list
""")
if not repeat:
multiple = len(names_assigned_to_sort) > 1
if multiple:
nice_str_list = get_nice_str_list(
names_assigned_to_sort, quoter='`')
details = layout(f"""\
{nice_str_list} are assigned to the results of in-place sort
operations. This is almost certainly a mistake as the intention is
probably not to set them each to `None` (the return value of the
`.sort()` method).
""")
else:
name = names_assigned_to_sort[0]
details = layout(f"""\
`{name}` is assigned to the result of an in-place sort operation.
This is almost certainly a mistake as the intention is probably not
to set `{name}` to `None` (the return value of the `.sort()`
method).
""")
else:
details = ''
message = {
conf.Level.BRIEF: title + details,
}
return message | en | 0.81755 | Get a comment on any sorting or reversing identified. :return: string describing type of reversing/sorting or None :rtype: str Provide an overview of sorting and/or reversing. Advise on common confusions. \ ### Sorting / reversing \ This block of code {sorting_or_reversing_comment}. Sorting and, to a lesser extent, reversing are very common needs in programming. Two key points: 1) reversing is not the same as sorting with `reverse=True` 2) the list sort method e.g. my_list.`sort()` returns `None`, not the sorted list \ Sorting and, to a lesser extent, reversing are very common needs in programming. Two key points: 1) reversing is not the same as sorting with `reverse=True` To illustrate: \ word = 'cat' word_reversed = reversed(word) ## >>> word_reversed ## >>> 'tac' word_reverse_sorted = sorted(word, reverse=True) ## >>> word_reverse_sorted ## >>> 'tca' ## >>> word_reversed == word_reverse_sorted ## >>> False \ Using the reversed function does not apply any sorting to the sequence being reversed - it merely flips the (possibly) unordered sequence the other way. 2) the list sort method e.g. `my_list.sort()` returns `None`, not the sorted list `sorted(my_list)` returns a sorted list but `my_list.sort()` is an in-place process. It mutates something rather than returning a separate thing. To illustrate: i) `sorted()` returning a result and leaving its input unchanged \ fruit = ['banana', 'apple', 'cranberry'] fruit_sorted = sorted(fruit) ## >>> fruit_sorted ## >>> ['apple', 'banana', 'cranberry'] ## fruit itself has not been changed ## >>> fruit ## >>> ['banana', 'apple', 'cranberry'] \ ii) `.sort()` returning `None` and changing its input in-place \ result_of_fruit_sort = fruit.sort() ## >>> result_of_fruit_sort ## >>> None ## fruit has been changed by the in-place sort method ## >>> fruit ## >>> ['apple', 'banana', 'cranberry'] \ This block of code {sorting_or_reversing_comment}. 
Warn about assigning a name to the result using .sort() on a list. \ ### Assignment of `None` result from in-place `.sort()` on list \ {nice_str_list} are assigned to the results of in-place sort operations. This is almost certainly a mistake as the intention is probably not to set them each to `None` (the return value of the `.sort()` method). \ `{name}` is assigned to the result of an in-place sort operation. This is almost certainly a mistake as the intention is probably not to set `{name}` to `None` (the return value of the `.sort()` method). | 2.509414 | 3 |
tests/__init__.py | markfinger/python-webpack | 66 | 6616853 | <reponame>markfinger/python-webpack
import sys
import os
import atexit
import subprocess
import time
if 'nosetests' in sys.argv[0]:
    # Configure webpack before any tests are run
    import webpack.conf
    from .settings import WEBPACK
    webpack.conf.settings.configure(**WEBPACK)

    from webpack.compiler import build_server

    # Refuse to run against a pre-existing server: its configuration may
    # differ from the test settings and cause spurious failures.
    if build_server.is_running():
        raise Exception(
            'A build server is already running at {}, this will cause test failures. The server should be stopped'.format(
                build_server.url
            )
        )

    # Spawn the node build server that the test suite talks to.
    process = subprocess.Popen(
        (
            os.path.join(os.getcwd(), 'node_modules', '.bin', 'webpack-build'),
            '-s'
        ),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )

    # Ensure the process is killed
    atexit.register(lambda _process: _process.kill(), process)

    # Sanity-check the server's startup banner (allowing one leading blank
    # line before it).
    output = process.stdout.readline().decode('utf-8')
    if output.strip() == '':
        output += process.stdout.readline().decode('utf-8')
    if 'webpack-build v' not in output:
        raise Exception('Unexpected output: "{}"'.format(output))

    # Travis can take a while to boot the server up
    if os.environ.get('TRAVIS', None):
        for i in range(5):
            if not build_server.is_running(True):
                time.sleep(1)

    time.sleep(0.5)
    if not build_server.is_running():
        raise Exception(
            'The build server appears to have booted, but it is not responding at {} within the expected time period'.format(
                build_server.url
            )
        )
| import sys
import os
import atexit
import subprocess
import time
if 'nosetests' in sys.argv[0]:
# Configure webpack before any tests are run
import webpack.conf
from .settings import WEBPACK
webpack.conf.settings.configure(**WEBPACK)
from webpack.compiler import build_server
if build_server.is_running():
raise Exception(
'A build server is already running at {}, this will cause test failures. The server should be stopped'.format(
build_server.url
)
)
process = subprocess.Popen(
(
os.path.join(os.getcwd(), 'node_modules', '.bin', 'webpack-build'),
'-s'
),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
# Ensure the process is killed
atexit.register(lambda _process: _process.kill(), process)
output = process.stdout.readline().decode('utf-8')
if output.strip() == '':
output += process.stdout.readline().decode('utf-8')
if 'webpack-build v' not in output:
raise Exception('Unexpected output: "{}"'.format(output))
# Travis can take a while to boot the server up
if os.environ.get('TRAVIS', None):
for i in range(5):
if not build_server.is_running(True):
time.sleep(1)
time.sleep(0.5)
if not build_server.is_running():
raise Exception(
'The build server appears to have booted, but it is not responding at {} within the expected time period'.format(
build_server.url
)
) | en | 0.919107 | # Configure webpack before any tests are run # Ensure the process is killed # Travis can take a while to boot the server up | 2.065884 | 2 |
src/MediaPlayer/Subtitles/SubtitlesSubDB.py | JKorf/MediaPi | 2 | 6616854 | <gh_stars>1-10
import hashlib
from MediaPlayer.Subtitles.SubtitleSourceBase import SubtitleSourceBase
from Shared.Logger import Logger, LogVerbosity
from Shared.Network import RequestFactory
class SubtitlesSubDB(SubtitleSourceBase):
    """Subtitle source backed by the (sandbox) SubDB HTTP API."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def get_subtitles(size, file_length, filename, first_64k, last_64k):
        # SubDB identifies a video by the md5 hash of its first and last
        # 64 KiB concatenated. size/file_length/filename are unused here;
        # presumably kept to match the common source signature -- confirm
        # against SubtitleSourceBase and its other subclasses.
        data = first_64k + last_64k
        file_hash = hashlib.md5(data).hexdigest()
        result = RequestFactory.make_request(
            "http://sandbox.thesubdb.com/?action=download&hash=" + file_hash + "&language=en",
            useragent="SubDB/1.0 (MediaPi/0.1; http://github.com/jkorf/mediapi)")
        if not result:
            Logger().write(LogVerbosity.Info, "SubDB: no subtitles found for " + file_hash)
            return []
        Logger().write(LogVerbosity.Info, "SubDB: Found a subtitle for hash " + file_hash)
        # Persist the downloaded subtitle and return its saved location in
        # a single-element list (no match -> empty list above).
        return [SubtitleSourceBase.save_file("SubDB", result)]
| import hashlib
from MediaPlayer.Subtitles.SubtitleSourceBase import SubtitleSourceBase
from Shared.Logger import Logger, LogVerbosity
from Shared.Network import RequestFactory
class SubtitlesSubDB(SubtitleSourceBase):
def __init__(self):
super().__init__()
@staticmethod
def get_subtitles(size, file_length, filename, first_64k, last_64k):
data = first_64k + last_64k
file_hash = hashlib.md5(data).hexdigest()
result = RequestFactory.make_request(
"http://sandbox.thesubdb.com/?action=download&hash=" + file_hash + "&language=en",
useragent="SubDB/1.0 (MediaPi/0.1; http://github.com/jkorf/mediapi)")
if not result:
Logger().write(LogVerbosity.Info, "SubDB: no subtitles found for " + file_hash)
return []
Logger().write(LogVerbosity.Info, "SubDB: Found a subtitle for hash " + file_hash)
return [SubtitleSourceBase.save_file("SubDB", result)] | none | 1 | 2.446825 | 2 | |
arnold/api/api_v1/endpoints/sample.py | Clinical-Genomics/arnold | 0 | 6616855 | from arnold.adapter import ArnoldAdapter
from arnold.crud import create, update
from arnold.crud.read.sample import find_sample, find_all_samples
from arnold.crud.read.step import find_sample_fields
from arnold.models.database import Sample
from typing import List
from fastapi import APIRouter, Depends, status
from fastapi.responses import JSONResponse
import logging
import arnold.crud.read.sample
from arnold.settings import get_arnold_adapter
LOG = logging.getLogger(__name__)
router = APIRouter()
@router.get("/sample/fields")
def get_sample_fields(
    adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
    """Return the available sample field names."""
    fields = find_sample_fields(adapter=adapter)
    return fields
@router.get("/sample/{sample_id}", response_model=Sample)
def get_sample(
    sample_id: str,
    adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
    """Fetch a single sample by its sample id."""
    return find_sample(sample_id=sample_id, adapter=adapter)
@router.get("/samples/", response_model=List[Sample])
def get_samples(
    adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
    """Return every sample in the database."""
    return find_all_samples(adapter=adapter)
@router.post("/sample/")
def create_sample(
    sample: Sample, adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
    """Insert a new sample; refused when the sample id already exists."""
    # Consistency fix: use the directly imported find_sample helper, as the
    # GET endpoint does, instead of the fully qualified
    # arnold.crud.read.sample.find_sample path.
    if find_sample(sample_id=sample.sample_id, adapter=adapter):
        return JSONResponse(
            status_code=status.HTTP_405_METHOD_NOT_ALLOWED, content="Sample already in database"
        )
    try:
        create.create_sample(adapter=adapter, sample=sample)
    except Exception as e:
        # NOTE(review): broad catch mirrors the sibling endpoints; consider
        # narrowing to the adapter's specific exception types.
        return JSONResponse(
            status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
            content=f"exception {e} ",
        )
    return JSONResponse(
        status_code=status.HTTP_200_OK, content=f"Sample {sample.sample_id} inserted to db"
    )
@router.post("/samples/")
def create_samples(
    samples: List[Sample], adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
    """Insert a batch of samples."""
    try:
        create.create_samples(adapter=adapter, samples=samples)
    except Exception as exc:
        return JSONResponse(
            status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
            content=f"exception {exc} ",
        )
    return JSONResponse(status_code=status.HTTP_200_OK, content="Samples inserted to db")
@router.put("/sample/")
def update_sample(
    sample: Sample, adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
    """Update an existing sample document."""
    try:
        update.update_sample(adapter=adapter, sample=sample)
    except Exception as e:
        return JSONResponse(
            status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
            content=f"exception {e} ",
        )
    # Bug fix: the success message previously said "inserted to db",
    # copy-pasted from the create endpoint.
    return JSONResponse(
        status_code=status.HTTP_200_OK, content=f"Sample {sample.sample_id} updated in db"
    )
@router.put("/samples/")
def update_samples(
    samples: List[Sample], adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
    """Update a batch of existing sample documents."""
    try:
        update.update_samples(adapter=adapter, samples=samples)
    except Exception as e:
        return JSONResponse(
            status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
            content=f"exception {e} ",
        )
    # Bug fix: the success message previously said "inserted to db",
    # copy-pasted from the create endpoint.
    return JSONResponse(status_code=status.HTTP_200_OK, content="Samples updated in db")
| from arnold.adapter import ArnoldAdapter
from arnold.crud import create, update
from arnold.crud.read.sample import find_sample, find_all_samples
from arnold.crud.read.step import find_sample_fields
from arnold.models.database import Sample
from typing import List
from fastapi import APIRouter, Depends, status
from fastapi.responses import JSONResponse
import logging
import arnold.crud.read.sample
from arnold.settings import get_arnold_adapter
LOG = logging.getLogger(__name__)
router = APIRouter()
@router.get("/sample/fields")
def get_sample_fields(
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""Get sample fields"""
return find_sample_fields(adapter=adapter)
@router.get("/sample/{sample_id}", response_model=Sample)
def get_sample(
sample_id: str,
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""fetch a sample by sample id"""
sample: Sample = find_sample(sample_id=sample_id, adapter=adapter)
return sample
@router.get("/samples/", response_model=List[Sample])
def get_samples(
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""Get all samples"""
samples: List[Sample] = find_all_samples(adapter=adapter)
return samples
@router.post("/sample/")
def create_sample(
sample: Sample, adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
if arnold.crud.read.sample.find_sample(sample_id=sample.sample_id, adapter=adapter):
return JSONResponse(
status_code=status.HTTP_405_METHOD_NOT_ALLOWED, content="Sample already in database"
)
try:
create.create_sample(adapter=adapter, sample=sample)
except Exception as e:
return JSONResponse(
status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
content=f"exception {e} ",
)
return JSONResponse(
status_code=status.HTTP_200_OK, content=f"Sample {sample.sample_id} inserted to db"
)
@router.post("/samples/")
def create_samples(
samples: List[Sample], adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
try:
create.create_samples(adapter=adapter, samples=samples)
except Exception as e:
return JSONResponse(
status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
content=f"exception {e} ",
)
return JSONResponse(status_code=status.HTTP_200_OK, content="Samples inserted to db")
@router.put("/sample/")
def update_sample(
sample: Sample, adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
try:
update.update_sample(adapter=adapter, sample=sample)
except Exception as e:
return JSONResponse(
status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
content=f"exception {e} ",
)
return JSONResponse(
status_code=status.HTTP_200_OK, content=f"Sample {sample.sample_id} inserted to db"
)
@router.put("/samples/")
def update_samples(
samples: List[Sample], adapter: ArnoldAdapter = Depends(get_arnold_adapter)
) -> JSONResponse:
try:
update.update_samples(adapter=adapter, samples=samples)
except Exception as e:
return JSONResponse(
status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
content=f"exception {e} ",
)
return JSONResponse(status_code=status.HTTP_200_OK, content="Samples inserted to db")
| en | 0.912003 | Get sample fields fetch a sample by sample id Get all samples | 2.139875 | 2 |
core/layers/group_conv2d.py | wavce/classificationx | 3 | 6616856 | <filename>core/layers/group_conv2d.py<gh_stars>1-10
import tensorflow as tf
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.input_spec import InputSpec
class GroupConv2D(tf.keras.layers.Conv2D):
    """2D convolution whose channels are split into groups.

    The input channels are divided into `group` equal slices, each slice is
    convolved with its own `filters // group` kernels, and the per-group
    outputs are concatenated back along the channel axis. With ``group=1``
    this behaves exactly like ``tf.keras.layers.Conv2D``.
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1,1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1,1),
                 activation=None,
                 use_bias=True,
                 group=1,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(filters,
                         kernel_size,
                         strides=strides,
                         padding=padding,
                         data_format=data_format,
                         dilation_rate=dilation_rate,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        # Number of channel groups; should evenly divide both the input
        # channel count and `filters`.
        self.group = group

    def _group_input_shapes(self, input_shape):
        """Per-group input shape (channel dim divided by group), replicated.

        Returns a list of `group` identical TensorShapes, one per group op.
        """
        if self.data_format == "channels_last":
            shape = tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], input_shape[2],
                 input_shape[3] // self.group])
        elif self.data_format == "channels_first":
            # BUG FIX: in NCHW the channel dimension is index 1; the
            # original code divided index 0 (the batch dimension) by group.
            shape = tensor_shape.TensorShape(
                [input_shape[0], input_shape[1] // self.group,
                 input_shape[2], input_shape[3]])
        else:
            raise ValueError('Invalid data_format:', self.data_format)
        return [shape] * self.group

    def _group_filter_shape(self, input_channel):
        """Per-group kernel shape: kernel_size + (in/group, filters/group)."""
        return tensor_shape.TensorShape(
            self.kernel_size + (input_channel // self.group,
                                self.filters // self.group))

    def _make_group_convolution_ops(self, input_shapes, filter_shape):
        """Build one nn_ops.Convolution per channel group."""
        return [
            nn_ops.Convolution(
                input_shapes[i],
                filter_shape=filter_shape,
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self._padding_op,
                data_format=self._conv_op_data_format)
            for i in range(self.group)]

    def build(self, input_shape):
        if self.group <= 1:
            # Plain (ungrouped) convolution: defer entirely to Conv2D.
            super(GroupConv2D, self).build(input_shape)
            return
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        # A single variable holds every group's kernels; it is split per
        # group in call(). Note the reduced input-channel dimension.
        kernel_shape = self.kernel_size + (input_channel // self.group, self.filters)
        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})
        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._channel_axis = channel_axis
        input_shapes = self._group_input_shapes(input_shape)
        filter_shape = self._group_filter_shape(input_channel)
        self._convolution_ops = self._make_group_convolution_ops(
            input_shapes, filter_shape)
        self.built = True

    def call(self, inputs):
        if self.group <= 1:
            return super(GroupConv2D, self).call(inputs)
        if self._recreate_conv_op(inputs):
            # Input shape changed since build(): rebuild the per-group ops.
            input_shape = inputs.get_shape()
            input_shapes = self._group_input_shapes(input_shape)
            # BUG FIX: the original referenced self.kernels[i].shape here,
            # but no `kernels` attribute exists (only self.kernel), so this
            # path raised AttributeError. Recompute the per-group filter
            # shape the same way build() does.
            filter_shape = self._group_filter_shape(self._build_input_channel)
            self._convolution_ops = self._make_group_convolution_ops(
                input_shapes, filter_shape)
        # NOTE(review): splitting the kernel along self._channel_axis is
        # correct for channels_last (axis -1 is the output-filter dim of
        # the HWIO kernel). The kernel layout stays HWIO for channels_first
        # too, so splitting it on axis 1 looks wrong -- confirm before
        # relying on NCHW with group > 1.
        kernels = tf.split(self.kernel, self.group, self._channel_axis)
        group_inputs = tf.split(inputs, self.group, self._channel_axis)
        outputs = [self._convolution_ops[i](group_inputs[i], kernels[i])
                   for i in range(self.group)]
        outputs = tf.concat(outputs, self._channel_axis)
        if self.use_bias:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
| <filename>core/layers/group_conv2d.py<gh_stars>1-10
import tensorflow as tf
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.input_spec import InputSpec
class GroupConv2D(tf.keras.layers.Conv2D):
    """2D grouped convolution (as used in AlexNet / ResNeXt).

    The input channels are split into ``group`` equal slices along the
    channel axis, each slice is convolved with its own bank of
    ``filters // group`` filters, and the per-group outputs are concatenated
    back along the channel axis.  With ``group=1`` this layer behaves exactly
    like ``tf.keras.layers.Conv2D``.

    NOTE(review): the grouped path relies on private Keras internals
    (``_recreate_conv_op``, ``_get_input_channel``, ``_get_padding_op``),
    which exist in the TF 2.1-2.3 era Conv base class — confirm against the
    pinned TensorFlow version.
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 group=1,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Create the layer.

        Args:
            group: Number of channel groups. Must be positive and evenly
                divide ``filters`` (and, at build time, the number of input
                channels).
            All other arguments are forwarded unchanged to
            ``tf.keras.layers.Conv2D``.

        Raises:
            ValueError: If ``group`` is not positive or does not divide
                ``filters``.
        """
        super().__init__(filters,
                         kernel_size,
                         strides=strides,
                         padding=padding,
                         data_format=data_format,
                         dilation_rate=dilation_rate,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        # Fail fast on configurations that would otherwise produce cryptic
        # shape errors deep inside tf.split / nn_ops.Convolution.
        if group < 1:
            raise ValueError('group must be a positive integer, got %r'
                             % (group,))
        if filters % group != 0:
            raise ValueError('filters (%d) must be divisible by group (%d)'
                             % (filters, group))
        self.group = group

    def _group_input_shapes(self, input_shape):
        """Return the per-group input shape, repeated ``group`` times.

        Only the channel dimension is divided by ``group``; the batch and
        spatial dimensions are untouched.
        """
        if self.data_format == 'channels_last':
            shape = [input_shape[0], input_shape[1], input_shape[2],
                     input_shape[3] // self.group]
        elif self.data_format == 'channels_first':
            # Bug fix: channels live on axis 1 in NCHW; the previous code
            # divided the batch dimension (axis 0) by `group` instead.
            shape = [input_shape[0], input_shape[1] // self.group,
                     input_shape[2], input_shape[3]]
        else:
            raise ValueError('Invalid data_format:', self.data_format)
        return [tensor_shape.TensorShape(shape)] * self.group

    def _make_convolution_ops(self, input_shape):
        """Build one ``nn_ops.Convolution`` op per group for ``input_shape``.

        Shared by ``build`` and the shape-change path in ``call`` (the two
        previously duplicated this code, and the ``call`` copy referenced a
        non-existent ``self.kernels`` attribute for the filter shape).
        """
        return [
            nn_ops.Convolution(
                shape,
                filter_shape=self._filter_shape,
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self._padding_op,
                data_format=self._conv_op_data_format)
            for shape in self._group_input_shapes(input_shape)]

    def build(self, input_shape):
        if self.group > 1:
            input_shape = tensor_shape.TensorShape(input_shape)
            input_channel = self._get_input_channel(input_shape)
            if input_channel % self.group != 0:
                raise ValueError(
                    'The number of input channels (%d) must be divisible by '
                    'group (%d)' % (input_channel, self.group))
            # One shared variable holding every group's kernels, stacked
            # along the output-filters (last) axis; `call` splits it into
            # per-group slices.
            kernel_shape = self.kernel_size + (input_channel // self.group,
                                               self.filters)
            self.kernel = self.add_weight(
                name='kernel',
                shape=kernel_shape,
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
                dtype=self.dtype)
            if self.use_bias:
                # A single bias vector covering all groups' output channels.
                self.bias = self.add_weight(
                    name='bias',
                    shape=(self.filters,),
                    initializer=self.bias_initializer,
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint,
                    trainable=True,
                    dtype=self.dtype)
            else:
                self.bias = None
            channel_axis = self._get_channel_axis()
            self.input_spec = InputSpec(ndim=self.rank + 2,
                                        axes={channel_axis: input_channel})
            self._build_conv_op_input_shape = input_shape
            self._build_input_channel = input_channel
            self._padding_op = self._get_padding_op()
            self._conv_op_data_format = conv_utils.convert_data_format(
                self.data_format, self.rank + 2)
            self._channel_axis = channel_axis
            # Shape of each group's kernel slice.  Stored so `call` can
            # recreate the convolution ops when the input shape changes
            # (the old code crashed there with AttributeError on
            # `self.kernels`, which was never assigned).
            self._filter_shape = tensor_shape.TensorShape(
                self.kernel_size + (input_channel // self.group,
                                    self.filters // self.group))
            self._convolution_ops = self._make_convolution_ops(input_shape)
            self.built = True
        else:
            super(GroupConv2D, self).build(input_shape)

    def call(self, inputs):
        if self.group <= 1:
            # Plain Conv2D behavior when there is nothing to group.
            return super(GroupConv2D, self).call(inputs)
        if self._recreate_conv_op(inputs):
            # Input shape changed since build: rebuild the per-group ops.
            self._convolution_ops = self._make_convolution_ops(
                inputs.get_shape())
        # The Keras kernel layout is (kh, kw, in_ch // group, filters)
        # regardless of data_format, so the per-group split is always along
        # the last (filters) axis.  The previous code split along
        # `self._channel_axis`, which is a spatial axis for channels_first;
        # for channels_last this is unchanged (axis -1 either way).
        kernels = tf.split(self.kernel, self.group, axis=-1)
        inputs = tf.split(inputs, self.group, self._channel_axis)
        outputs = [
            self._convolution_ops[i](inputs[i], kernels[i])
            for i in range(self.group)
        ]
        outputs = tf.concat(outputs, self._channel_axis)
        if self.use_bias:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs, self.bias,
                                          data_format='NCHW')
            else:
                outputs = nn.bias_add(outputs, self.bias,
                                      data_format='NHWC')
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
| en | 0.520281 | # print(self.kernels) # nn.bias_add does not accept a 1D input tensor. | 2.467479 | 2 |
cms/cms/doctype/food_menu/food_menu.py | subash13579/CMS | 1 | 6616857 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import today
import json
from datetime import datetime
from frappe.utils import getdate, nowdate, add_days, add_months, flt, get_first_day, get_last_day
class FoodMenu(Document):
def get_food_items(self):
items = frappe.db.sql("""select item,item_image from `tabFood Item` where %s between from_time and to_time""",(datetime.now().time()),as_dict=True)
return items
@frappe.whitelist()
def calculate_amount(item,employee,food_child):
month_start_date = get_first_day(nowdate())
month_end_date = get_last_day(nowdate())
subsidy = frappe.db.get_value("Employee",employee,["subsidy_amount"])
subsidy_utilized = frappe.db.sql("select sum(price) as amount from `tabDaily Food Entry` where employee = %s and date between %s and %s and subsidy = 1 ",(employee,month_start_date,month_end_date),as_dict=True)
try:
subsidy_balance = subsidy - subsidy_utilized[0].amount
except:
subsidy_balance = subsidy
if subsidy >= subsidy_balance:
items = json.loads(food_child)
existing_items = []
for i in items:
existing_items.append(i["food_item"])
if frappe.db.exists("Daily Food Entry",{"employee":employee,"item":item,"date":today()}):
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
else:
if items:
if item not in existing_items:
return frappe.db.get_value("Food Item",item,["item","subsidy_rate"]),1
else:
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
else:
return frappe.db.get_value("Food Item",item,["item","subsidy_rate"]),1
else:
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
@frappe.whitelist()
def daily_food_entry(food_child,employee):
food_order = frappe.new_doc("Food Order No")
food_order.employee = employee
food_order.date = today()
food_order.save(ignore_permissions=True)
frappe.errprint(food_order)
items = json.loads(food_child)
for i in items:
frappe.errprint(i)
doc = frappe.new_doc("Daily Food Entry")
doc.item = i["food_item"]
doc.employee = employee
doc.food_order_no = food_order.name
doc.price = i["price"]
doc.subsidy = i["subsidy"]
doc.date = today()
doc.save(ignore_permissions=True)
return items | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import today
import json
from datetime import datetime
from frappe.utils import getdate, nowdate, add_days, add_months, flt, get_first_day, get_last_day
class FoodMenu(Document):
def get_food_items(self):
items = frappe.db.sql("""select item,item_image from `tabFood Item` where %s between from_time and to_time""",(datetime.now().time()),as_dict=True)
return items
@frappe.whitelist()
def calculate_amount(item,employee,food_child):
month_start_date = get_first_day(nowdate())
month_end_date = get_last_day(nowdate())
subsidy = frappe.db.get_value("Employee",employee,["subsidy_amount"])
subsidy_utilized = frappe.db.sql("select sum(price) as amount from `tabDaily Food Entry` where employee = %s and date between %s and %s and subsidy = 1 ",(employee,month_start_date,month_end_date),as_dict=True)
try:
subsidy_balance = subsidy - subsidy_utilized[0].amount
except:
subsidy_balance = subsidy
if subsidy >= subsidy_balance:
items = json.loads(food_child)
existing_items = []
for i in items:
existing_items.append(i["food_item"])
if frappe.db.exists("Daily Food Entry",{"employee":employee,"item":item,"date":today()}):
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
else:
if items:
if item not in existing_items:
return frappe.db.get_value("Food Item",item,["item","subsidy_rate"]),1
else:
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
else:
return frappe.db.get_value("Food Item",item,["item","subsidy_rate"]),1
else:
return frappe.db.get_value("Food Item",item,["item","original_rate"]),0
@frappe.whitelist()
def daily_food_entry(food_child,employee):
food_order = frappe.new_doc("Food Order No")
food_order.employee = employee
food_order.date = today()
food_order.save(ignore_permissions=True)
frappe.errprint(food_order)
items = json.loads(food_child)
for i in items:
frappe.errprint(i)
doc = frappe.new_doc("Daily Food Entry")
doc.item = i["food_item"]
doc.employee = employee
doc.food_order_no = food_order.name
doc.price = i["price"]
doc.subsidy = i["subsidy"]
doc.date = today()
doc.save(ignore_permissions=True)
return items | en | 0.759567 | # -*- coding: utf-8 -*- # Copyright (c) 2021, Teampro and contributors # For license information, please see license.txt select item,item_image from `tabFood Item` where %s between from_time and to_time | 2.327383 | 2 |
src/util/MR3CharacteristicBuilder.py | MidgeOnGithub/Monster-Rancher-3-Lexicon | 0 | 6616858 | import sys
class Characteristic:
def __init__(self, name, description):
self.name = name
self.description = description
def __str__(self):
return f"'{self.name}', '{self.description}'"
class MR3CharacteristicBuilder:
@staticmethod
def parse_text(file: str) -> list[Characteristic]:
with open(file) as f:
text = '\n'.join([line.strip() for line in f.readlines()[1:]]) # Take [1:] to remove the header row.
characteristics = []
for line in text.splitlines():
name, description = line.split(';')
if name == "Name":
continue
else:
description = description.replace("'", "''")
characteristics.append(Characteristic(name, description))
return characteristics
def main(file: str) -> str:
"""Creates an SQL INSERT query from the data in file.
:param str file:
:return: An SQL INSERT query featuring all MR3 Attacks
:rtype: str
"""
characteristics = []
for characteristic in MR3CharacteristicBuilder.parse_text(file):
characteristics.append(f"({str(characteristic)})")
first_line = "INSERT INTO Characteristic (Characteristic, Description) VALUES"
return first_line + "\n\t" + ",\n\t".join(characteristics) + ";"
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: MR3CharacteristicBuilder.py <input-file>")
exit(1)
print(main(sys.argv[1]))
| import sys
class Characteristic:
def __init__(self, name, description):
self.name = name
self.description = description
def __str__(self):
return f"'{self.name}', '{self.description}'"
class MR3CharacteristicBuilder:
@staticmethod
def parse_text(file: str) -> list[Characteristic]:
with open(file) as f:
text = '\n'.join([line.strip() for line in f.readlines()[1:]]) # Take [1:] to remove the header row.
characteristics = []
for line in text.splitlines():
name, description = line.split(';')
if name == "Name":
continue
else:
description = description.replace("'", "''")
characteristics.append(Characteristic(name, description))
return characteristics
def main(file: str) -> str:
"""Creates an SQL INSERT query from the data in file.
:param str file:
:return: An SQL INSERT query featuring all MR3 Attacks
:rtype: str
"""
characteristics = []
for characteristic in MR3CharacteristicBuilder.parse_text(file):
characteristics.append(f"({str(characteristic)})")
first_line = "INSERT INTO Characteristic (Characteristic, Description) VALUES"
return first_line + "\n\t" + ",\n\t".join(characteristics) + ";"
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: MR3CharacteristicBuilder.py <input-file>")
exit(1)
print(main(sys.argv[1]))
| en | 0.695027 | # Take [1:] to remove the header row. Creates an SQL INSERT query from the data in file. :param str file: :return: An SQL INSERT query featuring all MR3 Attacks :rtype: str | 3.870859 | 4 |
mocy/utils.py | cymoo/mini_spider | 2 | 6616859 | import logging
import re
import sys
import time
from queue import Queue, PriorityQueue
from random import random, randint
from threading import Thread
from typing import Union
from urllib.parse import urlparse
__all__ = [
'DelayQueue',
'logger',
'random_range',
'random_ip',
'same_origin',
'identity',
'assert_positive_number',
'assert_not_negative_number',
'assert_positive_integer',
'assert_not_negative_integer',
]
class DelayQueue(Queue):
def __init__(self, maxsize=0):
super().__init__(maxsize)
self.pq = PriorityQueue()
poller = Thread(target=self._poll, name='poller')
poller.daemon = True
poller.start()
def put_later(self, item, delay=1):
self.pq.put((time.time() + delay, item))
def _poll(self):
while True:
item = self.pq.get()
if item[0] <= time.time():
self.put(item[1])
else:
self.pq.put(item)
time.sleep(0.05)
def random_range(
value: Union[int, float],
scale1: Union[int, float],
scale2: Union[int, float]
) -> float:
if scale1 > scale2:
lo, hi = scale2, scale1
else:
lo, hi = scale1, scale2
factor = lo + (hi - lo) * random()
return factor * value
def identity(x):
return x
def assert_not_negative_number(num):
assert num >= 0 and isinstance(num, (int, float))
def assert_positive_number(num):
assert num > 0 and isinstance(num, (int, float))
def assert_not_negative_integer(num):
assert num >= 0 and isinstance(num, int)
def assert_positive_integer(num):
assert num > 0 and isinstance(num, int)
def add_http_if_no_scheme(url: str) -> str:
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r'^\w+://', url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
def same_origin(url1: str, url2: str) -> bool:
"""Return True if the two urls are the same origin
>>> same_origin('http://a.com', 'https://a.com')
False
>>> same_origin('https://a.com', 'https://a.com:8080')
False
>>> same_origin('https://a.com/foo', 'https://a.com/bar')
True
"""
return all(map(
lambda x: x[0] == x[1],
list(zip(urlparse(url1), urlparse(url2)))[0:2]
))
def random_ip() -> str:
"""A simple ipv4 generator that filters some special ips."""
specials = [0, 10, 100, 127, 172, 192, 198, 203, 224, 240, 255]
def gen(): return randint(0, 255)
while True:
prefix = gen()
if prefix not in specials:
break
return '.'.join(map(str, (prefix, gen(), gen(), gen())))
class Logger:
logger_format = '[%(asctime)-15s] %(levelname)-7s: %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
def __init__(self, name: str, level: int = logging.DEBUG) -> None:
self._logger = logging.getLogger(name)
self._logger.setLevel(level)
self._add_stream_handlers()
def replace(self, new_logger: logging.Logger) -> None:
self._logger = new_logger
def set_level(self, level: int) -> None:
self._logger.setLevel(level)
def add_handler(self, handler: logging.Handler) -> None:
self._logger.addHandler(handler)
def debug(self, msg: str, *args, **kwargs) -> None:
self._logger.debug(msg, *args, **kwargs)
def info(self, msg: str, *args, **kwargs) -> None:
self._logger.info(msg, *args, **kwargs)
def warn(self, msg: str, *args, **kwargs) -> None:
self._logger.warning(msg, *args, **kwargs)
def error(self, msg: str, *args, **kwargs) -> None:
self._logger.error(msg, *args, **kwargs)
def _add_stream_handlers(self):
stdout_handler = self._stream_handler(
sys.stdout,
logging.DEBUG,
lambda record: record.levelno < logging.ERROR
)
stderr_handler = self._stream_handler(
sys.stderr,
logging.ERROR
)
self._logger.addHandler(stdout_handler)
self._logger.addHandler(stderr_handler)
def _stream_handler(self, stream, level, msg_filter=None):
handler = logging.StreamHandler(stream)
handler.setLevel(level)
formatter = logging.Formatter(self.logger_format, datefmt=self.date_format)
handler.setFormatter(formatter)
if msg_filter:
handler.addFilter(msg_filter)
return handler
logger = Logger('mocy')
| import logging
import re
import sys
import time
from queue import Queue, PriorityQueue
from random import random, randint
from threading import Thread
from typing import Union
from urllib.parse import urlparse
__all__ = [
'DelayQueue',
'logger',
'random_range',
'random_ip',
'same_origin',
'identity',
'assert_positive_number',
'assert_not_negative_number',
'assert_positive_integer',
'assert_not_negative_integer',
]
class DelayQueue(Queue):
def __init__(self, maxsize=0):
super().__init__(maxsize)
self.pq = PriorityQueue()
poller = Thread(target=self._poll, name='poller')
poller.daemon = True
poller.start()
def put_later(self, item, delay=1):
self.pq.put((time.time() + delay, item))
def _poll(self):
while True:
item = self.pq.get()
if item[0] <= time.time():
self.put(item[1])
else:
self.pq.put(item)
time.sleep(0.05)
def random_range(
value: Union[int, float],
scale1: Union[int, float],
scale2: Union[int, float]
) -> float:
if scale1 > scale2:
lo, hi = scale2, scale1
else:
lo, hi = scale1, scale2
factor = lo + (hi - lo) * random()
return factor * value
def identity(x):
return x
def assert_not_negative_number(num):
assert num >= 0 and isinstance(num, (int, float))
def assert_positive_number(num):
assert num > 0 and isinstance(num, (int, float))
def assert_not_negative_integer(num):
assert num >= 0 and isinstance(num, int)
def assert_positive_integer(num):
assert num > 0 and isinstance(num, int)
def add_http_if_no_scheme(url: str) -> str:
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r'^\w+://', url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
def same_origin(url1: str, url2: str) -> bool:
"""Return True if the two urls are the same origin
>>> same_origin('http://a.com', 'https://a.com')
False
>>> same_origin('https://a.com', 'https://a.com:8080')
False
>>> same_origin('https://a.com/foo', 'https://a.com/bar')
True
"""
return all(map(
lambda x: x[0] == x[1],
list(zip(urlparse(url1), urlparse(url2)))[0:2]
))
def random_ip() -> str:
"""A simple ipv4 generator that filters some special ips."""
specials = [0, 10, 100, 127, 172, 192, 198, 203, 224, 240, 255]
def gen(): return randint(0, 255)
while True:
prefix = gen()
if prefix not in specials:
break
return '.'.join(map(str, (prefix, gen(), gen(), gen())))
class Logger:
logger_format = '[%(asctime)-15s] %(levelname)-7s: %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
def __init__(self, name: str, level: int = logging.DEBUG) -> None:
self._logger = logging.getLogger(name)
self._logger.setLevel(level)
self._add_stream_handlers()
def replace(self, new_logger: logging.Logger) -> None:
self._logger = new_logger
def set_level(self, level: int) -> None:
self._logger.setLevel(level)
def add_handler(self, handler: logging.Handler) -> None:
self._logger.addHandler(handler)
def debug(self, msg: str, *args, **kwargs) -> None:
self._logger.debug(msg, *args, **kwargs)
def info(self, msg: str, *args, **kwargs) -> None:
self._logger.info(msg, *args, **kwargs)
def warn(self, msg: str, *args, **kwargs) -> None:
self._logger.warning(msg, *args, **kwargs)
def error(self, msg: str, *args, **kwargs) -> None:
self._logger.error(msg, *args, **kwargs)
def _add_stream_handlers(self):
stdout_handler = self._stream_handler(
sys.stdout,
logging.DEBUG,
lambda record: record.levelno < logging.ERROR
)
stderr_handler = self._stream_handler(
sys.stderr,
logging.ERROR
)
self._logger.addHandler(stdout_handler)
self._logger.addHandler(stderr_handler)
def _stream_handler(self, stream, level, msg_filter=None):
handler = logging.StreamHandler(stream)
handler.setLevel(level)
formatter = logging.Formatter(self.logger_format, datefmt=self.date_format)
handler.setFormatter(formatter)
if msg_filter:
handler.addFilter(msg_filter)
return handler
logger = Logger('mocy')
| en | 0.766384 | Add http as the default scheme if it is missing from the url. Return True if the two urls are the same origin >>> same_origin('http://a.com', 'https://a.com') False >>> same_origin('https://a.com', 'https://a.com:8080') False >>> same_origin('https://a.com/foo', 'https://a.com/bar') True A simple ipv4 generator that filters some special ips. | 2.99801 | 3 |
nerfactor/eval.py | leehsiu/nerfactor | 0 | 6616860 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.spawn import get_preparation_data
from os.path import join, basename
from absl import app, flags
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from nerfactor import datasets
from nerfactor import models
from nerfactor.util import logging as logutil, io as ioutil, \
config as configutil
from third_party.xiuminglib import xiuminglib as xm
from third_party.turbo_colormap import turbo_colormap_data, interpolate_or_clip
flags.DEFINE_string(
'ckpt', '/path/to/ckpt-100', "path to checkpoint (prefix only)")
flags.DEFINE_boolean('color_correct_albedo', False, "")
flags.DEFINE_boolean('debug', False, "debug mode switch")
FLAGS = flags.FLAGS
logger = logutil.Logger(loggee="eval")
def tonemapping(src,gamma=2.2):
return src**(1./gamma)
def compute_rgb_scales(alpha_thres=0.9):
"""Computes RGB scales that match predicted albedo to ground truth,
using just the first validation view.
"""
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
# First validation view
vali_dir = join(config_ini[:-4], 'vis_vali')
data_root = config.get('DEFAULT', 'data_root')
epoch_dirs = xm.os.sortglob(vali_dir, 'epoch?????????')
epoch_dir = epoch_dirs[-1]
batch_dirs = xm.os.sortglob(epoch_dir, 'batch?????????')
batch_dir = batch_dirs[0]
# Find GT path
metadata_path = join(batch_dir, 'metadata.json')
metadata = xm.io.json.load(metadata_path)
view = metadata['id']
pred_path = join(batch_dir, 'pred_albedo.png')
gt_path = join(data_root, view, 'albedo.png')
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
pred = pred ** 2.2 # undo gamma
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
alpha = gt[:, :, 3]
gt = gt[:, :, :3]
# Compute color correction scales, in the linear space
is_fg = alpha > alpha_thres
opt_scale = []
for i in range(3):
x_hat = pred[:, :, i][is_fg]
x = gt[:, :, i][is_fg]
scale = x_hat.dot(x) / x_hat.dot(x_hat)
opt_scale.append(scale)
opt_scale = tf.convert_to_tensor(opt_scale, dtype=tf.float32)
return opt_scale
#albedo
#normal
#rgba_{}
#pred_rgb_probes_{}
def cal_psnr(gt_path,pred_path,fg,gamma=False):
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
gt = gt[...,:3]
if gamma:
gt = tonemapping(gt)
gt_fg = gt[fg,:]
pred_fg = pred[fg,:]
mse = np.mean((gt_fg - pred_fg)**2)
psnr = -10.*np.log(mse)/np.log(10.)
return psnr
def cal_normal(gt_path,pred_path,fg):
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
gt = gt[...,:3]
gt_fg = gt[fg,:]*2.0 - 1.0
pred_fg = pred[fg,:]*2.0 - 1.0
gt_fg = xm.linalg.normalize(gt_fg,1)
pred_fg = xm.linalg.normalize(pred_fg,1)
dot = np.sum(gt_fg*pred_fg,axis=1)
dot = np.clip(dot,-1.,1.0)
dot_mean = np.mean(dot)
return np.arccos(dot_mean)/np.pi*180
def eval(batch_dirs,alpha_thres=0.9):
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
data_root = config.get('DEFAULT', 'data_root')
psnr_albedo = []
psnr_relight = []
psnr_fv = []
err_normal = []
import glob
all_lights_file = glob.glob(join(batch_dirs[0],'*probes*'))
all_lights = [basename(el)[16:-4] for el in all_lights_file]
for batch_dir in batch_dirs:
#Find GT path
metadata_path = join(batch_dir, 'metadata.json')
metadata = xm.io.json.load(metadata_path)
view = metadata['id']
pred_path = join(batch_dir, 'pred_albedo.png')
gt_path = join(data_root, view, 'albedo.png')
# Load prediction and GT
gt = xm.io.img.read(gt_path) # linear
pred = xm.io.img.read(pred_path)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
alpha = gt[:, :, 3]
fg = alpha > alpha_thres
psnr = cal_psnr(gt_path,pred_path,fg,True)
psnr_albedo.append(psnr)
pred_path = join(batch_dir, 'pred_rgb.png')
gt_path = join(data_root, view, 'rgba.png')
psnr = cal_psnr(gt_path,pred_path,fg,False)
psnr_fv.append(psnr)
for light in all_lights:
pred_path = join(batch_dir, f'pred_rgb_probes_{light}.png')
gt_path = join(data_root, view, f'rgba_{light}.png')
# Load prediction and GT
psnr = cal_psnr(gt_path,pred_path,fg,False)
psnr_relight.append(psnr)
pred_path = join(batch_dir, 'pred_normal.png')
gt_path = join(data_root, view, 'normal.png')
err = cal_normal(gt_path,pred_path,fg)
err_normal.append(err)
print('albedo',np.mean(psnr_albedo))
print('fv',np.mean(psnr_fv))
print('relight',np.mean(psnr_relight))
print('normal',np.mean(err_normal))
# Compute color correction scales, in the linear space
return
def main(_):
if FLAGS.debug:
logger.warn("Debug mode: on")
# Config
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
# Output directory
outroot = join(config_ini[:-4], 'vis_eval', basename(FLAGS.ckpt))
# Make dataset
logger.info("Making the actual data pipeline")
dataset_name = config.get('DEFAULT', 'dataset')
Dataset = datasets.get_dataset_class(dataset_name)
dataset = Dataset(config, 'vali', debug=FLAGS.debug)
n_views = dataset.get_n_views()
no_batch = config.getboolean('DEFAULT', 'no_batch')
datapipe = dataset.build_pipeline(no_batch=no_batch, no_shuffle=True)
# Restore model
logger.info("Restoring trained model")
model_name = config.get('DEFAULT', 'model')
Model = models.get_model_class(model_name)
model = Model(config, debug=FLAGS.debug)
ioutil.restore_model(model, FLAGS.ckpt)
# Optionally, color-correct the albedo
albedo_scales = None
if FLAGS.color_correct_albedo:
albedo_scales = compute_rgb_scales()
#For all test views
logger.info("Running inference")
for batch_i, batch in enumerate(
tqdm(datapipe, desc="Inferring Views", total=n_views)):
# Inference
_, _, _, to_vis = model.call(
batch, mode='vali', relight_probes=True,
albedo_scales=albedo_scales)
# Visualize
outdir = join(outroot, 'batch{i:09d}'.format(i=batch_i))
model.vis_batch(to_vis, outdir, mode='vali')
# Break if debugging
if FLAGS.debug:
break
#calculate metrics
batch_vis_dirs = xm.os.sortglob(outroot, 'batch?????????')
eval(batch_vis_dirs)
if __name__ == '__main__':
app.run(main)
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.spawn import get_preparation_data
from os.path import join, basename
from absl import app, flags
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from nerfactor import datasets
from nerfactor import models
from nerfactor.util import logging as logutil, io as ioutil, \
config as configutil
from third_party.xiuminglib import xiuminglib as xm
from third_party.turbo_colormap import turbo_colormap_data, interpolate_or_clip
flags.DEFINE_string(
'ckpt', '/path/to/ckpt-100', "path to checkpoint (prefix only)")
flags.DEFINE_boolean('color_correct_albedo', False, "")
flags.DEFINE_boolean('debug', False, "debug mode switch")
FLAGS = flags.FLAGS
logger = logutil.Logger(loggee="eval")
def tonemapping(src,gamma=2.2):
return src**(1./gamma)
def compute_rgb_scales(alpha_thres=0.9):
"""Computes RGB scales that match predicted albedo to ground truth,
using just the first validation view.
"""
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
# First validation view
vali_dir = join(config_ini[:-4], 'vis_vali')
data_root = config.get('DEFAULT', 'data_root')
epoch_dirs = xm.os.sortglob(vali_dir, 'epoch?????????')
epoch_dir = epoch_dirs[-1]
batch_dirs = xm.os.sortglob(epoch_dir, 'batch?????????')
batch_dir = batch_dirs[0]
# Find GT path
metadata_path = join(batch_dir, 'metadata.json')
metadata = xm.io.json.load(metadata_path)
view = metadata['id']
pred_path = join(batch_dir, 'pred_albedo.png')
gt_path = join(data_root, view, 'albedo.png')
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
pred = pred ** 2.2 # undo gamma
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
alpha = gt[:, :, 3]
gt = gt[:, :, :3]
# Compute color correction scales, in the linear space
is_fg = alpha > alpha_thres
opt_scale = []
for i in range(3):
x_hat = pred[:, :, i][is_fg]
x = gt[:, :, i][is_fg]
scale = x_hat.dot(x) / x_hat.dot(x_hat)
opt_scale.append(scale)
opt_scale = tf.convert_to_tensor(opt_scale, dtype=tf.float32)
return opt_scale
#albedo
#normal
#rgba_{}
#pred_rgb_probes_{}
def cal_psnr(gt_path,pred_path,fg,gamma=False):
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
gt = gt[...,:3]
if gamma:
gt = tonemapping(gt)
gt_fg = gt[fg,:]
pred_fg = pred[fg,:]
mse = np.mean((gt_fg - pred_fg)**2)
psnr = -10.*np.log(mse)/np.log(10.)
return psnr
def cal_normal(gt_path,pred_path,fg):
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
gt = gt[...,:3]
gt_fg = gt[fg,:]*2.0 - 1.0
pred_fg = pred[fg,:]*2.0 - 1.0
gt_fg = xm.linalg.normalize(gt_fg,1)
pred_fg = xm.linalg.normalize(pred_fg,1)
dot = np.sum(gt_fg*pred_fg,axis=1)
dot = np.clip(dot,-1.,1.0)
dot_mean = np.mean(dot)
return np.arccos(dot_mean)/np.pi*180
def eval(batch_dirs,alpha_thres=0.9):
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
data_root = config.get('DEFAULT', 'data_root')
psnr_albedo = []
psnr_relight = []
psnr_fv = []
err_normal = []
import glob
all_lights_file = glob.glob(join(batch_dirs[0],'*probes*'))
all_lights = [basename(el)[16:-4] for el in all_lights_file]
for batch_dir in batch_dirs:
#Find GT path
metadata_path = join(batch_dir, 'metadata.json')
metadata = xm.io.json.load(metadata_path)
view = metadata['id']
pred_path = join(batch_dir, 'pred_albedo.png')
gt_path = join(data_root, view, 'albedo.png')
# Load prediction and GT
gt = xm.io.img.read(gt_path) # linear
pred = xm.io.img.read(pred_path)
gt = xm.img.normalize_uint(gt)
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
alpha = gt[:, :, 3]
fg = alpha > alpha_thres
psnr = cal_psnr(gt_path,pred_path,fg,True)
psnr_albedo.append(psnr)
pred_path = join(batch_dir, 'pred_rgb.png')
gt_path = join(data_root, view, 'rgba.png')
psnr = cal_psnr(gt_path,pred_path,fg,False)
psnr_fv.append(psnr)
for light in all_lights:
pred_path = join(batch_dir, f'pred_rgb_probes_{light}.png')
gt_path = join(data_root, view, f'rgba_{light}.png')
# Load prediction and GT
psnr = cal_psnr(gt_path,pred_path,fg,False)
psnr_relight.append(psnr)
pred_path = join(batch_dir, 'pred_normal.png')
gt_path = join(data_root, view, 'normal.png')
err = cal_normal(gt_path,pred_path,fg)
err_normal.append(err)
print('albedo',np.mean(psnr_albedo))
print('fv',np.mean(psnr_fv))
print('relight',np.mean(psnr_relight))
print('normal',np.mean(err_normal))
# Compute color correction scales, in the linear space
return
def main(_):
if FLAGS.debug:
logger.warn("Debug mode: on")
# Config
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
# Output directory
outroot = join(config_ini[:-4], 'vis_eval', basename(FLAGS.ckpt))
# Make dataset
logger.info("Making the actual data pipeline")
dataset_name = config.get('DEFAULT', 'dataset')
Dataset = datasets.get_dataset_class(dataset_name)
dataset = Dataset(config, 'vali', debug=FLAGS.debug)
n_views = dataset.get_n_views()
no_batch = config.getboolean('DEFAULT', 'no_batch')
datapipe = dataset.build_pipeline(no_batch=no_batch, no_shuffle=True)
# Restore model
logger.info("Restoring trained model")
model_name = config.get('DEFAULT', 'model')
Model = models.get_model_class(model_name)
model = Model(config, debug=FLAGS.debug)
ioutil.restore_model(model, FLAGS.ckpt)
# Optionally, color-correct the albedo
albedo_scales = None
if FLAGS.color_correct_albedo:
albedo_scales = compute_rgb_scales()
#For all test views
logger.info("Running inference")
for batch_i, batch in enumerate(
tqdm(datapipe, desc="Inferring Views", total=n_views)):
# Inference
_, _, _, to_vis = model.call(
batch, mode='vali', relight_probes=True,
albedo_scales=albedo_scales)
# Visualize
outdir = join(outroot, 'batch{i:09d}'.format(i=batch_i))
model.vis_batch(to_vis, outdir, mode='vali')
# Break if debugging
if FLAGS.debug:
break
#calculate metrics
batch_vis_dirs = xm.os.sortglob(outroot, 'batch?????????')
eval(batch_vis_dirs)
if __name__ == '__main__':
app.run(main)
| en | 0.739102 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Computes RGB scales that match predicted albedo to ground truth, using just the first validation view. # First validation view # Find GT path # Load prediction and GT # gamma corrected # linear # undo gamma # Compute color correction scales, in the linear space #albedo #normal #rgba_{} #pred_rgb_probes_{} # Load prediction and GT # gamma corrected # linear # Load prediction and GT # gamma corrected # linear #Find GT path # Load prediction and GT # linear # Load prediction and GT # Compute color correction scales, in the linear space # Config # Output directory # Make dataset # Restore model # Optionally, color-correct the albedo #For all test views # Inference # Visualize # Break if debugging #calculate metrics | 1.678105 | 2 |
doreg/arg_parser.py | joaompinto/doreg | 0 | 6616861 | <gh_stars>0
import sys
from optparse import OptionParser
from .version import version
def arg_parse():
usage = f"{sys.argv[0]}"
parser = OptionParser(usage, version=version)
(options, args) = parser.parse_args()
return (options, args)
| import sys
from optparse import OptionParser
from .version import version
def arg_parse():
usage = f"{sys.argv[0]}"
parser = OptionParser(usage, version=version)
(options, args) = parser.parse_args()
return (options, args) | none | 1 | 2.452928 | 2 | |
example_gallery/1_basic/plot_find_rois_simple.py | scikit-maad/scikit-maad | 31 | 6616862 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple audio segmentation
=========================
In audio signals, regions of interest are usually regions with high density of energy. The function ``find_rois_cwt`` allows finding regions of interest in the signal giving very simple and intuitive parameters: temporal length and frequency limits. This segmentation can be seen as a coarse detection process, the starting point of more advanced classification methods.
The following sound example as two main different soundtypes in the foreground:
- A bouncy trill between 4.5 and 8 kHz lasting approximately 2 seconds.
- A fast descending chirp between 8 and 12 kHz lasting 0.1 approximately seconds.
"""
#%%
# Load audio file
# ---------------
# Load an audio file and compute the spectrogram for visualization.
from maad import sound
from maad.rois import find_rois_cwt
from maad.util import plot_spectrogram
s, fs = sound.load('../../data/spinetail.wav')
Sxx, tn, fn, ext = sound.spectrogram(s, fs, nperseg=1024, noverlap=512)
plot_spectrogram(Sxx, extent=ext, db_range=60, gain=20, figsize=(4,10))
#%%
# Detect the bouncy trill
# -----------------------
# The accelerating trill is the song of a small neotropical bird, the Red-faced Spinetail *Cranioleuca erythrops*. This song can be detected on the recording using the function ``find_rois_cwt`` and setting frequency limits ``flims=(4500,8000)`` and temporal length of signal ``tlen=2``. The segmentation results are returned as a dataframe with temporal segmentation given by the function and using the frequency limits defined by the user.
df_trill = find_rois_cwt(s, fs, flims=(4500,8000), tlen=2, th=0, display=True, figsize=(10,6))
print(df_trill)
#%%
# Detect the fast descending chirp
# --------------------------------
# Alternatively, the fast descending chirp (unknown species) can be segmented in the recording by changing the detection parameters, ``flims`` and ``tlen``.
df_chirp = find_rois_cwt(s, fs, flims=(8000,12000), tlen=0.1, th=0.001, display=True, figsize=(10,6))
print(df_chirp)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple audio segmentation
=========================
In audio signals, regions of interest are usually regions with high density of energy. The function ``find_rois_cwt`` allows finding regions of interest in the signal giving very simple and intuitive parameters: temporal length and frequency limits. This segmentation can be seen as a coarse detection process, the starting point of more advanced classification methods.
The following sound example as two main different soundtypes in the foreground:
- A bouncy trill between 4.5 and 8 kHz lasting approximately 2 seconds.
- A fast descending chirp between 8 and 12 kHz lasting 0.1 approximately seconds.
"""
#%%
# Load audio file
# ---------------
# Load an audio file and compute the spectrogram for visualization.
from maad import sound
from maad.rois import find_rois_cwt
from maad.util import plot_spectrogram
s, fs = sound.load('../../data/spinetail.wav')
Sxx, tn, fn, ext = sound.spectrogram(s, fs, nperseg=1024, noverlap=512)
plot_spectrogram(Sxx, extent=ext, db_range=60, gain=20, figsize=(4,10))
#%%
# Detect the bouncy trill
# -----------------------
# The accelerating trill is the song of a small neotropical bird, the Red-faced Spinetail *Cranioleuca erythrops*. This song can be detected on the recording using the function ``find_rois_cwt`` and setting frequency limits ``flims=(4500,8000)`` and temporal length of signal ``tlen=2``. The segmentation results are returned as a dataframe with temporal segmentation given by the function and using the frequency limits defined by the user.
df_trill = find_rois_cwt(s, fs, flims=(4500,8000), tlen=2, th=0, display=True, figsize=(10,6))
print(df_trill)
#%%
# Detect the fast descending chirp
# --------------------------------
# Alternatively, the fast descending chirp (unknown species) can be segmented in the recording by changing the detection parameters, ``flims`` and ``tlen``.
df_chirp = find_rois_cwt(s, fs, flims=(8000,12000), tlen=0.1, th=0.001, display=True, figsize=(10,6))
print(df_chirp)
| en | 0.833448 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Simple audio segmentation ========================= In audio signals, regions of interest are usually regions with high density of energy. The function ``find_rois_cwt`` allows finding regions of interest in the signal giving very simple and intuitive parameters: temporal length and frequency limits. This segmentation can be seen as a coarse detection process, the starting point of more advanced classification methods. The following sound example as two main different soundtypes in the foreground: - A bouncy trill between 4.5 and 8 kHz lasting approximately 2 seconds. - A fast descending chirp between 8 and 12 kHz lasting 0.1 approximately seconds. #%% # Load audio file # --------------- # Load an audio file and compute the spectrogram for visualization. #%% # Detect the bouncy trill # ----------------------- # The accelerating trill is the song of a small neotropical bird, the Red-faced Spinetail *Cranioleuca erythrops*. This song can be detected on the recording using the function ``find_rois_cwt`` and setting frequency limits ``flims=(4500,8000)`` and temporal length of signal ``tlen=2``. The segmentation results are returned as a dataframe with temporal segmentation given by the function and using the frequency limits defined by the user. #%% # Detect the fast descending chirp # -------------------------------- # Alternatively, the fast descending chirp (unknown species) can be segmented in the recording by changing the detection parameters, ``flims`` and ``tlen``. | 3.056594 | 3 |
page_data.py | vladiscripts/ws_interlinker_via_wikidata | 0 | 6616863 | <reponame>vladiscripts/ws_interlinker_via_wikidata
from __init__ import *
import wiki_util
from wd_utils import props # , WD_utils
class PageMeta:
tpl: Optional[mwp.wikicode.Template]
tpl_name: Optional[str]
params: List[Tuple[str, str]]
enc_with_transcludes: bool
is_author_tpl: bool
short_page: bool
params_to_delete: list
params_to_value_clear: list
summaries: list
page: pwb.Page
item: pwb.ItemPage
# не обрабатывать страницу, устанавливается фильтрами
do_skip: bool
cause: str
"""Перенос ссылок на энциклопедии/словари из статей в Викиданые и создание там записи."""
re_cat_redirect = re.compile(r'\[\[Категория:[^]]+?Перенаправления', flags=re.IGNORECASE)
def __init__(self, processor, page: pwb.page.Page, enc_metas: dict, allowed_header_names):
self.processor = processor
self.do_skip = False
# self.is_author_tpl = False
self.page = wiki_util.get_wikipage(page.site, page=page)
self.title = page.title()
self.rootpagename, _, self.subpagename = self.title.partition('/')
self.enc_with_transcludes = bool(self.rootpagename in ('ПБЭ', 'ЭЛ'))
self.enc_meta = enc_metas.get(self.rootpagename)
self.allowed_header_names = allowed_header_names
self.short_page = False
self.params_to_delete = []
self.params_to_value_clear = []
# do_cause = None
self.summaries = []
# self.get_filled_params()
self.params = []
self.itemWD = processor.wd.get_item_by_page(self.page)
if not self.itemWD and processor.works_pages_with_wditems:
self.set_skip('no p.itemWD')
return
# работать по энциклопедическая статья и словарная статья
if processor.work_only_enc and not [True for e in processor.wd.get_claims.item_type(self.itemWD)
if e.target.id in props.types_to_search]:
self.set_skip('не словарная статья')
return
self.text = self.page.get()
self.wikicode = mwp.parse(self.text)
self.tpl = None
self.tpl_name = None
# if self.re_cat_redirect.search(page.text): # пропускать страницы-перенаправления
if [s for s in self.wikicode.filter_wikilinks(matches=r'^\[\[Категория:[^|]*?:Перенаправления')]:
self.set_skip('перенаправление')
return
# фильтр по размеру текста
if processor.skip_by_text_lengh:
self.filter_by_text_lenght()
if self.short_page:
self.set_skip('размер текста < 100')
return
def filter_by_text_lenght(self, text_limit: int = 100):
"""фильтр по размеру текста"""
if not self.processor.skip_by_text_lengh:
return
if self.enc_with_transcludes:
return
for _tpl in self.wikicode.filter_templates():
if _tpl.name.strip() in self.allowed_header_names:
self.tpl_data(_tpl)
break
if self.tpl:
# убираем шаблон
t = self.text.replace(str(self.tpl), '')
# убираем категории и викиссылки
for s in self.wikicode.filter_wikilinks(matches=r'^\[\[Категория:'):
t = t.replace(str(s), '')
for s in self.wikicode.filter_wikilinks():
t = t.replace(str(s), str(s.text))
t = t.strip()
# часть ЭСБЕ - трансклюзии
if self.tpl_name == 'ЭСБЕ' and len(t) == 0:
return
if len(t) < text_limit:
self.short_page = True
return
def tpl_data(self, tpl: mwp.wikicode.Template):
self.tpl = tpl
self.tpl_name = tpl.name.strip()
# self.is_author_tpl = self.tplname.lower() in allowed_header_names
def set_skip(self, cause: str):
self.do_skip = True
self.cause = cause
def __repr__(self):
return self.page.title()
| from __init__ import *
import wiki_util
from wd_utils import props # , WD_utils
class PageMeta:
tpl: Optional[mwp.wikicode.Template]
tpl_name: Optional[str]
params: List[Tuple[str, str]]
enc_with_transcludes: bool
is_author_tpl: bool
short_page: bool
params_to_delete: list
params_to_value_clear: list
summaries: list
page: pwb.Page
item: pwb.ItemPage
# не обрабатывать страницу, устанавливается фильтрами
do_skip: bool
cause: str
"""Перенос ссылок на энциклопедии/словари из статей в Викиданые и создание там записи."""
re_cat_redirect = re.compile(r'\[\[Категория:[^]]+?Перенаправления', flags=re.IGNORECASE)
def __init__(self, processor, page: pwb.page.Page, enc_metas: dict, allowed_header_names):
self.processor = processor
self.do_skip = False
# self.is_author_tpl = False
self.page = wiki_util.get_wikipage(page.site, page=page)
self.title = page.title()
self.rootpagename, _, self.subpagename = self.title.partition('/')
self.enc_with_transcludes = bool(self.rootpagename in ('ПБЭ', 'ЭЛ'))
self.enc_meta = enc_metas.get(self.rootpagename)
self.allowed_header_names = allowed_header_names
self.short_page = False
self.params_to_delete = []
self.params_to_value_clear = []
# do_cause = None
self.summaries = []
# self.get_filled_params()
self.params = []
self.itemWD = processor.wd.get_item_by_page(self.page)
if not self.itemWD and processor.works_pages_with_wditems:
self.set_skip('no p.itemWD')
return
# работать по энциклопедическая статья и словарная статья
if processor.work_only_enc and not [True for e in processor.wd.get_claims.item_type(self.itemWD)
if e.target.id in props.types_to_search]:
self.set_skip('не словарная статья')
return
self.text = self.page.get()
self.wikicode = mwp.parse(self.text)
self.tpl = None
self.tpl_name = None
# if self.re_cat_redirect.search(page.text): # пропускать страницы-перенаправления
if [s for s in self.wikicode.filter_wikilinks(matches=r'^\[\[Категория:[^|]*?:Перенаправления')]:
self.set_skip('перенаправление')
return
# фильтр по размеру текста
if processor.skip_by_text_lengh:
self.filter_by_text_lenght()
if self.short_page:
self.set_skip('размер текста < 100')
return
def filter_by_text_lenght(self, text_limit: int = 100):
"""фильтр по размеру текста"""
if not self.processor.skip_by_text_lengh:
return
if self.enc_with_transcludes:
return
for _tpl in self.wikicode.filter_templates():
if _tpl.name.strip() in self.allowed_header_names:
self.tpl_data(_tpl)
break
if self.tpl:
# убираем шаблон
t = self.text.replace(str(self.tpl), '')
# убираем категории и викиссылки
for s in self.wikicode.filter_wikilinks(matches=r'^\[\[Категория:'):
t = t.replace(str(s), '')
for s in self.wikicode.filter_wikilinks():
t = t.replace(str(s), str(s.text))
t = t.strip()
# часть ЭСБЕ - трансклюзии
if self.tpl_name == 'ЭСБЕ' and len(t) == 0:
return
if len(t) < text_limit:
self.short_page = True
return
def tpl_data(self, tpl: mwp.wikicode.Template):
self.tpl = tpl
self.tpl_name = tpl.name.strip()
# self.is_author_tpl = self.tplname.lower() in allowed_header_names
def set_skip(self, cause: str):
self.do_skip = True
self.cause = cause
def __repr__(self):
return self.page.title() | ru | 0.936046 | # , WD_utils # не обрабатывать страницу, устанавливается фильтрами Перенос ссылок на энциклопедии/словари из статей в Викиданые и создание там записи. # self.is_author_tpl = False # do_cause = None # self.get_filled_params() # работать по энциклопедическая статья и словарная статья # if self.re_cat_redirect.search(page.text): # пропускать страницы-перенаправления # фильтр по размеру текста фильтр по размеру текста # убираем шаблон # убираем категории и викиссылки # часть ЭСБЕ - трансклюзии # self.is_author_tpl = self.tplname.lower() in allowed_header_names | 2.285285 | 2 |
pqst/instagram/dbc.py | TWoolhouse/WebServer | 0 | 6616864 | import website
import database as db
from . import igapi
from interface import Interface
class Chat:
def __init__(self, name: str, db_name: str, pwd: str, chat_name: str):
self.name = name
self.password = <PASSWORD>
self.db = db.DatabaseAsync(f"{website.path}resource/database/{db_name}.save.db")
self.db.open()
self.ig = igapi.IGData(self.db("API"), chat_name)
chat = {
"PoppyTester": Chat("PoppyTester", "poppy_tester", "ptlol", "Poppy Tester"),
} | import website
import database as db
from . import igapi
from interface import Interface
class Chat:
def __init__(self, name: str, db_name: str, pwd: str, chat_name: str):
self.name = name
self.password = <PASSWORD>
self.db = db.DatabaseAsync(f"{website.path}resource/database/{db_name}.save.db")
self.db.open()
self.ig = igapi.IGData(self.db("API"), chat_name)
chat = {
"PoppyTester": Chat("PoppyTester", "poppy_tester", "ptlol", "Poppy Tester"),
} | none | 1 | 2.701947 | 3 | |
hashing/resizeableHashFunction.py | itsvinayak/cracking_the_codeing_interview | 4 | 6616865 | <gh_stars>1-10
# if hash table is full resize table automatically to increase length
def hash(string, tablesize):
sum = 0
for pos in range(len(string)):
sum = sum + ord(string[pos])
return sum % tablesize
class HashTable:
def __init__(self, size=11, toIncreaseBy=1):
self.size = size
# to increase size of new table by factor of one
self.toIncreaseBy = toIncreaseBy
self.slots = [None] * self.size
self.data = [None] * self.size
def reimplement(self, key, data):
"""
reimplement slots and data array
with new size value
"""
tempSlots = self.slots[:] ## temp. values
tempData = self.data[:]
self.size += self.toIncreaseBy
self.slots = [None] * self.size
self.data = [None] * self.size
for k, d in zip(tempSlots, tempData):
if k is not None: # if not checked can give error with self.hashfunction
self.put(k, d)
self.put(key, data)
def put(self, key, data):
# count none in table : if NoneCount is 0 means our table is full
NoneCount = sum(x is None for x in self.slots)
if NoneCount == 0:
self.reimplement(key, data)
hashvalue = self.hashfunction(key, len(self.slots))
if self.slots[hashvalue] == None:
self.slots[hashvalue] = key
self.data[hashvalue] = data
else:
if self.slots[hashvalue] == key:
self.data[hashvalue] = data # replace
else:
nextslot = self.rehash(hashvalue, len(self.slots))
while self.slots[nextslot] != None and self.slots[nextslot] != key:
nextslot = self.rehash(nextslot, len(self.slots))
if self.slots[nextslot] == None:
self.slots[nextslot] = key
self.data[nextslot] = data
else:
self.data[nextslot] = data # replace
def hashfunction(self, key, size):
return key % size
def rehash(self, oldhash, size):
return (oldhash + 1) % size
def get(self, key):
startslot = self.hashfunction(key, len(self.slots))
data = None
stop = False
found = False
position = startslot
while self.slots[position] != None and not found and not stop:
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position = self.rehash(position, len(self.slots))
if position == startslot:
stop = True
return data
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, data):
self.put(key, data)
H = HashTable()
H[54] = "cat"
H[26] = "dog"
H[93] = "lion"
H[17] = "tiger"
H[77] = "bird"
H[31] = "cow"
H[44] = "goat"
H[55] = "pig"
H[20] = "chicken"
print(H.slots)
print(H.data)
H.reimplement(22, "vina")
print(H.slots)
print(H.data)
print(H[20])
print(H[17])
H[20] = "duck"
print(H[20])
print(H[99])
| # if hash table is full resize table automatically to increase length
def hash(string, tablesize):
sum = 0
for pos in range(len(string)):
sum = sum + ord(string[pos])
return sum % tablesize
class HashTable:
def __init__(self, size=11, toIncreaseBy=1):
self.size = size
# to increase size of new table by factor of one
self.toIncreaseBy = toIncreaseBy
self.slots = [None] * self.size
self.data = [None] * self.size
def reimplement(self, key, data):
"""
reimplement slots and data array
with new size value
"""
tempSlots = self.slots[:] ## temp. values
tempData = self.data[:]
self.size += self.toIncreaseBy
self.slots = [None] * self.size
self.data = [None] * self.size
for k, d in zip(tempSlots, tempData):
if k is not None: # if not checked can give error with self.hashfunction
self.put(k, d)
self.put(key, data)
def put(self, key, data):
# count none in table : if NoneCount is 0 means our table is full
NoneCount = sum(x is None for x in self.slots)
if NoneCount == 0:
self.reimplement(key, data)
hashvalue = self.hashfunction(key, len(self.slots))
if self.slots[hashvalue] == None:
self.slots[hashvalue] = key
self.data[hashvalue] = data
else:
if self.slots[hashvalue] == key:
self.data[hashvalue] = data # replace
else:
nextslot = self.rehash(hashvalue, len(self.slots))
while self.slots[nextslot] != None and self.slots[nextslot] != key:
nextslot = self.rehash(nextslot, len(self.slots))
if self.slots[nextslot] == None:
self.slots[nextslot] = key
self.data[nextslot] = data
else:
self.data[nextslot] = data # replace
def hashfunction(self, key, size):
return key % size
def rehash(self, oldhash, size):
return (oldhash + 1) % size
def get(self, key):
startslot = self.hashfunction(key, len(self.slots))
data = None
stop = False
found = False
position = startslot
while self.slots[position] != None and not found and not stop:
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position = self.rehash(position, len(self.slots))
if position == startslot:
stop = True
return data
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, data):
self.put(key, data)
H = HashTable()
H[54] = "cat"
H[26] = "dog"
H[93] = "lion"
H[17] = "tiger"
H[77] = "bird"
H[31] = "cow"
H[44] = "goat"
H[55] = "pig"
H[20] = "chicken"
print(H.slots)
print(H.data)
H.reimplement(22, "vina")
print(H.slots)
print(H.data)
print(H[20])
print(H[17])
H[20] = "duck"
print(H[20])
print(H[99]) | en | 0.653461 | # if hash table is full resize table automatically to increase length # to increase size of new table by factor of one reimplement slots and data array
with new size value ## temp. values # if not checked can give error with self.hashfunction # count none in table : if NoneCount is 0 means our table is full # replace # replace | 3.448106 | 3 |
chat/migrations/0001_initial.py | nim65s/tutochan | 0 | 6616866 | <reponame>nim65s/tutochan<filename>chat/migrations/0001_initial.py
# Generated by Django 3.1.1 on 2020-09-06 23:13
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import ndh.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=200, unique=True)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
],
options={
'abstract': False,
},
bases=(models.Model, ndh.models.Links),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('chan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chat.chan')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| # Generated by Django 3.1.1 on 2020-09-06 23:13
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import ndh.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=200, unique=True)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
],
options={
'abstract': False,
},
bases=(models.Model, ndh.models.Links),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('chan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chat.chan')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
] | en | 0.799511 | # Generated by Django 3.1.1 on 2020-09-06 23:13 | 1.835823 | 2 |
gnosis/utils/optim.py | samuelstanton/gnosis | 13 | 6616867 | <gh_stars>10-100
def get_decay_fn(initial_val, final_val, start, stop):
"""
Returns function handle to use in torch.optim.lr_scheduler.LambdaLR.
The returned function supplies the multiplier to decay a value linearly.
"""
assert stop > start
def decay_fn(counter):
if counter <= start:
return 1
if counter >= stop:
return final_val / initial_val
time_range = stop - start
return 1 - (counter - start) * (1 - final_val / initial_val) / time_range
assert decay_fn(start) * initial_val == initial_val
assert decay_fn(stop) * initial_val == final_val
return decay_fn
| def get_decay_fn(initial_val, final_val, start, stop):
"""
Returns function handle to use in torch.optim.lr_scheduler.LambdaLR.
The returned function supplies the multiplier to decay a value linearly.
"""
assert stop > start
def decay_fn(counter):
if counter <= start:
return 1
if counter >= stop:
return final_val / initial_val
time_range = stop - start
return 1 - (counter - start) * (1 - final_val / initial_val) / time_range
assert decay_fn(start) * initial_val == initial_val
assert decay_fn(stop) * initial_val == final_val
return decay_fn | en | 0.67427 | Returns function handle to use in torch.optim.lr_scheduler.LambdaLR. The returned function supplies the multiplier to decay a value linearly. | 2.582257 | 3 |
config.py | lucifer1004/ML-docker | 0 | 6616868 | c = get_config()
# 所有matplotlib的图像都通过iline的方式显示
c.IPKernelApp.pylab = 'inline'
# 这一行指向我们刚刚创建的ssl证书
c.NotebookApp.certfile = './mycert.pem'
# 给出刚刚创建的密码的哈希值
c.NotebookApp.password = '<PASSWORD>'
c.NotebookApp.ip = '0.0.0.0'
# 给出运行的端口,ipython默认为8888
c.NotebookApp.port = 8888
# 禁止在运行ipython的同时弹出浏览器
c.NotebookApp.open_browser = False | c = get_config()
# 所有matplotlib的图像都通过iline的方式显示
c.IPKernelApp.pylab = 'inline'
# 这一行指向我们刚刚创建的ssl证书
c.NotebookApp.certfile = './mycert.pem'
# 给出刚刚创建的密码的哈希值
c.NotebookApp.password = '<PASSWORD>'
c.NotebookApp.ip = '0.0.0.0'
# 给出运行的端口,ipython默认为8888
c.NotebookApp.port = 8888
# 禁止在运行ipython的同时弹出浏览器
c.NotebookApp.open_browser = False | zh | 0.935945 | # 所有matplotlib的图像都通过iline的方式显示 # 这一行指向我们刚刚创建的ssl证书 # 给出刚刚创建的密码的哈希值 # 给出运行的端口,ipython默认为8888 # 禁止在运行ipython的同时弹出浏览器 | 1.748789 | 2 |
src/lambda_function/function.py | QuiNovas/dynamo-parquet-etl | 1 | 6616869 | <gh_stars>1-10
import boto3
import json
import logging.config
import os
import re
from boto3.dynamodb.types import TypeDeserializer
from datetime import datetime
from jsonpointer import resolve_pointer
boto3.resource('dynamodb')
deserializer = TypeDeserializer()
dynamo_parquet_etl = json.loads(os.environ['DYNAMO_PARQUET_ETL'])
dynamo_table_re = re.compile('^arn:aws:dynamodb:[a-z]{2}-[a-z]*-[0-9]:[0-9]*:table/(.+?)/')
def handler(event, context):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.debug('Processing event {}'.format(json.dumps(event)))
return event
| import boto3
import json
import logging.config
import os
import re
from boto3.dynamodb.types import TypeDeserializer
from datetime import datetime
from jsonpointer import resolve_pointer
boto3.resource('dynamodb')
deserializer = TypeDeserializer()
dynamo_parquet_etl = json.loads(os.environ['DYNAMO_PARQUET_ETL'])
dynamo_table_re = re.compile('^arn:aws:dynamodb:[a-z]{2}-[a-z]*-[0-9]:[0-9]*:table/(.+?)/')
def handler(event, context):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.debug('Processing event {}'.format(json.dumps(event)))
return event | none | 1 | 1.994477 | 2 | |
tools/leetcode/request.py | vNaonLu/daily-leetcode | 2 | 6616870 | import requests
import json
__session = requests.Session()
__leetcode_quest_url = "https://leetcode.com/api/problems/all/"
__leetcode_gphql_url = "https://leetcode.com/graphql"
__user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
def questions():
request_count = 10
while request_count > 0:
resp = __session.get(
__leetcode_quest_url,
headers={
'User-Agent': __user_agent,
'Connection': 'keep-alive'
},
timeout=10)
request_count -= 1
if resp.status_code == 200:
break
if resp.status_code != 200:
return None
raw_list = json.loads(resp.content.decode('utf-8'))
ques_list = raw_list["stat_status_pairs"]
return ques_list
def question_slug(frontend_id: int):
qlist = questions()
if qlist:
for q in qlist:
if frontend_id == q['stat']['frontend_question_id']:
return q['stat']['question__title_slug']
return None
def question_details(ques_slug: str):
param = {
'operationName': "getQuestionDetail",
'variables': {'titleSlug': ques_slug},
'query': '''query getQuestionDetail($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionFrontendId
questionTitle
content
difficulty
titleSlug
codeSnippets {
lang
langSlug
code
__typename
}
}
}'''}
request_count = 10
while request_count > 0:
resp = __session.post(
__leetcode_gphql_url,
data=json.dumps(param).encode("utf8"),
headers={
'User-Agent': __user_agent, 'Connection':
'keep-alive', 'Content-Type': 'application/json',
'Referer': 'https://leetcode.com/problems/' + ques_slug
},
timeout=10)
request_count -= 1
if resp.status_code == 200:
break
if resp.status_code == 200:
raw_data = json.loads(resp.content.decode("utf8"))
return raw_data['data']['question']
return None
| import requests
import json
__session = requests.Session()
__leetcode_quest_url = "https://leetcode.com/api/problems/all/"
__leetcode_gphql_url = "https://leetcode.com/graphql"
__user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
def questions():
request_count = 10
while request_count > 0:
resp = __session.get(
__leetcode_quest_url,
headers={
'User-Agent': __user_agent,
'Connection': 'keep-alive'
},
timeout=10)
request_count -= 1
if resp.status_code == 200:
break
if resp.status_code != 200:
return None
raw_list = json.loads(resp.content.decode('utf-8'))
ques_list = raw_list["stat_status_pairs"]
return ques_list
def question_slug(frontend_id: int):
qlist = questions()
if qlist:
for q in qlist:
if frontend_id == q['stat']['frontend_question_id']:
return q['stat']['question__title_slug']
return None
def question_details(ques_slug: str):
param = {
'operationName': "getQuestionDetail",
'variables': {'titleSlug': ques_slug},
'query': '''query getQuestionDetail($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionFrontendId
questionTitle
content
difficulty
titleSlug
codeSnippets {
lang
langSlug
code
__typename
}
}
}'''}
request_count = 10
while request_count > 0:
resp = __session.post(
__leetcode_gphql_url,
data=json.dumps(param).encode("utf8"),
headers={
'User-Agent': __user_agent, 'Connection':
'keep-alive', 'Content-Type': 'application/json',
'Referer': 'https://leetcode.com/problems/' + ques_slug
},
timeout=10)
request_count -= 1
if resp.status_code == 200:
break
if resp.status_code == 200:
raw_data = json.loads(resp.content.decode("utf8"))
return raw_data['data']['question']
return None
| en | 0.376029 | query getQuestionDetail($titleSlug: String!) { question(titleSlug: $titleSlug) { questionFrontendId questionTitle content difficulty titleSlug codeSnippets { lang langSlug code __typename } } } | 2.970233 | 3 |
surfaise/common/io.py | gautelinga/Surfaise | 0 | 6616871 | <filename>surfaise/common/io.py
import dolfin as df
import os
from surfaise.common.cmd import mpi_is_root, mpi_barrier, mpi_comm, \
mpi_size, mpi_rank, info_red, info_cyan, info_on_red
import numpy as np
import json
import h5py
import cloudpickle as pickle
from .utilities import NdFunction
def dump_xdmf(f, folder=""):
filename = os.path.join(folder, "{}.xdmf".format(f.name()))
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(f)
def dump_coords(geo_map, folder="", name="xyz"):
filename = os.path.join(folder, "{}.xdmf".format(name))
xyz = geo_map.coords()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(xyz)
mpi_barrier()
if mpi_is_root() and geo_map.is_periodic_in_3d():
with h5py.File(os.path.join(
folder, "{}.h5".format(name)), "r+") as h5f:
ts = np.array(h5f["Mesh/mesh/geometry"])
t = ts[:, 0]
s = ts[:, 1]
xyz_new = np.vstack((geo_map.evalf["x"](t, s),
geo_map.evalf["y"](t, s),
geo_map.evalf["z"](t, s))).T
xyz = h5f["VisualisationVector/0"]
xyz[:, :] = xyz_new
def dump_dcoords(geo_map, folder="", name="dxyz"):
dxyz = geo_map.dcoords()
for i, xyzi in enumerate(dxyz):
filename = os.path.join(folder, "{}{}.xdmf".format(name, i))
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(xyzi)
def dump_metric_tensor(geo_map, folder="", name="g_ab"):
filename = os.path.join(folder, "{}.xdmf".format(name))
g = geo_map.metric_tensor()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(g)
def dump_metric_tensor_inv(geo_map, folder="", name="gab"):
# Is it necessary to dump both?
filename = os.path.join(folder, "{}.xdmf".format(name))
g_inv = geo_map.metric_tensor_inv()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(g_inv)
def dump_curvature_tensor(geo_map, folder="", name="K_ab"):
filename = os.path.join(folder, "{}.xdmf".format(name))
K_ab = geo_map.curvature_tensor()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(K_ab)
def dump_map(geo_map, folder=""):
filename = os.path.join(folder, "map.pkl")
if mpi_is_root():
with open(filename, "wb") as f:
pickle.dump(geo_map.map, f)
def dump_evalf(geo_map, folder=""):
filename = os.path.join(folder, "evalf.pkl")
if mpi_is_root():
with open(filename, "wb") as f:
pickle.dump(geo_map.evalf, f)
def makedirs_safe(folder):
""" Make directory in a safe way. """
if mpi_is_root() and not os.path.exists(folder):
os.makedirs(folder)
def remove_safe(path):
""" Remove file in a safe way. """
if mpi_is_root() and os.path.exists(path):
os.remove(path)
def create_directories(results_folder):
""" Create folders. """
makedirs_safe(results_folder)
mpi_barrier()
info_red("Creating initial folders.")
# GL: Add option to restart.
previous_list = os.listdir(results_folder)
if len(previous_list) == 0:
folder = os.path.join(results_folder, "1")
else:
previous = max([int(entry) if entry.isdigit() else 0
for entry in previous_list])
folder = os.path.join(results_folder, str(previous+1))
mpi_barrier()
geofolder = os.path.join(folder, "Geometry")
tsfolder = os.path.join(folder, "Timeseries")
statsfolder = os.path.join(folder, "Statistics")
checkpointfolder = os.path.join(folder, "Checkpoint")
settingsfolder = os.path.join(folder, "Settings")
makedirs_safe(geofolder)
makedirs_safe(tsfolder)
makedirs_safe(statsfolder)
makedirs_safe(checkpointfolder)
makedirs_safe(settingsfolder)
# GL: add more?
return folder
class Timeseries:
def __init__(self, results_folder, u_, field_names, geo_map, tstep0=0,
parameters=None, restart_folder=None):
self.u_ = u_ # Pointer
self.tstep0 = tstep0
num_sub_el = u_.function_space().ufl_element().num_sub_elements()
self.fields = field_names
if num_sub_el > 0:
assert(num_sub_el == len(field_names))
else:
assert(len(field_names) == 1 or isinstance(field_names, str))
if isinstance(field_names, str):
self.fields = (field_names,)
if restart_folder is None:
self.folder = create_directories(results_folder)
else:
self.folder = restart_folder.split("Checkpoint")[0]
geofolder = os.path.join(self.folder, "Geometry")
checkpointfolder = os.path.join(self.folder, "Checkpoint")
dump_coords(geo_map, folder=geofolder)
dump_dcoords(geo_map, folder=geofolder)
dump_xdmf(geo_map.normal(), folder=geofolder)
dump_metric_tensor(geo_map, folder=geofolder)
dump_metric_tensor_inv(geo_map, folder=geofolder)
dump_curvature_tensor(geo_map, folder=geofolder)
dump_map(geo_map, folder=checkpointfolder)
dump_evalf(geo_map, folder=checkpointfolder)
self.files = dict()
for field in self.fields:
filename = os.path.join(self.folder, "Timeseries",
"{}_from_tstep_{}".format(field,
self.tstep0))
self.files[field] = self._create_file(filename)
if parameters:
parametersfile = os.path.join(
self.folder, "Settings",
"parameters_from_tstep_{}.dat".format(self.tstep0))
dump_parameters(parameters, parametersfile)
self.S_ref = geo_map.S_ref
self.extra_fields = dict()
self.extra_field_functions = dict()
def dump(self, tstep):
q_ = self._unpack()
for field, qi_ in zip(self.fields, q_):
qi_.rename(field, "tmp")
self.files[field].write(qi_, tstep)
# Dumping extra fields
if len(self.extra_fields) > 0:
# S = q_[0].function_space().collapse()
S = self.S_ref
for field, ufl_expression in self.extra_fields.items():
if isinstance(ufl_expression, list):
v = [df.project(expr_i, S) for expr_i in ufl_expression]
vf = NdFunction(v, name=field)
vf()
self.extra_field_functions[field] = vf
self.files[field].write(vf, tstep)
else:
self.extra_field_functions[field] = df.project(ufl_expression,
S)
self.extra_field_functions[field].rename(field, "tmp")
self.files[field].write(self.extra_field_functions[field],
tstep)
def _unpack(self):
num_fields = len(self.fields)
if num_fields == 1:
return (self.u_,)
else:
return self.u_.split()
def _create_file(self, filename):
f = df.XDMFFile(mpi_comm(), "{}.xdmf".format(filename))
f.parameters["rewrite_function_mesh"] = False
f.parameters["flush_output"] = True
return f
def close(self):
for field in self.files.keys():
self.files[field].close()
def add_field(self, ufl_expression, field_name):
filename = os.path.join(self.folder, "Timeseries",
"{}_from_tstep_{}".format(field_name,
self.tstep0))
self.extra_fields[field_name] = ufl_expression
self.files[field_name] = self._create_file(filename)
def get_function(self, field):
return self.extra_field_functions[field]
def dump_stats(self, t, data_at_t, name):
if mpi_is_root():
filename = os.path.join(self.folder, "Statistics",
"{}.dat".format(name))
data_at_t = np.array(data_at_t).flatten()
with open(filename, "a+") as outfile:
outfile.write("{}".format(t))
for d in data_at_t:
outfile.write("\t {}".format(d))
outfile.write("\n")
def save_checkpoint(tstep, t, mesh, w_, w_1, folder, parameters, name=""):
""" Save checkpoint files.
A part of this is taken from the Oasis code."""
checkpointfolder = os.path.join(folder, "Checkpoint")
parameters["num_processes"] = mpi_size()
parameters["t_0"] = t
parameters["tstep"] = tstep
parametersfile = os.path.join(checkpointfolder, "parameters.dat")
parametersfile_old = parametersfile + ".old"
if mpi_is_root():
# In case of failure, keep old file.
if os.path.exists(parametersfile):
os.system("mv {0} {1}".format(parametersfile,
parametersfile_old))
dump_parameters(parameters, parametersfile)
mpi_barrier()
h5filename = os.path.join(checkpointfolder, "fields.h5")
h5filename_old = h5filename + ".old"
# In case of failure, keep old file.
if mpi_is_root() and os.path.exists(h5filename):
os.system("mv {0} {1}".format(h5filename, h5filename_old))
h5file = df.HDF5File(mpi_comm(), h5filename, "w")
h5file.flush()
info_red("Storing mesh")
h5file.write(mesh, "mesh")
mpi_barrier()
info_red("Storing current solution")
h5file.write(w_, "{}/current".format(name))
info_red("Storing previous solution")
h5file.write(w_1, "{}/previous".format(name))
mpi_barrier()
h5file.close()
# Since program is still running, delete the old files.
remove_safe(h5filename_old)
mpi_barrier()
remove_safe(parametersfile_old)
def load_checkpoint(checkpointfolder, w_, w_1, name=""):
if checkpointfolder:
h5filename = os.path.join(checkpointfolder, "fields.h5")
h5file = df.HDF5File(mpi_comm(), h5filename, "r")
info_red("Loading current solution")
h5file.read(w_, "{}/current".format(name))
info_red("Loading previous solution")
h5file.read(w_1, "{}/previous".format(name))
h5file.close()
def load_mesh(filename, subdir="mesh",
use_partition_from_file=False):
""" Loads the mesh specified by the argument filename. """
info_cyan("Loading mesh: " + filename)
if not os.path.exists(filename):
info_red("Couldn't find file: " + filename)
exit()
mesh = df.Mesh()
h5file = df.HDF5File(mesh.mpi_comm(), filename, "r")
h5file.read(mesh, subdir, use_partition_from_file)
h5file.close()
return mesh
def dump_parameters(parameters, settingsfilename):
""" Dump parameters to file """
with open(settingsfilename, "w") as settingsfile:
json.dump(parameters, settingsfile, indent=4*' ', sort_keys=True)
def load_parameters(parameters, settingsfilename):
if not os.path.exists(settingsfilename):
info_on_red("File " + settingsfilename + " does not exist.")
exit()
with open(settingsfilename, "r") as settingsfile:
parameters.update(json.load(settingsfile))
| <filename>surfaise/common/io.py
import dolfin as df
import os
from surfaise.common.cmd import mpi_is_root, mpi_barrier, mpi_comm, \
mpi_size, mpi_rank, info_red, info_cyan, info_on_red
import numpy as np
import json
import h5py
import cloudpickle as pickle
from .utilities import NdFunction
def dump_xdmf(f, folder=""):
filename = os.path.join(folder, "{}.xdmf".format(f.name()))
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(f)
def dump_coords(geo_map, folder="", name="xyz"):
filename = os.path.join(folder, "{}.xdmf".format(name))
xyz = geo_map.coords()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(xyz)
mpi_barrier()
if mpi_is_root() and geo_map.is_periodic_in_3d():
with h5py.File(os.path.join(
folder, "{}.h5".format(name)), "r+") as h5f:
ts = np.array(h5f["Mesh/mesh/geometry"])
t = ts[:, 0]
s = ts[:, 1]
xyz_new = np.vstack((geo_map.evalf["x"](t, s),
geo_map.evalf["y"](t, s),
geo_map.evalf["z"](t, s))).T
xyz = h5f["VisualisationVector/0"]
xyz[:, :] = xyz_new
def dump_dcoords(geo_map, folder="", name="dxyz"):
dxyz = geo_map.dcoords()
for i, xyzi in enumerate(dxyz):
filename = os.path.join(folder, "{}{}.xdmf".format(name, i))
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(xyzi)
def dump_metric_tensor(geo_map, folder="", name="g_ab"):
filename = os.path.join(folder, "{}.xdmf".format(name))
g = geo_map.metric_tensor()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(g)
def dump_metric_tensor_inv(geo_map, folder="", name="gab"):
# Is it necessary to dump both?
filename = os.path.join(folder, "{}.xdmf".format(name))
g_inv = geo_map.metric_tensor_inv()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(g_inv)
def dump_curvature_tensor(geo_map, folder="", name="K_ab"):
filename = os.path.join(folder, "{}.xdmf".format(name))
K_ab = geo_map.curvature_tensor()
with df.XDMFFile(mpi_comm(), filename) as xdmff:
xdmff.parameters["rewrite_function_mesh"] = False
xdmff.parameters["flush_output"] = True
xdmff.write(K_ab)
def dump_map(geo_map, folder=""):
filename = os.path.join(folder, "map.pkl")
if mpi_is_root():
with open(filename, "wb") as f:
pickle.dump(geo_map.map, f)
def dump_evalf(geo_map, folder=""):
filename = os.path.join(folder, "evalf.pkl")
if mpi_is_root():
with open(filename, "wb") as f:
pickle.dump(geo_map.evalf, f)
def makedirs_safe(folder):
""" Make directory in a safe way. """
if mpi_is_root() and not os.path.exists(folder):
os.makedirs(folder)
def remove_safe(path):
""" Remove file in a safe way. """
if mpi_is_root() and os.path.exists(path):
os.remove(path)
def create_directories(results_folder):
""" Create folders. """
makedirs_safe(results_folder)
mpi_barrier()
info_red("Creating initial folders.")
# GL: Add option to restart.
previous_list = os.listdir(results_folder)
if len(previous_list) == 0:
folder = os.path.join(results_folder, "1")
else:
previous = max([int(entry) if entry.isdigit() else 0
for entry in previous_list])
folder = os.path.join(results_folder, str(previous+1))
mpi_barrier()
geofolder = os.path.join(folder, "Geometry")
tsfolder = os.path.join(folder, "Timeseries")
statsfolder = os.path.join(folder, "Statistics")
checkpointfolder = os.path.join(folder, "Checkpoint")
settingsfolder = os.path.join(folder, "Settings")
makedirs_safe(geofolder)
makedirs_safe(tsfolder)
makedirs_safe(statsfolder)
makedirs_safe(checkpointfolder)
makedirs_safe(settingsfolder)
# GL: add more?
return folder
class Timeseries:
def __init__(self, results_folder, u_, field_names, geo_map, tstep0=0,
parameters=None, restart_folder=None):
self.u_ = u_ # Pointer
self.tstep0 = tstep0
num_sub_el = u_.function_space().ufl_element().num_sub_elements()
self.fields = field_names
if num_sub_el > 0:
assert(num_sub_el == len(field_names))
else:
assert(len(field_names) == 1 or isinstance(field_names, str))
if isinstance(field_names, str):
self.fields = (field_names,)
if restart_folder is None:
self.folder = create_directories(results_folder)
else:
self.folder = restart_folder.split("Checkpoint")[0]
geofolder = os.path.join(self.folder, "Geometry")
checkpointfolder = os.path.join(self.folder, "Checkpoint")
dump_coords(geo_map, folder=geofolder)
dump_dcoords(geo_map, folder=geofolder)
dump_xdmf(geo_map.normal(), folder=geofolder)
dump_metric_tensor(geo_map, folder=geofolder)
dump_metric_tensor_inv(geo_map, folder=geofolder)
dump_curvature_tensor(geo_map, folder=geofolder)
dump_map(geo_map, folder=checkpointfolder)
dump_evalf(geo_map, folder=checkpointfolder)
self.files = dict()
for field in self.fields:
filename = os.path.join(self.folder, "Timeseries",
"{}_from_tstep_{}".format(field,
self.tstep0))
self.files[field] = self._create_file(filename)
if parameters:
parametersfile = os.path.join(
self.folder, "Settings",
"parameters_from_tstep_{}.dat".format(self.tstep0))
dump_parameters(parameters, parametersfile)
self.S_ref = geo_map.S_ref
self.extra_fields = dict()
self.extra_field_functions = dict()
def dump(self, tstep):
q_ = self._unpack()
for field, qi_ in zip(self.fields, q_):
qi_.rename(field, "tmp")
self.files[field].write(qi_, tstep)
# Dumping extra fields
if len(self.extra_fields) > 0:
# S = q_[0].function_space().collapse()
S = self.S_ref
for field, ufl_expression in self.extra_fields.items():
if isinstance(ufl_expression, list):
v = [df.project(expr_i, S) for expr_i in ufl_expression]
vf = NdFunction(v, name=field)
vf()
self.extra_field_functions[field] = vf
self.files[field].write(vf, tstep)
else:
self.extra_field_functions[field] = df.project(ufl_expression,
S)
self.extra_field_functions[field].rename(field, "tmp")
self.files[field].write(self.extra_field_functions[field],
tstep)
def _unpack(self):
num_fields = len(self.fields)
if num_fields == 1:
return (self.u_,)
else:
return self.u_.split()
def _create_file(self, filename):
f = df.XDMFFile(mpi_comm(), "{}.xdmf".format(filename))
f.parameters["rewrite_function_mesh"] = False
f.parameters["flush_output"] = True
return f
def close(self):
for field in self.files.keys():
self.files[field].close()
def add_field(self, ufl_expression, field_name):
filename = os.path.join(self.folder, "Timeseries",
"{}_from_tstep_{}".format(field_name,
self.tstep0))
self.extra_fields[field_name] = ufl_expression
self.files[field_name] = self._create_file(filename)
def get_function(self, field):
return self.extra_field_functions[field]
def dump_stats(self, t, data_at_t, name):
if mpi_is_root():
filename = os.path.join(self.folder, "Statistics",
"{}.dat".format(name))
data_at_t = np.array(data_at_t).flatten()
with open(filename, "a+") as outfile:
outfile.write("{}".format(t))
for d in data_at_t:
outfile.write("\t {}".format(d))
outfile.write("\n")
def save_checkpoint(tstep, t, mesh, w_, w_1, folder, parameters, name=""):
""" Save checkpoint files.
A part of this is taken from the Oasis code."""
checkpointfolder = os.path.join(folder, "Checkpoint")
parameters["num_processes"] = mpi_size()
parameters["t_0"] = t
parameters["tstep"] = tstep
parametersfile = os.path.join(checkpointfolder, "parameters.dat")
parametersfile_old = parametersfile + ".old"
if mpi_is_root():
# In case of failure, keep old file.
if os.path.exists(parametersfile):
os.system("mv {0} {1}".format(parametersfile,
parametersfile_old))
dump_parameters(parameters, parametersfile)
mpi_barrier()
h5filename = os.path.join(checkpointfolder, "fields.h5")
h5filename_old = h5filename + ".old"
# In case of failure, keep old file.
if mpi_is_root() and os.path.exists(h5filename):
os.system("mv {0} {1}".format(h5filename, h5filename_old))
h5file = df.HDF5File(mpi_comm(), h5filename, "w")
h5file.flush()
info_red("Storing mesh")
h5file.write(mesh, "mesh")
mpi_barrier()
info_red("Storing current solution")
h5file.write(w_, "{}/current".format(name))
info_red("Storing previous solution")
h5file.write(w_1, "{}/previous".format(name))
mpi_barrier()
h5file.close()
# Since program is still running, delete the old files.
remove_safe(h5filename_old)
mpi_barrier()
remove_safe(parametersfile_old)
def load_checkpoint(checkpointfolder, w_, w_1, name=""):
if checkpointfolder:
h5filename = os.path.join(checkpointfolder, "fields.h5")
h5file = df.HDF5File(mpi_comm(), h5filename, "r")
info_red("Loading current solution")
h5file.read(w_, "{}/current".format(name))
info_red("Loading previous solution")
h5file.read(w_1, "{}/previous".format(name))
h5file.close()
def load_mesh(filename, subdir="mesh",
use_partition_from_file=False):
""" Loads the mesh specified by the argument filename. """
info_cyan("Loading mesh: " + filename)
if not os.path.exists(filename):
info_red("Couldn't find file: " + filename)
exit()
mesh = df.Mesh()
h5file = df.HDF5File(mesh.mpi_comm(), filename, "r")
h5file.read(mesh, subdir, use_partition_from_file)
h5file.close()
return mesh
def dump_parameters(parameters, settingsfilename):
""" Dump parameters to file """
with open(settingsfilename, "w") as settingsfile:
json.dump(parameters, settingsfile, indent=4*' ', sort_keys=True)
def load_parameters(parameters, settingsfilename):
if not os.path.exists(settingsfilename):
info_on_red("File " + settingsfilename + " does not exist.")
exit()
with open(settingsfilename, "r") as settingsfile:
parameters.update(json.load(settingsfile))
| en | 0.854502 | # Is it necessary to dump both? Make directory in a safe way. Remove file in a safe way. Create folders. # GL: Add option to restart. # GL: add more? # Pointer # Dumping extra fields # S = q_[0].function_space().collapse() Save checkpoint files. A part of this is taken from the Oasis code. # In case of failure, keep old file. # In case of failure, keep old file. # Since program is still running, delete the old files. Loads the mesh specified by the argument filename. Dump parameters to file | 2.074731 | 2 |
Scripts/assignment.py | jshaffar/Congress-Simulator | 0 | 6616872 | <reponame>jshaffar/Congress-Simulator
import yaml
CURRENT_SENATORS_FILE = "People/current_senators.txt"
CURRENT_CONGRESSMEN_FILE = "People/current_congressmen.txt"
with open(r'congress-legislators/legislators-current.yaml') as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
documents = yaml.full_load(file)
house_file = open(CURRENT_CONGRESSMEN_FILE, "w")
senate_file = open(CURRENT_SENATORS_FILE, "w")
for person in documents:
if (person['terms'][-1]['type'] == 'rep'):
try:
house_file.write(str(person['name']['official_full']) + '\n')
except:
name = ""
for part in person['name']:
name = name + person['name'][part] + " "
name += '\n'
house_file.write(name)
elif (person['terms'][-1]['type'] == 'sen'):
senate_file.write(str(person['name']['official_full']) + '\n')
house_file.close()
senate_file.close()
| import yaml
CURRENT_SENATORS_FILE = "People/current_senators.txt"
CURRENT_CONGRESSMEN_FILE = "People/current_congressmen.txt"
with open(r'congress-legislators/legislators-current.yaml') as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
documents = yaml.full_load(file)
house_file = open(CURRENT_CONGRESSMEN_FILE, "w")
senate_file = open(CURRENT_SENATORS_FILE, "w")
for person in documents:
if (person['terms'][-1]['type'] == 'rep'):
try:
house_file.write(str(person['name']['official_full']) + '\n')
except:
name = ""
for part in person['name']:
name = name + person['name'][part] + " "
name += '\n'
house_file.write(name)
elif (person['terms'][-1]['type'] == 'sen'):
senate_file.write(str(person['name']['official_full']) + '\n')
house_file.close()
senate_file.close() | en | 0.142149 | # The FullLoader parameter handles the conversion from YAML # scalar values to Python the dictionary format | 3.110502 | 3 |
tests/__init__.py | vreuter/attmap | 4 | 6616873 | <filename>tests/__init__.py
""" Create tests package, to help with pytest coverage logistics """
| <filename>tests/__init__.py
""" Create tests package, to help with pytest coverage logistics """
| en | 0.941449 | Create tests package, to help with pytest coverage logistics | 1.113285 | 1 |
yt/frontends/owls/io.py | aemerick/yt | 0 | 6616874 | <gh_stars>0
from yt.frontends.gadget.io import \
IOHandlerGadgetHDF5
class IOHandlerOWLS(IOHandlerGadgetHDF5):
_dataset_type = "OWLS"
| from yt.frontends.gadget.io import \
IOHandlerGadgetHDF5
class IOHandlerOWLS(IOHandlerGadgetHDF5):
_dataset_type = "OWLS" | none | 1 | 1.281898 | 1 | |
accbpg/algorithms.py | Bhaskers-Blu-Org2/accbpg | 17 | 6616875 | <gh_stars>10-100
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import time
def BPG(f, h, L, x0, maxitrs, epsilon=1e-14, linesearch=True, ls_ratio=1.2,
verbose=True, verbskip=1):
"""
Bregman Proximal Gradient (BGP) method for min_{x in C} f(x) + Psi(x):
x(k+1) = argmin_{x in C} { Psi(x) + <f'(x(k)), x> + L(k) * D_h(x,x(k))}
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
maxitrs: maximum number of iterations
epsilon: stop if F(x[k])-F(x[k-1]) < epsilon, where F(x)=f(x)+Psi(x)
linesearch: whether or not perform line search (True or False)
ls_ratio: backtracking line search parameter >= 1
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Ls: array storing local Lipschitz constants obtained by line search
T: array storing time used up to iteration k
"""
if verbose:
print("\nBPG_LS method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) Lk time")
start_time = time.time()
F = np.zeros(maxitrs)
Ls = np.ones(maxitrs) * L
T = np.zeros(maxitrs)
x = np.copy(x0)
for k in range(maxitrs):
fx, g = f.func_grad(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
if linesearch:
L = L / ls_ratio
x1 = h.div_prox_map(x, g, L)
while f(x1) > fx + np.dot(g, x1-x) + L*h.divergence(x1, x):
L = L * ls_ratio
x1 = h.div_prox_map(x, g, L)
x = x1
else:
x = h.div_prox_map(x, g, L)
# store and display computational progress
Ls[k] = L
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:6.1f}".format(k, F[k], L, T[k]))
# stopping criteria
if k > 0 and abs(F[k]-F[k-1]) < epsilon:
break;
F = F[0:k+1]
Ls = Ls[0:k+1]
T = T[0:k+1]
return x, F, Ls, T
def solve_theta(theta, gamma, gainratio=1):
"""
solve theta_k1 from the equation
(1-theta_k1)/theta_k1^gamma = gainratio * 1/theta_k^gamma
using Newton's method, starting from theta
"""
ckg = theta**gamma / gainratio
cta = theta
eps = 1e-6 * theta
phi = cta**gamma - ckg*(1-cta)
while abs(phi) > eps:
drv = gamma * cta**(gamma-1) + ckg
cta = cta - phi / drv
phi = cta**gamma - ckg*(1-cta)
return cta
def ABPG(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method for solving
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman div D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk) / D(zk,zk_1) / theta_k^gamma
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG method for minimize_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x # only required for restart mode
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x + theta*z_1
g = f.gradient(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart and k > 0:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T
def ABPG_expo(f, h, L, x0, gamma0, maxitrs, epsilon=1e-14, delta=0.2,
theta_eq=True, checkdiv=False, Gmargin=10, restart=False,
restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient method with exponent adaption for
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma0: initial triangle scaling exponent(TSE) for D_h(x,y) (>2)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
delta: amount to decrease TSE for exponent adaption
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
Gmargin: extra gain margin allowed for checking TSI
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gamma: gamma_k obtained at each iteration
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_expo method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta gamma" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
Gamma = np.ones(maxitrs) * gamma0
T = np.zeros(maxitrs)
gamma = gamma0
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
condition = True
while condition: # always execute at least once per iteration
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (dxy > Gmargin * (theta**gamma) * dzz )
else:
condition = (f(x) > fy + np.dot(g, x-y) + theta**gamma*L*dzz)
if condition and gamma > 1:
gamma = max(gamma - delta, 1)
else:
condition = False
# store and display computational progress
G[k] = Gdr
Gamma[k] = gamma
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:6.1f}".format(
k, F[k], theta, gamma, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
Gamma = Gamma[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, Gamma, G, T
def ABPG_gain(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, G0=1,
ls_inc=1.2, ls_dec=1.2, theta_eq=True, checkdiv=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method with gain adaption for
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent(TSE) for Bregman distance D_h(x,y)
G0: initial value for triangle scaling gain
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
ls_inc: factor of increasing gain (>=1)
ls_dec: factor of decreasing gain (>=1)
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
restart: restart the algorithm when overshooting (True/False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True/False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gain: triangle scaling gains G_k obtained by LS at each iteration
Gdiv: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
Gavg: geometric mean of G_k at all steps up to iteration k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_gain method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta Gk" +
" TSG D(x+,y) D(z+,z) Gavg time")
start_time = time.time()
F = np.zeros(maxitrs)
Gain = np.ones(maxitrs) * G0
Gdiv = np.zeros(maxitrs)
Gavg = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
G = G0
# logGavg = (gamma*log(G0) + log(G_1) + ... + log(Gk)) / (k+gamma)
sumlogG = gamma * np.log(G)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
# adaptive option: always try a smaller Gain first before line search
G_1 = G
theta_1 = theta
G = G / ls_dec
condition = True
while condition:
if kk > 0:
if theta_eq:
theta = solve_theta(theta_1, gamma, G / G_1)
else:
alpha = G / G_1
theta = theta_1*((1+alpha*(gamma-1))/(gamma*alpha+theta_1))
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * G * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
if dzz < epsilon:
break
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (Gdr > G )
else:
condition = (f(x) > fy + np.dot(g,x-y) + theta**gamma*G*L*dzz)
if condition:
G = G * ls_inc
# store and display computational progress
Gain[k] = G
Gdiv[k] = Gdr
sumlogG += np.log(G)
Gavg[k] = np.exp(sumlogG / (gamma + k))
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:10.3e} {8:6.1f}".format(
k, F[k], theta, G, Gdr, dxy, dzz, Gavg[k], T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
Gain = Gain[0:k+1]
Gdiv = Gdiv[0:k+1]
Gavg = Gavg[0:k+1]
T = T[0:k+1]
return x, F, Gain, Gdiv, Gavg, T
def ABDA(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=True,
verbose=True, verbskip=1):
"""
Accelerated Bregman Dual Averaging (ABDA) method for solving
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman distance D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma
T: array storing time used up to iteration k
"""
# Simple restart schemes for dual averaging method do not work!
restart = False
if verbose:
print("\nABDA method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
gavg = np.zeros(x.size)
csum = 0
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
g = f.gradient(y)
gavg = gavg + theta**(1-gamma) * g
csum = csum + theta**(1-gamma)
z = h.prox_map(gavg/csum, L/csum)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
kk += 1
# restart does not work for ABDA (restart = False)
if restart:
if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0: # this does not work for dual averaging
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
gavg = np.zeros(x.size) # this is why restart does not work
csum = 0
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import time
def BPG(f, h, L, x0, maxitrs, epsilon=1e-14, linesearch=True, ls_ratio=1.2,
verbose=True, verbskip=1):
"""
Bregman Proximal Gradient (BGP) method for min_{x in C} f(x) + Psi(x):
x(k+1) = argmin_{x in C} { Psi(x) + <f'(x(k)), x> + L(k) * D_h(x,x(k))}
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
maxitrs: maximum number of iterations
epsilon: stop if F(x[k])-F(x[k-1]) < epsilon, where F(x)=f(x)+Psi(x)
linesearch: whether or not perform line search (True or False)
ls_ratio: backtracking line search parameter >= 1
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Ls: array storing local Lipschitz constants obtained by line search
T: array storing time used up to iteration k
"""
if verbose:
print("\nBPG_LS method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) Lk time")
start_time = time.time()
F = np.zeros(maxitrs)
Ls = np.ones(maxitrs) * L
T = np.zeros(maxitrs)
x = np.copy(x0)
for k in range(maxitrs):
fx, g = f.func_grad(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
if linesearch:
L = L / ls_ratio
x1 = h.div_prox_map(x, g, L)
while f(x1) > fx + np.dot(g, x1-x) + L*h.divergence(x1, x):
L = L * ls_ratio
x1 = h.div_prox_map(x, g, L)
x = x1
else:
x = h.div_prox_map(x, g, L)
# store and display computational progress
Ls[k] = L
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:6.1f}".format(k, F[k], L, T[k]))
# stopping criteria
if k > 0 and abs(F[k]-F[k-1]) < epsilon:
break;
F = F[0:k+1]
Ls = Ls[0:k+1]
T = T[0:k+1]
return x, F, Ls, T
def solve_theta(theta, gamma, gainratio=1):
"""
solve theta_k1 from the equation
(1-theta_k1)/theta_k1^gamma = gainratio * 1/theta_k^gamma
using Newton's method, starting from theta
"""
ckg = theta**gamma / gainratio
cta = theta
eps = 1e-6 * theta
phi = cta**gamma - ckg*(1-cta)
while abs(phi) > eps:
drv = gamma * cta**(gamma-1) + ckg
cta = cta - phi / drv
phi = cta**gamma - ckg*(1-cta)
return cta
def ABPG(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method for solving
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman div D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk) / D(zk,zk_1) / theta_k^gamma
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG method for minimize_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x # only required for restart mode
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x + theta*z_1
g = f.gradient(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart and k > 0:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T
def ABPG_expo(f, h, L, x0, gamma0, maxitrs, epsilon=1e-14, delta=0.2,
theta_eq=True, checkdiv=False, Gmargin=10, restart=False,
restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient method with exponent adaption for
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma0: initial triangle scaling exponent(TSE) for D_h(x,y) (>2)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
delta: amount to decrease TSE for exponent adaption
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
Gmargin: extra gain margin allowed for checking TSI
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gamma: gamma_k obtained at each iteration
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_expo method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta gamma" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
Gamma = np.ones(maxitrs) * gamma0
T = np.zeros(maxitrs)
gamma = gamma0
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
condition = True
while condition: # always execute at least once per iteration
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (dxy > Gmargin * (theta**gamma) * dzz )
else:
condition = (f(x) > fy + np.dot(g, x-y) + theta**gamma*L*dzz)
if condition and gamma > 1:
gamma = max(gamma - delta, 1)
else:
condition = False
# store and display computational progress
G[k] = Gdr
Gamma[k] = gamma
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:6.1f}".format(
k, F[k], theta, gamma, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
Gamma = Gamma[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, Gamma, G, T
def ABPG_gain(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, G0=1,
ls_inc=1.2, ls_dec=1.2, theta_eq=True, checkdiv=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method with gain adaption for
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent(TSE) for Bregman distance D_h(x,y)
G0: initial value for triangle scaling gain
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
ls_inc: factor of increasing gain (>=1)
ls_dec: factor of decreasing gain (>=1)
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
restart: restart the algorithm when overshooting (True/False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True/False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gain: triangle scaling gains G_k obtained by LS at each iteration
Gdiv: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
Gavg: geometric mean of G_k at all steps up to iteration k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_gain method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta Gk" +
" TSG D(x+,y) D(z+,z) Gavg time")
start_time = time.time()
F = np.zeros(maxitrs)
Gain = np.ones(maxitrs) * G0
Gdiv = np.zeros(maxitrs)
Gavg = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
G = G0
# logGavg = (gamma*log(G0) + log(G_1) + ... + log(Gk)) / (k+gamma)
sumlogG = gamma * np.log(G)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
# adaptive option: always try a smaller Gain first before line search
G_1 = G
theta_1 = theta
G = G / ls_dec
condition = True
while condition:
if kk > 0:
if theta_eq:
theta = solve_theta(theta_1, gamma, G / G_1)
else:
alpha = G / G_1
theta = theta_1*((1+alpha*(gamma-1))/(gamma*alpha+theta_1))
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * G * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
if dzz < epsilon:
break
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (Gdr > G )
else:
condition = (f(x) > fy + np.dot(g,x-y) + theta**gamma*G*L*dzz)
if condition:
G = G * ls_inc
# store and display computational progress
Gain[k] = G
Gdiv[k] = Gdr
sumlogG += np.log(G)
Gavg[k] = np.exp(sumlogG / (gamma + k))
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:10.3e} {8:6.1f}".format(
k, F[k], theta, G, Gdr, dxy, dzz, Gavg[k], T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
Gain = Gain[0:k+1]
Gdiv = Gdiv[0:k+1]
Gavg = Gavg[0:k+1]
T = T[0:k+1]
return x, F, Gain, Gdiv, Gavg, T
def ABDA(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=True,
verbose=True, verbskip=1):
"""
Accelerated Bregman Dual Averaging (ABDA) method for solving
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman distance D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma
T: array storing time used up to iteration k
"""
# Simple restart schemes for dual averaging method do not work!
restart = False
if verbose:
print("\nABDA method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
gavg = np.zeros(x.size)
csum = 0
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
g = f.gradient(y)
gavg = gavg + theta**(1-gamma) * g
csum = csum + theta**(1-gamma)
z = h.prox_map(gavg/csum, L/csum)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
kk += 1
# restart does not work for ABDA (restart = False)
if restart:
if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0: # this does not work for dual averaging
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
gavg = np.zeros(x.size) # this is why restart does not work
csum = 0
# stopping criteria
if dzz < epsilon:
break;
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T | en | 0.680941 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. Bregman Proximal Gradient (BGP) method for min_{x in C} f(x) + Psi(x):
x(k+1) = argmin_{x in C} { Psi(x) + <f'(x(k)), x> + L(k) * D_h(x,x(k))}
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
maxitrs: maximum number of iterations
epsilon: stop if F(x[k])-F(x[k-1]) < epsilon, where F(x)=f(x)+Psi(x)
linesearch: whether or not perform line search (True or False)
ls_ratio: backtracking line search parameter >= 1
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Ls: array storing local Lipschitz constants obtained by line search
T: array storing time used up to iteration k # store and display computational progress # stopping criteria solve theta_k1 from the equation
(1-theta_k1)/theta_k1^gamma = gainratio * 1/theta_k^gamma
using Newton's method, starting from theta Accelerated Bregman Proximal Gradient (ABPG) method for solving
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman div D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk) / D(zk,zk_1) / theta_k^gamma
T: array storing time used up to iteration k # initialize theta = 1 for updating with equality # separate counter for theta_k, easy for restart # function value at previous iteration # Update three iterates x, y and z # only required for restart mode # compute triangle scaling quantities # store and display computational progress # restart if gradient predicts objective increase #if k > 0 and F[k] > F[k-1]: #if np.dot(g, x-x_1) > 0: # reset theta = 1 for updating with equality # reset kk = 0 for theta = gamma/(kk+gamma) # in either case, reset z = x and also y # stopping criteria Accelerated Bregman Proximal Gradient method with exponent adaption for
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma0: initial triangle scaling exponent(TSE) for D_h(x,y) (>2)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
delta: amount to decrease TSE for exponent adaption
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
Gmargin: extra gain margin allowed for checking TSI
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gamma: gamma_k obtained at each iteration
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
T: array storing time used up to iteration k # initialize theta = 1 for updating with equality # separate counter for theta_k, easy for restart # function value at previous iteration # Update three iterates x, y and z #g = f.gradient(y) # always execute at least once per iteration # compute triangle scaling quantities # store and display computational progress # restart if gradient predicts objective increase #if k > 0 and F[k] > F[k-1]: #if np.dot(g, x-x_1) > 0: # reset theta = 1 for updating with equality # reset kk = 0 for theta = gamma/(kk+gamma) # in either case, reset z = x and also y # stopping criteria Accelerated Bregman Proximal Gradient (ABPG) method with gain adaption for
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent(TSE) for Bregman distance D_h(x,y)
G0: initial value for triangle scaling gain
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
ls_inc: factor of increasing gain (>=1)
ls_dec: factor of decreasing gain (>=1)
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
restart: restart the algorithm when overshooting (True/False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True/False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Gain: triangle scaling gains G_k obtained by LS at each iteration
Gdiv: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
Gavg: geometric mean of G_k at all steps up to iteration k
T: array storing time used up to iteration k # logGavg = (gamma*log(G0) + log(G_1) + ... + log(Gk)) / (k+gamma) # initialize theta = 1 for updating with equality # separate counter for theta_k, easy for restart # function value at previous iteration # Update three iterates x, y and z # adaptive option: always try a smaller Gain first before line search #g = f.gradient(y) # compute triangle scaling quantities # store and display computational progress # restart if gradient predicts objective increase #if k > 0 and F[k] > F[k-1]: #if np.dot(g, x-x_1) > 0: # reset theta = 1 for updating with equality # reset kk = 0 for theta = gamma/(kk+gamma) # in either case, reset z = x and also y # stopping criteria Accelerated Bregman Dual Averaging (ABDA) method for solving
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman distance D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, Fx, Ls):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma
T: array storing time used up to iteration k # Simple restart schemes for dual averaging method do not work! # initialize theta = 1 for updating with equality # separate counter for theta_k, easy for restart # function value at previous iteration # Update three iterates x, y and z # compute triangle scaling quantities # store and display computational progress # restart does not work for ABDA (restart = False) #if np.dot(g, x-x_1) > 0: # this does not work for dual averaging # reset theta = 1 for updating with equality # reset kk = 0 for theta = gamma/(kk+gamma) # in either case, reset z = x and also y # this is why restart does not work # stopping criteria | 2.695956 | 3 |
src/core/reinforce_wrappers.py | MathieuRita/Dialog | 0 | 6616876 | <filename>src/core/reinforce_wrappers.py
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from collections import defaultdict
import numpy as np
from .transformer import TransformerEncoder, TransformerDecoder
from .rnn import RnnEncoder, RnnEncoderImpatient,RnnEncoderExternalEmbedding
from .util import find_lengths
class ReinforceWrapper(nn.Module):
"""
Reinforce Wrapper for an agent. Assumes that the during the forward,
the wrapped agent returns log-probabilities over the potential outputs. During training, the wrapper
transforms them into a tuple of (sample from the multinomial, log-prob of the sample, entropy for the multinomial).
Eval-time the sample is replaced with argmax.
>>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1))
>>> agent = ReinforceWrapper(agent)
>>> sample, log_prob, entropy = agent(torch.ones(4, 10))
>>> sample.size()
torch.Size([4])
>>> (log_prob < 0).all().item()
1
>>> (entropy > 0).all().item()
1
"""
def __init__(self, agent):
super(ReinforceWrapper, self).__init__()
self.agent = agent
def forward(self, *args, **kwargs):
logits = self.agent(*args, **kwargs)
distr = Categorical(logits=logits)
entropy = distr.entropy()
if self.training:
sample = distr.sample()
else:
sample = logits.argmax(dim=1)
log_prob = distr.log_prob(sample)
return sample, log_prob, entropy
class ReinforceDeterministicWrapper(nn.Module):
"""
Simple wrapper that makes a deterministic agent (without sampling) compatible with Reinforce-based game, by
adding zero log-probability and entropy values to the output. No sampling is run on top of the wrapped agent,
it is passed as is.
>>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1))
>>> agent = ReinforceDeterministicWrapper(agent)
>>> sample, log_prob, entropy = agent(torch.ones(4, 10))
>>> sample.size()
torch.Size([4, 3])
>>> (log_prob == 0).all().item()
1
>>> (entropy == 0).all().item()
1
"""
def __init__(self, agent):
super(ReinforceDeterministicWrapper, self).__init__()
self.agent = agent
def forward(self, *args, **kwargs):
out = self.agent(*args, **kwargs)
return out, torch.zeros(1).to(out.device), torch.zeros(1).to(out.device)
class SymbolGameReinforce(nn.Module):
    """
    A single-symbol Sender/Receiver game implemented with Reinforce.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff=0.0, receiver_entropy_coeff=0.0):
        """
        :param sender: Sender agent. On forward, returns a tuple of (message, log-prob of the message, entropy).
        :param receiver: Receiver agent. On forward, accepts a message and the dedicated receiver input. Returns
            a tuple of (output, log-probs, entropy).
        :param loss: The loss function that accepts:
            sender_input: input of Sender
            message: the message sent by Sender
            receiver_input: input of Receiver from the dataset
            receiver_output: output of Receiver
            labels: labels assigned to Sender's input data
            and outputs the end-to-end loss. Can be non-differentiable; if it is differentiable, this will be leveraged
        :param sender_entropy_coeff: The entropy regularization coefficient for Sender
        :param receiver_entropy_coeff: The entropy regularization coefficient for Receiver
        """
        super(SymbolGameReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.loss = loss
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.sender_entropy_coeff = sender_entropy_coeff
        # Running mean of the task loss, used as a variance-reduction baseline for REINFORCE.
        self.mean_baseline = 0.0
        # Number of batches folded into the running mean so far.
        self.n_points = 0.0
    def forward(self, sender_input, labels, receiver_input=None):
        message, sender_log_prob, sender_entropy = self.sender(sender_input)
        receiver_output, receiver_log_prob, receiver_entropy = self.receiver(message, receiver_input)
        loss, rest_info = self.loss(sender_input, message, receiver_input, receiver_output, labels)
        # REINFORCE term: the detached, baseline-corrected loss weights the log-probs of both agents' choices.
        policy_loss = ((loss.detach() - self.mean_baseline) * (sender_log_prob + receiver_log_prob)).mean()
        # Entropy bonus (negated: maximizing entropy encourages exploration).
        entropy_loss = -(sender_entropy.mean() * self.sender_entropy_coeff + receiver_entropy.mean() * self.receiver_entropy_coeff)
        if self.training:
            # Incremental running-mean update of the baseline; only during training.
            self.n_points += 1.0
            self.mean_baseline += (loss.detach().mean().item() -
                                   self.mean_baseline) / self.n_points
        # loss.mean() keeps any differentiable part of the task loss in the gradient path.
        full_loss = policy_loss + entropy_loss + loss.mean()
        for k, v in rest_info.items():
            if hasattr(v, 'mean'):
                rest_info[k] = v.mean().item()
        rest_info['baseline'] = self.mean_baseline
        rest_info['loss'] = loss.mean().item()
        # NOTE(review): these two entries remain tensors, unlike the scalars above —
        # confirm downstream logging expects that.
        rest_info['sender_entropy'] = sender_entropy.mean()
        rest_info['receiver_entropy'] = receiver_entropy.mean()
        return full_loss, rest_info
class RnnSenderReinforce(nn.Module):
    """
    Reinforce Wrapper for Sender in variable-length message game. Assumes that during the forward,
    the wrapped agent returns the initial hidden state for a RNN cell. This cell is then unrolled by the wrapper.
    During training, the wrapper samples from the cell, getting the output message. At evaluation time, the sampling
    is replaced by argmax.

    >>> agent = nn.Linear(10, 3)
    >>> agent = RnnSenderReinforce(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False)
    >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1)
    >>> message, logprob, entropy = agent(input)
    >>> message.size()
    torch.Size([16, 10])
    >>> (entropy > 0).all().item()
    1
    >>> message.size() # batch size x max_len
    torch.Size([16, 10])
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param num_layers: number of stacked RNN cells
        :param cell: type of the cell used (rnn, gru, lstm)
        :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes
            beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS.
        """
        super(RnnSenderReinforce, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve one position for the appended EOS symbol.
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # Learned start-of-sequence embedding, fed to the RNN at step 0.
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        # LayerNorms applied to hidden (and, for LSTM, cell) states at every step.
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.cells = None
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        # First layer consumes symbol embeddings; deeper layers consume hidden states.
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()
    def reset_parameters(self):
        # Small random init so step-0 symbol distributions are near-uniform.
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)
    def forward(self, x):
        # The wrapped agent provides the first layer's initial hidden state;
        # deeper layers (and LSTM cell states) start from zeros.
        prev_hidden = [self.agent(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)] # only used for LSTM
        # SOS embedding replicated over the batch.
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            # Categorical distribution over the vocabulary for this position.
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                # Sample during training (REINFORCE exploration), argmax at eval.
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        # (len, batch) -> (batch, len)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # Append EOS (symbol 0) with zero log-prob and entropy.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class RnnSenderReinforceModel3(nn.Module):
    """
    Reinforce wrapper for Sender in a variable-length message game, with an
    extra `imitate` mode that returns the full per-step probability tables
    instead of the log-probabilities of the sampled symbols.

    Assumes that during the forward the wrapped agent returns the initial
    hidden state for an RNN cell, which is then unrolled by the wrapper.
    During training symbols are sampled; at evaluation time sampling is
    replaced by argmax.

    >>> agent = nn.Linear(10, 3)
    >>> agent = RnnSenderReinforceModel3(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False)
    >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1)
    >>> message, logprob, entropy = agent(input)
    >>> message.size()
    torch.Size([16, 10])
    >>> (entropy > 0).all().item()
    1
    >>> message.size() # batch size x max_len
    torch.Size([16, 10])
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param num_layers: number of stacked RNN cells
        :param cell: type of the cell used (rnn, gru, lstm)
        :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes
            beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS.
        """
        super(RnnSenderReinforceModel3, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve one position for the appended EOS symbol.
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        # Per-step LayerNorms for hidden and (LSTM) cell states.
        self.norm_h = torch.nn.LayerNorm(hidden_size)
        self.norm_c = torch.nn.LayerNorm(hidden_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.cells = None
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        # First layer consumes symbol embeddings; deeper layers consume hidden states.
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)
    def forward(self, x, imitate=False):
        """Unroll the RNN and emit a message.

        :param x: input to the wrapped agent; its output seeds the first layer's hidden state.
        :param imitate: if True, `logits` holds the full per-step probability tables
            (batch x vocab x len) instead of log-probs of the chosen symbols (batch x len).
        :return: tuple (sequence, logits, entropy)
        """
        prev_hidden = [self.agent(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)] # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    # Bug fix: normalize the cell state with norm_c, not norm_h
                    # (matches RnnSenderReinforce; previously norm_h was applied to
                    # both states and the norm_c parameters were never used).
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                # Sample during training (REINFORCE exploration), argmax at eval.
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            if imitate:
                logits.append(distr.probs)
            else:
                logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        if imitate:
            # (len, batch, vocab) -> (batch, vocab, len)
            logits = torch.stack(logits).permute(1, 2, 0)
        else:
            logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # NOTE(review): with imitate=True, `logits` is 3-d here and this cat with a
            # 2-d tensor would raise; confirm imitate is only used with force_eos=False.
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class RnnSenderReinforceExternalEmbedding(nn.Module):
    """
    Reinforce wrapper for Sender in a variable-length message game, identical
    to RnnSenderReinforceModel3 except that the symbol-embedding layer is
    supplied externally (e.g. to share embeddings with a receiver).

    Assumes that during the forward the wrapped agent returns the initial
    hidden state for an RNN cell, which is then unrolled by the wrapper.
    During training symbols are sampled; at evaluation time sampling is
    replaced by argmax.

    >>> agent = nn.Linear(10, 3)
    >>> emb = nn.Embedding(5, 5)
    >>> agent = RnnSenderReinforceExternalEmbedding(agent, emb, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False)
    >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1)
    >>> message, logprob, entropy = agent(input)
    >>> message.size()
    torch.Size([16, 10])
    >>> (entropy > 0).all().item()
    1
    """
    def __init__(self, agent, embedding_layer, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped
        :param embedding_layer: externally provided embedding module mapping symbols to embed_dim vectors
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param num_layers: number of stacked RNN cells
        :param cell: type of the cell used (rnn, gru, lstm)
        :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes
            beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS.
        """
        super(RnnSenderReinforceExternalEmbedding, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve one position for the appended EOS symbol.
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        # Per-step LayerNorms for hidden and (LSTM) cell states.
        self.norm_h = torch.nn.LayerNorm(hidden_size)
        self.norm_c = torch.nn.LayerNorm(hidden_size)
        # Shared/external embedding layer (not created here).
        self.embedding = embedding_layer
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.cells = None
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)
    def forward(self, x, imitate=False):
        """Unroll the RNN and emit a message.

        :param x: input to the wrapped agent; its output seeds the first layer's hidden state.
        :param imitate: if True, `logits` holds the full per-step probability tables
            (batch x vocab x len) instead of log-probs of the chosen symbols (batch x len).
        :return: tuple (sequence, logits, entropy)
        """
        prev_hidden = [self.agent(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)] # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    # Bug fix: normalize the cell state with norm_c, not norm_h
                    # (matches RnnSenderReinforce; previously norm_h was applied to
                    # both states and the norm_c parameters were never used).
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                # Sample during training (REINFORCE exploration), argmax at eval.
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            if imitate:
                logits.append(distr.probs)
            else:
                logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        if imitate:
            # (len, batch, vocab) -> (batch, vocab, len)
            logits = torch.stack(logits).permute(1, 2, 0)
        else:
            logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # NOTE(review): with imitate=True, `logits` is 3-d here and this cat with a
            # 2-d tensor would raise; confirm imitate is only used with force_eos=False.
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class RnnReceiverReinforce(nn.Module):
    """Reinforce wrapper for Receiver in a variable-length message game.

    The message is encoded by an RNN; the wrapped agent is applied to the
    hidden state taken at the EOS symbol (or at the maximal sequence length)
    and is expected to return a (sample, log_prob, entropy) tuple.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverReinforce, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        # Encode the message, then delegate sampling entirely to the agent.
        hidden = self.encoder(message)
        return self.agent(hidden, input)
class RnnReceiverCompositionality(nn.Module):
    """Reinforce wrapper for Receiver in games with several attributes
    (compositionality experiments).

    Equivalent to RnnReceiverReinforce, except that the encoded message is
    mapped to `n_attributes` independent categorical distributions over
    `n_values` each; the tuple (logits, sample log-probs, entropies) is
    returned per attribute.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_attributes, n_values, cell='rnn', num_layers=1):
        super(RnnReceiverCompositionality, self).__init__()
        self.agent = agent
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.hidden_to_output = nn.Linear(hidden_size, n_attributes * n_values)

    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        batch = encoded.size(0)
        # (batch, n_attributes, n_values) log-probabilities, one row per attribute.
        logits = F.log_softmax(
            self.hidden_to_output(encoded).reshape(batch, self.n_attributes, self.n_values), dim=2)
        entropies = []
        sample_logprobs = []
        # Treat every attribute as an independent categorical distribution.
        for attribute in range(logits.size(1)):
            distr = Categorical(logits=logits[:, attribute, :])
            entropies.append(distr.entropy())
            drawn = distr.sample()
            sample_logprobs.append(distr.log_prob(drawn))
        entropy = torch.stack(entropies).permute(1, 0)
        slogits = torch.stack(sample_logprobs).permute(1, 0)
        return logits, slogits, entropy
class RnnReceiverDeterministic(nn.Module):
    """Reinforce-compatible wrapper around a deterministic Receiver.

    The message is encoded with an RNN, layer-normalized, and handed to the
    wrapped agent. Since the agent does not sample, zero-valued log-prob and
    entropy tensors are appended so the output matches the Reinforce game
    interface; the agent itself must be trained with regular back-propagation.

    With `imitate=True`, the agent's output is additionally interpreted as
    categorical logits, and the probability table and entropy derived from it
    are returned alongside the usual zero-valued deterministic placeholders.

    >>> class Agent(nn.Module):
    ...     def __init__(self):
    ...         super().__init__()
    ...         self.fc = nn.Linear(5, 3)
    ...     def forward(self, rnn_output, _input = None):
    ...         return self.fc(rnn_output)
    >>> agent = RnnReceiverDeterministic(Agent(), vocab_size=10, embed_dim=10, hidden_size=5)
    >>> message = torch.zeros((16, 10)).long().random_(0, 10) # batch of 16, 10 symbol length
    >>> output, logits, entropy = agent(message)
    >>> (logits == 0).all().item()
    1
    >>> (entropy == 0).all().item()
    1
    >>> output.size()
    torch.Size([16, 3])
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverDeterministic, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, message, input=None, lengths=None, imitate=False):
        agent_output = self.agent(self.norm(self.encoder(message)), input)
        device = agent_output.device
        zeros = torch.zeros(agent_output.size(0)).to(device)
        if not imitate:
            # Deterministic agent: zero log-prob and entropy placeholders.
            return agent_output, zeros, zeros
        # Imitation mode: expose the categorical distribution implied by the output.
        distr = Categorical(logits=F.log_softmax(agent_output, dim=1))
        return agent_output, distr.probs.to(device), distr.entropy().to(device), zeros, zeros
class RnnReceiverDeterministicExternalEmbedding(nn.Module):
    """Variant of RnnReceiverDeterministic whose encoder re-uses an externally
    provided embedding layer (e.g. shared with the sender).

    Behaves exactly like RnnReceiverDeterministic otherwise: the agent's output
    is extended with zero-valued log-prob/entropy placeholders for Reinforce
    compatibility, and `imitate=True` additionally exposes the categorical
    probabilities and entropy derived from the agent's raw output. The wrapped
    agent is deterministic and trained with regular back-propagation.
    """

    def __init__(self, agent, embedding_layer, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverDeterministicExternalEmbedding, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoderExternalEmbedding(embedding_layer, vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, message, input=None, lengths=None, imitate=False):
        agent_output = self.agent(self.norm(self.encoder(message)), input)
        device = agent_output.device
        zeros = torch.zeros(agent_output.size(0)).to(device)
        if not imitate:
            # Deterministic agent: zero log-prob and entropy placeholders.
            return agent_output, zeros, zeros
        # Imitation mode: expose the categorical distribution implied by the output.
        distr = Categorical(logits=F.log_softmax(agent_output, dim=1))
        return agent_output, distr.probs.to(device), distr.entropy().to(device), zeros, zeros
class RnnReceiverImpatient(nn.Module):
    """Impatient Listener.

    The message is encoded by an RNN that exposes its hidden state at every
    position; each hidden state is mapped to a categorical distribution with a
    single linear layer followed by a softmax. The resulting per-position
    log-probabilities (`sequence`) feed the Impatient loss function.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_features, cell='rnn', num_layers=1):
        super(RnnReceiverImpatient, self).__init__()
        self.max_len = max_len
        self.hidden_to_output = nn.Linear(hidden_size, n_features)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        sequence, logits, entropy = [], [], []
        # encoded is (len, batch, hidden): walk over message positions.
        for h_t in encoded:
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sampling only feeds the log-prob bookkeeping; predictions are step_logits.
            symbols = distr.sample() if self.training else step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            sequence.append(step_logits)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        return sequence, logits, entropy
class RnnReceiverImpatientCompositionality(nn.Module):
    """
    Adaptation of the Impatient Listener (RnnReceiverImpatient) to inputs
    with several attributes (compositionality experiments).

    Every intermediate hidden state of the encoder is mapped to one
    categorical distribution per attribute; each attribute is treated
    independently.
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_attributes, n_values, cell='rnn', num_layers=1):
        super(RnnReceiverImpatientCompositionality, self).__init__()
        self.max_len = max_len
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.hidden_to_output = nn.Linear(hidden_size, n_attributes * n_values)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)
    def forward(self, message, input=None, lengths=None):
        """Return (sequence, slogits, entropy) of shapes
        (batch, len, n_attributes, n_values), (batch, len, n_attributes),
        (batch, len, n_attributes)."""
        encoded = self.encoder(message)
        sequence = []
        slogits = []
        entropy = []
        for step in range(encoded.size(0)):
            h_t = encoded[step, :, :]
            # Per-step log-probs, reshaped to one distribution per attribute.
            step_logits = F.log_softmax(self.hidden_to_output(h_t).reshape(h_t.size(0), self.n_attributes, self.n_values), dim=2)
            # (Removed a dead `Categorical(step_logits)` construction that was
            # immediately shadowed by the per-attribute distributions below.)
            sequence.append(step_logits)
            entropy_step = []
            slogits_step = []
            for i in range(step_logits.size(1)):
                distr = Categorical(logits=step_logits[:, i, :])
                entropy_step.append(distr.entropy())
                x = distr.sample()
                slogits_step.append(distr.log_prob(x))
            entropy_step = torch.stack(entropy_step).permute(1, 0)
            slogits_step = torch.stack(slogits_step).permute(1, 0)
            entropy.append(entropy_step)
            slogits.append(slogits_step)
        sequence = torch.stack(sequence).permute(1, 0, 2, 3)
        entropy = torch.stack(entropy).permute(1, 0, 2)
        slogits = torch.stack(slogits).permute(1, 0, 2)
        return sequence, slogits, entropy
class RnnReceiverWithHiddenStates(nn.Module):
    """
    Impatient Listener variant that also returns the (layer-normalized)
    encoder hidden state at every position.

    All hidden states are mapped to a categorical distribution with a single
    linear layer (hidden_to_output) followed by a softmax; these per-position
    probabilities (`sequence`) feed the Impatient loss function, and the
    hidden states themselves are exposed for downstream use (e.g. a
    language-model pass).
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_features, cell='rnn', num_layers=1):
        super(RnnReceiverWithHiddenStates, self).__init__()
        self.max_len = max_len
        self.hidden_to_output = nn.Linear(hidden_size, n_features)
        self.norm_h = nn.LayerNorm(hidden_size)
        # NOTE(review): norm_c is never used in forward; kept so existing
        # checkpoints with these parameters still load.
        self.norm_c = nn.LayerNorm(hidden_size)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)
    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        sequence = []
        logits = []
        entropy = []
        hidden_states = []
        for step in range(encoded.size(0)):
            h_t = encoded[step, :, :]
            # Bug fix: was `norm_h(h_t)` (unqualified name -> NameError at runtime);
            # the LayerNorm is an attribute of this module.
            h_t = self.norm_h(h_t)
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample() # Sampling useless ?
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            sequence.append(step_logits)
            hidden_states.append(h_t)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        hidden_states = torch.stack(hidden_states).permute(1, 0, 2)
        return sequence, logits, entropy, hidden_states
class AgentBaseline(nn.Module):
    """Container pairing an independent Sender with an independent Receiver.

    The two sub-agents share no parameters; this class merely registers them
    as sub-modules. Calling the container itself is not supported.
    """

    def __init__(self, receiver, sender):
        super(AgentBaseline, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def forward(self, message, input=None, lengths=None):
        # The container is never invoked as a network; use sender/receiver directly.
        raise NotImplementedError
class AgentModel2(nn.Module):
    """Sender/Receiver pair whose receive path re-scores the receiver's hidden
    states through the sender's output layer (a greedy language-model pass).

    Sender and Receiver are otherwise independent sub-modules.
    """

    def __init__(self, receiver, sender):
        super(AgentModel2, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def send(self, sender_input):
        """Produce a message for `sender_input` via the wrapped sender."""
        return self.sender(sender_input)

    def receive(self, message, receiver_input, message_lengths):
        """Run the receiver, then score each of its hidden states with the
        sender's `hidden_to_output` layer (argmax-based LM pass).

        :return: (receiver_output, log_prob_r, entropy_r, sequence_lm, logits_lm)
            where sequence_lm is (batch, len, vocab) log-probs and logits_lm is
            (batch, len) log-probs of the greedy symbols.
        """
        receiver_output, log_prob_r, entropy_r, hidden_states = self.receiver(message, receiver_input, message_lengths)
        per_step_tables = []
        per_step_logprobs = []
        for position in range(hidden_states.size(1)):
            hidden = hidden_states[:, position, :]
            lm_table = F.log_softmax(self.sender.hidden_to_output(hidden), dim=1)
            lm_distr = Categorical(logits=lm_table)
            # Greedy symbol choice; no sampling in the LM pass.
            greedy = lm_table.argmax(dim=1)
            per_step_logprobs.append(lm_distr.log_prob(greedy))
            per_step_tables.append(lm_table)
        sequence_lm = torch.stack(per_step_tables).permute(1, 0, 2)
        logits_lm = torch.stack(per_step_logprobs).permute(1, 0)
        return receiver_output, log_prob_r, entropy_r, sequence_lm, logits_lm
class AgentModel3(nn.Module):
    """Thin dispatcher over an independent Sender/Receiver pair.

    `send` runs the sender, `receive` runs the receiver, and `imitate` runs
    the sender in imitation mode (full probability tables instead of sampled
    log-probs).
    """

    def __init__(self, receiver, sender):
        super(AgentModel3, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def send(self, sender_input):
        """Emit a message for `sender_input`."""
        return self.sender(sender_input)

    def receive(self, message, receiver_input, message_lengths, imitate=True):
        """Decode `message`; `imitate` is forwarded to the receiver."""
        return self.receiver(message, receiver_input, message_lengths, imitate)

    def imitate(self, sender_input, imitate=True):
        """Run the sender in imitation mode."""
        return self.sender(sender_input, imitate)
# New class agent
class AgentBaseline2(nn.Module):
    """
    AgentBaseline is composed of a couple of modalities:
    - sender
    - receiver
    In AgentBaseline, Sender and Receiver parts are independent.

    Unlike AgentBaseline, the sender and receiver networks are built inline
    rather than wrapped: `send` unrolls a stack of RNN cells to emit a message,
    `receive_2` encodes a message with a packed batched RNN and maps the final
    hidden state to features, and `imitate` returns full per-step probability
    tables for imitation-style training.
    """
    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 sender_hidden_size,
                 receiver_hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaseline2, self).__init__()
        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve one position for the appended EOS symbol.
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.sender_hidden_size=sender_hidden_size
        self.receiver_hidden_size=receiver_hidden_size
        # Learned start-of-sequence embedding for the sender unroll.
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        # Sender
        self.agent_sender = nn.Linear(n_features, sender_hidden_size) #nn.Linear(n_features, n_hidden)
        self.sender_cells = None
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(sender_hidden_size)
        self.sender_norm_c = nn.LayerNorm(sender_hidden_size)
        self.hidden_to_output = nn.Linear(sender_hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)
        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        # First layer consumes symbol embeddings; deeper layers consume hidden states.
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=sender_hidden_size) if i == 0 else \
            cell_type(input_size=sender_hidden_size, hidden_size=sender_hidden_size) for i in range(self.sender_num_layers)])
        self.reset_parameters()
        # Receiver
        self.agent_receiver = nn.Linear(receiver_hidden_size, n_features) #nn.Linear(n_hidden, n_features)
        # NOTE(review): receiver_cells is never populated (the ModuleList below is
        # commented out), so `receive` — which iterates it — cannot work; the
        # functional path is `receive_2`, which uses self.receiver_cell.
        self.receiver_cells = None
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(receiver_hidden_size)
        self.receiver_norm_c = nn.LayerNorm(receiver_hidden_size)
        #self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)
        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        # Batched (whole-sequence) RNN modules for the receiver, unlike the
        # per-step *Cell modules used by the sender.
        cell_types_r = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
        cell_type = cell_types_r[receiver_cell]
        self.receiver_cell = cell_types_r[receiver_cell](input_size=embed_dim, batch_first=True,
                                                         hidden_size=receiver_hidden_size, num_layers=receiver_num_layers)
        #self.receiver_cells = nn.ModuleList([
        #    cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
        #    cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.receiver_num_layers)])
        # NOTE(review): reset_parameters was already called above; this second
        # call is redundant but harmless (it only re-inits sos_embedding).
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)
    def send(self, x, eval=False,return_policies=False):
        """Unroll the sender RNN stack and emit a message.

        :param x: sender input features, mapped to the first layer's initial hidden state.
        :param eval: if True, force argmax decoding even in training mode.
        :param return_policies: if True, also return the full per-step probability
            tables (batch x len x vocab).
        :return: (sequence, logits, entropy) or (sequence, logits, whole_logits, entropy)
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)] # only used for LSTM
        # SOS embedding replicated over the batch.
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        whole_logits = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training and not eval:
                # Sample for exploration; argmax when evaluating.
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)
        # (len, batch[, vocab]) -> batch-first.
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1,0, 2)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # Append EOS (symbol 0) with zero log-prob/entropy; whole_logits is
            # left unextended.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        if return_policies:
            return sequence,logits,whole_logits, entropy
        else:
            return sequence,logits, entropy
    def receive(self,message, receiver_input, message_lengths):
        # NOTE(review): this method appears broken/dead — see the inline notes
        # below; `receive_2` is the working receiver path.
        if message_lengths is None:
            message_lengths=find_lengths(message)
        # NOTE(review): `self.hidden_size` is never defined on this class
        # (AttributeError here); presumably `self.receiver_hidden_size` was
        # intended. The hard-coded "cuda" also breaks CPU runs — should be
        # message.device.
        prev_hidden = [torch.zeros((message.size(0),self.hidden_size)).to("cuda")]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)] # only used for LSTM
        inputs = self.receiver_embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input=inputs[:,step,:]
            # NOTE(review): self.receiver_cells is None (its ModuleList construction
            # is commented out in __init__), so this iteration cannot run.
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # NOTE(review): `self.norm_h` does not exist on this class;
                    # presumably `self.receiver_norm_h` was intended.
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
            #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1)
            agent_output = self.agent_receiver(h_t)
            # Deterministic receiver: zero log-prob/entropy placeholders.
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Here choose EOS
        #sequence=sequence[:,-1,:]
        #logits=logits[:,-1]
        #entropy=entropy[:,-1]
        # Pick, for every item, the output at its actual message length (EOS position).
        output=[]
        for j in range(sequence.size(0)):
            output.append(sequence[j,message_lengths[j]-1,:])
        output=torch.stack(output)
        logits=logits[:,-1]
        entropy=entropy[:,-1]
        return output, logits, entropy
    def receive_2(self,message, receiver_input, message_lengths):
        """Encode `message` with the batched receiver RNN and map the last hidden
        state to features; zero log-prob/entropy placeholders keep the Reinforce
        interface."""
        emb = self.receiver_embedding(message)
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Pack so the RNN stops at each sequence's true length.
        packed = nn.utils.rnn.pack_padded_sequence(
            emb, message_lengths.cpu(), batch_first=True, enforce_sorted=False)
        _, rnn_hidden = self.receiver_cell(packed)
        if isinstance(self.receiver_cell, nn.LSTM):
            # LSTM returns (h_n, c_n); keep the hidden state only.
            rnn_hidden, _ = rnn_hidden
        # Hidden state of the top layer.
        encoded = rnn_hidden[-1]
        #encoded=self.receiver_norm_h(encoded)
        agent_output = self.agent_receiver(encoded)
        logits = torch.zeros(agent_output.size(0)).to(agent_output.device)
        entropy = logits
        return agent_output, logits, entropy
    def imitate(self,x):
        """Like `send`, but collect the full per-step probability tables
        (batch x vocab x len) instead of the chosen symbols' log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)] # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        # (len, batch, vocab) -> (batch, vocab, len)
        logits = torch.stack(logits).permute(1,2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # NOTE(review): `logits` is 3-d here, so this cat with a 2-d tensor
            # would raise; confirm imitate is only used with force_eos=False.
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentBaselineCompositionality(nn.Module):
    """
    Agent for attribute/value compositionality games, composed of two
    independent halves:

    - sender: a stack of RNN cells rolled out symbol-by-symbol, turning a
      flattened attribute/value vector into a discrete message;
    - receiver: a packed-sequence RNN decoding a message back into one
      distribution over values per attribute.
    """

    def __init__(self,
                 n_values,
                 n_attributes,
                 vocab_size,
                 max_len,
                 embed_dim,
                 sender_hidden_size,
                 receiver_hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaselineCompositionality, self).__init__()
        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # One message slot is reserved for the appended EOS symbol.
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.sender_hidden_size = sender_hidden_size
        self.receiver_hidden_size = receiver_hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        # Sender: input vector -> initial hidden state -> message symbols.
        self.agent_sender = nn.Linear(n_values * n_attributes, sender_hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(sender_hidden_size)
        self.sender_norm_c = nn.LayerNorm(sender_hidden_size)
        self.hidden_to_output = nn.Linear(sender_hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)
        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=sender_hidden_size) if i == 0 else
            cell_type(input_size=sender_hidden_size, hidden_size=sender_hidden_size)
            for i in range(self.sender_num_layers)])
        # Receiver: message -> RNN encoding -> per-attribute value logits.
        self.agent_receiver = nn.Linear(receiver_hidden_size, n_values * n_attributes)
        self.receiver_cells = None  # kept for interface compatibility (unused)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(receiver_hidden_size)
        self.receiver_norm_c = nn.LayerNorm(receiver_hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)
        receiver_cell = receiver_cell.lower()
        cell_types_r = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
        if receiver_cell not in cell_types_r:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        self.receiver_cell = cell_types_r[receiver_cell](input_size=embed_dim, batch_first=True,
                                                         hidden_size=receiver_hidden_size,
                                                         num_layers=receiver_num_layers)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False, return_policies=False):
        """Generate a message for input ``x`` with the sender.

        Args:
            x: input tensor of shape (batch, n_attributes * n_values).
            eval: force greedy decoding even in training mode.
            return_policies: additionally return the full per-step
                probability tables.

        Returns:
            (sequence, logits, entropy), plus ``whole_logits`` of shape
            (batch, max_len, vocab_size) inserted before ``entropy`` when
            ``return_policies`` is True.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        whole_logits = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample during training, greedy decode otherwise.
            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1, 0, 2)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        if return_policies:
            return sequence, logits, whole_logits, entropy
        else:
            return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths, return_policies=False, return_sample=False):
        """Decode ``message`` into per-attribute value distributions.

        Args:
            message: LongTensor (batch, len) of symbol ids.
            receiver_input: unused; kept for interface compatibility.
            message_lengths: per-sample lengths, or None to infer them.
            return_policies: additionally return the log-softmax policies.
            return_sample: additionally return the greedy per-attribute
                choices (batch, n_attributes).

        Returns:
            (agent_output, slogits, entropy) by default; extended tuples when
            ``return_sample`` / ``return_policies`` are set.
        """
        emb = self.receiver_embedding(message)
        if message_lengths is None:
            message_lengths = find_lengths(message)
        packed = nn.utils.rnn.pack_padded_sequence(
            emb, message_lengths.cpu(), batch_first=True, enforce_sorted=False)
        _, rnn_hidden = self.receiver_cell(packed)
        if isinstance(self.receiver_cell, nn.LSTM):
            rnn_hidden, _ = rnn_hidden
        encoded = rnn_hidden[-1]
        agent_output = self.agent_receiver(encoded).reshape(encoded.size(0), self.n_attributes, self.n_values)
        logits = F.log_softmax(agent_output, dim=2)
        entropy = []
        slogits = []
        sample = []
        for i in range(logits.size(1)):
            distr = Categorical(logits=logits[:, i, :])
            entropy.append(distr.entropy())
            # Greedy per-attribute choice (stochastic sampling is disabled on
            # purpose; the training branch previously used argmax as well).
            x = logits[:, i, :].argmax(dim=1)
            # Fix: collect the choice in both train and eval mode; previously
            # ``sample`` stayed empty in eval mode and ``torch.stack``
            # crashed when return_sample=True.
            sample.append(x)
            slogits.append(distr.log_prob(x))
        entropy = torch.stack(entropy).permute(1, 0)
        slogits = torch.stack(slogits).permute(1, 0)
        sample = torch.stack(sample).permute(1, 0)
        if return_sample:
            return sample, agent_output, slogits, logits, entropy
        elif return_policies:
            return agent_output, slogits, logits, entropy
        else:
            return agent_output, slogits, entropy

    def imitate(self, x):
        """Imitation rollout is not supported for this agent."""
        raise NotImplementedError
class AgentBaselineKL(nn.Module):
    """
    Agent with independent sender and receiver halves; ``send`` additionally
    returns the full per-step log-policy (used by KL-based objectives).
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaselineKL, self).__init__()
        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # One message slot is reserved for the appended EOS symbol.
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        # Sender
        self.agent_sender = nn.Linear(n_features, hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(hidden_size)
        self.sender_norm_c = nn.LayerNorm(hidden_size)
        # Symbol readout. Fix: this layer used to be created twice (the
        # receiver section silently re-initialized the sender's layer).
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)
        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.sender_num_layers)])
        # Receiver
        self.agent_receiver = nn.Linear(hidden_size, n_features)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(hidden_size)
        self.receiver_norm_c = nn.LayerNorm(hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)
        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        cell_type = cell_types[receiver_cell]
        self.receiver_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.receiver_num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False):
        """Generate a message for input ``x``.

        Returns:
            (sequence, logits, whole_logits, entropy): sequence (batch,
            max_len) of sampled/greedy symbols, logits their log-probs,
            whole_logits (batch, max_len, vocab_size) the full per-step
            log-policy, entropy the per-step policy entropies.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        whole_logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample during training, greedy decode otherwise.
            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(step_logits)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1, 0, 2)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, whole_logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """Decode a message step-by-step; return the readout at the per-sample
        EOS position with zero log-prob/entropy placeholders."""
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Fix: create the initial state on the message's device (was
        # hard-coded to "cuda", which broke CPU execution).
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)]  # only used for LSTM
        inputs = self.receiver_embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # Fix: ``self.norm_h`` does not exist on this class;
                    # use the receiver's LayerNorm as the LSTM branch does.
                    h_t = self.receiver_norm_h(h_t)
                prev_hidden[i] = h_t
            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Per sample, keep the readout produced at the last meaningful step.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """Sender rollout that returns per-step probability tables
        (batch, vocab_size, max_len) instead of sampled log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # NOTE(review): ``logits`` is 3-D here, so this cat along dim=1
            # with a 2-D zeros tensor would raise — presumably force_eos is
            # never used with imitate; verify before relying on it.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentPol(nn.Module):
    """
    Agent with independent sender and receiver halves that additionally keeps
    per-feature memory buffers (``mem``/``w_mem``) and an estimated sender
    policy table ``est_policy`` of shape (max_len, vocab_size) per feature.
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentPol, self).__init__()
        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # One message slot is reserved for the appended EOS symbol.
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        # Memory: one entry per input feature.
        # Fix: fall back to CPU when CUDA is unavailable (device used to be
        # hard-coded to "cuda", so construction crashed on CPU-only hosts).
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.mem = {}
        self.w_mem = {}
        self.est_policy = {}
        for k in range(n_features):
            self.mem[k] = []
            self.w_mem[k] = []
            self.est_policy[k] = torch.zeros([self.max_len, self.vocab_size]).to(device)
        # Sender
        self.agent_sender = nn.Linear(n_features, hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(hidden_size)
        self.sender_norm_c = nn.LayerNorm(hidden_size)
        # Symbol readout. Fix: this layer used to be created twice (the
        # receiver section silently re-initialized the sender's layer).
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)
        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.sender_num_layers)])
        # Receiver
        self.agent_receiver = nn.Linear(hidden_size, n_features)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(hidden_size)
        self.receiver_norm_c = nn.LayerNorm(hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)
        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        cell_type = cell_types[receiver_cell]
        self.receiver_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.receiver_num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False):
        """Generate a message for input ``x``.

        Returns:
            (sequence, logits, entropy), each of shape (batch, max_len)
            (+1 zero column when ``force_eos``).
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample during training, greedy decode otherwise.
            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """Decode a message step-by-step; return the readout at the per-sample
        EOS position with zero log-prob/entropy placeholders."""
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Fix: create the initial state on the message's device (was
        # hard-coded to "cuda", which broke CPU execution).
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)]  # only used for LSTM
        inputs = self.receiver_embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # Fix: ``self.norm_h`` does not exist on this class;
                    # use the receiver's LayerNorm as the LSTM branch does.
                    h_t = self.receiver_norm_h(h_t)
                prev_hidden[i] = h_t
            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Per sample, keep the readout produced at the last meaningful step.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """Sender rollout that returns per-step probability tables
        (batch, vocab_size, max_len) instead of sampled log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # NOTE(review): ``logits`` is 3-D here, so this cat along dim=1
            # with a 2-D zeros tensor would raise — presumably force_eos is
            # never used with imitate; verify before relying on it.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentSharedRNN(nn.Module):
    """
    Agent in which the sender and the receiver roles share a single stack of
    RNN cells, a single symbol embedding and a single LayerNorm pair (full
    weight tying between speaking and listening).

    NOTE(review): ``__init__`` reads ``n_features`` and ``n_hidden``, which
    are not constructor parameters — presumably module-level globals defined
    elsewhere in this file; confirm, otherwise instantiation raises NameError.
    """
    def __init__(self,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 cell,
                 num_layers,
                 force_eos):
        super(AgentSharedRNN, self).__init__()
        # NOTE(review): ``n_hidden``/``n_features`` are undefined in this
        # scope — TODO confirm they come from module-level globals.
        self.agent_receiver = nn.Linear(n_hidden, n_features) #nn.Linear(n_hidden, n_features)
        self.agent_sender = nn.Linear(n_features, n_hidden) #nn.Linear(n_features, n_hidden)
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # One message slot is reserved for the appended EOS symbol.
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.cells = None
        self.hidden_size=hidden_size
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        # First layer consumes embeddings; deeper layers consume hidden states.
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()
    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)
    def send(self, x):
        """Roll the shared RNN out as a speaker: one symbol per step up to
        ``max_len``; samples in training mode, greedy-decodes otherwise.
        Returns (sequence, log_probs, entropy), each (batch, max_len)
        (+1 zero column when ``force_eos``)."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample while training, greedy decode otherwise.
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
    def receive(self,message, receiver_input, message_lengths):
        """Roll the shared RNN over a message and keep, per sample, the
        readout produced at the EOS position (``message_lengths - 1``)."""
        if message_lengths is None:
            message_lengths=find_lengths(message)
        # NOTE(review): hard-coded "cuda" device — this breaks CPU execution;
        # presumably training always runs on GPU, verify.
        prev_hidden = [torch.zeros((message.size(0),self.hidden_size)).to("cuda")]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        inputs = self.embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input=inputs[:,step,:]
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
            #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1)
            # NOTE(review): ``agent_receiver`` is an ``nn.Linear``, which takes
            # a single input tensor; this two-argument call raises TypeError
            # unless ``agent_receiver`` is rebound to another callable — verify.
            agent_output = self.agent_receiver(h_t, None)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            #distr = Categorical(logits=step_logits)
            #entropy.append(distr.entropy())
            #x=step_logits.argmax(dim=1)
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Here choose EOS
        #sequence=sequence[:,-1,:]
        #logits=logits[:,-1]
        #entropy=entropy[:,-1]
        output=[]
        for j in range(sequence.size(0)):
            output.append(sequence[j,message_lengths[j]-1,:])
        output=torch.stack(output)
        logits=logits[:,-1]
        entropy=entropy[:,-1]
        return output, logits, entropy
    def imitate(self,x):
        """Sender rollout returning per-step probability tables of shape
        (batch, vocab_size, max_len) instead of sampled log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1,2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # NOTE(review): ``logits`` is 3-D here, so this dim=1 cat with a
            # 2-D zeros tensor would raise — presumably force_eos is never
            # combined with imitate; verify.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentSharedEmbedding(nn.Module):
"""
AgentBaseline is composed of a couple of modalities:
- sender
- receiver
In AgentBaseline, Sender and Receiver parts are independent
"""
def __init__(self,
n_features,
vocab_size,
max_len,
embed_dim,
hidden_size,
cell_sender,
cell_receiver,
num_layers_sender,
num_layers_receiver,
force_eos):
super(AgentSharedEmbedding, self).__init__()
assert embed_dim==hidden_size, "embed_dim has to be equal to hidden_size"
self.FC_features = nn.Linear(n_features,hidden_size,bias=False) #nn.Linear(n_hidden, n_features)
self.FC_vocabulary = nn.Linear(hidden_size,vocab_size,bias=False)
self.force_eos = force_eos
self.max_len = max_len
if force_eos:
self.max_len -= 1
self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
self.embedding_speaker = nn.Embedding(vocab_size, embed_dim)
self.embedding_listener = nn.Embedding(vocab_size, embed_dim)
self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
self.embed_dim = embed_dim
self.norm_h = nn.LayerNorm(hidden_size)
self.norm_c = nn.LayerNorm(hidden_size)
self.vocab_size = vocab_size
self.num_layers_sender = num_layers_sender
self.num_layers_receiver = num_layers_receiver
self.cells = None
self.hidden_size=hidden_size
cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
cell_sender = cell_sender.lower()
if cell_sender not in cell_types:
raise ValueError(f"Unknown RNN Cell: {cell}")
cell_type = cell_types[cell_sender]
self.cells_sender = nn.ModuleList([
cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers_sender)])
cell_receiver = cell_receiver.lower()
if cell_receiver not in cell_types:
raise ValueError(f"Unknown RNN Cell: {cell}")
cell_type = cell_types[cell_receiver]
self.cells_receiver = nn.ModuleList([
cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers_receiver)])
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.sos_embedding, 0.0, 0.01)
def send(self, x, eval=False):
prev_hidden = [self.FC_features(x)]
prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender - 1)])
prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender)] # only used for LSTM
input = torch.stack([self.sos_embedding] * x.size(0))
sequence = []
logits = []
entropy = []
for step in range(self.max_len):
for i, layer in enumerate(self.cells_sender):
if isinstance(layer, nn.LSTMCell):
h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
h_t = self.norm_h(h_t)
c_t = self.norm_c(c_t)
prev_c[i] = c_t
else:
h_t = layer(input, prev_hidden[i])
h_t = self.norm_h(h_t)
prev_hidden[i] = h_t
input = h_t
step_logits = F.log_softmax(self.FC_vocabulary(h_t), dim=1)
distr = Categorical(logits=step_logits)
entropy.append(distr.entropy())
if self.training and not eval:
x = distr.sample()
else:
x = step_logits.argmax(dim=1)
logits.append(distr.log_prob(x))
#input = F.embedding(x,weight=self.FC_vocabulary.weight)
input = self.embedding_speaker(x)
sequence.append(x)
sequence = torch.stack(sequence).permute(1, 0)
logits = torch.stack(logits).permute(1, 0)
entropy = torch.stack(entropy).permute(1, 0)
if self.force_eos:
zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
sequence = torch.cat([sequence, zeros.long()], dim=1)
logits = torch.cat([logits, zeros], dim=1)
entropy = torch.cat([entropy, zeros], dim=1)
return sequence, logits, entropy
def receive(self,message, receiver_input, message_lengths):
if message_lengths is None:
message_lengths=find_lengths(message)
prev_hidden = [torch.zeros((message.size(0),self.hidden_size)).to("cuda")]
prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_receiver - 1)])
prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_receiver)] # only used for LSTM
#inputs = self.embedding(message)
inputs = F.embedding(message,weight=self.FC_vocabulary.weight)
sequence = []
logits = []
entropy = []
for step in range(self.max_len):
input=inputs[:,step,:]
for i, layer in enumerate(self.cells_receiver):
if isinstance(layer, nn.LSTMCell):
h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
h_t = self.norm_h(h_t)
c_t = self.norm_c(c_t)
prev_c[i] = c_t
else:
h_t = layer(input, prev_hidden[i])
h_t = self.norm_h(h_t)
prev_hidden[i] = h_t
#step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1)
agent_output = F.log_softmax(F.linear(h_t,weight=self.FC_features.weight.T), dim=1)
log = torch.zeros(agent_output.size(0)).to(agent_output.device)
ent = log
#distr = Categorical(logits=step_logits)
#entropy.append(distr.entropy())
#x=step_logits.argmax(dim=1)
logits.append(log)
entropy.append(ent)
sequence.append(agent_output)
sequence = torch.stack(sequence).permute(1, 0, 2)
logits = torch.stack(logits).permute(1, 0)
entropy = torch.stack(entropy).permute(1, 0)
# Here choose EOS
#sequence=sequence[:,-1,:]
#logits=logits[:,-1]
#entropy=entropy[:,-1]
output=[]
for j in range(sequence.size(0)):
output.append(sequence[j,message_lengths[j]-1,:])
output=torch.stack(output)
logits=logits[:,-1]
entropy=entropy[:,-1]
return output, logits, entropy
    def imitate(self, x):
        """Generate a message for input ``x`` with the speaker RNN stack.

        Unlike the regular ``send``, each step's full categorical
        distribution (``distr.probs``) is collected instead of the log-prob
        of the sampled symbol, so the result can serve as a soft target for
        the imitation loss.

        Returns:
            sequence: (batch, max_len) sampled symbol ids
                (greedy argmax when not in training mode).
            logits: (batch, vocab_size, max_len) per-step symbol
                probabilities.
            entropy: (batch, max_len) per-step policy entropies.
        """
        # First hidden state encodes the input; deeper layers start at zero.
        prev_hidden = [self.FC_features(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender)] # only used for LSTM
        # Start-of-sequence embedding replicated over the batch.
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells_sender):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                # Feed each layer's output into the next stacked layer.
                input = h_t
            step_logits = F.log_softmax(self.FC_vocabulary(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample during training (exploration); greedy decode at eval.
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            # Keep the whole distribution, not the sampled log-prob.
            logits.append(distr.probs)
            #input = F.embedding(x,weight=self.FC_vocabulary.weight)
            input = self.embedding_speaker(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1,2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # NOTE(review): logits is 3-D here (batch, vocab, len) while zeros is
        # 2-D, so this concat would raise if force_eos were True — presumably
        # force_eos is always False for agents using imitate(); confirm.
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class DialogReinforce(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    One ``forward`` call plays a single direction of the dialog:
      1. the sender encodes ``sender_input`` into a discrete message;
      2. the other agent decodes it ("cross" listening) and the sender
         decodes its own message ("self" listening);
      3. the sender imitates the message the receiver would have produced
         for the same input, weighted by how well the sender itself
         understands that message.
    The sender policy is optimised with REINFORCE (variance-reduced by a
    running-mean baseline or per-batch reward standardisation); listener
    outputs are optimised directly with the differentiable understanding
    loss.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing send/receive_2/imitate.
            loss_understanding: callable(sender_input, receiver_output)
                -> (per-example loss, aux dict with 'acc').
            loss_imitation: callable(message, probs, lengths)
                -> (per-example loss, aux dict with 'acc_imitation').
            optim_params: e.g. {"length_cost":0.,
                                "sender_entropy_coeff_1":0.,
                                "receiver_entropy_coeff_1":0.,
                                "sender_entropy_coeff_2":0.,
                                "receiver_entropy_coeff_2":0.}
            loss_weights: e.g. {"self":1.,
                                "cross":1.,
                                "imitation":1.,
                                "length_regularization":0.,
                                "entropy_regularization":1.}
            device: torch device both agents are moved to.
            baseline_mode: "original" (running-mean baseline) or
                "new" (per-batch reward standardisation).
            reward_mode: "neg_loss" or "proba".
        """
        super(DialogReinforce, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running statistics for the "original" baseline mode.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the game.

        Inputs:
            - direction : "1->2" or "2->1" (who talks / who listens)
        Returns:
            (optimized_loss, rest): scalar loss to backprop, and a dict of
            detached monitoring values keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1

        # --- 1. Agent actions ---
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening (the other agent decodes the message)
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive_2(message, receiver_input, message_lengths)
        # Self listening (the sender decodes its own message)
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive_2(message, receiver_input, message_lengths)
        # Imitation: the sender reconstructs the (greedy) message the
        # receiver would produce for the same input.
        message_to_imitate, _, _ = agent_receiver.send(sender_input, eval=True)
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive_2(message_to_imitate, receiver_input, message_to_imitate_lengths)
        message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(sender_input)

        # --- 2. Losses computation ---
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate, prob_reconstruction, message_to_imitate_lengths)
        _, rest_und_cross = self.loss_understanding(sender_input, send_output)  # kept for parity; currently unused
        # Confidence weight: probability the sender assigns to the true
        # input after decoding the receiver's message; gates imitation.
        prob_conf = torch.exp((sender_input * F.log_softmax(send_output, dim=1)).sum(1))
        loss_imitation = loss_imitation * prob_conf
        # Weighted average of the three differentiable loss terms.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross + self.loss_weights["imitation"] * loss_imitation
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        # REINFORCE rewards (detached from the listener graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())

        # --- 3. Entropy + length regularization ---
        # Entropy / log-prob of sender choices up to and including the EOS
        # symbol; later positions are ignored.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # --- 4. Variance reduction ---
        if self.baseline_mode == "original":
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Standardise rewards per batch; eps prevents NaN gradients when
            # a batch reward has zero variance (same guard as the
            # multi-agent variant of this class).
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - loss_imitation.detach().mean()) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()

        # --- 5. Final loss ---
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross + self.loss_weights["imitation"] * policy_loss_imitation
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # The listeners are deterministic/differentiable: add the actual loss.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # --- 6. Store results (detached monitoring values) ---
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Online update of the running-mean baseline for ``name``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceSingleListener(nn.Module):
    """
    Single-listener variant of the Dialog game.

    Identical to ``DialogReinforce`` except that ``forward`` overwrites
    ``self.loss_weights`` per direction so that only one listening loss is
    trained per round: direction "1->2" trains self-listening only
    ({"self":1.,"cross":0.,"imitation":0.}), any other direction trains
    cross-listening only ({"self":0.,"cross":1.,"imitation":0.}).
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing send/receive_2/imitate.
            loss_understanding: callable(sender_input, receiver_output)
                -> (per-example loss, aux dict with 'acc').
            loss_imitation: callable(message, probs, lengths)
                -> (per-example loss, aux dict with 'acc_imitation').
            optim_params: e.g. {"length_cost":0.,
                                "sender_entropy_coeff_1":0.,
                                "receiver_entropy_coeff_1":0.,
                                "sender_entropy_coeff_2":0.,
                                "receiver_entropy_coeff_2":0.}
            loss_weights: initial weights dict; overwritten per direction in
                ``forward``.
            device: torch device both agents are moved to.
            baseline_mode: "original" (running-mean baseline) or
                "new" (per-batch reward standardisation).
            reward_mode: "neg_loss" or "proba".
        """
        super(DialogReinforceSingleListener, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running statistics for the "original" baseline mode.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the game, training a single listener.

        Inputs:
            - direction : "1->2" or "2->1" (who talks / who listens)
        Returns:
            (optimized_loss, rest): scalar loss to backprop, and a dict of
            detached monitoring values keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
            # Only the sender's own listening is trained in this direction.
            self.loss_weights = {"self": 1., "cross": 0., "imitation": 0.}
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
            # Only the cross listener is trained in this direction.
            self.loss_weights = {"self": 0., "cross": 1., "imitation": 0.}

        # --- 1. Agent actions ---
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening (the other agent decodes the message)
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive_2(message, receiver_input, message_lengths)
        # Self listening (the sender decodes its own message)
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive_2(message, receiver_input, message_lengths)
        # Imitation: the sender reconstructs the (greedy) message the
        # receiver would produce for the same input.
        message_to_imitate, _, _ = agent_receiver.send(sender_input, eval=True)
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive_2(message_to_imitate, receiver_input, message_to_imitate_lengths)
        message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(sender_input)

        # --- 2. Losses computation ---
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate, prob_reconstruction, message_to_imitate_lengths)
        _, rest_und_cross = self.loss_understanding(sender_input, send_output)  # kept for parity; currently unused
        # Confidence weight: probability the sender assigns to the true
        # input after decoding the receiver's message; gates imitation.
        prob_conf = torch.exp((sender_input * F.log_softmax(send_output, dim=1)).sum(1))
        loss_imitation = loss_imitation * prob_conf
        # Weighted average of the three differentiable loss terms.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross + self.loss_weights["imitation"] * loss_imitation
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        # REINFORCE rewards (detached from the listener graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())

        # --- 3. Entropy + length regularization ---
        # Entropy / log-prob of sender choices up to and including the EOS
        # symbol; later positions are ignored.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # --- 4. Variance reduction ---
        if self.baseline_mode == "original":
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Standardise rewards per batch; eps prevents NaN gradients when
            # a batch reward has zero variance (same guard as the
            # multi-agent variant of this class).
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - loss_imitation.detach().mean()) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()

        # --- 5. Final loss ---
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross + self.loss_weights["imitation"] * policy_loss_imitation
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # The listeners are deterministic/differentiable: add the actual loss.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # --- 6. Store results (detached monitoring values) ---
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Online update of the running-mean baseline for ``name``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceCompositionality(nn.Module):
    """
    Compositional variant of the Dialog game (no imitation).

    Inputs are attribute/value objects (``n_attributes`` attributes, each
    with ``n_values`` values). One ``forward`` call makes the sender emit a
    message, which both the other agent ("cross") and the sender itself
    ("self") decode with ``receive``; only the understanding losses are
    trained. The sender policy is optimised with REINFORCE.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing send/receive.
            n_attributes, n_values: size of the compositional input space.
            loss_understanding: callable(sender_input, receiver_output,
                n_attributes, n_values) -> (per-example loss, aux dict with
                per-attribute 'acc').
            optim_params: e.g. {"length_cost":0.,
                                "sender_entropy_coeff_1":0.,
                                "receiver_entropy_coeff_1":0.,
                                "sender_entropy_coeff_2":0.,
                                "receiver_entropy_coeff_2":0.}
            loss_weights: e.g. {"self":1., "cross":1., ...}; only "self"
                and "cross" are used here.
            device: torch device both agents are moved to.
            baseline_mode: "original" (running-mean baseline) or
                "new" (per-batch reward standardisation).
            reward_mode: "neg_loss", "proba" or "dense" (1 only when every
                attribute is predicted correctly).
        """
        super(DialogReinforceCompositionality, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running statistics for the "original" baseline mode.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the game.

        Inputs:
            - direction : "1->2" or "2->1" (who talks / who listens)
        Returns:
            (optimized_loss, rest): scalar loss to backprop, and a dict of
            detached monitoring values keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1

        # --- 1. Agent actions ---
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening (the other agent decodes the message)
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening (the sender decodes its own message)
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # No imitation in the compositional game.

        # --- 2. Losses computation ---
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross, self.n_attributes, self.n_values)
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self, self.n_attributes, self.n_values)
        # Weighted average of the two differentiable loss terms.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        # REINFORCE rewards (detached from the listener graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        elif self.reward_mode == "dense":
            # 1 iff all attributes are correctly recovered, else 0.
            reward_self = 1. * (rest_self["acc"].sum(1) == self.n_attributes).detach()
            reward_cross = 1. * (rest_cross["acc"].sum(1) == self.n_attributes).detach()

        # --- 3. Entropy + length regularization ---
        # Entropy / log-prob of sender choices up to and including the EOS
        # symbol; later positions are ignored. (Receiver outputs are
        # per-attribute, hence the .mean(1) to get a (batch,) template.)
        effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
        effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # --- 4. Variance reduction ---
        if self.baseline_mode == "original":
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Standardise rewards per batch; eps prevents NaN gradients when
            # a batch reward has zero variance (same guard as the
            # multi-agent variant of this class).
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()

        # --- 5. Final loss ---
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # The listeners are deterministic/differentiable: add the actual loss.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # --- 6. Store results (detached monitoring values) ---
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Online update of the running-mean baseline for ``name``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceCompositionalitySingleListener(nn.Module):
    """
    Single-listener variant of the compositional Dialog game.

    Identical to ``DialogReinforceCompositionality`` except that ``forward``
    overwrites ``self.loss_weights`` per direction so that only one
    listening loss is trained per round: direction "1->2" trains
    self-listening only ({"self":1.,"cross":0.,"imitation":1.}), any other
    direction trains cross-listening only ({"self":0.,"cross":1.,
    "imitation":1.}); the "imitation" entry is unused here.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing send/receive.
            n_attributes, n_values: size of the compositional input space.
            loss_understanding: callable(sender_input, receiver_output,
                n_attributes, n_values) -> (per-example loss, aux dict with
                per-attribute 'acc').
            optim_params: e.g. {"length_cost":0.,
                                "sender_entropy_coeff_1":0.,
                                "receiver_entropy_coeff_1":0.,
                                "sender_entropy_coeff_2":0.,
                                "receiver_entropy_coeff_2":0.}
            loss_weights: initial weights dict; overwritten per direction in
                ``forward``.
            device: torch device both agents are moved to.
            baseline_mode: "original" (running-mean baseline) or
                "new" (per-batch reward standardisation).
            reward_mode: "neg_loss", "proba" or "dense" (1 only when every
                attribute is predicted correctly).
        """
        super(DialogReinforceCompositionalitySingleListener, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running statistics for the "original" baseline mode.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the game, training a single listener.

        Inputs:
            - direction : "1->2" or "2->1" (who talks / who listens)
        Returns:
            (optimized_loss, rest): scalar loss to backprop, and a dict of
            detached monitoring values keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
            # Only the sender's own listening is trained in this direction.
            self.loss_weights = {"self": 1., "cross": 0., "imitation": 1.}
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
            # Only the cross listener is trained in this direction.
            self.loss_weights = {"self": 0., "cross": 1., "imitation": 1.}

        # --- 1. Agent actions ---
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening (the other agent decodes the message)
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening (the sender decodes its own message)
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # No imitation in the compositional game.

        # --- 2. Losses computation ---
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross, self.n_attributes, self.n_values)
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self, self.n_attributes, self.n_values)
        # Weighted average of the two differentiable loss terms.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        # REINFORCE rewards (detached from the listener graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        elif self.reward_mode == "dense":
            # 1 iff all attributes are correctly recovered, else 0.
            reward_self = 1. * (rest_self["acc"].sum(1) == self.n_attributes).detach()
            reward_cross = 1. * (rest_cross["acc"].sum(1) == self.n_attributes).detach()

        # --- 3. Entropy + length regularization ---
        # Entropy / log-prob of sender choices up to and including the EOS
        # symbol; later positions are ignored. (Receiver outputs are
        # per-attribute, hence the .mean(1) to get a (batch,) template.)
        effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
        effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # --- 4. Variance reduction ---
        if self.baseline_mode == "original":
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Standardise rewards per batch; eps prevents NaN gradients when
            # a batch reward has zero variance (same guard as the
            # multi-agent variant of this class).
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()

        # --- 5. Final loss ---
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # The listeners are deterministic/differentiable: add the actual loss.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # --- 6. Store results (detached monitoring values) ---
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Online update of the running-mean baseline for ``name``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceCompositionalityMultiAgent(nn.Module):
"""
DialogReinforce implements the Dialog game
"""
    def __init__(self,
                 Agents,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Multi-agent variant: ``Agents`` is a dict keyed "agent_<i>".

        ``optim_params`` and ``loss_weights`` are keyed per agent as read
        back in ``forward``, e.g.
            optim_params["agent_0"] = {"length_cost":0.,
                                       "sender_entropy_coeff":0., ...}
            loss_weights["agent_0"] = {"self":1., "cross":1., ...}

        baseline_mode: "original" (running-mean baseline) or "new"
            (per-batch reward standardisation).
        reward_mode: "neg_loss", "proba", "dense" or "discrete"
            (see ``forward``).
        """
        super(DialogReinforceCompositionalityMultiAgent, self).__init__()
        self.agents = Agents
        self.n_attributes=n_attributes
        self.n_values=n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode=baseline_mode
        self.reward_mode=reward_mode
        # Running statistics for the "original" baseline mode.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device=device
        # Move every agent in the dict onto the target device.
        for agent in self.agents:
            self.agents[agent].to(self.device)
def forward(self,
sender_input,
unused_labels,
sender_id,
receiver_ids,
receiver_input=None,
save_probs=None):
"""
Inputs:
- direction : N means "N->0"
"""
sender_input=sender_input.to(self.device)
"0. Get sender and receiver (id + optim info) for playing the game"
# Get sender_id and sender information
agent_sender = self.agents["agent_{}".format(sender_id)]
loss_weights_sender = self.loss_weights["agent_{}".format(sender_id)]
optim_params_sender = self.optim_params["agent_{}".format(sender_id)]
# Get receiver information (receiver_id always 0)
agent_receivers={"agent_{}".format(receiver_id):self.agents["agent_{}".format(receiver_id)] for receiver_id in receiver_ids}
" 1. Agent actions and loss"
# Message sending
message, log_prob_s,whole_log_prob_s, entropy_s = agent_sender.send(sender_input,return_policies=True)
message_lengths = find_lengths(message)
# Self listening
receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self,self.n_attributes,self.n_values)
# Cross listening
losses_cross={}
restes_cross = {}
if self.reward_mode=="dense":
samples = {}
for agent in agent_receivers:
if self.reward_mode=="dense":
sample, receiver_output_cross, log_prob_r_cross,whole_log_prob_r_cross, entropy_r_cross = agent_receivers[agent].receive(message, receiver_input, message_lengths,return_sample=True)
samples[agent] = sample
else:
receiver_output_cross, log_prob_r_cross,whole_log_prob_r_cross, entropy_r_cross = agent_receivers[agent].receive(message, receiver_input, message_lengths,return_policies=True)
loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross,self.n_attributes,self.n_values)
losses_cross[agent] = loss_cross
restes_cross[agent] = rest_cross
if save_probs:
np.save(save_probs+"_receiver_probs_"+agent+".npy",whole_log_prob_r_cross.cpu().numpy())
# Imitation
# NO IMITATION
"2. Reward computation"
loss_cross= torch.stack([losses_cross[agent] for agent in losses_cross]).mean(0)# MEAN ACROSS AXIS
# Average loss. Rk. Sortir loss_imitation de cette somme
loss = loss_weights_sender["self"]*loss_self + loss_weights_sender["cross"]*loss_cross
loss /= (loss_weights_sender["self"]+loss_weights_sender["cross"])
# Reward
if self.reward_mode=="neg_loss":
reward_self = -loss_self.detach()
reward_cross = -loss_cross.detach()
elif self.reward_mode=="proba":
reward_self = torch.exp(-loss_self.detach())
reward_cross = torch.exp(-loss_cross.detach())
elif self.reward_mode=="dense":
reward_self = 1.*(rest_self["acc"].sum(1)==self.n_attributes).detach()
reward_cross=[]
#for agent in agent_receivers:
#reward_cross.append(1.*(restes_cross[agent]["acc"].sum(1)==self.n_attributes).detach())
#reward_cross=torch.stack(reward_cross)
#reward_cross=reward_cross.mean(0)
for agent in agent_receivers:
acc = 1*(samples[agent] == sender_input.reshape([sample.size(0),sample.size(1),sender_input.size(1)//sample.size(1)]).argmax(2)).float().mean(1).detach()
acc = 1*(acc==1).float()
reward_cross.append(acc)
reward_cross=torch.stack(reward_cross)
reward_cross=reward_cross.mean(0)
elif self.reward_mode=="discrete":
reward_self = rest_self["acc"].sum(1).detach()
reward_cross = rest_cross["acc"].sum(1).detach()
"3. Entropy + length Regularization"
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
for i in range(message.size(1)):
not_eosed = (i < message_lengths).float()
effective_entropy_s += entropy_s[:, i] * not_eosed
effective_log_prob_s += log_prob_s[:, i] * not_eosed
effective_entropy_s = effective_entropy_s / message_lengths.float()
weighted_entropy = effective_entropy_s.mean() * optim_params_sender["sender_entropy_coeff"] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
log_prob = effective_log_prob_s #+ log_prob_r_12
length_loss = message_lengths.float() * optim_params_sender["length_cost"]
"4. Variance reduction"
if self.baseline_mode=="original":
policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
elif self.baseline_mode=="new":
eps=1e-16
policy_loss_self = -((reward_self - reward_self.mean())/(reward_self.std()+eps) * log_prob).mean()
policy_loss_cross = -((reward_cross - reward_cross.mean())/(reward_cross.std()+eps) * log_prob).mean()
policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
" 5. Final loss"
policy_loss = loss_weights_sender["self"]*policy_loss_self + loss_weights_sender["cross"]*policy_loss_cross
policy_loss /= (loss_weights_sender["self"]+loss_weights_sender["cross"])
optimized_loss = policy_length_loss + policy_loss - weighted_entropy
speaker_loss = optimized_loss.detach().item()
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss += loss.mean()
if self.training:
self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
self.update_baseline('length_{}'.format(sender_id), length_loss)
"6. Store results"
rest={}
rest['loss'] = optimized_loss.detach().item()
rest['loss_speaker'] = speaker_loss
rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
for receiver_id in receiver_ids:
rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = losses_cross["agent_{}".format(receiver_id)].mean().item()
rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
for receiver_id in receiver_ids:
rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=restes_cross["agent_{}".format(receiver_id)]['acc'].mean().item()
rest['reinforce_term_{}'.format(sender_id)]=policy_loss.detach().item()
rest['baseline_term_{}'.format(sender_id)]=(policy_loss/log_prob.mean()).detach().item()
rest['policy_{}'.format(sender_id)]=whole_log_prob_s.detach()
"7. Save probs"
if save_probs:
np.save(save_probs+"_sender_input.npy",sender_input.cpu().numpy())
np.save(save_probs+"_message.npy",message.cpu().numpy())
np.save(save_probs+"_sender_probs.npy",whole_log_prob_s.cpu().numpy())
return optimized_loss, rest
def update_baseline(self, name, value):
self.n_points[name] += 1
self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class ForwardPassSpeakerMultiAgent(nn.Module):
    """Imitation-only forward pass for a population of speakers.

    One sender agent emits a message for ``sender_input`` and its per-symbol
    policy is scored against a fixed reference message
    (``message_to_imitate``) through ``loss_imitation``. No listener game is
    played in this variant.
    """

    def __init__(self,
                 Agents,
                 n_attributes,
                 n_values,
                 loss_imitation,
                 optim_params,
                 message_to_imitate,
                 loss_weights=None,
                 baseline_mode="original",
                 reward_mode="neg_loss",
                 device="cpu"):
        """
        Args:
            Agents: dict mapping "agent_<id>" -> agent module.
            n_attributes, n_values: dimensions of the attribute/value input space.
            loss_imitation: callable(target_message, sender_log_probs,
                target_lengths) -> (loss, aux dict).
            optim_params: per-agent optimisation hyper-parameters, e.g.
                {"agent_<id>": {"length_cost": 0.,
                                "sender_entropy_coeff": 0., ...}}.
            message_to_imitate: reference message tensor the speakers imitate.
            loss_weights: optional weighting dict, e.g. {"self": 1., "cross": 1.,
                "imitation": 1., "length_regularization": 0.,
                "entropy_regularization": 1.}; kept for API symmetry with the
                other forward-pass classes.
            baseline_mode, reward_mode, device: FIX — the original read these
                names (and ``loss_weights``) without declaring them as
                parameters, raising NameError on construction; they are now
                explicit keyword arguments with backward-compatible defaults.
        """
        super(ForwardPassSpeakerMultiAgent, self).__init__()
        self.agents = Agents
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        self.message_to_imitate = message_to_imitate
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        for agent in self.agents:
            self.agents[agent].to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                sender_id,
                receiver_ids,
                receiver_input=None,
                save_probs=None):
        """Run one imitation step for speaker ``sender_id``.

        Returns:
            (loss_imitation, rest): the imitation loss to optimise and a dict
            of logging scalars.
        """
        sender_input = sender_input.to(self.device)
        "0. Get sender (id + optim info) for playing the game"
        agent_sender = self.agents["agent_{}".format(sender_id)]
        optim_params_sender = self.optim_params["agent_{}".format(sender_id)]
        " 1. Agent actions and loss"
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        message_to_imitate_lengths = find_lengths(self.message_to_imitate)
        # FIX: the original passed the non-existent attribute
        # ``self.message_to_imitate_lengths`` instead of the local variable.
        loss_imitation, rest_imitation = self.loss_imitation(self.message_to_imitate,
                                                             whole_log_prob_s,
                                                             message_to_imitate_lengths)
        "6. Store results"
        rest = {}
        rest['loss'] = loss_imitation.detach().item()
        rest['loss_{}'.format(sender_id)] = loss_imitation.detach().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        "7. Save probs"
        if save_probs:
            np.save(save_probs + "_sender_input.npy", sender_input.cpu().numpy())
            np.save(save_probs + "_message.npy", message.cpu().numpy())
            np.save(save_probs + "_sender_probs.npy", whole_log_prob_s.cpu().numpy())
        # FIX: the original returned the undefined name ``optimized_loss``.
        return loss_imitation, rest

    def update_baseline(self, name, value):
        # Incremental running mean: mean <- mean + (x - mean) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceMemory(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    Memory variant: per input index, the sender keeps a memory of (one-hot
    encoded) messages produced by the other agent, together with importance
    weights, and maintains an estimate of the other agent's sending policy.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 vocab_size,
                 max_len,
                 n_features,
                 device):
        """
        optim_params={"length_cost":0.,
                      "sender_entropy_coeff_1":0.,
                      "receiver_entropy_coeff_1":0.,
                      "sender_entropy_coeff_2":0.,
                      "receiver_entropy_coeff_2":0.}
        loss_weights={"self":1.,
                      "cross":1.,
                      "imitation":1.,
                      "length_regularization":0.,
                      "entropy_regularization":1.}
        """
        super(DialogReinforceMemory, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.max_len = max_len
        self.vocab_size = vocab_size
        self.n_features = n_features
        self.device = device

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """Play one dialog round and return (optimized_loss, logging dict).

        Inputs:
            - direction : "1->2" or "2->1"
        """
        sender_input = sender_input.to(self.device)
        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
        " 1. Agent actions "
        # Message sending
        message, log_prob_s, entropy_s = agent_sender.send(sender_input)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # Imitation: query the other agent's greedy message and store it in the
        # sender's memory, keyed by the sender's own reconstruction of the input.
        message_to_imitate, _, _ = agent_receiver.send(sender_input, eval=True)
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive(message_to_imitate, receiver_input, message_to_imitate_lengths)
        i_hat = send_output.argmax(1).cpu().numpy()
        policy_prob = torch.exp(send_output.max(1).values)
        # NOTE(review): the memory weights apply exp() to ``policy_prob`` which
        # is already exponentiated above (double exponentiation) — confirm intended.
        for j in range(send_output.size(0)):
            m = message_to_imitate[j]
            # FIX: dense encoding now goes through ``to_dense`` (previously an
            # unused helper that also forgot its return) and is moved to
            # ``self.device`` instead of a hard-coded "cuda".
            m_dense = self.to_dense(m).to(self.device)
            agent_sender.mem[i_hat[j]].append(m_dense)
            agent_sender.w_mem[i_hat[j]].append(torch.exp(policy_prob[j]))
        # Re-estimate the other agent's policy as an importance-weighted mean
        # of the memorised messages.
        for i in agent_sender.mem:
            if len(agent_sender.mem[i]) > 0:
                agent_sender.est_policy[i] = (torch.stack(agent_sender.mem[i]) * torch.stack(agent_sender.w_mem[i]).unsqueeze(1).unsqueeze(2)).sum(0)
                agent_sender.est_policy[i] /= torch.stack(agent_sender.w_mem[i]).sum(0)
        # NOTE(review): ``policy_receiver`` is computed but never used below.
        policy_receiver = []
        for i in range(sender_input.size(0)):
            policy_receiver.append(agent_sender.est_policy[int(sender_input.argmax(1)[i].cpu().numpy())])
        policy_receiver = torch.stack(policy_receiver)
        "2. Losses computation"
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        # Imitation loss is currently disabled (kept at zero for logging).
        # FIX: sized by the actual batch instead of a hard-coded 1024, and
        # placed on ``self.device`` instead of a hard-coded "cuda".
        loss_imitation = torch.zeros(sender_input.size(0)).to(self.device)
        rest_imitation = {"acc_imitation": torch.tensor([0.])}
        # Average loss. (Note: consider moving loss_imitation out of this sum.)
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        "3. Entropy + length Regularization"
        # Entropy of the sender's choices before and including the eos symbol —
        # we don't care about what comes after.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        # Log prob of the choices made by the sender before and including the
        # eos symbol — again, we don't care about the rest.
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]
        "4. Variance reduction"
        policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        " 5. Final loss"
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, we apply the actual loss.
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean: mean <- mean + (x - mean) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]

    def to_dense(self, m):
        """One-hot encode message ``m`` into a (max_len, vocab_size) tensor."""
        m_dense = torch.zeros([self.max_len, self.vocab_size])
        for i in range(len(m)):
            m_dense[i, m[i]] = 1.
        # FIX: the original forgot to return the encoded tensor (returned None).
        return m_dense
class DialogReinforceBis(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    "Bis" variant: in addition to the self/cross understanding losses, the
    sender is trained to imitate the message it produced on the *previous*
    batch, with the imitation loss gated by listener accuracy and confidence.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 n_features,
                 max_len,
                 batch_size,
                 device):
        """
        optim_params={"length_cost":0.,
                      "sender_entropy_coeff_1":0.,
                      "receiver_entropy_coeff_1":0.,
                      "sender_entropy_coeff_2":0.,
                      "receiver_entropy_coeff_2":0.}
        loss_weights={"self":1.,
                      "cross":1.,
                      "imitation":1.,
                      "length_regularization":0.,
                      "entropy_regularization":1.}
        """
        super(DialogReinforceBis, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.batch_size = batch_size
        # Kept for the one-hot table built in forward().
        self.n_features = n_features
        # Buffers holding the previous batch; train and eval are kept separate
        # because their sizes differ. FIX: placed on ``self.device`` instead of
        # a hard-coded "cuda".
        self.last_messages_train = torch.zeros([batch_size, max_len], dtype=int).to(self.device)
        self.last_messages_eval = torch.zeros([n_features, max_len], dtype=int).to(self.device)
        self.last_input_train = torch.zeros([batch_size, n_features]).to(self.device)
        self.last_input_eval = torch.zeros([n_features, n_features]).to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """Play one dialog round and return (optimized_loss, logging dict).

        Inputs:
            - direction : "1->2" or "2->1"
        """
        sender_input = sender_input.to(self.device)
        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
        " 1. Agent actions "
        # Message sending
        message, log_prob_s, entropy_s = agent_sender.send(sender_input)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # Imitation target: messages/inputs from the previous batch of the same
        # size (train vs eval buffers).
        if sender_input.size(0) == self.batch_size:
            message_to_imitate = self.last_messages_train
            last_input = self.last_input_train
        else:
            message_to_imitate = self.last_messages_eval
            last_input = self.last_input_eval
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive(message_to_imitate, receiver_input, message_to_imitate_lengths)
        # FIX: one-hot table sized by ``n_features`` instead of a hard-coded 100.
        one_hots = torch.eye(self.n_features)
        inp_to_imitate = []
        for i in range(send_output.size(0)):
            inp_to_imitate.append(one_hots[send_output.argmax(1)[i]])
        inp_to_imitate = torch.stack(inp_to_imitate).to(self.device)
        # NOTE(review): ``inp_to_imitate`` is never used below — ``imitate`` is
        # fed ``last_input`` instead; confirm which argument was intended.
        message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(last_input)
        "2. Losses computation"
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate, prob_reconstruction, message_to_imitate_lengths)
        _, rest_und_cross = self.loss_understanding(last_input, send_output)
        # Gate the imitation loss by listener accuracy and confidence.
        loss_imitation = loss_imitation * rest_und_cross["acc"]
        prob_conf = torch.exp((last_input * F.log_softmax(send_output, dim=1)).sum(1))
        loss_imitation *= prob_conf
        # Remember the current batch for the next imitation step.
        if sender_input.size(0) == self.batch_size:
            self.last_messages_train = message
            self.last_input_train = sender_input
        else:
            self.last_messages_eval = message
            self.last_input_eval = sender_input
        # Average loss. (Note: consider moving loss_imitation out of this sum.)
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross + self.loss_weights["imitation"] * loss_imitation
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        "3. Entropy + length Regularization"
        # Entropy of the sender's choices before and including the eos symbol —
        # we don't care about what comes after.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        # Log prob of the choices made by the sender before and including the
        # eos symbol — again, we don't care about the rest.
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]
        "4. Variance reduction"
        policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        " 5. Final loss"
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross + self.loss_weights["imitation"] * policy_loss_imitation
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, we apply the actual loss.
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean: mean <- mean + (x - mean) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceKL(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    KL variant: an imitation loss is computed as a (confidence-weighted) KL
    divergence between the sender's per-symbol policy and the other agent's
    policy. Note that this imitation term is currently logged and baselined
    but excluded from the optimised policy loss (see step 5).
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 device):
        """
        optim_params={"length_cost":0.,
                      "sender_entropy_coeff_1":0.,
                      "receiver_entropy_coeff_1":0.,
                      "sender_entropy_coeff_2":0.,
                      "receiver_entropy_coeff_2":0.}
        loss_weights={"self":1.,
                      "cross":1.,
                      "imitation":1.,
                      "length_regularization":0.,
                      "entropy_regularization":1.}
        """
        super(DialogReinforceKL, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """Play one dialog round and return (optimized_loss, logging dict).

        Inputs:
            - direction : "1->2" or "2->1"
        """
        sender_input = sender_input.to(self.device)
        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
        " 1. Agent actions "
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # Imitation: greedy message from the other agent, passed through the
        # sender's own listener to obtain a confidence weight.
        message_other, other_log_prob_s, other_whole_log_prob_s, _ = agent_receiver.send(sender_input, eval=True)
        message_other_lengths = find_lengths(message_other)
        send_output, _, _ = agent_sender.receive(message_other, receiver_input, message_other_lengths)
        # FIX: removed the dead assignment ``other_log_prob_s = send_output.max()``
        # from the original; the value was never used afterwards.
        "2. Losses computation"
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        prob_conf = torch.exp((sender_input * F.log_softmax(send_output, dim=1)).sum(1))
        # FIX: ``reduce=False`` is deprecated; ``reduction='none'`` is the
        # equivalent modern spelling.
        KL_div = torch.nn.KLDivLoss(reduction='none')
        # NOTE(review): KLDivLoss expects its *input* in log-space, yet both
        # arguments are exponentiated here (probabilities) — confirm intended.
        loss_imitation = KL_div(torch.exp(whole_log_prob_s.reshape([whole_log_prob_s.size(0) * whole_log_prob_s.size(1), whole_log_prob_s.size(2)])),
                                torch.exp(other_whole_log_prob_s.reshape([other_whole_log_prob_s.size(0) * other_whole_log_prob_s.size(1), other_whole_log_prob_s.size(2)])))
        loss_imitation = loss_imitation.reshape([whole_log_prob_s.size(0), whole_log_prob_s.size(1), whole_log_prob_s.size(2)])
        loss_imitation = loss_imitation.sum(2).sum(1)
        rest_imitation = {"acc_imitation": torch.tensor([0.])}
        loss_imitation *= prob_conf
        # Average loss. (Note: consider moving loss_imitation out of this sum.)
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross + self.loss_weights["imitation"] * loss_imitation
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"] + self.loss_weights["imitation"])
        "3. Entropy + length Regularization"
        # Entropy of the sender's choices before and including the eos symbol —
        # we don't care about what comes after.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        # Log prob of the choices made by the sender before and including the
        # eos symbol — again, we don't care about the rest.
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]
        "4. Variance reduction"
        policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        " 5. Final loss"
        # The imitation policy term is deliberately excluded here (kept for logging).
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, we apply the actual loss.
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean: mean <- mean + (x - mean) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceBaseline(nn.Module):
"""
DialogReinforce implements the Dialog game
"""
def __init__(self,
Agent_1,
Agent_2,
loss,
sender_entropy_coeff_1,
receiver_entropy_coeff_1,
sender_entropy_coeff_2,
receiver_entropy_coeff_2,
device,
loss_weights=[0.5,0.5],
length_cost=0.0,
unigram_penalty=0.0,
reg=False):
"""
"""
super(DialogReinforceBaseline, self).__init__()
self.agent_1 = Agent_1
self.agent_2 = Agent_2
self.sender_entropy_coeff_1 = sender_entropy_coeff_1
self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
self.sender_entropy_coeff_2 = sender_entropy_coeff_2
self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
self.loss = loss
self.loss_weights = loss_weights
self.length_cost = length_cost
self.unigram_penalty = unigram_penalty
self.mean_baseline = defaultdict(float)
self.n_points = defaultdict(float)
self.reg=reg
self.device=device
def forward(self, sender_input, labels, receiver_input=None):
sender_input=sender_input.to(self.device)
"1. Agent_1 -> Agent_2"
message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
message_lengths_1 = find_lengths(message_1)
receiver_output_1, log_prob_r_1, entropy_r_1 = self.agent_2.receiver(message_1, receiver_input, message_lengths_1)
loss_1, rest_1 = self.loss(sender_input, message_1, receiver_input, receiver_output_1, labels)
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
for i in range(message_1.size(1)):
not_eosed_1 = (i < message_lengths_1).float()
effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
entropy_r_1.mean() * self.receiver_entropy_coeff_1
log_prob_1 = effective_log_prob_s_1 + log_prob_r_1
length_loss_1 = message_lengths_1.float() * self.length_cost
policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
policy_loss_1 = ((loss_1.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_1 += loss_1.mean()
if self.training:
self.update_baseline('loss_1', loss_1)
self.update_baseline('length_1', length_loss_1)
for k, v in rest_1.items():
rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_1['loss'] = optimized_loss_1.detach().item()
rest_1['sender_entropy'] = entropy_s_1.mean().item()
rest_1['receiver_entropy'] = entropy_r_1.mean().item()
rest_1['original_loss'] = loss_1.mean().item()
rest_1['mean_length'] = message_lengths_1.float().mean().item()
"2. Agent_2 -> Agent_1"
message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
message_lengths_2 = find_lengths(message_2)
receiver_output_2, log_prob_r_2, entropy_r_2 = self.agent_1.receiver(message_2, receiver_input, message_lengths_2)
loss_2, rest_2 = self.loss(sender_input, message_2, receiver_input, receiver_output_2, labels)
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
for i in range(message_2.size(1)):
not_eosed_2 = (i < message_lengths_2).float()
effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
entropy_r_2.mean() * self.receiver_entropy_coeff_2
log_prob_2 = effective_log_prob_s_2 + log_prob_r_2
length_loss_2 = message_lengths_2.float() * self.length_cost
policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
policy_loss_2 = ((loss_2.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_2 += loss_2.mean()
if self.training:
self.update_baseline('loss_2', loss_2)
self.update_baseline('length_2', length_loss_2)
for k, v in rest_2.items():
rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_2['loss'] = optimized_loss_2.detach().item()
rest_2['sender_entropy'] = entropy_s_2.mean().item()
rest_2['receiver_entropy'] = entropy_r_2.mean().item()
rest_2['original_loss'] = loss_2.mean().item()
rest_2['mean_length'] = message_lengths_2.float().mean().item()
"3. Average loss"
optimized_loss = self.loss_weights[0]*optimized_loss_1 + self.loss_weights[1]*optimized_loss_2
rest={}
rest['loss']=self.loss_weights[0]*rest_1['loss'] + self.loss_weights[1]* rest_2['loss']
rest['sender_entropy']=self.loss_weights[0]*rest_1['sender_entropy'] + self.loss_weights[1]* rest_2['sender_entropy']
rest['receiver_entropy']=self.loss_weights[0]*rest_1['receiver_entropy'] + self.loss_weights[1]* rest_2['receiver_entropy']
rest['original_loss']=self.loss_weights[0]*rest_1['original_loss'] + self.loss_weights[1]* rest_2['original_loss']
rest['mean_length']=self.loss_weights[0]*rest_1['mean_length'] + self.loss_weights[1]* rest_2['mean_length']
rest['acc']=self.loss_weights[0]*rest_1['acc'] + self.loss_weights[1]* rest_2['acc']
return optimized_loss_1, optimized_loss_2, rest
    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under `name`.

        Maintains an incremental average of `value.mean()` over all calls for
        this key; used as the REINFORCE variance-reduction baseline.
        """
        self.n_points[name] += 1
        # incremental mean: m_k = m_{k-1} + (x_k - m_{k-1}) / k
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel1(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    Two agents each own a sender and a receiver.  One forward pass plays the
    four possible directions, with a single sampled message per speaker:

        agent_1 speaks:  1->1 (self) and 1->2 (cross)
        agent_2 speaks:  2->1 (cross) and 2->2 (self)

    Each direction is optimized with REINFORCE plus per-direction running-mean
    baselines (keys ``'loss_<tag>'`` / ``'length_<tag>'`` in ``mean_baseline``).

    Bug fix vs. the previous revision: directions 12 and 11 now *read* the same
    length-baseline keys they *update* ('length_12' / 'length_11').  Previously
    they read the never-updated key 'length_1', which stayed 0.0 forever
    (``mean_baseline`` is a ``defaultdict(float)``), silently disabling the
    length baseline for those directions.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[[0.25,0.25],[0.25,0.25]],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: agent exposing ``sender(sender_input)`` and
            ``receiver(message, receiver_input, lengths)``; same for Agent_2.
        :param loss: task loss ``(sender_input, message, receiver_input,
            receiver_output, labels) -> (loss, aux_dict)``.
        :param loss_weights: 2x2 weights [[w11, w12], [w21, w22]] used to
            average the four directions' logged metrics.
        :param length_cost: per-symbol penalty on message length.

        NOTE(review): the ``loss_weights`` default is a mutable list literal;
        it is never mutated here, but callers must not mutate it in place.
        """
        super(DialogReinforceModel1, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device = device
        # running means used as REINFORCE baselines; any key defaults to 0.0
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg

    def _sender_effective(self, entropy_s, log_prob_s, message, message_lengths,
                          like_entropy, like_log_prob):
        """Accumulate per-symbol sender entropy/log-prob up to and including EOS.

        ``like_entropy`` / ``like_log_prob`` are only shape/dtype templates for
        the accumulators (taken from the first receiver call, as before).
        Returns (mean-per-symbol effective entropy, summed effective log-prob).
        """
        effective_entropy = torch.zeros_like(like_entropy)
        effective_log_prob = torch.zeros_like(like_log_prob)
        for step in range(message.size(1)):
            # 1.0 while the message is still running, 0.0 after its EOS
            not_eosed = (step < message_lengths).float()
            effective_entropy += entropy_s[:, step] * not_eosed
            effective_log_prob += log_prob_s[:, step] * not_eosed
        # entropy is averaged over the effective message length
        return effective_entropy / message_lengths.float(), effective_log_prob

    def _direction_loss(self, tag, loss_value, log_prob_r, entropy_r,
                        effective_entropy_s, effective_log_prob_s,
                        message_lengths, sender_coeff, receiver_coeff):
        """REINFORCE surrogate for one direction; updates that direction's baselines.

        ``tag`` selects the baseline keys ('loss_<tag>' / 'length_<tag>'), so
        the keys read here are exactly the keys updated below (the bug fix).
        """
        weighted_entropy = effective_entropy_s.mean() * sender_coeff + \
                           entropy_r.mean() * receiver_coeff
        log_prob = effective_log_prob_s + log_prob_r
        length_loss = message_lengths.float() * self.length_cost
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_' + tag])
                              * effective_log_prob_s).mean()
        policy_loss = ((loss_value.detach() - self.mean_baseline['loss_' + tag]) * log_prob).mean()
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss_value.mean()
        if self.training:
            self.update_baseline('loss_' + tag, loss_value)
            self.update_baseline('length_' + tag, length_loss)
        return optimized_loss

    @staticmethod
    def _fill_stats(rest, optimized_loss, loss_value, entropy_s, entropy_r, message_lengths):
        """Convert aux tensors to floats in-place and add the standard logging keys."""
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss_value.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()

    def forward(self, sender_input, labels, receiver_input=None):
        sender_input = sender_input.to(self.device)

        "1. Agent 1"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)

        "1.2 Agent_1 -> Agent_2"
        receiver_output_12, log_prob_r_12, entropy_r_12 = self.agent_2.receiver(message_1, receiver_input, message_lengths_1)
        loss_12, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12, labels)
        # sender terms are shared by both directions in which agent_1 speaks
        effective_entropy_s_1, effective_log_prob_s_1 = self._sender_effective(
            entropy_s_1, log_prob_s_1, message_1, message_lengths_1, entropy_r_12, log_prob_r_12)
        optimized_loss_12 = self._direction_loss(
            '12', loss_12, log_prob_r_12, entropy_r_12,
            effective_entropy_s_1, effective_log_prob_s_1, message_lengths_1,
            self.sender_entropy_coeff_1, self.receiver_entropy_coeff_1)
        self._fill_stats(rest_12, optimized_loss_12, loss_12, entropy_s_1, entropy_r_12, message_lengths_1)

        "1.1 Agent_1 -> Agent_1"
        receiver_output_11, log_prob_r_11, entropy_r_11 = self.agent_1.receiver(message_1, receiver_input, message_lengths_1)
        loss_11, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11, labels)
        optimized_loss_11 = self._direction_loss(
            '11', loss_11, log_prob_r_11, entropy_r_11,
            effective_entropy_s_1, effective_log_prob_s_1, message_lengths_1,
            self.sender_entropy_coeff_1, self.receiver_entropy_coeff_1)
        self._fill_stats(rest_11, optimized_loss_11, loss_11, entropy_s_1, entropy_r_11, message_lengths_1)

        "2. Agent 2"
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
        message_lengths_2 = find_lengths(message_2)

        "2. Agent_2 -> Agent_1"
        receiver_output_21, log_prob_r_21, entropy_r_21 = self.agent_1.receiver(message_2, receiver_input, message_lengths_2)
        loss_21, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21, labels)
        effective_entropy_s_2, effective_log_prob_s_2 = self._sender_effective(
            entropy_s_2, log_prob_s_2, message_2, message_lengths_2, entropy_r_21, log_prob_r_21)
        optimized_loss_21 = self._direction_loss(
            '21', loss_21, log_prob_r_21, entropy_r_21,
            effective_entropy_s_2, effective_log_prob_s_2, message_lengths_2,
            self.sender_entropy_coeff_2, self.receiver_entropy_coeff_2)
        self._fill_stats(rest_21, optimized_loss_21, loss_21, entropy_s_2, entropy_r_21, message_lengths_2)

        "2. Agent_2 -> Agent_2"
        receiver_output_22, log_prob_r_22, entropy_r_22 = self.agent_2.receiver(message_2, receiver_input, message_lengths_2)
        loss_22, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22, labels)
        optimized_loss_22 = self._direction_loss(
            '22', loss_22, log_prob_r_22, entropy_r_22,
            effective_entropy_s_2, effective_log_prob_s_2, message_lengths_2,
            self.sender_entropy_coeff_2, self.receiver_entropy_coeff_2)
        self._fill_stats(rest_22, optimized_loss_22, loss_22, entropy_s_2, entropy_r_22, message_lengths_2)

        "3. Average loss"
        # Weighted average of the four directions' logged metrics.
        # (The combined scalar loss previously computed here was never used or
        # returned; callers receive the four per-direction losses instead.)
        rest = {}
        w = self.loss_weights
        for key in ('loss', 'sender_entropy', 'receiver_entropy',
                    'original_loss', 'mean_length', 'acc'):
            rest[key] = w[0][0] * rest_11[key] + w[0][1] * rest_12[key] + \
                        w[1][0] * rest_21[key] + w[1][1] * rest_22[key]
        return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22, rest

    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under `name`."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel2(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    "Model2" variant: agents expose ``send`` / ``receive``; the receiver also
    returns a language-model ("lm") reconstruction sequence, and each
    direction's REINFORCE-optimized loss is averaged 50/50 with the lm
    (imitation) loss.  Two directions are played per forward pass:
    agent_1 -> agent_2 and agent_2 -> agent_1.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[0.5,0.5],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: agent exposing ``send(sender_input)`` and
            ``receive(message, receiver_input, lengths)``; same for Agent_2.
        :param loss: callable returning ``(task_loss, lm_loss, aux_dict)``.
        :param loss_weights: [w1, w2] used to average the two directions'
            logged metrics.
        :param length_cost: per-symbol penalty on message length.

        NOTE(review): the ``loss_weights`` default is a mutable list literal;
        it is never mutated here, but callers must not mutate it in place.
        """
        super(DialogReinforceModel2, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        # running means used as REINFORCE baselines; any key defaults to 0.0
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.device=device
    def forward(self, sender_input, labels, receiver_input=None):
        sender_input=sender_input.to(self.device)
        "1. Agent_1 -> Agent_2"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
        message_lengths_1 = find_lengths(message_1)
        receiver_output_1, log_prob_r_1, entropy_r_1, sequence_lm, log_probs_lm = self.agent_2.receive(message_1, receiver_input, message_lengths_1)
        # Take only the last => change to EOS position
        # NOTE(review): this fixes the receiver terms at the final time-step,
        # not at each sample's actual EOS position (see the author's TODO above).
        log_prob_r_1=log_prob_r_1[:,-1]
        entropy_r_1=entropy_r_1[:,-1]
        loss_1, loss_lm_1, rest_1 = self.loss(sender_input, message_1, message_lengths_1, receiver_input, receiver_output_1, sequence_lm , labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
        for i in range(message_1.size(1)):
            # mask is 1.0 while symbol i is before this sample's EOS
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        # entropy regularization terms for sender and receiver policies
        weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_1.mean() * self.receiver_entropy_coeff_1
        log_prob_1 = effective_log_prob_s_1 + log_prob_r_1
        length_loss_1 = message_lengths_1.float() * self.length_cost
        # REINFORCE surrogates with running-mean baselines ('length_1'/'loss_1')
        policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_1 = ((loss_1.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
        optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_1 += loss_1.mean()
        # Average between task and imitation loss
        optimized_loss_1 = 0.5*(optimized_loss_1 + loss_lm_1.mean())
        if self.training:
            self.update_baseline('loss_1', loss_1)
            self.update_baseline('length_1', length_loss_1)
        # convert aux tensors to python floats for logging
        for k, v in rest_1.items():
            rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_1['loss'] = optimized_loss_1.detach().item()
        rest_1['sender_entropy'] = entropy_s_1.mean().item()
        rest_1['receiver_entropy'] = entropy_r_1.mean().item()
        rest_1['original_loss'] = loss_1.mean().item()
        rest_1['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent_2 -> Agent_1"
        # symmetric direction: agent_2 speaks, agent_1 listens
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
        message_lengths_2 = find_lengths(message_2)
        receiver_output_2, log_prob_r_2, entropy_r_2,sequence_lm, logits_lm = self.agent_1.receive(message_2, receiver_input, message_lengths_2)
        # Take only the last => change to EOS position
        log_prob_r_2=log_prob_r_2[:,-1]
        entropy_r_2=entropy_r_2[:,-1]
        loss_2, loss_lm_2, rest_2 = self.loss(sender_input, message_2, message_lengths_2, receiver_input, receiver_output_2, sequence_lm , labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
            entropy_r_2.mean() * self.receiver_entropy_coeff_2
        log_prob_2 = effective_log_prob_s_2 + log_prob_r_2
        length_loss_2 = message_lengths_2.float() * self.length_cost
        policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
        policy_loss_2 = ((loss_2.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
        optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_2 += loss_2.mean()
        # Average between task and imitation loss (mirrors direction 1)
        optimized_loss_2 = 0.5*(optimized_loss_2 + loss_lm_2.mean())
        if self.training:
            self.update_baseline('loss_2', loss_2)
            self.update_baseline('length_2', length_loss_2)
        for k, v in rest_2.items():
            rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_2['loss'] = optimized_loss_2.detach().item()
        rest_2['sender_entropy'] = entropy_s_2.mean().item()
        rest_2['receiver_entropy'] = entropy_r_2.mean().item()
        rest_2['original_loss'] = loss_2.mean().item()
        rest_2['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # NOTE(review): `optimized_loss` is computed but never used/returned;
        # the two per-direction losses are returned separately instead.
        optimized_loss = self.loss_weights[0]*optimized_loss_1 + self.loss_weights[1]*optimized_loss_2
        # weighted average of the two directions' logged metrics
        rest={}
        rest['loss']=self.loss_weights[0]*rest_1['loss'] + self.loss_weights[1]* rest_2['loss']
        rest['sender_entropy']=self.loss_weights[0]*rest_1['sender_entropy'] + self.loss_weights[1]* rest_2['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0]*rest_1['receiver_entropy'] + self.loss_weights[1]* rest_2['receiver_entropy']
        rest['original_loss']=self.loss_weights[0]*rest_1['original_loss'] + self.loss_weights[1]* rest_2['original_loss']
        rest['mean_length']=self.loss_weights[0]*rest_1['mean_length'] + self.loss_weights[1]* rest_2['mean_length']
        rest['acc']=self.loss_weights[0]*rest_1['acc'] + self.loss_weights[1]* rest_2['acc']
        return optimized_loss_1, optimized_loss_2, rest
    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under `name`."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel3(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    "Model3" variant: each listener also produces an imitation of the sender's
    message via ``imitate``; ``self.loss`` returns a communication loss and an
    imitation loss per direction.  Two directions are played per forward pass
    (1 -> 2 and 2 -> 1); the communication losses are REINFORCE-optimized and
    the (mean) imitation losses are returned alongside them.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[0.5,0.5],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: agent exposing ``send``, ``receive(..., imitate=True)``
            and ``imitate``; same for Agent_2.
        :param loss: callable returning ``(comm_loss, imitation_loss, aux_dict)``.
        :param loss_weights: [w1, w2] used to average the two directions'
            logged metrics.
        :param length_cost: per-symbol penalty on message length.

        NOTE(review): the ``loss_weights`` default is a mutable list literal;
        it is never mutated here, but callers must not mutate it in place.
        """
        super(DialogReinforceModel3, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        # running means used as REINFORCE baselines; any key defaults to 0.0
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.device=device
    def forward(self, sender_input, labels, receiver_input=None):
        sender_input=sender_input.to(self.device)
        "1. Agent_1 -> Agent_2"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
        message_lengths_1 = find_lengths(message_1)
        receiver_output_1, prob_r_1, _ , log_prob_r_1, entropy_r_1 = self.agent_2.receive(message_1, receiver_input, message_lengths_1,imitate=True)
        # NOTE(review): `candidates_1` is computed but never used below
        candidates_1=receiver_output_1.argmax(dim=1)
        message_reconstruction_1, prob_reconstruction_1, _ = self.agent_2.imitate(sender_input,imitate=True)
        loss_1_comm, loss_1_imitation, rest_1 = self.loss(sender_input, message_1, receiver_input, receiver_output_1,message_reconstruction_1,prob_reconstruction_1, labels)
        # Imitation loss weighted by likelihood of candidate
        # NOTE(review): the weighting is currently disabled (commented out)
        loss_1_imitation = loss_1_imitation #* prob_r_1.max(1).values
        loss_1_imitation=loss_1_imitation.mean()
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
        for i in range(message_1.size(1)):
            # mask is 1.0 while symbol i is before this sample's EOS
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        # entropy regularization terms for sender and receiver policies
        weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_1.mean() * self.receiver_entropy_coeff_1
        log_prob_1 = effective_log_prob_s_1 + log_prob_r_1
        length_loss_1 = message_lengths_1.float() * self.length_cost
        # REINFORCE surrogates with running-mean baselines ('length_1'/'loss_1')
        policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_1 = ((loss_1_comm.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
        optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_1 += loss_1_comm.mean()
        if self.training:
            self.update_baseline('loss_1', loss_1_comm)
            self.update_baseline('length_1', length_loss_1)
        # convert aux tensors to python floats for logging
        for k, v in rest_1.items():
            rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_1['loss'] = optimized_loss_1.detach().item()
        rest_1['sender_entropy'] = entropy_s_1.mean().item()
        rest_1['receiver_entropy'] = entropy_r_1.mean().item()
        rest_1['original_loss'] = loss_1_comm.mean().item()
        rest_1['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent_2 -> Agent_1"
        # symmetric direction: agent_2 speaks, agent_1 listens and imitates
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
        message_lengths_2 = find_lengths(message_2)
        receiver_output_2, prob_r_2, _ , log_prob_r_2, entropy_r_2 = self.agent_1.receive(message_2, receiver_input, message_lengths_2,imitate=True)
        # NOTE(review): `candidates_2` is computed but never used below
        candidates_2=receiver_output_2.argmax(dim=1)
        message_reconstruction_2, prob_reconstruction_2, _ = self.agent_1.imitate(sender_input,imitate=True)
        loss_2_comm, loss_2_imitation, rest_2 = self.loss(sender_input, message_2, receiver_input, receiver_output_2,message_reconstruction_2,prob_reconstruction_2, labels)
        # Imitation loss weighted by likelihood of candidate
        loss_2_imitation = loss_2_imitation #* prob_r_2.max(1).values
        loss_2_imitation=loss_2_imitation.mean()
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
            entropy_r_2.mean() * self.receiver_entropy_coeff_2
        log_prob_2 = effective_log_prob_s_2 + log_prob_r_2
        length_loss_2 = message_lengths_2.float() * self.length_cost
        policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
        policy_loss_2 = ((loss_2_comm.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
        optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_2 += loss_2_comm.mean()
        if self.training:
            self.update_baseline('loss_2', loss_2_comm)
            self.update_baseline('length_2', length_loss_2)
        for k, v in rest_2.items():
            rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_2['loss'] = optimized_loss_2.detach().item()
        rest_2['sender_entropy'] = entropy_s_2.mean().item()
        rest_2['receiver_entropy'] = entropy_r_2.mean().item()
        rest_2['original_loss'] = loss_2_comm.mean().item()
        rest_2['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # NOTE(review): `optimized_loss` is computed but never used/returned;
        # the per-direction losses are returned separately instead.
        optimized_loss = self.loss_weights[0]*optimized_loss_1 + self.loss_weights[1]*optimized_loss_2
        # weighted average of the two directions' logged metrics
        rest={}
        rest['loss']=self.loss_weights[0]*rest_1['loss'] + self.loss_weights[1]* rest_2['loss']
        rest['sender_entropy']=self.loss_weights[0]*rest_1['sender_entropy'] + self.loss_weights[1]* rest_2['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0]*rest_1['receiver_entropy'] + self.loss_weights[1]* rest_2['receiver_entropy']
        rest['original_loss']=self.loss_weights[0]*rest_1['original_loss'] + self.loss_weights[1]* rest_2['original_loss']
        rest['mean_length']=self.loss_weights[0]*rest_1['mean_length'] + self.loss_weights[1]* rest_2['mean_length']
        rest['acc']=self.loss_weights[0]*rest_1['acc'] + self.loss_weights[1]* rest_2['acc']
        return optimized_loss_1,loss_1_imitation, optimized_loss_2, loss_2_imitation, rest
    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under `name`."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel4(nn.Module):
"""
DialogReinforce implements the Dialog game
"""
def __init__(self,
Agent_1,
Agent_2,
loss,
sender_entropy_coeff_1,
receiver_entropy_coeff_1,
sender_entropy_coeff_2,
receiver_entropy_coeff_2,
device,
loss_weights=[[0.25,0.25],[0.25,0.25]],
length_cost=0.0,
unigram_penalty=0.0,
reg=False):
"""
"""
super(DialogReinforceModel4, self).__init__()
self.agent_1 = Agent_1
self.agent_2 = Agent_2
self.sender_entropy_coeff_1 = sender_entropy_coeff_1
self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
self.sender_entropy_coeff_2 = sender_entropy_coeff_2
self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
self.loss = loss
self.loss_weights = loss_weights
self.length_cost = length_cost
self.unigram_penalty = unigram_penalty
self.device=device
self.mean_baseline = defaultdict(float)
self.n_points = defaultdict(float)
self.reg=reg
def forward(self, sender_input, labels, receiver_input=None):
sender_input=sender_input.to(self.device)
"1. Agent 1"
message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
message_lengths_1 = find_lengths(message_1)
a_self=3.
a_cross=1.
a_im=1.
"1.2 Agent_1 -> Agent_2"
#message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1
receiver_output_12, prob_r_12, _ , log_prob_r_12, entropy_r_12 = self.agent_2.receive(message_1, receiver_input, message_lengths_1,imitate=True)
candidates_12=receiver_output_12.argmax(dim=1)
message_reconstruction_12, prob_reconstruction_12, _ = self.agent_2.imitate(sender_input,imitate=True)
loss_12_comm, loss_12_imitation, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12,message_reconstruction_12,prob_reconstruction_12, labels,message_lengths_1)
# Imitation loss weighted by likelihood of candidate
loss_12_imitation = loss_12_imitation #* prob_r_12.max(1).values
#loss_12_imitation=loss_12_imitation.mean()
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_1 = torch.zeros_like(entropy_r_12)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_1 = torch.zeros_like(log_prob_r_12)
for i in range(message_1.size(1)):
not_eosed_1 = (i < message_lengths_1).float()
effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
weighted_entropy_12 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
entropy_r_12.mean() * self.receiver_entropy_coeff_1
log_prob_12 = effective_log_prob_s_1 + log_prob_r_12
length_loss_12 = message_lengths_1.float() * self.length_cost
policy_length_loss_12 = ((length_loss_12.float() - self.mean_baseline['length_12']) * effective_log_prob_s_1).mean()
policy_loss_12 = ((loss_12_comm.detach() - self.mean_baseline['loss_12']) * log_prob_12).mean()
optimized_loss_12 = policy_length_loss_12 + policy_loss_12 - weighted_entropy_12
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_12 += loss_12_comm.mean()
if self.training:
self.update_baseline('loss_12', loss_12_comm)
self.update_baseline('length_12', length_loss_12)
for k, v in rest_12.items():
rest_12[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_12['loss'] = optimized_loss_12.detach().item()
rest_12['sender_entropy'] = entropy_s_1.mean().item()
rest_12['receiver_entropy'] = entropy_r_12.mean().item()
rest_12['original_loss'] = loss_12_comm.mean().item()
rest_12['mean_length'] = message_lengths_1.float().mean().item()
"1.1 Agent_1 -> Agent_1"
#message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1
receiver_output_11, prob_r_11, _ , log_prob_r_11, entropy_r_11 = self.agent_1.receive(message_1, receiver_input, message_lengths_1,imitate=True)
candidates_11=receiver_output_11.argmax(dim=1)
message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(sender_input,imitate=True)
loss_11_comm, loss_11_imitation, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11,message_reconstruction_11,prob_reconstruction_11, labels,message_lengths_1)
# Imitation loss weighted by likelihood of candidate
loss_11_imitation = loss_11_imitation #* prob_r_11.max(1).values
#loss_11_imitation=loss_11_imitation.mean()
loss_11_comm=a_self*loss_11_comm+a_cross*loss_12_comm+a_im*loss_12_imitation
weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
entropy_r_11.mean() * self.receiver_entropy_coeff_1
log_prob_11 = effective_log_prob_s_1 + log_prob_r_11
length_loss_11 = message_lengths_1.float() * self.length_cost
policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
policy_loss_11 = ((loss_11_comm.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_11 += loss_11_comm.mean()
if self.training:
self.update_baseline('loss_11', loss_11_comm)
self.update_baseline('length_11', length_loss_11)
for k, v in rest_11.items():
rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_11['loss'] = optimized_loss_11.detach().item()
rest_11['sender_entropy'] = entropy_s_1.mean().item()
rest_11['receiver_entropy'] = entropy_r_11.mean().item()
rest_11['original_loss'] = loss_11_comm.mean().item()
rest_11['mean_length'] = message_lengths_1.float().mean().item()
"2. Agent 2"
message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
message_lengths_2 = find_lengths(message_2)
"2. Agent_2 -> Agent_1"
#message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2
receiver_output_21, prob_r_21, _ , log_prob_r_21, entropy_r_21 = self.agent_1.receive(message_2, receiver_input, message_lengths_2,imitate=True)
candidates_21=receiver_output_21.argmax(dim=1)
message_reconstruction_21, prob_reconstruction_21, _ = self.agent_1.imitate(sender_input,imitate=True)
loss_21_comm, loss_21_imitation, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21,message_reconstruction_21,prob_reconstruction_21, labels,message_lengths_2)
# Imitation loss weighted by likelihood of candidate
loss_21_imitation = loss_21_imitation #* prob_r_21.max(1).values
#loss_21_imitation=loss_21_imitation.mean()
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_2 = torch.zeros_like(entropy_r_21)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_2 = torch.zeros_like(log_prob_r_21)
for i in range(message_2.size(1)):
not_eosed_2 = (i < message_lengths_2).float()
effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
weighted_entropy_21 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
entropy_r_21.mean() * self.receiver_entropy_coeff_2
log_prob_21 = effective_log_prob_s_2 + log_prob_r_21
length_loss_21 = message_lengths_2.float() * self.length_cost
policy_length_loss_21 = ((length_loss_21.float() - self.mean_baseline['length_21']) * effective_log_prob_s_2).mean()
policy_loss_21 = ((loss_21_comm.detach() - self.mean_baseline['loss_21']) * log_prob_21).mean()
optimized_loss_21 = policy_length_loss_21 + policy_loss_21 - weighted_entropy_21
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_21 += loss_21_comm.mean()
if self.training:
self.update_baseline('loss_21', loss_21_comm)
self.update_baseline('length_21', length_loss_21)
for k, v in rest_21.items():
rest_21[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_21['loss'] = optimized_loss_21.detach().item()
rest_21['sender_entropy'] = entropy_s_2.mean().item()
rest_21['receiver_entropy'] = entropy_r_21.mean().item()
rest_21['original_loss'] = loss_21_comm.mean().item()
rest_21['mean_length'] = message_lengths_2.float().mean().item()
"2. Agent_2 -> Agent_2"
#message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2
#message_lengths_22 = find_lengths(message_22)
receiver_output_22, prob_r_22, _ , log_prob_r_22, entropy_r_22 = self.agent_2.receive(message_2, receiver_input, message_lengths_2,imitate=True)
candidates_22=receiver_output_22.argmax(dim=1)
message_reconstruction_22, prob_reconstruction_22, _ = self.agent_2.imitate(sender_input,imitate=True)
loss_22_comm, loss_22_imitation, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22,message_reconstruction_22,prob_reconstruction_22, labels, message_lengths_2)
# Imitation loss weighted by likelihood of candidate
loss_22_imitation = loss_22_imitation #* prob_r_22.max(1).values
#loss_22_imitation=loss_22_imitation.mean()
loss_22_comm=a_self*loss_22_comm+a_cross*loss_21_comm+a_im*loss_21_imitation
weighted_entropy_22 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
entropy_r_22.mean() * self.receiver_entropy_coeff_2
log_prob_22 = effective_log_prob_s_2 + log_prob_r_22
length_loss_22 = message_lengths_2.float() * self.length_cost
policy_length_loss_22 = ((length_loss_22.float() - self.mean_baseline['length_22']) * effective_log_prob_s_2).mean()
policy_loss_22 = ((loss_22_comm.detach() - self.mean_baseline['loss_22']) * log_prob_22).mean()
optimized_loss_22 = policy_length_loss_22 + policy_loss_22 - weighted_entropy_22
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_22 += loss_22_comm.mean()
if self.training:
self.update_baseline('loss_22', loss_22_comm)
self.update_baseline('length_22', length_loss_22)
for k, v in rest_22.items():
rest_22[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_22['loss'] = optimized_loss_22.detach().item()
rest_22['sender_entropy'] = entropy_s_2.mean().item()
rest_22['receiver_entropy'] = entropy_r_22.mean().item()
rest_22['original_loss'] = loss_22_comm.mean().item()
rest_22['mean_length'] = message_lengths_2.float().mean().item()
"3. Average loss"
optimized_loss_1 = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12
optimized_loss_2 = self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
optimized_loss = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12+ \
self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
rest={}
rest['loss']=self.loss_weights[0][0]*rest_11['loss'] + self.loss_weights[0][1]*rest_12['loss']+ \
self.loss_weights[1][0]*rest_21['loss'] + self.loss_weights[1][1]*rest_22['loss']
rest['sender_entropy']=self.loss_weights[0][0]*rest_11['sender_entropy'] + self.loss_weights[0][1]*rest_12['sender_entropy']+ \
self.loss_weights[1][0]*rest_21['sender_entropy'] + self.loss_weights[1][1]*rest_22['sender_entropy']
rest['receiver_entropy']=self.loss_weights[0][0]*rest_11['receiver_entropy'] + self.loss_weights[0][1]*rest_12['receiver_entropy']+ \
self.loss_weights[1][0]*rest_21['receiver_entropy'] + self.loss_weights[1][1]*rest_22['receiver_entropy']
rest['original_loss']=self.loss_weights[0][0]*rest_11['original_loss'] + self.loss_weights[0][1]*rest_12['original_loss']+ \
self.loss_weights[1][0]*rest_21['original_loss'] + self.loss_weights[1][1]*rest_22['original_loss']
rest['mean_length']=self.loss_weights[0][0]*rest_11['mean_length'] + self.loss_weights[0][1]*rest_12['mean_length']+ \
self.loss_weights[1][0]*rest_21['mean_length'] + self.loss_weights[1][1]*rest_22['mean_length']
rest['acc']=self.loss_weights[0][0]*rest_11['acc'] + self.loss_weights[0][1]*rest_12['acc']+ \
self.loss_weights[1][0]*rest_21['acc'] + self.loss_weights[1][1]*rest_22['acc']
rest["acc_11"]=rest_11["acc"]
rest["acc_12"]=rest_12["acc"]
rest["acc_21"]=rest_21["acc"]
rest["acc_22"]=rest_22["acc"]
return optimized_loss_11,loss_11_imitation, optimized_loss_12,loss_12_imitation, optimized_loss_21,loss_21_imitation, optimized_loss_22,loss_22_imitation, rest
def update_baseline(self, name, value):
    """Fold one observation of ``value`` into the running-mean baseline ``name``."""
    count = self.n_points[name] + 1
    self.n_points[name] = count
    observation = value.detach().mean().item()
    self.mean_baseline[name] += (observation - self.mean_baseline[name]) / count
class PretrainAgent(nn.Module):
    """
    Pretraining game for a single dialog agent.

    The agent sends a message about ``sender_input`` and decodes it itself
    (sender -> receiver loop). When ``pretrained_messages`` is given, the agent
    is additionally asked to reconstruct those target messages (imitation).
    Optimization is REINFORCE with per-term running-mean baselines for
    variance reduction.
    """

    def __init__(self,
                 Agent_1,
                 loss,
                 pretrained_messages,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 device,
                 n_features,
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: the dialog agent; must expose ``.sender``, ``.receive`` and ``.imitate``
        :param loss: task loss; called with (sender_input, message, pretrained_messages,
            receiver_input, receiver_output, message_reconstruction, prob_reconstruction, labels)
            and returning (communication loss, imitation loss, dict of diagnostics)
        :param pretrained_messages: tensor of target messages to imitate, or None to disable imitation
        :param sender_entropy_coeff_1: entropy regularization coefficient for the sender
        :param receiver_entropy_coeff_1: entropy regularization coefficient for the receiver
        :param device: torch device the game runs on
        :param n_features: input dimensionality (size of the one-hot identity used for imitation)
        :param length_cost: penalty applied to the sender per produced symbol
        :param unigram_penalty: penalty for repeated unigrams (stored; not used in this class)
        :param reg: apply the regularization scheduling (Lazy Speaker); stored only
        """
        super(PretrainAgent, self).__init__()
        self.agent_1 = Agent_1
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.pretrained_messages = pretrained_messages
        if self.pretrained_messages is not None:
            self.pretrained_messages = self.pretrained_messages.to(device)
        self.loss = loss
        self.n_features = n_features
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device = device
        # Running-mean baselines (REINFORCE variance reduction) and their sample counts.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Run one self-communication round and return
        (optimized REINFORCE loss, imitation loss, dict of scalar diagnostics).
        """
        sender_input = sender_input.to(self.device)
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)

        "1.1 Agent_1 -> Agent_1"
        #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1
        receiver_output_11, prob_r_11, _, log_prob_r_11, entropy_r_11 = self.agent_1.receive(
            message_1, receiver_input, message_lengths_1, imitate=True)
        if self.pretrained_messages is not None:
            # Imitation targets: reconstruct the pretrained message of every feature
            # (one-hot identity enumerates all n_features inputs).
            pretrained_sender_input = torch.eye(self.n_features).to(self.device)
            message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(
                pretrained_sender_input, imitate=True)
        else:
            message_reconstruction_11 = None
            prob_reconstruction_11 = None
        loss_11_comm, loss_11_imitation, rest_11 = self.loss(
            sender_input, message_1, self.pretrained_messages, receiver_input,
            receiver_output_11, message_reconstruction_11, prob_reconstruction_11, labels)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_11)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_11)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()

        weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                              entropy_r_11.mean() * self.receiver_entropy_coeff_1
        log_prob_11 = effective_log_prob_s_1 + log_prob_r_11
        length_loss_11 = message_lengths_1.float() * self.length_cost
        # BUG FIX: this baseline was read from the never-written key 'length_1' while
        # update_baseline below stores under 'length_11', so it always stayed at 0.0.
        # Read the key that is actually maintained.
        policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_11']) * effective_log_prob_s_1).mean()
        policy_loss_11 = ((loss_11_comm.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
        optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_11 += loss_11_comm.mean()
        if self.training:
            self.update_baseline('loss_11', loss_11_comm)
            self.update_baseline('length_11', length_loss_11)

        # Convert tensor diagnostics to plain Python floats for logging.
        for k, v in rest_11.items():
            rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_11['loss'] = optimized_loss_11.detach().item()
        rest_11['sender_entropy'] = entropy_s_1.mean().item()
        rest_11['receiver_entropy'] = entropy_r_11.mean().item()
        rest_11['original_loss'] = loss_11_comm.mean().item()
        rest_11['mean_length'] = message_lengths_1.float().mean().item()
        return optimized_loss_11, loss_11_imitation, rest_11

    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under ``name`` with ``value``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel6(nn.Module):
    """
    DialogReinforce implements the Dialog game.

    Two agents each send a message about the same input; every message is
    decoded by both agents, giving four directions (11 = agent1->agent1,
    12 = agent1->agent2, 21 = agent2->agent1, 22 = agent2->agent2). Each
    direction is optimized with REINFORCE using per-term running-mean
    baselines. When ``imitate`` is set, each agent additionally reconstructs
    the input's message and the imitation loss is mixed into the
    self-decoding losses (11 and 22).
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 imitate,
                 device,
                 loss_weights=[[0.25,0.25],[0.25,0.25]],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: first dialog agent (exposes .send, .receive, .imitate)
        :param Agent_2: second dialog agent (same interface)
        :param loss: task loss; with imitate=True it must also return an imitation loss
        :param sender_entropy_coeff_1: sender entropy regularization coeff, agent 1
        :param receiver_entropy_coeff_1: receiver entropy regularization coeff, agent 1
        :param sender_entropy_coeff_2: sender entropy regularization coeff, agent 2
        :param receiver_entropy_coeff_2: receiver entropy regularization coeff, agent 2
        :param imitate: if True, compute and mix in message-imitation losses
        :param device: torch device the game runs on
        :param loss_weights: 2x2 weights combining the 11/12/21/22 losses.
            NOTE(review): mutable default argument - harmless while read-only,
            confirm it is never mutated in place.
        :param length_cost: penalty applied to the sender per produced symbol
        :param unigram_penalty: penalty for repeated unigrams (stored, unused here)
        :param reg: apply the regularization scheduling (Lazy Speaker); stored only
        """
        super(DialogReinforceModel6, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device=device
        # Running-mean baselines for REINFORCE variance reduction, keyed per loss term.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.imitate=imitate
    def forward(self, sender_input, labels, receiver_input=None):
        """
        Run one full dialog round.

        Returns the four per-direction optimized losses (11, 12, 21, 22) and a
        dict of scalar diagnostics; when ``self.imitate`` is True the two
        imitation losses are returned as well (see the final branch).
        """
        sender_input=sender_input.to(self.device)
        "1. Agent 1"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
        message_lengths_1 = find_lengths(message_1)
        # Mixing coefficients: self-decoding vs cross-decoding vs imitation loss.
        a_self=3.
        a_cross=1.
        a_im=1.
        "1.2 Agent_1 -> Agent_2"
        #message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1
        receiver_output_12, log_prob_r_12, entropy_r_12 = self.agent_2.receive(message_1, receiver_input, message_lengths_1)
        if self.imitate:
            candidates_12=receiver_output_12.argmax(dim=1)
            message_reconstruction_12, prob_reconstruction_12, _ = self.agent_2.imitate(sender_input)
            loss_12, loss_12_imitation, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12,message_reconstruction_12,prob_reconstruction_12, labels,message_lengths_1)
            #loss_12_imitation=loss_12_imitation.mean()
        else:
            loss_12, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_12)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_12)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        weighted_entropy_12 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                              entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob_12 = effective_log_prob_s_1 + log_prob_r_12
        length_loss_12 = message_lengths_1.float() * self.length_cost
        # NOTE(review): reads baseline key 'length_1' but update_baseline below stores under
        # 'length_12', so this length baseline always stays at its 0.0 default - confirm intent.
        policy_length_loss_12 = ((length_loss_12.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_12 = ((loss_12.detach() - self.mean_baseline['loss_12']) * log_prob_12).mean()
        optimized_loss_12 = policy_length_loss_12 + policy_loss_12 - weighted_entropy_12
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_12 += loss_12.mean()
        if self.training:
            self.update_baseline('loss_12', loss_12)
            self.update_baseline('length_12', length_loss_12)
        # Convert tensor diagnostics to plain floats for logging.
        for k, v in rest_12.items():
            rest_12[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_12['loss'] = optimized_loss_12.detach().item()
        rest_12['sender_entropy'] = entropy_s_1.mean().item()
        rest_12['receiver_entropy'] = entropy_r_12.mean().item()
        rest_12['original_loss'] = loss_12.mean().item()
        rest_12['mean_length'] = message_lengths_1.float().mean().item()
        "1.1 Agent_1 -> Agent_1"
        #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1
        receiver_output_11, log_prob_r_11, entropy_r_11 = self.agent_1.receive(message_1, receiver_input, message_lengths_1)
        if self.imitate:
            candidates_11=receiver_output_11.argmax(dim=1)
            message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(sender_input)
            loss_11, loss_11_imitation, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11,message_reconstruction_11,prob_reconstruction_11, labels,message_lengths_1)
            #loss_11_imitation=loss_11_imitation.mean()
            # Mix the self loss with the cross loss and agent 2's imitation loss.
            loss_11=a_self*loss_11+a_cross*loss_12+a_im*loss_12_imitation
        else:
            loss_11, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11, labels)
        weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                              entropy_r_11.mean() * self.receiver_entropy_coeff_1
        log_prob_11 = effective_log_prob_s_1 + log_prob_r_11
        length_loss_11 = message_lengths_1.float() * self.length_cost
        # NOTE(review): same baseline-key mismatch as above ('length_1' read, 'length_11' written).
        policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_11 = ((loss_11.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
        optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_11 += loss_11.mean()
        if self.training:
            self.update_baseline('loss_11', loss_11)
            self.update_baseline('length_11', length_loss_11)
        for k, v in rest_11.items():
            rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_11['loss'] = optimized_loss_11.detach().item()
        rest_11['sender_entropy'] = entropy_s_1.mean().item()
        rest_11['receiver_entropy'] = entropy_r_11.mean().item()
        rest_11['original_loss'] = loss_11.mean().item()
        rest_11['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent 2"
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
        message_lengths_2 = find_lengths(message_2)
        "2. Agent_2 -> Agent_1"
        #message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2
        receiver_output_21, log_prob_r_21, entropy_r_21 = self.agent_1.receive(message_2, receiver_input, message_lengths_2)
        if self.imitate:
            candidates_21=receiver_output_21.argmax(dim=1)
            message_reconstruction_21, prob_reconstruction_21, _ = self.agent_1.imitate(sender_input)
            loss_21, loss_21_imitation, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21,message_reconstruction_21,prob_reconstruction_21, labels,message_lengths_2)
            #loss_21_imitation=loss_21_imitation.mean()
        else:
            loss_21, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_21)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_21)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_21 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                              entropy_r_21.mean() * self.receiver_entropy_coeff_2
        log_prob_21 = effective_log_prob_s_2 + log_prob_r_21
        length_loss_21 = message_lengths_2.float() * self.length_cost
        policy_length_loss_21 = ((length_loss_21.float() - self.mean_baseline['length_21']) * effective_log_prob_s_2).mean()
        policy_loss_21 = ((loss_21.detach() - self.mean_baseline['loss_21']) * log_prob_21).mean()
        optimized_loss_21 = policy_length_loss_21 + policy_loss_21 - weighted_entropy_21
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_21 += loss_21.mean()
        if self.training:
            self.update_baseline('loss_21', loss_21)
            self.update_baseline('length_21', length_loss_21)
        for k, v in rest_21.items():
            rest_21[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_21['loss'] = optimized_loss_21.detach().item()
        rest_21['sender_entropy'] = entropy_s_2.mean().item()
        rest_21['receiver_entropy'] = entropy_r_21.mean().item()
        rest_21['original_loss'] = loss_21.mean().item()
        rest_21['mean_length'] = message_lengths_2.float().mean().item()
        "2. Agent_2 -> Agent_2"
        #message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2
        #message_lengths_22 = find_lengths(message_22)
        receiver_output_22, log_prob_r_22, entropy_r_22 = self.agent_2.receive(message_2, receiver_input, message_lengths_2)
        if self.imitate:
            candidates_22=receiver_output_22.argmax(dim=1)
            message_reconstruction_22, prob_reconstruction_22, _ = self.agent_2.imitate(sender_input)
            loss_22, loss_22_imitation, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22,message_reconstruction_22,prob_reconstruction_22, labels,message_lengths_2)
            #loss_22_imitation=loss_22_imitation.mean()
            # Mix the self loss with the cross loss and agent 1's imitation loss.
            loss_22=a_self*loss_22+a_cross*loss_21+a_im*loss_21_imitation
        else:
            loss_22, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22, labels)
        weighted_entropy_22 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                              entropy_r_22.mean() * self.receiver_entropy_coeff_2
        log_prob_22 = effective_log_prob_s_2 + log_prob_r_22
        length_loss_22 = message_lengths_2.float() * self.length_cost
        policy_length_loss_22 = ((length_loss_22.float() - self.mean_baseline['length_22']) * effective_log_prob_s_2).mean()
        policy_loss_22 = ((loss_22.detach() - self.mean_baseline['loss_22']) * log_prob_22).mean()
        optimized_loss_22 = policy_length_loss_22 + policy_loss_22 - weighted_entropy_22
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_22 += loss_22.mean()
        if self.training:
            self.update_baseline('loss_22', loss_22)
            self.update_baseline('length_22', length_loss_22)
        for k, v in rest_22.items():
            rest_22[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_22['loss'] = optimized_loss_22.detach().item()
        rest_22['sender_entropy'] = entropy_s_2.mean().item()
        rest_22['receiver_entropy'] = entropy_r_22.mean().item()
        rest_22['original_loss'] = loss_22.mean().item()
        rest_22['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # Weighted combinations of the four directions.
        # NOTE(review): optimized_loss_1/optimized_loss_2/optimized_loss are computed
        # but never returned or used below - confirm whether they are needed.
        optimized_loss_1 = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12
        optimized_loss_2 = self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
        optimized_loss = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12+ \
                         self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
        # Aggregate scalar diagnostics across the four directions with the same weights.
        rest={}
        rest['loss']=self.loss_weights[0][0]*rest_11['loss'] + self.loss_weights[0][1]*rest_12['loss']+ \
                     self.loss_weights[1][0]*rest_21['loss'] + self.loss_weights[1][1]*rest_22['loss']
        rest['sender_entropy']=self.loss_weights[0][0]*rest_11['sender_entropy'] + self.loss_weights[0][1]*rest_12['sender_entropy']+ \
                               self.loss_weights[1][0]*rest_21['sender_entropy'] + self.loss_weights[1][1]*rest_22['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0][0]*rest_11['receiver_entropy'] + self.loss_weights[0][1]*rest_12['receiver_entropy']+ \
                                 self.loss_weights[1][0]*rest_21['receiver_entropy'] + self.loss_weights[1][1]*rest_22['receiver_entropy']
        rest['original_loss']=self.loss_weights[0][0]*rest_11['original_loss'] + self.loss_weights[0][1]*rest_12['original_loss']+ \
                              self.loss_weights[1][0]*rest_21['original_loss'] + self.loss_weights[1][1]*rest_22['original_loss']
        rest['mean_length']=self.loss_weights[0][0]*rest_11['mean_length'] + self.loss_weights[0][1]*rest_12['mean_length']+ \
                            self.loss_weights[1][0]*rest_21['mean_length'] + self.loss_weights[1][1]*rest_22['mean_length']
        rest['acc']=self.loss_weights[0][0]*rest_11['acc'] + self.loss_weights[0][1]*rest_12['acc']+ \
                    self.loss_weights[1][0]*rest_21['acc'] + self.loss_weights[1][1]*rest_22['acc']
        rest['acc_21']=rest_21['acc']
        rest['acc_12']=rest_12['acc']
        if not self.imitate:
            return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22, rest
        else:
            return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22,loss_12_imitation,loss_21_imitation, rest
    def update_baseline(self, name, value):
        # Incremental running mean of the detached loss: b += (x - b) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class SenderReceiverRnnReinforce(nn.Module):
    """
    Sender/Receiver game trained with REINFORCE.

    Both agents must return 3-tuples of (output, log-prob of the output,
    entropy). The game handles the end-of-sequence symbol: only the sender
    steps up to and including eos contribute to the optimized loss. Running
    mean baselines are subtracted from the loss and from the length penalty
    to reduce the variance of the gradient estimate. With ``reg=True`` the
    length penalty is rescheduled every step from the current accuracy
    (Lazy Speaker).
    """

    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: callable(sender_input, message, receiver_input, receiver_output, labels)
            returning (per-sample loss tensor, dict of auxiliary diagnostics); the loss is
            minimized and the diagnostics are aggregated over batches
        :param sender_entropy_coeff: entropy regularization coefficient for the sender
        :param receiver_entropy_coeff: entropy regularization coefficient for the receiver
        :param length_cost: penalty applied to the sender for every produced symbol
        :param unigram_penalty: penalty for repeated unigrams (stored, not used here)
        :param reg: apply the regularization scheduling (Lazy Speaker)
        """
        super(SenderReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.loss = loss
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg = reg
        # Per-name running means used as REINFORCE baselines.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)

    def forward(self, sender_input, labels, receiver_input=None):
        """Play one round and return (optimized loss, dict of scalar diagnostics)."""
        msg, sender_logp, sender_ent = self.sender(sender_input)
        lengths = find_lengths(msg)
        rcv_out, rcv_logp, rcv_ent = self.receiver(msg, receiver_input, lengths)
        loss, rest = self.loss(sender_input, msg, receiver_input, rcv_out, labels)

        # Accumulate sender entropy and log-prob only over the symbols up to
        # and including eos - what comes after does not matter.
        ent_acc = torch.zeros_like(rcv_ent)
        logp_acc = torch.zeros_like(rcv_logp)
        for step in range(msg.size(1)):
            alive = (step < lengths).float()
            ent_acc = ent_acc + sender_ent[:, step] * alive
            logp_acc = logp_acc + sender_logp[:, step] * alive
        ent_acc = ent_acc / lengths.float()

        weighted_entropy = self.sender_entropy_coeff * ent_acc.mean() + \
                           self.receiver_entropy_coeff * rcv_ent.mean()
        log_prob = logp_acc + rcv_logp

        if self.reg:
            # Regularization scheduling (Lazy Speaker): grow the length penalty
            # with the batch accuracy. Constant tuned for n_features=100.
            mean_acc = rest["acc"].sum() / rest["acc"].size(0)
            self.length_cost = mean_acc ** 45 / 5

        length_loss = lengths.float() * self.length_cost
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * logp_acc).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()
        # If the receiver is deterministic/differentiable, the raw loss term
        # also flows gradients directly.
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy + loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        # Scalarize the auxiliary diagnostics in place.
        for key in rest:
            entry = rest[key]
            rest[key] = entry.mean().item() if hasattr(entry, 'mean') else entry
        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = sender_ent.mean().item()
        rest['receiver_entropy'] = rcv_ent.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = lengths.float().mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        """One incremental step of the running mean for the named baseline."""
        self.n_points[name] += 1
        delta = value.detach().mean().item() - self.mean_baseline[name]
        self.mean_baseline[name] += delta / self.n_points[name]
class SenderImpatientReceiverRnnReinforce(nn.Module):
    """
    Implements Sender/ Impatient Receiver game with training done via Reinforce.
    It is equivalent to SenderReceiverRnnReinforce but takes into account the intermediate predictions of Impatient Listener:
    - the Impatient loss is used
    - tensor shapes are adapted for variance reduction.
    When reg is set to True, the regularization scheduling is applied (Lazy Speaker).
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff,
                 length_cost=0.0,unigram_penalty=0.0,reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: the optimized loss that accepts
            sender_input: input of Sender
            message: the message sent by Sender
            receiver_input: input of Receiver from the dataset
            receiver_output: output of Receiver
            labels: labels assigned to Sender's input data
            and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information
            of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over
            all batches in the dataset.
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param reg: apply the regularization scheduling (Lazy Speaker)
        """
        super(SenderImpatientReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg=reg
        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
    def forward(self, sender_input, labels, receiver_input=None):
        """
        Play one round with the Impatient Listener and return
        (optimized loss, dict of scalar diagnostics).
        """
        message, log_prob_s, entropy_s = self.sender(sender_input)
        message_lengths = find_lengths(message)
        # If impatient 1
        receiver_output, log_prob_r, entropy_r = self.receiver(message, receiver_input, message_lengths)
        """ NOISE VERSION
        # Randomly takes a position
        rand_length=np.random.randint(0,message.size(1))
        # Loss by output
        loss, rest = self.loss(sender_input, message, receiver_input, receiver_output[:,rand_length,:], labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r[:,rand_length])
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r[:,rand_length])
        """
        #Loss
        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        # (receiver tensors are averaged over the per-position axis for variance reduction)
        effective_entropy_s = torch.zeros_like(entropy_r.mean(1))
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                           entropy_r.mean() * self.receiver_entropy_coeff
        log_prob = effective_log_prob_s + log_prob_r.mean(1)
        if self.reg:
            # Mean accuracy taken at the eos position of every sequence.
            # NOTE(review): crible_acc appears indexed as [batch, position] - confirm against self.loss.
            sc=0.
            for i in range(message_lengths.size(0)):
                sc+=crible_acc[i,message_lengths[i]-1]
            sc/=message_lengths.size(0)
            # Regularization scheduling paper
            #self.length_cost= sc**(45) / 10
            # For n_features=100
            self.length_cost= sc**(45) / 5
        length_loss = message_lengths.float() * self.length_cost
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)
        # Scalarize the auxiliary diagnostics for logging.
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()
        return optimized_loss, rest
    def update_baseline(self, name, value):
        # Incremental running mean: b += (x - b) / n.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class CompositionalitySenderReceiverRnnReinforce(nn.Module):
    """
    Adaptation of SenderReceiverRnnReinforce to inputs with several attributes.

    Inputs are described by `n_attributes` attributes, each taking one of
    `n_values` values; the receiver outputs one distribution per attribute.
    Training uses REINFORCE with running mean baselines for both the loss and
    the message-length penalty.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff, n_attributes, n_values,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent; forward returns (message, log-probs, entropies)
        :param receiver: receiver agent; forward returns per-attribute
            (outputs, log-probs, entropies)
        :param loss: callable(sender_input, message, message_lengths, receiver_input,
            receiver_output, labels, n_attributes, n_values) returning a tuple of
            (per-sample loss tensor, dict of auxiliary info, per-prefix accuracy tensor)
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param n_attributes: number of attributes describing each input
        :param n_values: number of values each attribute can take
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param unigram_penalty: stored for API compatibility; currently unused
        :param reg: stored for API compatibility; the adaptive length-cost
            schedule it used to drive is disabled in this class
        """
        super(CompositionalitySenderReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg = reg
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)

    def forward(self, sender_input, labels, receiver_input=None):
        message, log_prob_s, entropy_s = self.sender(sender_input)
        message_lengths = find_lengths(message)

        # Noisy channel: each symbol is independently replaced, with probability
        # `noise_level`, by a random non-eos symbol. noise_level=0 is a no-op.
        noise_level = 0.
        device = message.device  # bug fix: was hard-coded to "cuda", which broke CPU runs
        noise_map = torch.from_numpy(1 * (np.random.rand(message.size(0), message.size(1)) < noise_level)).to(device)
        noise = torch.from_numpy(np.random.randint(1, self.sender.vocab_size, size=(message.size(0), message.size(1)))).to(device)
        message_noise = message * (1 - noise_map) + noise_map * noise

        # Receiver reads the (possibly corrupted) message.
        # receiver_output_all_att: [batch_size, n_attributes, n_values]
        receiver_output_all_att, log_prob_r_all_att, entropy_r_all_att = self.receiver(message_noise, receiver_input, message_lengths)
        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels, self.n_attributes, self.n_values)

        # Average receiver statistics over the attribute dimension.
        log_prob_r = log_prob_r_all_att.mean(1)
        entropy_r = entropy_r_all_att.mean(1)

        # Entropy/log-prob of Sender's choices up to and including the eos
        # symbol; anything produced after eos is ignored.
        effective_entropy_s = torch.zeros_like(entropy_r)
        effective_log_prob_s = torch.zeros_like(log_prob_r)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                           entropy_r.mean() * self.receiver_entropy_coeff
        log_prob = effective_log_prob_s + log_prob_r

        length_loss = message_lengths.float() * self.length_cost
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, the actual loss also
        # propagates gradients directly.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Fold a new observation into the running mean baseline stored under `name`."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class CompositionalitySenderImpatientReceiverRnnReinforce(nn.Module):
    """
    REINFORCE training game pairing a Sender with an "impatient" Receiver on
    inputs with several attributes (`n_attributes` attributes of `n_values`
    values each, weighted by `att_weights`).

    The impatient receiver produces one prediction per message prefix, so its
    log-probs/entropies carry an extra length dimension that is averaged out
    below. A running mean baseline is applied to both the loss and the length
    penalty to reduce the variance of the gradient estimate. When `reg` is
    True, `length_cost` is adapted on the fly from the end-of-message accuracy.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff,n_attributes,n_values,att_weights,
                 length_cost=0.0,unigram_penalty=0.0,reg=False):
        """
        :param sender: sender agent; forward returns (message, log-probs, entropies)
        :param receiver: impatient receiver agent; forward returns per-prefix,
            per-attribute (outputs, log-probs, entropies)
        :param loss: callable(sender_input, message, message_lengths, receiver_input,
            receiver_output, labels, n_attributes, n_values, att_weights) returning
            (per-sample loss tensor, dict of auxiliary info, per-prefix accuracy tensor)
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param n_attributes: number of attributes describing each input
        :param n_values: number of values each attribute can take
        :param att_weights: per-attribute weights forwarded to the loss
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param unigram_penalty: stored but currently unused (see commented-out code)
        :param reg: if True, `length_cost` is mutated during forward based on accuracy
        """
        super(CompositionalitySenderImpatientReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg=reg
        self.n_attributes=n_attributes
        self.n_values=n_values
        self.att_weights=att_weights
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
    def forward(self, sender_input, labels, receiver_input=None):
        #print(sender_input[:,11:-1])
        # NOTE(review): torch.floor suggests sender_input may carry fractional
        # side information that the sender should not see — confirm with caller.
        message, log_prob_s, entropy_s = self.sender(torch.floor(sender_input))
        message_lengths = find_lengths(message)
        # If impatient 1
        receiver_output_all_att, log_prob_r_all_att, entropy_r_all_att = self.receiver(message, receiver_input, message_lengths)
        # reg: accumulator for end-of-message accuracy (used to adapt length_cost)
        sc=0.
        # Base version (kept for reference):
        #loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels,self.n_attributes,self.n_values,self.att_weights)
        # Take into account the fact that an attribute is not sampled
        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels,self.n_attributes,self.n_values,self.att_weights)
        if self.reg:
            # assumes crible_acc[i, t] is the accuracy after reading t+1 symbols
            # of message i — TODO confirm against the loss implementation
            for i in range(message_lengths.size(0)):
                sc+=crible_acc[i,message_lengths[i]-1]
        # Average receiver statistics over the prefix and attribute dimensions.
        log_prob_r=log_prob_r_all_att.mean(1).mean(1)
        entropy_r=entropy_r_all_att.mean(1).mean(1)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                entropy_r.mean() * self.receiver_entropy_coeff
        log_prob = effective_log_prob_s + log_prob_r
        if self.reg:
            # Adaptive schedule: no length pressure while accuracy is between
            # 0.9 and 0.99; once accuracy exceeds 0.99, ramp the cost up.
            # NOTE(review): this mutates self.length_cost across batches.
            sc/=message_lengths.size(0)
            if sc>0.9 and sc<0.99:
                self.length_cost=0.
            if sc>0.99:
                self.length_cost+=0.01
            #if sc<0.9:
            #    self.length_cost=-0.1
            #self.length_cost= sc**(60) / 2
        length_loss = message_lengths.float() * self.length_cost
        # Penalty redundancy (disabled)
        #counts_unigram=((message[:,1:]-message[:,:-1])==0).sum(axis=1).sum(axis=0)
        #unigram_loss = self.unigram_penalty*counts_unigram
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()
        return optimized_loss, rest
    def update_baseline(self, name, value):
        """Fold a new observation into the running mean baseline stored under `name`."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class TransformerReceiverDeterministic(nn.Module):
    """
    Deterministic Transformer-based Receiver wrapper.

    Encodes the incoming message with a Transformer encoder, applies the
    wrapped agent to the encoding, and pads the result with zero-valued
    log-prob/entropy tensors so it can plug into REINFORCE-style games.
    """
    def __init__(self, agent, vocab_size, max_len, embed_dim, num_heads, hidden_size, num_layers, positional_emb=True,
                 causal=True):
        super(TransformerReceiverDeterministic, self).__init__()
        self.agent = agent
        self.encoder = TransformerEncoder(vocab_size=vocab_size,
                                          max_len=max_len,
                                          embed_dim=embed_dim,
                                          num_heads=num_heads,
                                          num_layers=num_layers,
                                          hidden_size=hidden_size,
                                          positional_embedding=positional_emb,
                                          causal=causal)

    def forward(self, message, input=None, lengths=None):
        # Derive message lengths on the fly when the caller does not supply them.
        if lengths is None:
            lengths = find_lengths(message)
        encoded = self.encoder(message, lengths)
        agent_output = self.agent(encoded, input)
        # Deterministic agent: zero log-prob and entropy placeholders.
        zeros = torch.zeros(agent_output.size(0)).to(agent_output.device)
        return agent_output, zeros, zeros
class TransformerSenderReinforce(nn.Module):
    """
    REINFORCE wrapper for a Transformer-based Sender.

    The wrapped agent maps its input to an encoder state; a Transformer decoder
    then unrolls that state into a message, sampling one symbol per step during
    training and taking the argmax at eval time.
    """
    def __init__(self, agent, vocab_size, embed_dim, max_len, num_layers, num_heads, hidden_size,
                 generate_style='standard', causal=True, force_eos=True):
        """
        :param agent: the agent to be wrapped, returns the "encoder" state vector, which is the unrolled into a message
        :param vocab_size: vocab size of the message
        :param embed_dim: embedding dimensions
        :param max_len: maximal length of the message (including <eos>)
        :param num_layers: number of transformer layers
        :param num_heads: number of attention heads
        :param hidden_size: size of the FFN layers
        :param causal: whether embedding of a particular symbol should only depend on the symbols to the left
        :param generate_style: Two alternatives: 'standard' and 'in-place'. Suppose we are generating 4th symbol,
            after three symbols [s1 s2 s3] were generated.
            Then,
            'standard': [s1 s2 s3] -> embeddings [[e1] [e2] [e3]] -> (s4 = argmax(linear(e3)))
            'in-place': [s1 s2 s3] -> [s1 s2 s3 <need-symbol>] -> embeddings [[e1] [e2] [e3] [e4]] -> (s4 = argmax(linear(e4)))
        :param force_eos: <eos> added to the end of each sequence
        """
        super(TransformerSenderReinforce, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        assert generate_style in ['standard', 'in-place']
        self.generate_style = generate_style
        self.causal = causal
        self.max_len = max_len
        # With force_eos, only max_len - 1 symbols are generated; eos is appended.
        if force_eos:
            self.max_len -= 1
        # NOTE(review): the decoder receives the original, undecremented
        # max_len even when force_eos trimmed self.max_len — confirm intended.
        self.transformer = TransformerDecoder(embed_dim=embed_dim,
                                              max_len=max_len, num_layers=num_layers,
                                              num_heads=num_heads, hidden_size=hidden_size)
        self.embedding_to_vocab = nn.Linear(embed_dim, vocab_size)
        # Learned start-of-sequence embedding fed at generation step 0.
        self.special_symbol_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.embed_tokens = torch.nn.Embedding(vocab_size, embed_dim)
        nn.init.normal_(self.embed_tokens.weight, mean=0, std=self.embed_dim ** -0.5)
        # Scale factor applied to token embeddings (Transformer convention).
        self.embed_scale = math.sqrt(embed_dim)
    def generate_standard(self, encoder_state):
        """Autoregressive generation: each new symbol is read off the embedding
        of the *last* generated position. Returns per-step lists of
        (symbols, log-probs, entropies)."""
        batch_size = encoder_state.size(0)
        device = encoder_state.device
        sequence = []
        logits = []
        entropy = []
        special_symbol = self.special_symbol_embedding.expand(batch_size, -1).unsqueeze(1).to(device)
        input = special_symbol
        for step in range(self.max_len):
            if self.causal:
                # Upper-triangular -inf mask blocks attention to future positions.
                attn_mask = torch.triu(torch.ones(step+1, step+1).byte(), diagonal=1).to(device)
                attn_mask = attn_mask.float().masked_fill(attn_mask == 1, float('-inf'))
            else:
                attn_mask = None
            output = self.transformer(embedded_input=input, encoder_out=encoder_state, attn_mask=attn_mask)
            step_logits = F.log_softmax(self.embedding_to_vocab(output[:, -1, :]), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                symbols = distr.sample()
            else:
                symbols = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            sequence.append(symbols)
            # Append the embedding of the freshly generated symbol for the next step.
            new_embedding = self.embed_tokens(symbols) * self.embed_scale
            input = torch.cat([input, new_embedding.unsqueeze(dim=1)], dim=1)
        return sequence, logits, entropy
    def generate_inplace(self, encoder_state):
        """Autoregressive generation, 'in-place' style: a special <need-symbol>
        embedding is appended and the new symbol is read off *its* position."""
        batch_size = encoder_state.size(0)
        device = encoder_state.device
        sequence = []
        logits = []
        entropy = []
        special_symbol = self.special_symbol_embedding.expand(batch_size, -1).unsqueeze(1).to(encoder_state.device)
        output = []
        for step in range(self.max_len):
            input = torch.cat(output + [special_symbol], dim=1)
            if self.causal:
                attn_mask = torch.triu(torch.ones(step+1, step+1).byte(), diagonal=1).to(device)
                attn_mask = attn_mask.float().masked_fill(attn_mask == 1, float('-inf'))
            else:
                attn_mask = None
            embedded = self.transformer(embedded_input=input, encoder_out=encoder_state, attn_mask=attn_mask)
            step_logits = F.log_softmax(self.embedding_to_vocab(embedded[:, -1, :]), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                symbols = distr.sample()
            else:
                symbols = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            sequence.append(symbols)
            new_embedding = self.embed_tokens(symbols) * self.embed_scale
            output.append(new_embedding.unsqueeze(dim=1))
        return sequence, logits, entropy
    def forward(self, x):
        """Return (message, per-step log-probs, per-step entropies); each of
        shape (batch, len), with a zero eos column appended when force_eos."""
        encoder_state = self.agent(x)
        if self.generate_style == 'standard':
            sequence, logits, entropy = self.generate_standard(encoder_state)
        elif self.generate_style == 'in-place':
            sequence, logits, entropy = self.generate_inplace(encoder_state)
        else:
            assert False, 'Unknown generate style'
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
# src/core/reinforce_wrappers.py
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from collections import defaultdict
import numpy as np
from .transformer import TransformerEncoder, TransformerDecoder
from .rnn import RnnEncoder, RnnEncoderImpatient,RnnEncoderExternalEmbedding
from .util import find_lengths
class ReinforceWrapper(nn.Module):
    """
    Wraps a stochastic agent for REINFORCE training.

    The wrapped agent must output log-probabilities over a fixed set of
    actions. In training mode the wrapper samples from the implied categorical
    distribution; at eval time it takes the argmax. The forward pass returns
    (action, log-prob of the action, entropy of the distribution).

    >>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1))
    >>> agent = ReinforceWrapper(agent)
    >>> sample, log_prob, entropy = agent(torch.ones(4, 10))
    >>> sample.size()
    torch.Size([4])
    >>> (log_prob < 0).all().item()
    1
    >>> (entropy > 0).all().item()
    1
    """
    def __init__(self, agent):
        super(ReinforceWrapper, self).__init__()
        self.agent = agent

    def forward(self, *args, **kwargs):
        log_probs = self.agent(*args, **kwargs)
        distribution = Categorical(logits=log_probs)
        # Stochastic action during training, greedy action at eval time.
        action = distribution.sample() if self.training else log_probs.argmax(dim=1)
        return action, distribution.log_prob(action), distribution.entropy()
class ReinforceDeterministicWrapper(nn.Module):
    """
    Makes a deterministic (non-sampling) agent compatible with Reinforce-based
    games by attaching zero log-probability and zero entropy to its output.
    The agent's own output is passed through untouched.

    >>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1))
    >>> agent = ReinforceDeterministicWrapper(agent)
    >>> sample, log_prob, entropy = agent(torch.ones(4, 10))
    >>> sample.size()
    torch.Size([4, 3])
    >>> (log_prob == 0).all().item()
    1
    >>> (entropy == 0).all().item()
    1
    """
    def __init__(self, agent):
        super(ReinforceDeterministicWrapper, self).__init__()
        self.agent = agent

    def forward(self, *args, **kwargs):
        output = self.agent(*args, **kwargs)
        # No sampling happened, so log-prob and entropy are identically zero.
        placeholder = torch.zeros(1).to(output.device)
        return output, placeholder, torch.zeros(1).to(output.device)
class SymbolGameReinforce(nn.Module):
    """
    A single-symbol Sender/Receiver game implemented with Reinforce.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff=0.0, receiver_entropy_coeff=0.0):
        """
        :param sender: Sender agent. On forward, returns a tuple of (message, log-prob of the message, entropy).
        :param receiver: Receiver agent. On forward, accepts a message and the dedicated receiver input. Returns
            a tuple of (output, log-probs, entropy).
        :param loss: The loss function that accepts:
            sender_input: input of Sender
            message: the message sent by Sender
            receiver_input: input of Receiver from the dataset
            receiver_output: output of Receiver
            labels: labels assigned to Sender's input data
            and outputs the end-to-end loss. Can be non-differentiable; if it is differentiable, this will be leveraged
        :param sender_entropy_coeff: The entropy regularization coefficient for Sender
        :param receiver_entropy_coeff: The entropy regularization coefficient for Receiver
        """
        super(SymbolGameReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.loss = loss
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.sender_entropy_coeff = sender_entropy_coeff
        # Running mean baseline used to reduce REINFORCE gradient variance.
        self.mean_baseline = 0.0
        self.n_points = 0.0

    def forward(self, sender_input, labels, receiver_input=None):
        message, sender_log_prob, sender_entropy = self.sender(sender_input)
        receiver_output, receiver_log_prob, receiver_entropy = self.receiver(message, receiver_input)
        loss, rest_info = self.loss(sender_input, message, receiver_input, receiver_output, labels)

        # REINFORCE policy gradient with a running mean baseline.
        policy_loss = ((loss.detach() - self.mean_baseline) * (sender_log_prob + receiver_log_prob)).mean()
        entropy_loss = -(sender_entropy.mean() * self.sender_entropy_coeff + receiver_entropy.mean() * self.receiver_entropy_coeff)

        if self.training:
            self.n_points += 1.0
            self.mean_baseline += (loss.detach().mean().item() -
                                   self.mean_baseline) / self.n_points

        # If the loss is differentiable, loss.mean() back-propagates directly.
        full_loss = policy_loss + entropy_loss + loss.mean()

        for k, v in rest_info.items():
            if hasattr(v, 'mean'):
                rest_info[k] = v.mean().item()

        rest_info['baseline'] = self.mean_baseline
        rest_info['loss'] = loss.mean().item()
        # bug fix: the entropies were stored as bare tensors, unlike every
        # other aux key (and every other game class); convert with .item().
        rest_info['sender_entropy'] = sender_entropy.mean().item()
        rest_info['receiver_entropy'] = receiver_entropy.mean().item()

        return full_loss, rest_info
class RnnSenderReinforce(nn.Module):
    """
    REINFORCE wrapper for Sender in a variable-length message game.

    The wrapped agent maps its input to the initial hidden state of a (possibly
    multi-layer) RNN, which is unrolled for up to `max_len` steps. At every
    step a symbol is sampled from a categorical distribution over the
    vocabulary during training, and taken greedily (argmax) at eval time.
    Returns (message, per-step log-probs, per-step entropies).

    >>> agent = nn.Linear(10, 3)
    >>> agent = RnnSenderReinforce(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False)
    >>> message, logprob, entropy = agent(torch.FloatTensor(16, 10).uniform_(-0.1, 0.1))
    >>> message.size()
    torch.Size([16, 10])
    >>> (entropy > 0).all().item()
    1
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped; produces the initial hidden state
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param cell: type of the cell used ('rnn', 'gru', 'lstm')
        :param force_eos: if True, an eos symbol (0) is appended to every message;
            the RNN then only generates `max_len - 1` symbols.
        """
        super(RnnSenderReinforce, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len - 1 if force_eos else max_len
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        cell = cell.lower()
        known_cells = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in known_cells:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = known_cells[cell]
        # First layer consumes symbol embeddings; deeper layers consume hidden states.
        self.cells = nn.ModuleList(
            [cell_type(input_size=embed_dim, hidden_size=hidden_size)] +
            [cell_type(input_size=hidden_size, hidden_size=hidden_size) for _ in range(self.num_layers - 1)])
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def forward(self, x):
        # Layer 0 starts from the agent's output; deeper layers start from zeros.
        hidden = [self.agent(x)]
        hidden.extend(torch.zeros_like(hidden[0]) for _ in range(self.num_layers - 1))
        cell_state = [torch.zeros_like(hidden[0]) for _ in range(self.num_layers)]  # LSTM only
        step_input = torch.stack([self.sos_embedding] * x.size(0))
        sequence, logits, entropy = [], [], []
        for _ in range(self.max_len):
            for layer_idx, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(step_input, (hidden[layer_idx], cell_state[layer_idx]))
                    h_t = self.norm_h(h_t)
                    cell_state[layer_idx] = self.norm_c(c_t)
                else:
                    h_t = self.norm_h(layer(step_input, hidden[layer_idx]))
                hidden[layer_idx] = h_t
                step_input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            # Sample while training, act greedily at eval time.
            symbols = distr.sample() if self.training else step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            step_input = self.embedding(symbols)
            sequence.append(symbols)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            eos_pad = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, eos_pad.long()], dim=1)
            logits = torch.cat([logits, eos_pad], dim=1)
            entropy = torch.cat([entropy, eos_pad], dim=1)
        return sequence, logits, entropy
class RnnSenderReinforceModel3(nn.Module):
    """
    REINFORCE wrapper for Sender in a variable-length message game; variant of
    RnnSenderReinforce that can additionally return the full per-step symbol
    distributions (`imitate=True`) for imitation-style training.

    The wrapped agent provides the initial RNN hidden state; the RNN is
    unrolled for up to `max_len` steps, sampling one symbol per step during
    training and taking the argmax at eval time.
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped; produces the initial hidden state
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param cell: type of the cell used ('rnn', 'gru', 'lstm')
        :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes
            beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS.
        """
        super(RnnSenderReinforceModel3, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.norm_h = torch.nn.LayerNorm(hidden_size)
        self.norm_c = torch.nn.LayerNorm(hidden_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def forward(self, x, imitate=False):
        """
        :param x: agent input
        :param imitate: if True, the second returned tensor holds the full symbol
            probabilities per step, shaped (batch, vocab_size, len); otherwise the
            log-prob of the chosen symbol per step, shaped (batch, len).
        """
        prev_hidden = [self.agent(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    # bug fix: the cell state was normalized with norm_h, leaving
                    # self.norm_c unused; use norm_c, as RnnSenderReinforce does.
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            if imitate:
                logits.append(distr.probs)
            else:
                logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        if imitate:
            logits = torch.stack(logits).permute(1, 2, 0)
        else:
            logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            if imitate:
                # bug fix: logits is 3-D here, so concatenating the 2-D `zeros`
                # raised. The forced eos (symbol 0) is emitted with probability 1.
                eos_probs = torch.zeros((logits.size(0), logits.size(1), 1)).to(logits.device)
                eos_probs[:, 0, 0] = 1.0
                logits = torch.cat([logits, eos_probs], dim=2)
            else:
                logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class RnnSenderReinforceExternalEmbedding(nn.Module):
    """
    REINFORCE wrapper for Sender in a variable-length message game; identical
    to RnnSenderReinforceModel3 except that the symbol-embedding layer is
    supplied by the caller (e.g. shared with another agent) instead of being
    created internally.
    """
    def __init__(self, agent, embedding_layer, vocab_size, embed_dim, hidden_size, max_len, num_layers=1, cell='rnn', force_eos=True):
        """
        :param agent: the agent to be wrapped; produces the initial hidden state
        :param embedding_layer: externally owned symbol embedding module
        :param vocab_size: the communication vocabulary size
        :param embed_dim: the size of the embedding used to embed the output symbols
        :param hidden_size: the RNN cell's hidden state size
        :param max_len: maximal length of the output messages
        :param cell: type of the cell used ('rnn', 'gru', 'lstm')
        :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes
            beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS.
        """
        super(RnnSenderReinforceExternalEmbedding, self).__init__()
        self.agent = agent
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.norm_h = torch.nn.LayerNorm(hidden_size)
        self.norm_c = torch.nn.LayerNorm(hidden_size)
        self.embedding = embedding_layer
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \
            cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def forward(self, x, imitate=False):
        """
        :param x: agent input
        :param imitate: if True, the second returned tensor holds the full symbol
            probabilities per step, shaped (batch, vocab_size, len); otherwise the
            log-prob of the chosen symbol per step, shaped (batch, len).
        """
        prev_hidden = [self.agent(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    # bug fix: the cell state was normalized with norm_h, leaving
                    # self.norm_c unused; use norm_c, as RnnSenderReinforce does.
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            if imitate:
                logits.append(distr.probs)
            else:
                logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        if imitate:
            logits = torch.stack(logits).permute(1, 2, 0)
        else:
            logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            if imitate:
                # bug fix: logits is 3-D here, so concatenating the 2-D `zeros`
                # raised. The forced eos (symbol 0) is emitted with probability 1.
                eos_probs = torch.zeros((logits.size(0), logits.size(1), 1)).to(logits.device)
                eos_probs[:, 0, 0] = 1.0
                logits = torch.cat([logits, eos_probs], dim=2)
            else:
                logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class RnnReceiverReinforce(nn.Module):
    """
    REINFORCE wrapper for Receiver in a variable-length message game.

    The message is run through an RNN encoder; the wrapped agent is applied to
    the hidden state corresponding to the eos symbol (or to the last position
    when the message reaches maximal length) and must return a
    (sample, log-prob, entropy) triple.
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverReinforce, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        sample, log_probs, entropies = self.agent(encoded, input)
        return sample, log_probs, entropies
class RnnReceiverCompositionality(nn.Module):
    """
    REINFORCE wrapper for Receiver in games with several attributes
    (compositionality experiments). Equivalent to RnnReceiverReinforce, except
    that each attribute is predicted independently from the encoded message.

    Returns (per-attribute log-probs over values of shape
    (batch, n_attributes, n_values), log-probs of the sampled values, and
    per-attribute entropies, both of shape (batch, n_attributes)).
    """
    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_attributes, n_values, cell='rnn', num_layers=1):
        super(RnnReceiverCompositionality, self).__init__()
        self.agent = agent
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.hidden_to_output = nn.Linear(hidden_size, n_attributes * n_values)

    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        raw_scores = self.hidden_to_output(encoded)
        # One log-softmax per attribute over its n_values candidates.
        logits = F.log_softmax(raw_scores.reshape(encoded.size(0), self.n_attributes, self.n_values), dim=2)
        entropies = []
        sampled_log_probs = []
        for attr in range(logits.size(1)):
            distr = Categorical(logits=logits[:, attr, :])
            entropies.append(distr.entropy())
            choice = distr.sample()
            sampled_log_probs.append(distr.log_prob(choice))
        entropy = torch.stack(entropies).permute(1, 0)
        slogits = torch.stack(sampled_log_probs).permute(1, 0)
        return logits, slogits, entropy
class RnnReceiverDeterministic(nn.Module):
    """
    Reinforce wrapper for a deterministic Receiver in variable-length message
    games. The message is encoded with an RNN and layer-normalised; the
    wrapped agent is called on the hidden state taken at the EOS position (or
    at the end of the sequence). Zero-valued log-prob and entropy tensors are
    appended so that the agent stays compatible with the
    SenderReceiverRnnReinforce game. When ``imitate`` is set, the agent output
    is additionally interpreted as a categorical policy and its probabilities
    and entropy are returned as well.

    As the wrapped agent does not sample, it has to be trained via regular
    back-propagation, which requires the agent and the loss to be
    differentiable.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverDeterministic, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoder(vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, message, input=None, lengths=None, imitate=False):
        hidden = self.norm(self.encoder(message))
        agent_output = self.agent(hidden, input)
        zeros = torch.zeros(agent_output.size(0)).to(agent_output.device)
        if not imitate:
            # Deterministic agent: log-prob and entropy are identically zero.
            return agent_output, zeros, zeros
        # Imitation mode: expose the output as a categorical policy.
        policy = Categorical(logits=F.log_softmax(agent_output, dim=1))
        return agent_output, policy.probs, policy.entropy(), zeros, zeros
class RnnReceiverDeterministicExternalEmbedding(nn.Module):
    """
    Variant of RnnReceiverDeterministic whose encoder reuses an externally
    provided embedding layer. The message is encoded, layer-normalised, and
    handed to the wrapped agent; zero-valued log-prob and entropy tensors keep
    the agent compatible with the SenderReceiverRnnReinforce game. With
    ``imitate`` set, the agent output is additionally read as a categorical
    policy (probabilities and entropy are returned too).

    As the wrapped agent does not sample, it has to be trained via regular
    back-propagation (agent and loss must be differentiable).
    """

    def __init__(self, agent, embedding_layer, vocab_size, embed_dim, hidden_size, cell='rnn', num_layers=1):
        super(RnnReceiverDeterministicExternalEmbedding, self).__init__()
        self.agent = agent
        self.encoder = RnnEncoderExternalEmbedding(embedding_layer, vocab_size, embed_dim, hidden_size, cell, num_layers)
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, message, input=None, lengths=None, imitate=False):
        hidden = self.norm(self.encoder(message))
        agent_output = self.agent(hidden, input)
        zeros = torch.zeros(agent_output.size(0)).to(agent_output.device)
        if not imitate:
            # Deterministic agent: log-prob and entropy are identically zero.
            return agent_output, zeros, zeros
        # Imitation mode: expose the output as a categorical policy.
        policy = Categorical(logits=F.log_softmax(agent_output, dim=1))
        return agent_output, policy.probs, policy.entropy(), zeros, zeros
class RnnReceiverImpatient(nn.Module):
    """
    Impatient Listener. The message goes through an encoder that exposes the
    intermediate hidden state at every position; each state is mapped to a
    categorical distribution by a single Linear layer followed by a
    log-softmax. The per-position log-policies are later consumed by the
    Impatient loss function.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_features, cell='rnn', num_layers=1):
        super(RnnReceiverImpatient, self).__init__()
        self.max_len = max_len
        self.hidden_to_output = nn.Linear(hidden_size, n_features)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        hidden_states = self.encoder(message)

        per_step_policies, log_probs, entropies = [], [], []
        for position in range(hidden_states.size(0)):
            state = hidden_states[position]
            log_policy = F.log_softmax(self.hidden_to_output(state), dim=1)
            distr = Categorical(logits=log_policy)
            entropies.append(distr.entropy())
            # Sample during training, act greedily otherwise.
            symbol = distr.sample() if self.training else log_policy.argmax(dim=1)
            log_probs.append(distr.log_prob(symbol))
            per_step_policies.append(log_policy)

        sequence = torch.stack(per_step_policies).permute(1, 0, 2)
        logits = torch.stack(log_probs).permute(1, 0)
        entropy = torch.stack(entropies).permute(1, 0)
        return sequence, logits, entropy
class RnnReceiverImpatientCompositionality(nn.Module):
    """
    Adaptation of RnnReceiverImpatient for inputs with several attributes
    (compositionality experiments). Each attribute is treated independently:
    at every message position one categorical policy per attribute is produced.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_attributes, n_values, cell='rnn', num_layers=1):
        super(RnnReceiverImpatientCompositionality, self).__init__()
        self.max_len = max_len
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.hidden_to_output = nn.Linear(hidden_size, n_attributes * n_values)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        hidden_states = self.encoder(message)

        sequence, slogits, entropy = [], [], []
        for position in range(hidden_states.size(0)):
            state = hidden_states[position]
            # (batch, n_attributes, n_values) log-policy for this position.
            log_policy = F.log_softmax(
                self.hidden_to_output(state).reshape(state.size(0), self.n_attributes, self.n_values),
                dim=2)
            sequence.append(log_policy)

            step_entropies, step_log_probs = [], []
            for attr in range(log_policy.size(1)):
                attr_distr = Categorical(logits=log_policy[:, attr, :])
                step_entropies.append(attr_distr.entropy())
                drawn = attr_distr.sample()
                step_log_probs.append(attr_distr.log_prob(drawn))
            entropy.append(torch.stack(step_entropies).permute(1, 0))
            slogits.append(torch.stack(step_log_probs).permute(1, 0))

        sequence = torch.stack(sequence).permute(1, 0, 2, 3)
        entropy = torch.stack(entropy).permute(1, 0, 2)
        slogits = torch.stack(slogits).permute(1, 0, 2)
        return sequence, slogits, entropy
class RnnReceiverWithHiddenStates(nn.Module):
    """
    Impatient-style listener that also returns its (layer-normalised) hidden
    states. The message is fed through an encoder exposing the intermediate
    hidden state for every position; each state is mapped to a categorical
    distribution by a single Linear layer (hidden_to_output) followed by a
    log-softmax. Returns per-position policies, log-probs, entropies and the
    hidden states themselves.
    """

    def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, n_features, cell='rnn', num_layers=1):
        super(RnnReceiverWithHiddenStates, self).__init__()
        self.max_len = max_len
        self.hidden_to_output = nn.Linear(hidden_size, n_features)
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.encoder = RnnEncoderImpatient(vocab_size, embed_dim, hidden_size, cell, num_layers)

    def forward(self, message, input=None, lengths=None):
        encoded = self.encoder(message)
        sequence = []
        logits = []
        entropy = []
        hidden_states = []
        for step in range(encoded.size(0)):
            h_t = encoded[step, :, :]
            # Bug fix: was `norm_h(h_t)` (NameError) — the LayerNorm lives on self.
            h_t = self.norm_h(h_t)
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            sequence.append(step_logits)
            hidden_states.append(h_t)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        hidden_states = torch.stack(hidden_states).permute(1, 0, 2)
        return sequence, logits, entropy, hidden_states
class AgentBaseline(nn.Module):
    """
    Container pairing an independent sender and an independent receiver.
    The forward pass is intentionally unimplemented: callers are expected to
    drive the two parts directly.
    """

    def __init__(self, receiver, sender):
        super(AgentBaseline, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def forward(self, message, input=None, lengths=None):
        raise NotImplementedError
class AgentModel2(nn.Module):
    """
    Agent bundling a sender and a receiver module. On top of the plain
    receiver pass, ``receive`` also decodes a language-model style prediction
    from every intermediate receiver hidden state, reusing the sender's
    hidden-to-output projection (greedy argmax decoding).
    """

    def __init__(self, receiver, sender):
        super(AgentModel2, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def send(self, sender_input):
        return self.sender(sender_input)

    def receive(self, message, receiver_input, message_lengths):
        receiver_output, log_prob_r, entropy_r, hidden_states = self.receiver(message, receiver_input, message_lengths)

        lm_policies, lm_log_probs = [], []
        for position in range(hidden_states.size(1)):
            state = hidden_states[:, position, :]
            # Language-model head: score the sender's vocabulary from the
            # receiver's hidden state at this position.
            lm_log_policy = F.log_softmax(self.sender.hidden_to_output(state), dim=1)
            lm_distr = Categorical(logits=lm_log_policy)
            greedy = lm_log_policy.argmax(dim=1)
            lm_log_probs.append(lm_distr.log_prob(greedy))
            lm_policies.append(lm_log_policy)

        sequence_lm = torch.stack(lm_policies).permute(1, 0, 2)
        logits_lm = torch.stack(lm_log_probs).permute(1, 0)
        return receiver_output, log_prob_r, entropy_r, sequence_lm, logits_lm
class AgentModel3(nn.Module):
    """
    Agent bundling a sender and a receiver module; a thin delegation layer.
    ``receive`` forwards the imitation flag to the receiver, and ``imitate``
    asks the sender for its full policies instead of sampled symbols.
    """

    def __init__(self, receiver, sender):
        super(AgentModel3, self).__init__()
        self.receiver = receiver
        self.sender = sender

    def send(self, sender_input):
        return self.sender(sender_input)

    def receive(self, message, receiver_input, message_lengths, imitate=True):
        return self.receiver(message, receiver_input, message_lengths, imitate)

    def imitate(self, sender_input, imitate=True):
        return self.sender(sender_input, imitate)
# New agent class
class AgentBaseline2(nn.Module):
    """
    Agent owning both a sender and a receiver side with independent parameters.

    The sender side unrolls a stack of layer-normalised RNN/GRU/LSTM cells to
    emit a message symbol by symbol (``send``/``imitate``); the receiver side
    embeds a message and decodes it either cell by cell (``receive``) or with
    a packed, batched RNN (``receive_2``).
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 sender_hidden_size,
                 receiver_hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaseline2, self).__init__()

        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve the last position for the EOS symbol appended in send().
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.sender_hidden_size = sender_hidden_size
        self.receiver_hidden_size = receiver_hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))

        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}

        # Sender
        self.agent_sender = nn.Linear(n_features, sender_hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(sender_hidden_size)
        self.sender_norm_c = nn.LayerNorm(sender_hidden_size)
        self.hidden_to_output = nn.Linear(sender_hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)

        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=sender_hidden_size) if i == 0 else
            cell_type(input_size=sender_hidden_size, hidden_size=sender_hidden_size)
            for i in range(self.sender_num_layers)])

        # Receiver
        self.agent_receiver = nn.Linear(receiver_hidden_size, n_features)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(receiver_hidden_size)
        self.receiver_norm_c = nn.LayerNorm(receiver_hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)

        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        # Step-by-step cells used by receive().
        # Bug fix: these used to be left as None (the ModuleList creation was
        # commented out), so receive() crashed when iterating receiver_cells.
        cell_type = cell_types[receiver_cell]
        self.receiver_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=receiver_hidden_size) if i == 0 else
            cell_type(input_size=receiver_hidden_size, hidden_size=receiver_hidden_size)
            for i in range(self.receiver_num_layers)])
        # Batched (packed-sequence) RNN used by receive_2().
        cell_types_r = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
        self.receiver_cell = cell_types_r[receiver_cell](input_size=embed_dim, batch_first=True,
                                                         hidden_size=receiver_hidden_size,
                                                         num_layers=receiver_num_layers)
        self.reset_parameters()

    def reset_parameters(self):
        # Small random start-of-sequence embedding.
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False, return_policies=False):
        """Unroll the sender and emit a message for input batch `x`.

        Samples symbols during training (unless ``eval``), acts greedily
        otherwise. Returns (sequence, log_probs, entropy), plus the full
        per-step policies when ``return_policies`` is set.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))

        sequence = []
        logits = []
        entropy = []
        whole_logits = []

        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t

            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())

            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(distr.probs)

            input = self.sender_embedding(x)
            sequence.append(x)

        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1, 0, 2)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        if return_policies:
            return sequence, logits, whole_logits, entropy
        else:
            return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """Decode `message` cell by cell; the output is read at each message's
        EOS position. Log-prob and entropy are zero (deterministic receiver)."""
        if message_lengths is None:
            message_lengths = find_lengths(message)

        # Bug fixes: was `self.hidden_size` (never defined on this class) and a
        # hard-coded "cuda" device.
        prev_hidden = [torch.zeros((message.size(0), self.receiver_hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)]  # only used for LSTM

        inputs = self.receiver_embedding(message)

        sequence = []
        logits = []
        entropy = []

        # NOTE(review): assumes the message has at least self.max_len symbols;
        # also every layer receives the same embedded input (unlike send(),
        # which feeds h_t upward) — confirm this is intended.
        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # Bug fix: was `self.norm_h`, which does not exist here.
                    h_t = self.receiver_norm_h(h_t)
                prev_hidden[i] = h_t

            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)

        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)

        # Read the receiver output at the last symbol (EOS) of each message.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def receive_2(self, message, receiver_input, message_lengths):
        """Decode `message` with the batched packed-sequence RNN (fast path)."""
        emb = self.receiver_embedding(message)
        if message_lengths is None:
            message_lengths = find_lengths(message)
        packed = nn.utils.rnn.pack_padded_sequence(
            emb, message_lengths.cpu(), batch_first=True, enforce_sorted=False)
        _, rnn_hidden = self.receiver_cell(packed)
        if isinstance(self.receiver_cell, nn.LSTM):
            rnn_hidden, _ = rnn_hidden  # drop the LSTM cell state
        encoded = rnn_hidden[-1]
        agent_output = self.agent_receiver(encoded)
        logits = torch.zeros(agent_output.size(0)).to(agent_output.device)
        entropy = logits
        return agent_output, logits, entropy

    def imitate(self, x):
        """Like send(), but returns the full per-step probability policies
        (shape: batch x vocab x time) instead of sampled log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))

        sequence = []
        logits = []
        entropy = []

        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t

            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)

        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            # NOTE(review): logits is 3-D here, so this cat would fail when
            # force_eos is set — confirm imitate() is only used without it.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        return sequence, logits, entropy
class AgentBaselineCompositionality(nn.Module):
    """
    Agent owning both a sender and a receiver side for attribute/value
    (compositionality) games. The sender unrolls layer-normalised cells to
    emit a message; the receiver encodes the message with a packed batched RNN
    and predicts every attribute independently.
    """

    def __init__(self,
                 n_values,
                 n_attributes,
                 vocab_size,
                 max_len,
                 embed_dim,
                 sender_hidden_size,
                 receiver_hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaselineCompositionality, self).__init__()

        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve the last position for the EOS symbol appended in send().
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.sender_hidden_size = sender_hidden_size
        self.receiver_hidden_size = receiver_hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))

        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}

        # Sender
        self.agent_sender = nn.Linear(n_values * n_attributes, sender_hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(sender_hidden_size)
        self.sender_norm_c = nn.LayerNorm(sender_hidden_size)
        self.hidden_to_output = nn.Linear(sender_hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)

        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=sender_hidden_size) if i == 0 else
            cell_type(input_size=sender_hidden_size, hidden_size=sender_hidden_size)
            for i in range(self.sender_num_layers)])

        # Receiver: predicts all attribute/value pairs from the final encoding.
        self.agent_receiver = nn.Linear(receiver_hidden_size, n_values * n_attributes)
        self.receiver_cells = None  # unused: receive() goes through the batched RNN
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(receiver_hidden_size)
        self.receiver_norm_c = nn.LayerNorm(receiver_hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)

        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        cell_types_r = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
        self.receiver_cell = cell_types_r[receiver_cell](input_size=embed_dim, batch_first=True,
                                                         hidden_size=receiver_hidden_size,
                                                         num_layers=receiver_num_layers)
        self.reset_parameters()

    def reset_parameters(self):
        # Small random start-of-sequence embedding.
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False, return_policies=False):
        """Unroll the sender and emit a message for input batch `x`.

        Samples symbols during training (unless ``eval``), acts greedily
        otherwise. Returns (sequence, log_probs, entropy), plus the full
        per-step policies when ``return_policies`` is set.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))

        sequence = []
        logits = []
        entropy = []
        whole_logits = []

        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t

            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())

            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(distr.probs)

            input = self.sender_embedding(x)
            sequence.append(x)

        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1, 0, 2)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        if return_policies:
            return sequence, logits, whole_logits, entropy
        else:
            return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths, return_policies=False, return_sample=False):
        """Encode `message` with the batched RNN and predict each attribute
        independently (greedy argmax per attribute)."""
        emb = self.receiver_embedding(message)
        if message_lengths is None:
            message_lengths = find_lengths(message)
        packed = nn.utils.rnn.pack_padded_sequence(
            emb, message_lengths.cpu(), batch_first=True, enforce_sorted=False)
        _, rnn_hidden = self.receiver_cell(packed)
        if isinstance(self.receiver_cell, nn.LSTM):
            rnn_hidden, _ = rnn_hidden  # drop the LSTM cell state
        encoded = rnn_hidden[-1]

        agent_output = self.agent_receiver(encoded).reshape(encoded.size(0), self.n_attributes, self.n_values)
        logits = F.log_softmax(agent_output, dim=2)

        entropy = []
        slogits = []
        sample = []
        for i in range(logits.size(1)):
            distr = Categorical(logits=logits[:, i, :])
            entropy.append(distr.entropy())
            # NOTE(review): greedy argmax even in training mode — sampling was
            # deliberately commented out in the original code.
            x = logits[:, i, :].argmax(dim=1)
            # Bug fix: `sample` was only filled in training mode, so the
            # unconditional torch.stack below crashed in eval mode.
            sample.append(x)
            slogits.append(distr.log_prob(x))

        entropy = torch.stack(entropy).permute(1, 0)
        slogits = torch.stack(slogits).permute(1, 0)
        sample = torch.stack(sample).permute(1, 0)

        if return_sample:
            return sample, agent_output, slogits, logits, entropy
        elif return_policies:
            return agent_output, slogits, logits, entropy
        else:
            return agent_output, slogits, entropy

    def imitate(self, x):
        raise NotImplementedError
class AgentBaselineKL(nn.Module):
    """
    Agent owning both a sender and a receiver side that share a single hidden
    size. The sender unrolls layer-normalised cells to emit a message —
    ``send`` also returns the full per-step log-policies, used for KL terms —
    and the receiver decodes the message cell by cell (``receive``).
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        super(AgentBaselineKL, self).__init__()

        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            # Reserve the last position for the EOS symbol appended in send().
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))

        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}

        # Sender
        self.agent_sender = nn.Linear(n_features, hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(hidden_size)
        self.sender_norm_c = nn.LayerNorm(hidden_size)
        # Projection from a hidden state to vocabulary logits.
        # (The original assigned self.hidden_to_output twice with identical
        # shapes; the redundant duplicate is removed.)
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)

        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.sender_num_layers)])

        # Receiver
        self.agent_receiver = nn.Linear(hidden_size, n_features)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(hidden_size)
        self.receiver_norm_c = nn.LayerNorm(hidden_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)

        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        cell_type = cell_types[receiver_cell]
        self.receiver_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.receiver_num_layers)])

        self.reset_parameters()

    def reset_parameters(self):
        # Small random start-of-sequence embedding.
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False):
        """Unroll the sender for input batch `x`.

        Returns (sequence, log_probs, whole_logits, entropy), where
        whole_logits are the full per-step log-policies (for KL terms).
        Samples during training unless ``eval``; greedy otherwise.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))

        sequence = []
        logits = []
        whole_logits = []
        entropy = []

        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t

            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())

            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            whole_logits.append(step_logits)

            input = self.sender_embedding(x)
            sequence.append(x)

        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        whole_logits = torch.stack(whole_logits).permute(1, 0, 2)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        return sequence, logits, whole_logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """Decode `message` cell by cell; the output is read at each message's
        EOS position. Log-prob and entropy are zero (deterministic receiver)."""
        if message_lengths is None:
            message_lengths = find_lengths(message)

        # Bug fix: device was hard-coded to "cuda"; follow the message instead.
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)]  # only used for LSTM

        inputs = self.receiver_embedding(message)

        sequence = []
        logits = []
        entropy = []

        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # Bug fix: was `self.norm_h`, which does not exist here.
                    h_t = self.receiver_norm_h(h_t)
                prev_hidden[i] = h_t

            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)

        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)

        # Read the receiver output at the last symbol (EOS) of each message.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """Like send(), but returns the full per-step probability policies
        (shape: batch x vocab x time) instead of sampled log-probs."""
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))

        sequence = []
        logits = []
        entropy = []

        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t

            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.sender_embedding(x)
            sequence.append(x)

        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            # NOTE(review): logits is 3-D here, so this cat would fail when
            # force_eos is set — confirm imitate() is only used without it.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        return sequence, logits, entropy
class AgentPol(nn.Module):
    """
    Agent made of two independent modalities:

    - a sender (speaker): encodes an input feature vector and emits a
      discrete message autoregressively with a stack of RNN cells;
    - a receiver (listener): consumes a message with its own RNN stack and
      projects the final hidden state back to feature space.

    Also initializes per-feature memory buffers (``mem``, ``w_mem``,
    ``est_policy``) that are filled by external training code.
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 sender_cell,
                 receiver_cell,
                 sender_num_layers,
                 receiver_num_layers,
                 force_eos):
        """
        Args:
            n_features: size of the input/output feature space.
            vocab_size: number of symbols in the communication vocabulary.
            max_len: maximum message length (reduced by 1 when ``force_eos``).
            embed_dim: symbol embedding dimension.
            hidden_size: RNN hidden-state dimension.
            sender_cell, receiver_cell: one of 'rnn', 'gru', 'lstm'.
            sender_num_layers, receiver_num_layers: RNN stack depths.
            force_eos: when True, a trailing EOS (symbol 0) is appended to
                every message and the effective ``max_len`` is shrunk by 1.

        Raises:
            ValueError: if a cell name is not one of 'rnn'/'gru'/'lstm'.
        """
        super(AgentPol, self).__init__()
        # Common to sender and receiver
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            self.max_len -= 1
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}

        # Memory buffers (filled elsewhere).
        # Fix: pick the device dynamically instead of hard-coding "cuda" so
        # the agent can also be built on CPU-only machines; behavior on GPU
        # machines is unchanged.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mem = {}
        self.w_mem = {}
        self.est_policy = {}
        for k in range(n_features):
            self.mem[k] = []
            self.w_mem[k] = []
            self.est_policy[k] = torch.zeros([self.max_len, self.vocab_size]).to(device)

        # Sender: feature encoder, RNN stack, layer norms and embedding.
        self.agent_sender = nn.Linear(n_features, hidden_size)
        self.sender_num_layers = sender_num_layers
        self.sender_norm_h = nn.LayerNorm(hidden_size)
        self.sender_norm_c = nn.LayerNorm(hidden_size)
        self.sender_embedding = nn.Embedding(vocab_size, embed_dim)
        sender_cell = sender_cell.lower()
        if sender_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {sender_cell}")
        cell_type = cell_types[sender_cell]
        self.sender_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.sender_num_layers)])

        # Receiver: RNN stack, layer norms, embedding and feature decoder.
        # Note: the original assigned ``hidden_to_output`` twice (once in the
        # sender section, once here); a single assignment yields the same
        # final module, shared by the sender-side decoding.
        self.agent_receiver = nn.Linear(hidden_size, n_features)
        self.receiver_num_layers = receiver_num_layers
        self.receiver_norm_h = nn.LayerNorm(hidden_size)
        self.receiver_norm_c = nn.LayerNorm(hidden_size)
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.receiver_embedding = nn.Embedding(vocab_size, embed_dim)
        receiver_cell = receiver_cell.lower()
        if receiver_cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {receiver_cell}")
        cell_type = cell_types[receiver_cell]
        self.receiver_cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.receiver_num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False):
        """
        Encode features ``x`` and emit a message symbol by symbol.

        Args:
            x: (batch, n_features) input.
            eval: force greedy (argmax) decoding even in training mode.

        Returns:
            (sequence, logits, entropy): symbol ids, log-probs of the chosen
            symbols and per-step entropies, each (batch, max_len[+1 EOS]).
        """
        # Layer 0 starts from the encoded input; deeper layers from zeros.
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t  # feed each layer's output to the next layer
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training and not eval:
                x = distr.sample()  # stochastic policy during training
            else:
                x = step_logits.argmax(dim=1)  # greedy decoding otherwise
            logits.append(distr.log_prob(x))
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # Append a deterministic EOS (symbol 0) with zero logit/entropy.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """
        Decode ``message`` (batch, time) into feature-space predictions.

        Returns:
            (output, logits, entropy): ``output`` is the receiver projection
            taken at each sample's last meaningful symbol; logits/entropy are
            zero placeholders (the receiver is deterministic).
        """
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Fix: allocate the initial hidden state on the message's device
        # instead of a hard-coded "cuda" tensor.
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.receiver_num_layers)]  # only used for LSTM
        inputs = self.receiver_embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            # NOTE(review): unlike send(), every layer reads the token
            # embedding (input is not chained between layers) — confirm this
            # is intended for multi-layer receivers.
            input = inputs[:, step, :]
            for i, layer in enumerate(self.receiver_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.receiver_norm_h(h_t)
                    c_t = self.receiver_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    # Fix: this branch referenced self.norm_h, which does not
                    # exist on AgentPol (AttributeError for rnn/gru receiver
                    # cells); use the receiver's LayerNorm.
                    h_t = self.receiver_norm_h(h_t)
                prev_hidden[i] = h_t
            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Pick the prediction at each message's EOS position.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """
        Like ``send`` but returns the FULL per-step symbol distribution
        (``distr.probs``), shape (batch, vocab_size, time) — for imitation
        losses.
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.sender_num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.sender_cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.sender_norm_h(h_t)
                    c_t = self.sender_norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.sender_norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)  # whole distribution, not log_prob(x)
            input = self.sender_embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # Fix: logits is (batch, vocab, time) here, so the EOS column must
            # be padded along the time axis with a 3-D tensor; the original
            # cat'd a 2-D zeros tensor on dim=1 (dimension mismatch).
            logits = torch.cat(
                [logits, torch.zeros((logits.size(0), logits.size(1), 1)).to(logits.device)], dim=2)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentSharedRNN(nn.Module):
    """
    Agent whose sender and receiver share one RNN stack, one symbol embedding
    and one hidden->vocabulary projection; only the feature-space input
    (``agent_sender``) and output (``agent_receiver``) projections differ.
    """

    def __init__(self,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 cell,
                 num_layers,
                 force_eos,
                 n_features=None):
        """
        Args:
            vocab_size: number of symbols in the vocabulary.
            max_len: maximum message length (reduced by 1 when ``force_eos``).
            embed_dim: symbol embedding dimension.
            hidden_size: RNN hidden-state dimension.
            cell: one of 'rnn', 'gru', 'lstm'.
            num_layers: depth of the shared RNN stack.
            force_eos: append a trailing EOS (symbol 0) to every message.
            n_features: size of the input/output feature space. New trailing
                parameter (backward-compatible): the original referenced the
                undefined names ``n_features``/``n_hidden`` and raised
                NameError on every construction.

        Raises:
            ValueError: if ``n_features`` is missing or ``cell`` is unknown.
        """
        super(AgentSharedRNN, self).__init__()
        if n_features is None:
            raise ValueError("n_features must be provided to build the feature projections")
        # Fix: use hidden_size / the explicit n_features argument instead of
        # the undefined n_hidden / n_features globals.
        self.agent_receiver = nn.Linear(hidden_size, n_features)
        self.agent_sender = nn.Linear(n_features, hidden_size)
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        cell = cell.lower()
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        if cell not in cell_types:
            raise ValueError(f"Unknown RNN Cell: {cell}")
        cell_type = cell_types[cell]
        self.cells = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.num_layers)])
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x):
        """
        Encode features ``x`` and emit a message symbol by symbol.

        Returns:
            (sequence, logits, entropy): symbol ids, log-probs of the chosen
            symbols and per-step entropies, each (batch, max_len[+1 EOS]).
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t  # feed each layer's output to the next layer
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # Append a deterministic EOS (symbol 0) with zero logit/entropy.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """
        Decode ``message`` into feature-space predictions taken at each
        sample's last meaningful symbol; zero logits/entropies mark the
        receiver as deterministic.
        """
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Fix: allocate on the message's device rather than hard-coded "cuda".
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        inputs = self.embedding(message)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
            # Fix: agent_receiver is nn.Linear and takes a single argument;
            # the original called self.agent_receiver(h_t, None) (TypeError).
            agent_output = self.agent_receiver(h_t)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Pick the prediction at each message's EOS position.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """
        Like ``send`` but returns the FULL per-step symbol distribution
        (``distr.probs``), shape (batch, vocab_size, time).
        """
        prev_hidden = [self.agent_sender(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            input = self.embedding(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # Fix: logits is (batch, vocab, time); pad along the time axis
            # (the original cat'd a 2-D zeros tensor — dimension mismatch).
            logits = torch.cat(
                [logits, torch.zeros((logits.size(0), logits.size(1), 1)).to(logits.device)], dim=2)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class AgentSharedEmbedding(nn.Module):
    """
    Agent whose sender and receiver tie weights through two shared linear
    maps: ``FC_features`` (features -> hidden) and ``FC_vocabulary``
    (hidden -> vocabulary). The receiver embeds symbols with
    ``FC_vocabulary``'s weight matrix and decodes features through the
    transpose of ``FC_features``'s weights.
    """

    def __init__(self,
                 n_features,
                 vocab_size,
                 max_len,
                 embed_dim,
                 hidden_size,
                 cell_sender,
                 cell_receiver,
                 num_layers_sender,
                 num_layers_receiver,
                 force_eos):
        """
        Args:
            n_features: size of the input/output feature space.
            vocab_size: number of symbols in the vocabulary.
            max_len: maximum message length (reduced by 1 when ``force_eos``).
            embed_dim: symbol embedding dimension (must equal hidden_size
                because of the weight tying).
            hidden_size: RNN hidden-state dimension.
            cell_sender, cell_receiver: one of 'rnn', 'gru', 'lstm'.
            num_layers_sender, num_layers_receiver: RNN stack depths.
            force_eos: append a trailing EOS (symbol 0) to every message.

        Raises:
            ValueError: if a cell name is not one of 'rnn'/'gru'/'lstm'.
        """
        super(AgentSharedEmbedding, self).__init__()
        # Weight tying requires the embedding and hidden dims to match.
        assert embed_dim == hidden_size, "embed_dim has to be equal to hidden_size"
        self.FC_features = nn.Linear(n_features, hidden_size, bias=False)    # features -> hidden (tied)
        self.FC_vocabulary = nn.Linear(hidden_size, vocab_size, bias=False)  # hidden -> vocab (tied)
        self.force_eos = force_eos
        self.max_len = max_len
        if force_eos:
            self.max_len -= 1
        self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
        # NOTE(review): embedding_listener is created but not used by the
        # methods below (the receiver embeds via FC_vocabulary.weight) —
        # confirm whether it is used elsewhere.
        self.embedding_speaker = nn.Embedding(vocab_size, embed_dim)
        self.embedding_listener = nn.Embedding(vocab_size, embed_dim)
        self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.norm_h = nn.LayerNorm(hidden_size)
        self.norm_c = nn.LayerNorm(hidden_size)
        self.vocab_size = vocab_size
        self.num_layers_sender = num_layers_sender
        self.num_layers_receiver = num_layers_receiver
        self.hidden_size = hidden_size
        cell_types = {'rnn': nn.RNNCell, 'gru': nn.GRUCell, 'lstm': nn.LSTMCell}
        cell_sender = cell_sender.lower()
        if cell_sender not in cell_types:
            # Fix: the original message interpolated the undefined name
            # ``cell`` (NameError masking the real error).
            raise ValueError(f"Unknown RNN Cell: {cell_sender}")
        cell_type = cell_types[cell_sender]
        self.cells_sender = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.num_layers_sender)])
        cell_receiver = cell_receiver.lower()
        if cell_receiver not in cell_types:
            # Fix: same undefined-name bug as above.
            raise ValueError(f"Unknown RNN Cell: {cell_receiver}")
        cell_type = cell_types[cell_receiver]
        self.cells_receiver = nn.ModuleList([
            cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else
            cell_type(input_size=hidden_size, hidden_size=hidden_size)
            for i in range(self.num_layers_receiver)])
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the start-of-sequence embedding."""
        nn.init.normal_(self.sos_embedding, 0.0, 0.01)

    def send(self, x, eval=False):
        """
        Encode features ``x`` and emit a message symbol by symbol.

        Args:
            x: (batch, n_features) input.
            eval: force greedy (argmax) decoding even in training mode.

        Returns:
            (sequence, logits, entropy), each (batch, max_len[+1 EOS]).
        """
        prev_hidden = [self.FC_features(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells_sender):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t  # feed each layer's output to the next layer
            step_logits = F.log_softmax(self.FC_vocabulary(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training and not eval:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(x))
            #input = F.embedding(x,weight=self.FC_vocabulary.weight)
            input = self.embedding_speaker(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            # Append a deterministic EOS (symbol 0) with zero logit/entropy.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy

    def receive(self, message, receiver_input, message_lengths):
        """
        Decode ``message`` into feature-space log-probabilities via the tied
        ``FC_features`` weights, taken at each sample's last meaningful
        symbol; zero logits/entropies mark the receiver as deterministic.
        """
        if message_lengths is None:
            message_lengths = find_lengths(message)
        # Fix: allocate on the message's device rather than hard-coded "cuda".
        prev_hidden = [torch.zeros((message.size(0), self.hidden_size)).to(message.device)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_receiver - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_receiver)]  # only used for LSTM
        # Embed symbols with the tied hidden->vocab weight matrix
        # (shape (vocab_size, hidden_size), valid as an embedding table).
        inputs = F.embedding(message, weight=self.FC_vocabulary.weight)
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            input = inputs[:, step, :]
            for i, layer in enumerate(self.cells_receiver):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
            # Decode features through the transpose of the tied
            # features->hidden weights: h_t @ FC_features.weight.
            agent_output = F.log_softmax(F.linear(h_t, weight=self.FC_features.weight.T), dim=1)
            log = torch.zeros(agent_output.size(0)).to(agent_output.device)
            ent = log
            logits.append(log)
            entropy.append(ent)
            sequence.append(agent_output)
        sequence = torch.stack(sequence).permute(1, 0, 2)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        # Pick the prediction at each message's EOS position.
        output = []
        for j in range(sequence.size(0)):
            output.append(sequence[j, message_lengths[j] - 1, :])
        output = torch.stack(output)
        logits = logits[:, -1]
        entropy = entropy[:, -1]
        return output, logits, entropy

    def imitate(self, x):
        """
        Like ``send`` but returns the FULL per-step symbol distribution
        (``distr.probs``), shape (batch, vocab_size, time).
        """
        prev_hidden = [self.FC_features(x)]
        prev_hidden.extend([torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender - 1)])
        prev_c = [torch.zeros_like(prev_hidden[0]) for _ in range(self.num_layers_sender)]  # only used for LSTM
        input = torch.stack([self.sos_embedding] * x.size(0))
        sequence = []
        logits = []
        entropy = []
        for step in range(self.max_len):
            for i, layer in enumerate(self.cells_sender):
                if isinstance(layer, nn.LSTMCell):
                    h_t, c_t = layer(input, (prev_hidden[i], prev_c[i]))
                    h_t = self.norm_h(h_t)
                    c_t = self.norm_c(c_t)
                    prev_c[i] = c_t
                else:
                    h_t = layer(input, prev_hidden[i])
                    h_t = self.norm_h(h_t)
                prev_hidden[i] = h_t
                input = h_t
            step_logits = F.log_softmax(self.FC_vocabulary(h_t), dim=1)
            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())
            if self.training:
                x = distr.sample()
            else:
                x = step_logits.argmax(dim=1)
            logits.append(distr.probs)
            #input = F.embedding(x,weight=self.FC_vocabulary.weight)
            input = self.embedding_speaker(x)
            sequence.append(x)
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 2, 0)
        entropy = torch.stack(entropy).permute(1, 0)
        if self.force_eos:
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)
            sequence = torch.cat([sequence, zeros.long()], dim=1)
            # Fix: logits is (batch, vocab, time); pad along the time axis
            # (the original cat'd a 2-D zeros tensor — dimension mismatch).
            logits = torch.cat(
                [logits, torch.zeros((logits.size(0), logits.size(1), 1)).to(logits.device)], dim=2)
            entropy = torch.cat([entropy, zeros], dim=1)
        return sequence, logits, entropy
class DialogReinforce(nn.Module):
    """
    DialogReinforce implements the Dialog game: two agents alternately act as
    speaker and listener, trained with REINFORCE on a self-listening loss, a
    cross-listening loss and an imitation loss, with entropy and
    message-length regularization and two selectable baseline schemes.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Expected dict shapes:

        optim_params={"length_cost":0.,
                      "sender_entropy_coeff_1":0.,
                      "receiver_entropy_coeff_1":0.,
                      "sender_entropy_coeff_2":0.,
                      "receiver_entropy_coeff_2":0.}
        loss_weights={"self":1.,
                      "cross":1.,
                      "imitation":1.,
                      "length_regularization":0.,
                      "entropy_regularization":1.}

        baseline_mode: "original" (running-mean baseline) or "new"
            (per-batch standardization of the reward).
        reward_mode: "neg_loss" (reward = -loss) or "proba"
            (reward = exp(-loss)).
        """
        super(DialogReinforce, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        self.baseline_mode=baseline_mode
        self.reward_mode=reward_mode
        # Running-mean baselines keyed by loss name ("original" mode only).
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device=device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)
    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the dialog game.

        Inputs:
            - direction : "1->2" or "2->1" (which agent speaks)
        Returns:
            (optimized_loss, rest) where ``rest`` is a dict of logged scalars
            (and the full sender policy under 'policy_{sender_id}').
        """
        sender_input=sender_input.to(self.device)
        # Assign speaker/listener roles for this round.
        if direction=="1->2":
            agent_sender=self.agent_1
            agent_receiver=self.agent_2
            sender_id=1
            receiver_id=2
        else:
            agent_sender=self.agent_2
            agent_receiver=self.agent_1
            sender_id=2
            receiver_id=1
        " 1. Agent actions "
        # Message sending
        # NOTE(review): send(..., return_policies=True) (4 return values) and
        # receive_2 are not defined on the agent classes visible in this
        # file; they are assumed to come from another agent implementation —
        # confirm.
        message, log_prob_s,whole_log_prob_s, entropy_s = agent_sender.send(sender_input,return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening: the other agent decodes the speaker's message.
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive_2(message, receiver_input, message_lengths)
        # Self listening: the speaker decodes its own message.
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive_2(message, receiver_input, message_lengths)
        # Imitation: the speaker reconstructs the listener's greedy message.
        #candidates_cross=receiver_output_cross.argmax(dim=1)
        #message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input)
        message_to_imitate, _, _ = agent_receiver.send(sender_input,eval=True)
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive_2(message_to_imitate, receiver_input, message_to_imitate_lengths)
        message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(sender_input)
        "2. Losses computation"
        loss_self, rest_self = self.loss_understanding(sender_input,receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input,receiver_output_cross)
        #loss_imitation, rest_imitation = self.loss_message_imitation(message,prob_reconstruction,message_lengths)
        loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate,prob_reconstruction,message_to_imitate_lengths)
        _, rest_und_cross = self.loss_understanding(sender_input,send_output)
        # Gate the imitation loss by the speaker's confidence in its own
        # decoding of the imitated message.
        prob_conf=torch.exp((sender_input*F.log_softmax(send_output,dim=1)).sum(1))
        loss_imitation=loss_imitation*prob_conf
        # Average loss. NOTE: consider pulling loss_imitation out of this sum.
        loss = self.loss_weights["self"]*loss_self + self.loss_weights["cross"]*loss_cross + self.loss_weights["imitation"]*loss_imitation
        loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
        # Reward
        if self.reward_mode=="neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode=="proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        "3. Entropy + length Regularization"
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob = effective_log_prob_s #+ log_prob_r_12
        length_loss = message_lengths.float() * self.optim_params["length_cost"]
        "4. Variance reduction"
        if self.baseline_mode=="original":
            # Running-mean baselines maintained by update_baseline().
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode=="new":
            # Per-batch standardization of the reward.
            policy_loss_self = -((reward_self - reward_self.mean())/(reward_self.std()) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean())/(reward_cross.std()) * log_prob).mean()
            policy_loss_imitation = ((loss_imitation.detach() - loss_imitation.detach().mean()) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
        " 5. Final loss"
        policy_loss = self.loss_weights["self"]*policy_loss_self + self.loss_weights["cross"]*policy_loss_cross + self.loss_weights["imitation"]*policy_loss_imitation
        policy_loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest={}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id,sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id,sender_id)]=rest_imitation['acc_imitation'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)]=policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)]=(policy_loss/log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)]=whole_log_prob_s.detach()
        return optimized_loss, rest
    def update_baseline(self, name, value):
        """Fold ``value``'s batch mean into the running mean stored under ``name``."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceSingleListener(nn.Module):
"""
DialogReinforce implements the Dialog game
"""
def __init__(self,
Agent_1,
Agent_2,
loss_understanding,
loss_imitation,
optim_params,
loss_weights,
device,
baseline_mode="new",
reward_mode="neg_loss"):
"""
optim_params={"length_cost":0.,
"sender_entropy_coeff_1":0.,
"receiver_entropy_coeff_1":0.,
"sender_entropy_coeff_2":0.,
"receiver_entropy_coeff_2":0.}
loss_weights={"self":1.,
"cross":1.,
"imitation":1.,
"length_regularization":0.,
"entropy_regularization":1.}
"""
super(DialogReinforceSingleListener, self).__init__()
self.agent_1 = Agent_1
self.agent_2 = Agent_2
self.optim_params = optim_params
self.loss_understanding = loss_understanding
self.loss_message_imitation = loss_imitation
self.loss_weights = loss_weights
self.baseline_mode=baseline_mode
self.reward_mode=reward_mode
self.mean_baseline = defaultdict(float)
self.n_points = defaultdict(float)
self.device=device
self.agent_1.to(self.device)
self.agent_2.to(self.device)
def forward(self,
sender_input,
unused_labels,
direction,
receiver_input=None):
"""
Inputs:
- direction : "1->2" or "2->1"
"""
sender_input=sender_input.to(self.device)
if direction=="1->2":
agent_sender=self.agent_1
agent_receiver=self.agent_2
sender_id=1
receiver_id=2
self.loss_weights={"self":1.,"cross":0.,"imitation":0.}
else:
agent_sender=self.agent_2
agent_receiver=self.agent_1
sender_id=2
receiver_id=1
self.loss_weights={"self":0.,"cross":1.,"imitation":0.}
" 1. Agent actions "
# Message sending
message, log_prob_s,whole_log_prob_s, entropy_s = agent_sender.send(sender_input,return_policies=True)
message_lengths = find_lengths(message)
# Cross listening
receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive_2(message, receiver_input, message_lengths)
# Self listening
receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive_2(message, receiver_input, message_lengths)
# Imitation
#candidates_cross=receiver_output_cross.argmax(dim=1)
#message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input)
message_to_imitate, _, _ = agent_receiver.send(sender_input,eval=True)
message_to_imitate_lengths = find_lengths(message_to_imitate)
send_output, _, _ = agent_sender.receive_2(message_to_imitate, receiver_input, message_to_imitate_lengths)
message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(sender_input)
"2. Losses computation"
loss_self, rest_self = self.loss_understanding(sender_input,receiver_output_self)
loss_cross, rest_cross = self.loss_understanding(sender_input,receiver_output_cross)
#loss_imitation, rest_imitation = self.loss_message_imitation(message,prob_reconstruction,message_lengths)
loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate,prob_reconstruction,message_to_imitate_lengths)
_, rest_und_cross = self.loss_understanding(sender_input,send_output)
prob_conf=torch.exp((sender_input*F.log_softmax(send_output,dim=1)).sum(1))
loss_imitation=loss_imitation*prob_conf
# Average loss. Rk. Sortir loss_imitation de cette somme
loss = self.loss_weights["self"]*loss_self + self.loss_weights["cross"]*loss_cross + self.loss_weights["imitation"]*loss_imitation
loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
# Reward
if self.reward_mode=="neg_loss":
reward_self = -loss_self.detach()
reward_cross = -loss_cross.detach()
elif self.reward_mode=="proba":
reward_self = torch.exp(-loss_self.detach())
reward_cross = torch.exp(-loss_cross.detach())
"3. Entropy + length Regularization"
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s = torch.zeros_like(entropy_r_self)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s = torch.zeros_like(log_prob_r_self)
for i in range(message.size(1)):
not_eosed = (i < message_lengths).float()
effective_entropy_s += entropy_s[:, i] * not_eosed
effective_log_prob_s += log_prob_s[:, i] * not_eosed
effective_entropy_s = effective_entropy_s / message_lengths.float()
weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
log_prob = effective_log_prob_s #+ log_prob_r_12
length_loss = message_lengths.float() * self.optim_params["length_cost"]
"4. Variance reduction"
if self.baseline_mode=="original":
policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
elif self.baseline_mode=="new":
policy_loss_self = -((reward_self - reward_self.mean())/(reward_self.std()) * log_prob).mean()
policy_loss_cross = -((reward_cross - reward_cross.mean())/(reward_cross.std()) * log_prob).mean()
policy_loss_imitation = ((loss_imitation.detach() - loss_imitation.detach().mean()) * log_prob).mean()
policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
" 5. Final loss"
policy_loss = self.loss_weights["self"]*policy_loss_self + self.loss_weights["cross"]*policy_loss_cross + self.loss_weights["imitation"]*policy_loss_imitation
policy_loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
optimized_loss = policy_length_loss + policy_loss - weighted_entropy
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss += loss.mean()
if self.training:
self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
self.update_baseline('length_{}'.format(sender_id), length_loss)
"6. Store results"
rest={}
rest['loss'] = optimized_loss.detach().item()
rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = loss_cross.mean().item()
rest['loss_imitation_{}{}'.format(receiver_id,sender_id)] = loss_imitation.mean().item()
rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=rest_cross['acc'].mean().item()
rest['acc_imitation_{}{}'.format(receiver_id,sender_id)]=rest_imitation['acc_imitation'].mean().item()
rest['reinforce_term_{}'.format(sender_id)]=policy_loss.detach().item()
rest['baseline_term_{}'.format(sender_id)]=(policy_loss/log_prob.mean()).detach().item()
rest['policy_{}'.format(sender_id)]=whole_log_prob_s.detach()
return optimized_loss, rest
def update_baseline(self, name, value):
    """Fold *value* into the running-mean baseline stored under *name*.

    Maintains an incremental average: with n observations the mean is
    updated as mean += (x - mean) / n, numerically equivalent to
    recomputing the arithmetic mean from scratch.
    """
    count = self.n_points[name] + 1
    self.n_points[name] = count
    sample = value.detach().mean().item()
    previous = self.mean_baseline[name]
    self.mean_baseline[name] = previous + (sample - previous) / count
class DialogReinforceCompositionality(nn.Module):
    """
    Dialog game between two agents over compositional (attribute/value) inputs.

    Each forward pass plays one direction of the dialog: the sender encodes
    the input into a message, the message is decoded both by the sender
    itself ("self" listening) and by the other agent ("cross" listening),
    and the sender is trained with REINFORCE on the weighted understanding
    losses plus entropy and message-length regularization.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing ``send`` and ``receive``.
            n_attributes: number of attributes in the compositional input.
            n_values: number of values per attribute.
            loss_understanding: callable(sender_input, receiver_output,
                n_attributes, n_values) -> (loss, rest_dict).
            optim_params: e.g. {"length_cost": 0.,
                                "sender_entropy_coeff_1": 0.,
                                "receiver_entropy_coeff_1": 0.,
                                "sender_entropy_coeff_2": 0.,
                                "receiver_entropy_coeff_2": 0.}
            loss_weights: e.g. {"self": 1., "cross": 1., "imitation": 1.,
                                "length_regularization": 0.,
                                "entropy_regularization": 1.}
            device: torch device agents and inputs are moved to.
            baseline_mode: "original" (running-mean baseline) or "new"
                (per-batch reward standardization).
            reward_mode: "neg_loss", "proba" or "dense".
        """
        super(DialogReinforceCompositionality, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running-mean baselines, only read when baseline_mode == "original".
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the dialog.

        Inputs:
            - direction : "1->2" or "2->1"
        Returns:
            (optimized_loss, rest): the scalar loss to backprop and a dict
            of detached diagnostics keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1

        # 1. Agent actions
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)

        # 2. Losses computation (no imitation term in this variant)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross, self.n_attributes, self.n_values)
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self, self.n_attributes, self.n_values)
        # Weighted average of the differentiable understanding losses.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])

        # Reward used by REINFORCE (always detached from the graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        elif self.reward_mode == "dense":
            # Reward 1 only when every attribute is reconstructed correctly.
            reward_self = 1. * (rest_self["acc"].sum(1) == self.n_attributes).detach()
            reward_cross = 1. * (rest_cross["acc"].sum(1) == self.n_attributes).detach()
        else:
            # Previously an unknown mode surfaced later as a NameError.
            raise ValueError("Unknown reward_mode: {}".format(self.reward_mode))

        # 3. Entropy + length regularization.
        # Entropy/log-prob of the sender's choices up to and including the
        # eos symbol - symbols after eos do not matter.
        effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
        effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # 4. Variance reduction
        if self.baseline_mode == "original":
            # Running-mean baselines accumulated across batches.
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Per-batch reward standardization. eps guards against a zero std
            # (constant reward across the batch) which previously produced
            # NaNs; same fix as DialogReinforceCompositionalityMultiAgent.
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
        else:
            raise ValueError("Unknown baseline_mode: {}".format(self.baseline_mode))

        # 5. Final loss
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, the actual loss
        # also flows through the graph.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # 6. Store results (all detached diagnostics)
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceCompositionalitySingleListener(nn.Module):
    """
    Single-listener variant of the compositional dialog game.

    Identical to DialogReinforceCompositionality, except that ``forward``
    overrides ``self.loss_weights`` per direction so that only agent_1's
    listening pathway contributes to the loss: "self" listening when
    agent_1 speaks (direction "1->2"), "cross" listening when agent_1
    listens (direction "2->1").
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing ``send`` and ``receive``.
            n_attributes: number of attributes in the compositional input.
            n_values: number of values per attribute.
            loss_understanding: callable(sender_input, receiver_output,
                n_attributes, n_values) -> (loss, rest_dict).
            optim_params: e.g. {"length_cost": 0.,
                                "sender_entropy_coeff_1": 0.,
                                "receiver_entropy_coeff_1": 0.,
                                "sender_entropy_coeff_2": 0.,
                                "receiver_entropy_coeff_2": 0.}
            loss_weights: initial weights; NOTE they are overwritten inside
                ``forward`` depending on the direction played.
            device: torch device agents and inputs are moved to.
            baseline_mode: "original" (running-mean baseline) or "new"
                (per-batch reward standardization).
            reward_mode: "neg_loss", "proba" or "dense".
        """
        super(DialogReinforceCompositionalitySingleListener, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode = baseline_mode
        self.reward_mode = reward_mode
        # Running-mean baselines, only read when baseline_mode == "original".
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        self.agent_1.to(self.device)
        self.agent_2.to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the dialog.

        Inputs:
            - direction : "1->2" or "2->1"
        Returns:
            (optimized_loss, rest): the scalar loss to backprop and a dict
            of detached diagnostics keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
            # Only agent_1's (the sender's own) listening loss counts.
            self.loss_weights = {"self": 1., "cross": 0., "imitation": 1.}
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1
            # Only agent_1's (the cross listener's) loss counts.
            self.loss_weights = {"self": 0., "cross": 1., "imitation": 1.}

        # 1. Agent actions
        # Message sending
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)

        # 2. Losses computation (no imitation term in this variant)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross, self.n_attributes, self.n_values)
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self, self.n_attributes, self.n_values)
        # Weighted average; one of the two weights is always zero here.
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])

        # Reward used by REINFORCE (always detached from the graph).
        if self.reward_mode == "neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode == "proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        elif self.reward_mode == "dense":
            # Reward 1 only when every attribute is reconstructed correctly.
            reward_self = 1. * (rest_self["acc"].sum(1) == self.n_attributes).detach()
            reward_cross = 1. * (rest_cross["acc"].sum(1) == self.n_attributes).detach()
        else:
            # Previously an unknown mode surfaced later as a NameError.
            raise ValueError("Unknown reward_mode: {}".format(self.reward_mode))

        # 3. Entropy + length regularization.
        # Entropy/log-prob of the sender's choices up to and including the
        # eos symbol - symbols after eos do not matter.
        effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
        effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # 4. Variance reduction
        if self.baseline_mode == "original":
            # Running-mean baselines accumulated across batches.
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode == "new":
            # Per-batch reward standardization. eps guards against a zero std
            # (constant reward across the batch); this matters doubly here
            # because a zero loss weight times a NaN policy term is still NaN.
            # Same fix as DialogReinforceCompositionalityMultiAgent.
            eps = 1e-16
            policy_loss_self = -((reward_self - reward_self.mean()) / (reward_self.std() + eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean()) / (reward_cross.std() + eps) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
        else:
            raise ValueError("Unknown baseline_mode: {}".format(self.baseline_mode))

        # 5. Final loss
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, the actual loss
        # also flows through the graph.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # 6. Store results (all detached diagnostics)
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)] = policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)] = (policy_loss / log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)] = whole_log_prob_s.detach()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceCompositionalityMultiAgent(nn.Module):
    """
    DialogReinforce implements the Dialog game

    Population variant: ``Agents`` maps "agent_i" -> agent module. One
    sender broadcasts a message; every agent listed in ``receiver_ids``
    decodes it, and the sender is trained with REINFORCE on its own
    self-listening loss plus the cross-listening loss averaged over all
    receivers.
    """
    def __init__(self,
                 Agents,
                 n_attributes,
                 n_values,
                 loss_understanding,
                 optim_params,
                 loss_weights,
                 device,
                 baseline_mode="new",
                 reward_mode="neg_loss"):
        """
        optim_params={"length_cost":0.,
                      "sender_entropy_coeff_1":0.,
                      "receiver_entropy_coeff_1":0.,
                      "sender_entropy_coeff_2":0.,
                      "receiver_entropy_coeff_2":0.}
        loss_weights={"self":1.,
                      "cross":1.,
                      "imitation":1.,
                      "length_regularization":0.,
                      "entropy_regularization":1.}

        NOTE(review): forward() indexes optim_params/loss_weights first by
        "agent_{id}" and then by "sender_entropy_coeff"/"length_cost" and
        "self"/"cross", so the example above reflects the two-agent
        variants rather than the per-agent structure actually read here —
        confirm against the caller.
        """
        super(DialogReinforceCompositionalityMultiAgent, self).__init__()
        self.agents = Agents
        self.n_attributes=n_attributes
        self.n_values=n_values
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_weights = loss_weights
        self.baseline_mode=baseline_mode
        self.reward_mode=reward_mode
        # Running-mean baselines, only read when baseline_mode=="original".
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device=device
        for agent in self.agents:
            self.agents[agent].to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                sender_id,
                receiver_ids,
                receiver_input=None,
                save_probs=None):
        """
        Inputs:
        - direction : N means "N->0"

        Plays one round: agent ``sender_id`` speaks and every agent in
        ``receiver_ids`` listens. Returns (optimized_loss, rest) where
        ``rest`` holds detached diagnostics. If ``save_probs`` is a path
        prefix, inputs, messages and policies are dumped as .npy files.
        """
        sender_input=sender_input.to(self.device)
        "0. Get sender and receiver (id + optim info) for playing the game"
        # Get sender_id and sender information
        agent_sender = self.agents["agent_{}".format(sender_id)]
        loss_weights_sender = self.loss_weights["agent_{}".format(sender_id)]
        optim_params_sender = self.optim_params["agent_{}".format(sender_id)]
        # Get receiver information (receiver_id always 0)
        agent_receivers={"agent_{}".format(receiver_id):self.agents["agent_{}".format(receiver_id)] for receiver_id in receiver_ids}
        " 1. Agent actions and loss"
        # Message sending
        message, log_prob_s,whole_log_prob_s, entropy_s = agent_sender.send(sender_input,return_policies=True)
        message_lengths = find_lengths(message)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self,self.n_attributes,self.n_values)
        # Cross listening
        losses_cross={}
        restes_cross = {}
        if self.reward_mode=="dense":
            samples = {}
        for agent in agent_receivers:
            if self.reward_mode=="dense":
                # In dense mode the receiver also returns a discrete sample of
                # its reconstruction, used below for the exact-match reward.
                sample, receiver_output_cross, log_prob_r_cross,whole_log_prob_r_cross, entropy_r_cross = agent_receivers[agent].receive(message, receiver_input, message_lengths,return_sample=True)
                samples[agent] = sample
            else:
                receiver_output_cross, log_prob_r_cross,whole_log_prob_r_cross, entropy_r_cross = agent_receivers[agent].receive(message, receiver_input, message_lengths,return_policies=True)
            loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross,self.n_attributes,self.n_values)
            losses_cross[agent] = loss_cross
            restes_cross[agent] = rest_cross
            if save_probs:
                np.save(save_probs+"_receiver_probs_"+agent+".npy",whole_log_prob_r_cross.cpu().numpy())
        # Imitation
        # NO IMITATION
        "2. Reward computation"
        # Average the per-receiver losses into a single cross loss.
        loss_cross= torch.stack([losses_cross[agent] for agent in losses_cross]).mean(0)# MEAN ACROSS AXIS
        # Average loss. Rk. Sortir loss_imitation de cette somme
        loss = loss_weights_sender["self"]*loss_self + loss_weights_sender["cross"]*loss_cross
        loss /= (loss_weights_sender["self"]+loss_weights_sender["cross"])
        # Reward (always detached from the graph)
        if self.reward_mode=="neg_loss":
            reward_self = -loss_self.detach()
            reward_cross = -loss_cross.detach()
        elif self.reward_mode=="proba":
            reward_self = torch.exp(-loss_self.detach())
            reward_cross = torch.exp(-loss_cross.detach())
        elif self.reward_mode=="dense":
            # Reward 1 only when every attribute is reconstructed correctly.
            reward_self = 1.*(rest_self["acc"].sum(1)==self.n_attributes).detach()
            reward_cross=[]
            #for agent in agent_receivers:
                #reward_cross.append(1.*(restes_cross[agent]["acc"].sum(1)==self.n_attributes).detach())
            #reward_cross=torch.stack(reward_cross)
            #reward_cross=reward_cross.mean(0)
            for agent in agent_receivers:
                # NOTE(review): the reshape uses ``sample`` — the variable
                # leaked from the last iteration of the receive loop above —
                # rather than ``samples[agent]``; this is only correct if all
                # receivers' samples share the same shape. Confirm intent.
                acc = 1*(samples[agent] == sender_input.reshape([sample.size(0),sample.size(1),sender_input.size(1)//sample.size(1)]).argmax(2)).float().mean(1).detach()
                acc = 1*(acc==1).float()
                reward_cross.append(acc)
            reward_cross=torch.stack(reward_cross)
            reward_cross=reward_cross.mean(0)
        elif self.reward_mode=="discrete":
            reward_self = rest_self["acc"].sum(1).detach()
            # NOTE(review): ``rest_cross`` here is whatever the *last*
            # receiver in the loop produced, not an aggregate over all
            # receivers — confirm this is intended.
            reward_cross = rest_cross["acc"].sum(1).detach()
        "3. Entropy + length Regularization"
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r_self.mean(1))
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r_self.mean(1))
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * optim_params_sender["sender_entropy_coeff"] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob = effective_log_prob_s #+ log_prob_r_12
        length_loss = message_lengths.float() * optim_params_sender["length_cost"]
        "4. Variance reduction"
        if self.baseline_mode=="original":
            # Running-mean baselines accumulated across batches.
            policy_loss_self = -((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
            policy_loss_cross = -((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        elif self.baseline_mode=="new":
            # Per-batch reward standardization; eps guards the division when
            # the reward is constant across the batch (std == 0).
            eps=1e-16
            policy_loss_self = -((reward_self - reward_self.mean())/(reward_self.std()+eps) * log_prob).mean()
            policy_loss_cross = -((reward_cross - reward_cross.mean())/(reward_cross.std()+eps) * log_prob).mean()
            policy_length_loss = ((length_loss.float() - length_loss.float().mean()) * effective_log_prob_s).mean()
        " 5. Final loss"
        policy_loss = loss_weights_sender["self"]*policy_loss_self + loss_weights_sender["cross"]*policy_loss_cross
        policy_loss /= (loss_weights_sender["self"]+loss_weights_sender["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # REINFORCE part only, logged separately below as 'loss_speaker'.
        speaker_loss = optimized_loss.detach().item()
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest={}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_speaker'] = speaker_loss
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
        for receiver_id in receiver_ids:
            rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = losses_cross["agent_{}".format(receiver_id)].mean().item()
        rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
        for receiver_id in receiver_ids:
            rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=restes_cross["agent_{}".format(receiver_id)]['acc'].mean().item()
        rest['reinforce_term_{}'.format(sender_id)]=policy_loss.detach().item()
        rest['baseline_term_{}'.format(sender_id)]=(policy_loss/log_prob.mean()).detach().item()
        rest['policy_{}'.format(sender_id)]=whole_log_prob_s.detach()
        "7. Save probs"
        if save_probs:
            np.save(save_probs+"_sender_input.npy",sender_input.cpu().numpy())
            np.save(save_probs+"_message.npy",message.cpu().numpy())
            np.save(save_probs+"_sender_probs.npy",whole_log_prob_s.cpu().numpy())
        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean of the (detached) value under key ``name``.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class ForwardPassSpeakerMultiAgent(nn.Module):
    """
    Imitation-only forward pass for a population of speakers.

    The selected sender produces a message for the input and is scored on
    how well its symbol policy imitates a fixed target message
    (``message_to_imitate``). No listener is involved and no REINFORCE
    terms are computed; the imitation loss is returned directly.
    """

    def __init__(self,
                 Agents,
                 n_attributes,
                 n_values,
                 loss_imitation,
                 optim_params,
                 message_to_imitate,
                 device="cpu"):
        """
        Args:
            Agents: dict mapping "agent_i" -> agent module.
            n_attributes, n_values: shape of the compositional input space.
            loss_imitation: callable(target_message, sender_policies,
                target_lengths) -> (loss, rest_dict).
            optim_params: per-agent dict, indexed by "agent_{id}" in forward.
            message_to_imitate: tensor of target messages.
            device: torch device (new keyword with a safe default; the
                original referenced an undefined ``device`` name and raised
                NameError on construction).
        """
        super(ForwardPassSpeakerMultiAgent, self).__init__()
        self.agents = Agents
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.optim_params = optim_params
        self.loss_imitation = loss_imitation
        # Bug fix: the original also assigned self.loss_weights,
        # self.baseline_mode and self.reward_mode from undefined names
        # (NameError); none of them is used by this class, so they are
        # dropped rather than guessed at.
        self.message_to_imitate = message_to_imitate
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device
        for agent in self.agents:
            self.agents[agent].to(self.device)

    def forward(self,
                sender_input,
                unused_labels,
                sender_id,
                receiver_ids,
                receiver_input=None,
                save_probs=None):
        """
        Run one imitation pass for agent ``sender_id``.

        Inputs:
            - sender_id : id of the speaking agent ("N" in "N->0").
            - receiver_ids : accepted for interface parity; unused here.
        Returns:
            (loss_imitation, rest): the imitation loss to backprop and a
            dict of detached diagnostics.
        """
        sender_input = sender_input.to(self.device)

        # 0. Select the sender for this round.
        agent_sender = self.agents["agent_{}".format(sender_id)]
        optim_params_sender = self.optim_params["agent_{}".format(sender_id)]

        # 1. Message sending + imitation loss.
        message, log_prob_s, whole_log_prob_s, entropy_s = agent_sender.send(sender_input, return_policies=True)
        message_lengths = find_lengths(message)
        message_to_imitate_lengths = find_lengths(self.message_to_imitate)
        # Bug fix: the original passed ``self.message_to_imitate_lengths``,
        # an attribute that is never set (AttributeError), instead of the
        # local variable computed just above.
        loss_imitation, rest_imitation = self.loss_imitation(self.message_to_imitate, whole_log_prob_s, message_to_imitate_lengths)

        # 2. Store results (detached diagnostics).
        rest = {}
        rest['loss'] = loss_imitation.detach().item()
        rest['loss_{}'.format(sender_id)] = loss_imitation.detach().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()

        # 3. Save probs (optional .npy dumps under the given path prefix).
        if save_probs:
            np.save(save_probs+"_sender_input.npy", sender_input.cpu().numpy())
            np.save(save_probs+"_message.npy", message.cpu().numpy())
            np.save(save_probs+"_sender_probs.npy", whole_log_prob_s.cpu().numpy())

        # Bug fix: the original returned the undefined name ``optimized_loss``.
        return loss_imitation, rest

    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceMemory(nn.Module):
    """
    Dialog game with a memory-based estimate of the partner's policy.

    On top of the standard self/cross listening losses, the sender stores
    dense (one-hot) versions of the partner's greedy messages in a
    per-meaning memory (``agent_sender.mem`` / ``w_mem``) and refreshes a
    confidence-weighted policy estimate (``agent_sender.est_policy``).
    The imitation loss itself is currently a zero placeholder.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 vocab_size,
                 max_len,
                 n_features,
                 device):
        """
        Args:
            Agent_1, Agent_2: dialog agents exposing ``send``/``receive``
                and the memory attributes ``mem``, ``w_mem``, ``est_policy``.
            loss_understanding: callable(sender_input, receiver_output)
                -> (loss, rest_dict).
            loss_imitation: message-imitation loss (stored; the imitation
                term below is currently a zero placeholder).
            optim_params: e.g. {"length_cost": 0.,
                                "sender_entropy_coeff_1": 0.,
                                "receiver_entropy_coeff_1": 0.,
                                "sender_entropy_coeff_2": 0.,
                                "receiver_entropy_coeff_2": 0.}
            loss_weights: e.g. {"self": 1., "cross": 1., "imitation": 1.}
            vocab_size, max_len: message geometry used to densify messages.
            n_features: input dimensionality (stored for reference).
            device: torch device for inputs and intermediate tensors.
        """
        super(DialogReinforceMemory, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        # Running-mean baselines used for variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.max_len = max_len
        self.vocab_size = vocab_size
        self.n_features = n_features
        self.device = device

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Play one round of the dialog.

        Inputs:
            - direction : "1->2" or "2->1"
        Returns:
            (optimized_loss, rest): the scalar loss to backprop and a dict
            of detached diagnostics keyed by sender/receiver ids.
        """
        sender_input = sender_input.to(self.device)

        if direction == "1->2":
            agent_sender = self.agent_1
            agent_receiver = self.agent_2
            sender_id = 1
            receiver_id = 2
        else:
            agent_sender = self.agent_2
            agent_receiver = self.agent_1
            sender_id = 2
            receiver_id = 1

        # 1. Agent actions
        # Message sending
        message, log_prob_s, entropy_s = agent_sender.send(sender_input)
        message_lengths = find_lengths(message)
        # Cross listening
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)

        # Memory update: densify the partner's greedy messages and store
        # them under the meaning the sender decodes from them.
        message_to_imitate, _, _ = agent_sender_partner_send = agent_receiver.send(sender_input, eval=True)
        message_to_imitate_lengths = find_lengths(message_to_imitate)
        send_output, _, _ = agent_sender.receive(message_to_imitate, receiver_input, message_to_imitate_lengths)
        i_hat = send_output.argmax(1).cpu().numpy()
        policy_prob = torch.exp(send_output.max(1).values)
        for j in range(send_output.size(0)):
            m = message_to_imitate[j]
            # One-hot (max_len, vocab_size) version of the message.
            # Fix: allocate on self.device instead of the hardcoded "cuda".
            m_dense = torch.zeros([self.max_len, self.vocab_size], device=self.device)
            for i in range(len(m)):
                m_dense[i, m[i]] = 1.
            agent_sender.mem[i_hat[j]].append(m_dense)
            # NOTE(review): policy_prob is already exp(log-prob); the extra
            # torch.exp double-exponentiates the weight — kept as in the
            # original, confirm intent.
            agent_sender.w_mem[i_hat[j]].append(torch.exp(policy_prob[j]))
        # Refresh the weighted-average policy estimate per meaning.
        for i in agent_sender.mem:
            if len(agent_sender.mem[i]) > 0:
                agent_sender.est_policy[i] = (torch.stack(agent_sender.mem[i]) * torch.stack(agent_sender.w_mem[i]).unsqueeze(1).unsqueeze(2)).sum(0)
                agent_sender.est_policy[i] /= torch.stack(agent_sender.w_mem[i]).sum(0)
        # NOTE(review): policy_receiver is computed but never used below —
        # kept to preserve the exact same error behavior on missing keys.
        policy_receiver = []
        for i in range(sender_input.size(0)):
            policy_receiver.append(agent_sender.est_policy[int(sender_input.argmax(1)[i].cpu().numpy())])
        policy_receiver = torch.stack(policy_receiver)

        # 2. Losses computation
        loss_self, rest_self = self.loss_understanding(sender_input, receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input, receiver_output_cross)
        # Placeholder imitation loss. Fix: sized to the actual batch on the
        # configured device (was a hardcoded 1024-element CUDA tensor).
        loss_imitation = torch.zeros(sender_input.size(0), device=self.device)
        rest_imitation = {"acc_imitation": torch.tensor([0.])}
        # Weighted average of the understanding losses (imitation excluded).
        loss = self.loss_weights["self"] * loss_self + self.loss_weights["cross"] * loss_cross
        loss /= (self.loss_weights["self"] + self.loss_weights["cross"])

        # 3. Entropy + length regularization: only the sender's symbols up
        # to and including eos count.
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)]
        log_prob = effective_log_prob_s
        length_loss = message_lengths.float() * self.optim_params["length_cost"]

        # 4. Variance reduction through running-mean baselines.
        policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()

        # 5. Final loss
        policy_loss = self.loss_weights["self"] * policy_loss_self + self.loss_weights["cross"] * policy_loss_cross
        policy_loss /= (self.loss_weights["self"] + self.loss_weights["cross"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # If the receiver is deterministic/differentiable, the actual loss
        # also flows through the graph.
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)

        # 6. Store results (all detached diagnostics)
        rest = {}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id, sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id, receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id, sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id, sender_id)] = rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id, receiver_id)] = rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id, sender_id)] = rest_imitation['acc_imitation'].mean().item()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]

    def to_dense(self, m):
        """Return the (max_len, vocab_size) one-hot encoding of message *m*.

        Fix: the original built ``m_dense`` but fell off the end without
        returning it.
        """
        m_dense = torch.zeros([self.max_len, self.vocab_size])
        for i in range(len(m)):
            m_dense[i, m[i]] = 1.
        return m_dense
class DialogReinforceBis(nn.Module):
"""
DialogReinforce implements the Dialog game
"""
def __init__(self,
Agent_1,
Agent_2,
loss_understanding,
loss_imitation,
optim_params,
loss_weights,
n_features,
max_len,
batch_size,
device):
"""
optim_params={"length_cost":0.,
"sender_entropy_coeff_1":0.,
"receiver_entropy_coeff_1":0.,
"sender_entropy_coeff_2":0.,
"receiver_entropy_coeff_2":0.}
loss_weights={"self":1.,
"cross":1.,
"imitation":1.,
"length_regularization":0.,
"entropy_regularization":1.}
"""
super(DialogReinforceBis, self).__init__()
self.agent_1 = Agent_1
self.agent_2 = Agent_2
self.optim_params = optim_params
self.loss_understanding = loss_understanding
self.loss_message_imitation = loss_imitation
self.loss_weights = loss_weights
self.mean_baseline = defaultdict(float)
self.n_points = defaultdict(float)
self.device=device
self.batch_size=batch_size
self.last_messages_train=torch.zeros([batch_size,max_len],dtype=int).to("cuda")
self.last_messages_eval=torch.zeros([n_features,max_len],dtype=int).to("cuda")
self.last_input_train=torch.zeros([batch_size,n_features]).to("cuda")
self.last_input_eval=torch.zeros([n_features,n_features]).to("cuda")
def forward(self,
            sender_input,
            unused_labels,
            direction,
            receiver_input=None):
    """
    Run one dialog round in the given direction and return the REINFORCE loss.

    Parameters
    ----------
    sender_input : tensor
        Batch of inputs for the speaking agent; moved to ``self.device``.
    unused_labels :
        Ignored; kept for interface compatibility with the trainer.
    direction : str
        "1->2" or "2->1"; selects which agent speaks and which listens.
    receiver_input :
        Optional extra input forwarded to the receiver calls (may be None).

    Returns
    -------
    (optimized_loss, rest)
        ``optimized_loss`` is the scalar loss to backprop; ``rest`` is a dict
        of scalar diagnostics keyed by sender/receiver ids.
    """
    sender_input=sender_input.to(self.device)
    # Select speaker / listener roles from the requested direction.
    if direction=="1->2":
        agent_sender=self.agent_1
        agent_receiver=self.agent_2
        sender_id=1
        receiver_id=2
    else:
        agent_sender=self.agent_2
        agent_receiver=self.agent_1
        sender_id=2
        receiver_id=1
    " 1. Agent actions "
    # Message sending
    message, log_prob_s, entropy_s = agent_sender.send(sender_input)
    # find_lengths is defined elsewhere in this file; presumably returns the
    # per-sequence length up to and including the EOS symbol — confirm there.
    message_lengths = find_lengths(message)
    # Cross listening: the OTHER agent decodes the sender's message.
    receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
    # Self listening: the sender decodes its OWN message.
    receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
    # Imitation: replay the messages cached from the PREVIOUS call.  The cache
    # is split train/eval, discriminated purely by batch size — a batch whose
    # size happens to equal self.batch_size at eval time would be mis-routed.
    if sender_input.size(0)==self.batch_size:
        message_to_imitate=self.last_messages_train
        last_input=self.last_input_train
    else:
        message_to_imitate=self.last_messages_eval
        last_input=self.last_input_eval
    message_to_imitate_lengths = find_lengths(message_to_imitate)
    # Sender listens to its previous message batch.
    send_output, _, _ = agent_sender.receive(message_to_imitate, receiver_input, message_to_imitate_lengths)
    # Build one-hot vectors of the listener's argmax predictions
    # (hard-codes 100 classes and the "cuda" device — NOTE(review): should
    # presumably use self.device; confirm).
    one_hots=torch.eye(100)
    inp_to_imitate=[]
    for i in range(send_output.size(0)):
        inp_to_imitate.append(one_hots[send_output.argmax(1)[i]])
    inp_to_imitate=torch.stack(inp_to_imitate).to("cuda")
    # NOTE(review): inp_to_imitate is built above but never used — imitate()
    # is fed last_input instead; confirm this is intended.
    message_reconstruction, prob_reconstruction, _ = agent_sender.imitate(last_input)
    "2. Losses computation"
    loss_self, rest_self = self.loss_understanding(sender_input,receiver_output_self)
    loss_cross, rest_cross = self.loss_understanding(sender_input,receiver_output_cross)
    loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate,prob_reconstruction,message_to_imitate_lengths)
    _, rest_und_cross = self.loss_understanding(last_input,send_output)
    # Gate the imitation loss by how well the cached message is understood.
    loss_imitation=loss_imitation*rest_und_cross["acc"]
    # Confidence weight: probability the sender's own listener assigns to the
    # cached input (assumes last_input is one-hot — TODO confirm).
    prob_conf=torch.exp((last_input*F.log_softmax(send_output,dim=1)).sum(1))
    #loss_imitation=loss_imitation-(sender_input*F.log_softmax(send_output,dim=1)).sum(1)
    loss_imitation*=prob_conf
    # Refresh the message/input caches for the next round (same batch-size
    # heuristic as above).
    if sender_input.size(0)==self.batch_size:
        self.last_messages_train=message
        self.last_input_train=sender_input
    else:
        self.last_messages_eval=message
        self.last_input_eval=sender_input
    #print(torch.exp((sender_input*F.log_softmax(send_output,dim=1)).sum(1)))
    # Weighted average of the three losses.  NOTE: consider moving
    # loss_imitation out of this sum (original author remark).
    loss = self.loss_weights["self"]*loss_self + self.loss_weights["cross"]*loss_cross + self.loss_weights["imitation"]*loss_imitation
    loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
    "3. Entropy + length Regularization"
    # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
    effective_entropy_s = torch.zeros_like(entropy_r_self)
    # the log prob of the choices made by S before and including the eos symbol - again, we don't
    # care about the rest
    effective_log_prob_s = torch.zeros_like(log_prob_r_self)
    for i in range(message.size(1)):
        not_eosed = (i < message_lengths).float()
        effective_entropy_s += entropy_s[:, i] * not_eosed
        effective_log_prob_s += log_prob_s[:, i] * not_eosed
    effective_entropy_s = effective_entropy_s / message_lengths.float()
    weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
    log_prob = effective_log_prob_s #+ log_prob_r_12
    length_loss = message_lengths.float() * self.optim_params["length_cost"]
    "4. Variance reduction"
    # REINFORCE with a running-mean baseline per loss term (see update_baseline).
    policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
    policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
    policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
    policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
    " 5. Final loss"
    policy_loss = self.loss_weights["self"]*policy_loss_self + self.loss_weights["cross"]*policy_loss_cross + self.loss_weights["imitation"]*policy_loss_imitation
    policy_loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
    optimized_loss = policy_length_loss + policy_loss - weighted_entropy
    # if the receiver is deterministic/differentiable, we apply the actual loss
    optimized_loss += loss.mean()
    # Baselines are only updated in training mode.
    if self.training:
        self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
        self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
        self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
        self.update_baseline('length_{}'.format(sender_id), length_loss)
    "6. Store results"
    rest={}
    rest['loss'] = optimized_loss.detach().item()
    rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
    rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
    rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
    rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
    rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = loss_cross.mean().item()
    rest['loss_imitation_{}{}'.format(receiver_id,sender_id)] = loss_imitation.mean().item()
    rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
    rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=rest_cross['acc'].mean().item()
    rest['acc_imitation_{}{}'.format(receiver_id,sender_id)]=rest_imitation['acc_imitation'].mean().item()
    return optimized_loss, rest
def update_baseline(self, name, value):
    """Fold one sample of *value* into the running-mean baseline for *name*.

    Incremental mean update: m_k = m_{k-1} + (x_k - m_{k-1}) / k, where x_k
    is the (detached) scalar mean of *value*.
    """
    count = self.n_points[name] + 1
    self.n_points[name] = count
    sample = value.detach().mean().item()
    self.mean_baseline[name] += (sample - self.mean_baseline[name]) / count
class DialogReinforceKL(nn.Module):
    """
    DialogReinforce implements the Dialog game with a KL imitation term:
    besides the self/cross understanding losses, the sender's per-symbol
    output distribution is pulled toward the other agent's distribution
    (weighted by a confidence factor), and everything is optimized with
    REINFORCE plus running-mean baselines.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss_understanding,
                 loss_imitation,
                 optim_params,
                 loss_weights,
                 device):
        """
        Parameters
        ----------
        Agent_1, Agent_2 :
            Dialog agents exposing ``send`` / ``receive`` (project types).
        loss_understanding : callable
            ``(sender_input, receiver_output) -> (loss, rest_dict)``.
        loss_imitation : callable
            Message-imitation loss; stored for interface compatibility
            (the KL term in ``forward`` is computed directly).
        optim_params : dict, e.g.
            {"length_cost":0.,
             "sender_entropy_coeff_1":0.,
             "receiver_entropy_coeff_1":0.,
             "sender_entropy_coeff_2":0.,
             "receiver_entropy_coeff_2":0.}
        loss_weights : dict, e.g.
            {"self":1., "cross":1., "imitation":1.,
             "length_regularization":0., "entropy_regularization":1.}
        device :
            Torch device inputs are moved to in ``forward``.
        """
        super(DialogReinforceKL, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.optim_params = optim_params
        self.loss_understanding = loss_understanding
        self.loss_message_imitation = loss_imitation
        self.loss_weights = loss_weights
        # Per-term running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.device = device

    def forward(self,
                sender_input,
                unused_labels,
                direction,
                receiver_input=None):
        """
        Run one dialog round in the given direction.

        Inputs:
        - direction : "1->2" or "2->1"

        Returns ``(optimized_loss, rest)`` where ``rest`` is a dict of
        scalar diagnostics.
        """
        sender_input=sender_input.to(self.device)
        # Select speaker / listener roles.
        if direction=="1->2":
            agent_sender=self.agent_1
            agent_receiver=self.agent_2
            sender_id=1
            receiver_id=2
        else:
            agent_sender=self.agent_2
            agent_receiver=self.agent_1
            sender_id=2
            receiver_id=1
        " 1. Agent actions "
        # Message sending (whole_log_prob_s: full per-symbol log distribution).
        message, log_prob_s,whole_log_prob_s, entropy_s = agent_sender.send(sender_input)
        message_lengths = find_lengths(message)
        # Cross listening: the other agent decodes the message.
        receiver_output_cross, log_prob_r_cross, entropy_r_cross = agent_receiver.receive(message, receiver_input, message_lengths)
        # Self listening: the sender decodes its own message.
        receiver_output_self, log_prob_r_self, entropy_r_self = agent_sender.receive(message, receiver_input, message_lengths)
        # Imitation target: the OTHER agent's message distribution on the same
        # input (eval mode so no exploration noise is injected).
        message_other, other_log_prob_s,other_whole_log_prob_s, _ = agent_receiver.send(sender_input,eval=True)
        message_other_lengths = find_lengths(message_other)
        send_output, _, _ = agent_sender.receive(message_other, receiver_input, message_other_lengths)
        "2. Losses computation"
        loss_self, rest_self = self.loss_understanding(sender_input,receiver_output_self)
        loss_cross, rest_cross = self.loss_understanding(sender_input,receiver_output_cross)
        # Confidence weight: probability the sender's listener assigns to the
        # true input (assumes sender_input is one-hot — TODO confirm).
        prob_conf=torch.exp((sender_input*F.log_softmax(send_output,dim=1)).sum(1))
        # `reduce=False` is deprecated; `reduction="none"` is the equivalent
        # modern spelling (elementwise output, no reduction).
        KL_div = torch.nn.KLDivLoss(reduction="none")
        # NOTE(review): KLDivLoss expects *log*-probabilities as its first
        # argument, but exp(...) (i.e. probabilities) is passed for both
        # arguments here — confirm this is the intended divergence.
        loss_imitation = KL_div(torch.exp(whole_log_prob_s.reshape([whole_log_prob_s.size(0)*whole_log_prob_s.size(1),whole_log_prob_s.size(2)])),torch.exp(other_whole_log_prob_s.reshape([other_whole_log_prob_s.size(0)*other_whole_log_prob_s.size(1),other_whole_log_prob_s.size(2)])))
        loss_imitation=loss_imitation.reshape([whole_log_prob_s.size(0),whole_log_prob_s.size(1),whole_log_prob_s.size(2)])
        # Sum over vocabulary and time -> one scalar per batch element.
        loss_imitation=loss_imitation.sum(2).sum(1)
        # No meaningful imitation accuracy for the KL variant; report 0.
        rest_imitation={"acc_imitation":torch.tensor([0.])}
        loss_imitation*=prob_conf
        # Weighted average of the three losses.  NOTE: consider moving
        # loss_imitation out of this sum (original author remark).
        loss = self.loss_weights["self"]*loss_self + self.loss_weights["cross"]*loss_cross + self.loss_weights["imitation"]*loss_imitation
        loss /= (self.loss_weights["self"]+self.loss_weights["cross"]+self.loss_weights["imitation"])
        "3. Entropy + length Regularization"
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r_self)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r_self)
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()
        weighted_entropy = effective_entropy_s.mean() * self.optim_params["sender_entropy_coeff_{}".format(sender_id)] #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob = effective_log_prob_s #+ log_prob_r_12
        length_loss = message_lengths.float() * self.optim_params["length_cost"]
        "4. Variance reduction"
        # REINFORCE with running-mean baselines (see update_baseline).
        policy_loss_self = ((loss_self.detach() - self.mean_baseline['loss_self_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_cross = ((loss_cross.detach() - self.mean_baseline['loss_cross_{}'.format(sender_id)]) * log_prob).mean()
        policy_loss_imitation = ((loss_imitation.detach() - self.mean_baseline['loss_imitation_{}'.format(sender_id)]) * log_prob).mean()
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length_{}'.format(sender_id)]) * effective_log_prob_s).mean()
        " 5. Final loss"
        # The imitation policy term is deliberately excluded from the policy
        # loss here (only self + cross), matching the original experiment.
        policy_loss = self.loss_weights["self"]*policy_loss_self + self.loss_weights["cross"]*policy_loss_cross #+ self.loss_weights["imitation"]*policy_loss_imitation
        policy_loss /= (self.loss_weights["self"]+self.loss_weights["cross"])#+self.loss_weights["imitation"])
        optimized_loss = policy_length_loss + policy_loss - weighted_entropy
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()
        # Baselines are only updated in training mode.
        if self.training:
            self.update_baseline('loss_self_{}'.format(sender_id), loss_self)
            self.update_baseline('loss_cross_{}'.format(sender_id), loss_cross)
            self.update_baseline('loss_imitation_{}'.format(sender_id), loss_imitation)
            self.update_baseline('length_{}'.format(sender_id), length_loss)
        "6. Store results"
        rest={}
        rest['loss'] = optimized_loss.detach().item()
        rest['loss_{}'.format(sender_id)] = optimized_loss.detach().item()
        rest['sender_entropy_{}'.format(sender_id)] = entropy_s.mean().item()
        rest['mean_length_{}'.format(sender_id)] = message_lengths.float().mean().item()
        rest['loss_self_{}{}'.format(sender_id,sender_id)] = loss_self.mean().item()
        rest['loss_cross_{}{}'.format(sender_id,receiver_id)] = loss_cross.mean().item()
        rest['loss_imitation_{}{}'.format(receiver_id,sender_id)] = loss_imitation.mean().item()
        rest['acc_self_{}{}'.format(sender_id,sender_id)]=rest_self['acc'].mean().item()
        rest['acc_cross_{}{}'.format(sender_id,receiver_id)]=rest_cross['acc'].mean().item()
        rest['acc_imitation_{}{}'.format(receiver_id,sender_id)]=rest_imitation['acc_imitation'].mean().item()
        return optimized_loss, rest

    def update_baseline(self, name, value):
        """Fold one (detached, batch-averaged) sample into baseline *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceBaseline(nn.Module):
    """
    DialogReinforce implements the Dialog game: in each forward pass both
    agents take a turn as sender (1->2 and 2->1), each direction is scored
    with the task loss plus length/entropy regularization, and the two
    directions are optimized with REINFORCE using running-mean baselines.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[0.5,0.5],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        Parameters
        ----------
        Agent_1, Agent_2 :
            Dialog agents exposing ``sender`` / ``receiver`` (project types).
        loss : callable
            ``(sender_input, message, receiver_input, receiver_output, labels)
            -> (loss, rest_dict)``.
        sender_entropy_coeff_1, receiver_entropy_coeff_1,
        sender_entropy_coeff_2, receiver_entropy_coeff_2 : float
            Entropy-regularization coefficients per agent.
        device :
            Torch device inputs are moved to in ``forward``.
        loss_weights : list of two floats
            Weights for direction 1->2 and 2->1 when averaging diagnostics.
            NOTE(review): mutable default argument — harmless as long as it
            is never mutated, but a tuple would be safer.
        length_cost : float
            Per-symbol message-length penalty.
        unigram_penalty : float
            Stored but not used in this class's visible code.
        reg : bool
            Stored but not used in this class's visible code.
        """
        super(DialogReinforceBaseline, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.device=device

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Play both directions of the dialog game on one batch.

        Returns ``(optimized_loss_1, optimized_loss_2, rest)``: one scalar
        loss per direction plus a dict of weighted scalar diagnostics.
        """
        sender_input=sender_input.to(self.device)
        "1. Agent_1 -> Agent_2"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)
        receiver_output_1, log_prob_r_1, entropy_r_1 = self.agent_2.receiver(message_1, receiver_input, message_lengths_1)
        loss_1, rest_1 = self.loss(sender_input, message_1, receiver_input, receiver_output_1, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                entropy_r_1.mean() * self.receiver_entropy_coeff_1
        log_prob_1 = effective_log_prob_s_1 + log_prob_r_1
        length_loss_1 = message_lengths_1.float() * self.length_cost
        # REINFORCE terms with running-mean baselines.
        policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_1 = ((loss_1.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
        optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_1 += loss_1.mean()
        if self.training:
            self.update_baseline('loss_1', loss_1)
            self.update_baseline('length_1', length_loss_1)
        # Collapse diagnostics to plain Python scalars.
        for k, v in rest_1.items():
            rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_1['loss'] = optimized_loss_1.detach().item()
        rest_1['sender_entropy'] = entropy_s_1.mean().item()
        rest_1['receiver_entropy'] = entropy_r_1.mean().item()
        rest_1['original_loss'] = loss_1.mean().item()
        rest_1['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent_2 -> Agent_1"
        # Mirror of direction 1 with agents swapped.
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
        message_lengths_2 = find_lengths(message_2)
        receiver_output_2, log_prob_r_2, entropy_r_2 = self.agent_1.receiver(message_2, receiver_input, message_lengths_2)
        loss_2, rest_2 = self.loss(sender_input, message_2, receiver_input, receiver_output_2, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                entropy_r_2.mean() * self.receiver_entropy_coeff_2
        log_prob_2 = effective_log_prob_s_2 + log_prob_r_2
        length_loss_2 = message_lengths_2.float() * self.length_cost
        policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
        policy_loss_2 = ((loss_2.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
        optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_2 += loss_2.mean()
        if self.training:
            self.update_baseline('loss_2', loss_2)
            self.update_baseline('length_2', length_loss_2)
        for k, v in rest_2.items():
            rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_2['loss'] = optimized_loss_2.detach().item()
        rest_2['sender_entropy'] = entropy_s_2.mean().item()
        rest_2['receiver_entropy'] = entropy_r_2.mean().item()
        rest_2['original_loss'] = loss_2.mean().item()
        rest_2['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # NOTE(review): optimized_loss is computed but not returned — the
        # per-direction losses are returned instead; confirm intended.
        optimized_loss = self.loss_weights[0]*optimized_loss_1 + self.loss_weights[1]*optimized_loss_2
        rest={}
        rest['loss']=self.loss_weights[0]*rest_1['loss'] + self.loss_weights[1]* rest_2['loss']
        rest['sender_entropy']=self.loss_weights[0]*rest_1['sender_entropy'] + self.loss_weights[1]* rest_2['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0]*rest_1['receiver_entropy'] + self.loss_weights[1]* rest_2['receiver_entropy']
        rest['original_loss']=self.loss_weights[0]*rest_1['original_loss'] + self.loss_weights[1]* rest_2['original_loss']
        rest['mean_length']=self.loss_weights[0]*rest_1['mean_length'] + self.loss_weights[1]* rest_2['mean_length']
        rest['acc']=self.loss_weights[0]*rest_1['acc'] + self.loss_weights[1]* rest_2['acc']
        return optimized_loss_1, optimized_loss_2, rest

    def update_baseline(self, name, value):
        """Fold one (detached, batch-averaged) sample into baseline *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel1(nn.Module):
    """
    DialogReinforce implements the Dialog game, variant "Model 1": each
    agent's message is decoded both by itself (11, 22) and by the other
    agent (12, 21), giving four REINFORCE losses that are combined with a
    2x2 weight matrix.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[[0.25,0.25],[0.25,0.25]],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        Parameters
        ----------
        Agent_1, Agent_2 :
            Dialog agents exposing ``sender`` / ``receiver`` (project types).
        loss : callable
            ``(sender_input, message, receiver_input, receiver_output, labels)
            -> (loss, rest_dict)``.
        sender_entropy_coeff_*, receiver_entropy_coeff_* : float
            Entropy-regularization coefficients per agent.
        device :
            Torch device inputs are moved to in ``forward``.
        loss_weights : 2x2 nested list
            ``loss_weights[i][j]`` weighs sender i+1 -> receiver pairing j
            (self then cross).  NOTE(review): mutable default argument.
        length_cost : float
            Per-symbol message-length penalty.
        unigram_penalty, reg :
            Stored but not used in this class's visible code.
        """
        super(DialogReinforceModel1, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device=device
        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Play all four speaker/listener pairings (11, 12, 21, 22) on one
        batch and return the four per-pairing losses plus a dict of
        weighted scalar diagnostics.
        """
        sender_input=sender_input.to(self.device)
        "1. Agent 1"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)
        "1.2 Agent_1 -> Agent_2"
        #message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1
        receiver_output_12, log_prob_r_12, entropy_r_12 = self.agent_2.receiver(message_1, receiver_input, message_lengths_1)
        loss_12, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_12)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_12)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        weighted_entropy_12 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob_12 = effective_log_prob_s_1 + log_prob_r_12
        length_loss_12 = message_lengths_1.float() * self.length_cost
        # NOTE(review): reads baseline 'length_1' but training updates
        # 'length_12' below, so this baseline stays at its default 0 — confirm.
        policy_length_loss_12 = ((length_loss_12.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_12 = ((loss_12.detach() - self.mean_baseline['loss_12']) * log_prob_12).mean()
        optimized_loss_12 = policy_length_loss_12 + policy_loss_12 - weighted_entropy_12
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_12 += loss_12.mean()
        if self.training:
            self.update_baseline('loss_12', loss_12)
            self.update_baseline('length_12', length_loss_12)
        # Collapse diagnostics to plain Python scalars.
        for k, v in rest_12.items():
            rest_12[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_12['loss'] = optimized_loss_12.detach().item()
        rest_12['sender_entropy'] = entropy_s_1.mean().item()
        rest_12['receiver_entropy'] = entropy_r_12.mean().item()
        rest_12['original_loss'] = loss_12.mean().item()
        rest_12['mean_length'] = message_lengths_1.float().mean().item()
        "1.1 Agent_1 -> Agent_1"
        # Same message_1 decoded by agent 1 itself; reuses the effective
        # entropy/log-prob accumulated above.
        #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1
        receiver_output_11, log_prob_r_11, entropy_r_11 = self.agent_1.receiver(message_1, receiver_input, message_lengths_1)
        loss_11, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11, labels)
        weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                entropy_r_11.mean() * self.receiver_entropy_coeff_1
        log_prob_11 = effective_log_prob_s_1 + log_prob_r_11
        length_loss_11 = message_lengths_1.float() * self.length_cost
        # NOTE(review): reads 'length_1' but updates 'length_11' — see above.
        policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_11 = ((loss_11.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
        optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_11 += loss_11.mean()
        if self.training:
            self.update_baseline('loss_11', loss_11)
            self.update_baseline('length_11', length_loss_11)
        for k, v in rest_11.items():
            rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_11['loss'] = optimized_loss_11.detach().item()
        rest_11['sender_entropy'] = entropy_s_1.mean().item()
        rest_11['receiver_entropy'] = entropy_r_11.mean().item()
        rest_11['original_loss'] = loss_11.mean().item()
        rest_11['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent 2"
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
        message_lengths_2 = find_lengths(message_2)
        "2. Agent_2 -> Agent_1"
        #message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2
        receiver_output_21, log_prob_r_21, entropy_r_21 = self.agent_1.receiver(message_2, receiver_input, message_lengths_2)
        loss_21, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21, labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_21)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_21)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_21 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                entropy_r_21.mean() * self.receiver_entropy_coeff_2
        log_prob_21 = effective_log_prob_s_2 + log_prob_r_21
        length_loss_21 = message_lengths_2.float() * self.length_cost
        policy_length_loss_21 = ((length_loss_21.float() - self.mean_baseline['length_21']) * effective_log_prob_s_2).mean()
        policy_loss_21 = ((loss_21.detach() - self.mean_baseline['loss_21']) * log_prob_21).mean()
        optimized_loss_21 = policy_length_loss_21 + policy_loss_21 - weighted_entropy_21
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_21 += loss_21.mean()
        if self.training:
            self.update_baseline('loss_21', loss_21)
            self.update_baseline('length_21', length_loss_21)
        for k, v in rest_21.items():
            rest_21[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_21['loss'] = optimized_loss_21.detach().item()
        rest_21['sender_entropy'] = entropy_s_2.mean().item()
        rest_21['receiver_entropy'] = entropy_r_21.mean().item()
        rest_21['original_loss'] = loss_21.mean().item()
        rest_21['mean_length'] = message_lengths_2.float().mean().item()
        "2. Agent_2 -> Agent_2"
        #message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2
        #message_lengths_22 = find_lengths(message_22)
        receiver_output_22, log_prob_r_22, entropy_r_22 = self.agent_2.receiver(message_2, receiver_input, message_lengths_2)
        loss_22, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22, labels)
        weighted_entropy_22 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                entropy_r_22.mean() * self.receiver_entropy_coeff_2
        log_prob_22 = effective_log_prob_s_2 + log_prob_r_22
        length_loss_22 = message_lengths_2.float() * self.length_cost
        policy_length_loss_22 = ((length_loss_22.float() - self.mean_baseline['length_22']) * effective_log_prob_s_2).mean()
        policy_loss_22 = ((loss_22.detach() - self.mean_baseline['loss_22']) * log_prob_22).mean()
        optimized_loss_22 = policy_length_loss_22 + policy_loss_22 - weighted_entropy_22
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_22 += loss_22.mean()
        if self.training:
            self.update_baseline('loss_22', loss_22)
            self.update_baseline('length_22', length_loss_22)
        for k, v in rest_22.items():
            rest_22[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_22['loss'] = optimized_loss_22.detach().item()
        rest_22['sender_entropy'] = entropy_s_2.mean().item()
        rest_22['receiver_entropy'] = entropy_r_22.mean().item()
        rest_22['original_loss'] = loss_22.mean().item()
        rest_22['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # Per-sender aggregates and the grand total (the total is computed
        # but only the four per-pairing losses are returned).
        optimized_loss_1 = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12
        optimized_loss_2 = self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
        optimized_loss = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12+ \
                         self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
        rest={}
        rest['loss']=self.loss_weights[0][0]*rest_11['loss'] + self.loss_weights[0][1]*rest_12['loss']+ \
                     self.loss_weights[1][0]*rest_21['loss'] + self.loss_weights[1][1]*rest_22['loss']
        rest['sender_entropy']=self.loss_weights[0][0]*rest_11['sender_entropy'] + self.loss_weights[0][1]*rest_12['sender_entropy']+ \
                               self.loss_weights[1][0]*rest_21['sender_entropy'] + self.loss_weights[1][1]*rest_22['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0][0]*rest_11['receiver_entropy'] + self.loss_weights[0][1]*rest_12['receiver_entropy']+ \
                                 self.loss_weights[1][0]*rest_21['receiver_entropy'] + self.loss_weights[1][1]*rest_22['receiver_entropy']
        rest['original_loss']=self.loss_weights[0][0]*rest_11['original_loss'] + self.loss_weights[0][1]*rest_12['original_loss']+ \
                              self.loss_weights[1][0]*rest_21['original_loss'] + self.loss_weights[1][1]*rest_22['original_loss']
        rest['mean_length']=self.loss_weights[0][0]*rest_11['mean_length'] + self.loss_weights[0][1]*rest_12['mean_length']+ \
                            self.loss_weights[1][0]*rest_21['mean_length'] + self.loss_weights[1][1]*rest_22['mean_length']
        rest['acc']=self.loss_weights[0][0]*rest_11['acc'] + self.loss_weights[0][1]*rest_12['acc']+ \
                    self.loss_weights[1][0]*rest_21['acc'] + self.loss_weights[1][1]*rest_22['acc']
        return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22, rest

    def update_baseline(self, name, value):
        """Fold one (detached, batch-averaged) sample into baseline *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel2(nn.Module):
    """
    DialogReinforce implements the Dialog game, variant "Model 2": the
    receivers additionally produce a language-model (imitation) sequence,
    and each direction's loss is averaged 50/50 between the task loss and
    the LM loss returned by ``self.loss``.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=[0.5,0.5],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        Parameters
        ----------
        Agent_1, Agent_2 :
            Dialog agents exposing ``send`` / ``receive`` (project types).
        loss : callable
            ``(sender_input, message, message_lengths, receiver_input,
            receiver_output, sequence_lm, labels)
            -> (task_loss, lm_loss, rest_dict)``.
        sender_entropy_coeff_*, receiver_entropy_coeff_* : float
            Entropy-regularization coefficients per agent.
        device :
            Torch device inputs are moved to in ``forward``.
        loss_weights : list of two floats
            Weights for direction 1->2 and 2->1 when averaging diagnostics.
            NOTE(review): mutable default argument.
        length_cost : float
            Per-symbol message-length penalty.
        unigram_penalty, reg :
            Stored but not used in this class's visible code.
        """
        super(DialogReinforceModel2, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.device=device

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Play both directions (1->2 and 2->1) on one batch.

        Returns ``(optimized_loss_1, optimized_loss_2, rest)``: one scalar
        loss per direction plus a dict of weighted scalar diagnostics.
        """
        sender_input=sender_input.to(self.device)
        "1. Agent_1 -> Agent_2"
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
        message_lengths_1 = find_lengths(message_1)
        # Receiver also returns an LM sequence and its log-probs.
        # NOTE(review): log_probs_lm is unpacked but never used below.
        receiver_output_1, log_prob_r_1, entropy_r_1, sequence_lm, log_probs_lm = self.agent_2.receive(message_1, receiver_input, message_lengths_1)
        # Take only the last => change to EOS position
        log_prob_r_1=log_prob_r_1[:,-1]
        entropy_r_1=entropy_r_1[:,-1]
        loss_1, loss_lm_1, rest_1 = self.loss(sender_input, message_1, message_lengths_1, receiver_input, receiver_output_1, sequence_lm , labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
        weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
                entropy_r_1.mean() * self.receiver_entropy_coeff_1
        log_prob_1 = effective_log_prob_s_1 + log_prob_r_1
        length_loss_1 = message_lengths_1.float() * self.length_cost
        policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_1 = ((loss_1.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
        optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_1 += loss_1.mean()
        # Average between task and imitation loss
        optimized_loss_1 = 0.5*(optimized_loss_1 + loss_lm_1.mean())
        if self.training:
            self.update_baseline('loss_1', loss_1)
            self.update_baseline('length_1', length_loss_1)
        # Collapse diagnostics to plain Python scalars.
        for k, v in rest_1.items():
            rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_1['loss'] = optimized_loss_1.detach().item()
        rest_1['sender_entropy'] = entropy_s_1.mean().item()
        rest_1['receiver_entropy'] = entropy_r_1.mean().item()
        rest_1['original_loss'] = loss_1.mean().item()
        rest_1['mean_length'] = message_lengths_1.float().mean().item()
        "2. Agent_2 -> Agent_1"
        # Mirror of direction 1 with agents swapped.
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
        message_lengths_2 = find_lengths(message_2)
        # NOTE(review): logits_lm is unpacked but never used below.
        receiver_output_2, log_prob_r_2, entropy_r_2,sequence_lm, logits_lm = self.agent_1.receive(message_2, receiver_input, message_lengths_2)
        # Take only the last => change to EOS position
        log_prob_r_2=log_prob_r_2[:,-1]
        entropy_r_2=entropy_r_2[:,-1]
        loss_2, loss_lm_2, rest_2 = self.loss(sender_input, message_2, message_lengths_2, receiver_input, receiver_output_2, sequence_lm , labels)
        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
        weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
                entropy_r_2.mean() * self.receiver_entropy_coeff_2
        log_prob_2 = effective_log_prob_s_2 + log_prob_r_2
        length_loss_2 = message_lengths_2.float() * self.length_cost
        policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
        policy_loss_2 = ((loss_2.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
        optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_2 += loss_2.mean()
        optimized_loss_2 = 0.5*(optimized_loss_2 + loss_lm_2.mean())
        if self.training:
            self.update_baseline('loss_2', loss_2)
            self.update_baseline('length_2', length_loss_2)
        for k, v in rest_2.items():
            rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_2['loss'] = optimized_loss_2.detach().item()
        rest_2['sender_entropy'] = entropy_s_2.mean().item()
        rest_2['receiver_entropy'] = entropy_r_2.mean().item()
        rest_2['original_loss'] = loss_2.mean().item()
        rest_2['mean_length'] = message_lengths_2.float().mean().item()
        "3. Average loss"
        # NOTE(review): optimized_loss is computed but not returned — the
        # per-direction losses are returned instead; confirm intended.
        optimized_loss = self.loss_weights[0]*optimized_loss_1 + self.loss_weights[1]*optimized_loss_2
        rest={}
        rest['loss']=self.loss_weights[0]*rest_1['loss'] + self.loss_weights[1]* rest_2['loss']
        rest['sender_entropy']=self.loss_weights[0]*rest_1['sender_entropy'] + self.loss_weights[1]* rest_2['sender_entropy']
        rest['receiver_entropy']=self.loss_weights[0]*rest_1['receiver_entropy'] + self.loss_weights[1]* rest_2['receiver_entropy']
        rest['original_loss']=self.loss_weights[0]*rest_1['original_loss'] + self.loss_weights[1]* rest_2['original_loss']
        rest['mean_length']=self.loss_weights[0]*rest_1['mean_length'] + self.loss_weights[1]* rest_2['mean_length']
        rest['acc']=self.loss_weights[0]*rest_1['acc'] + self.loss_weights[1]* rest_2['acc']
        return optimized_loss_1, optimized_loss_2, rest

    def update_baseline(self, name, value):
        """Fold one (detached, batch-averaged) sample into baseline *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel3(nn.Module):
    """
    Two-directional dialog game trained with REINFORCE.

    Pass 1: Agent_1 sends a message about ``sender_input``; Agent_2 both
    guesses the target (communication) and reconstructs the input's message
    (imitation).  Pass 2 swaps the roles.  Each pass is optimized with a
    REINFORCE estimator using running-mean baselines, an entropy bonus, and
    an optional per-symbol length penalty.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=(0.5, 0.5),
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: first agent; must expose ``send``, ``receive`` and ``imitate``
        :param Agent_2: second agent with the same interface
        :param loss: callable returning (communication loss, imitation loss, aux-stats dict)
        :param sender_entropy_coeff_1: entropy-bonus weight for Agent_1 as sender
        :param receiver_entropy_coeff_1: entropy-bonus weight for the receiver in pass 1
        :param sender_entropy_coeff_2: entropy-bonus weight for Agent_2 as sender
        :param receiver_entropy_coeff_2: entropy-bonus weight for the receiver in pass 2
        :param device: torch device that ``sender_input`` is moved to
        :param loss_weights: (weight of pass 1, weight of pass 2) used to aggregate
            the logging statistics.  A tuple: a mutable default list would be shared
            across instances.
        :param length_cost: per-symbol cost penalizing long messages
        :param unigram_penalty: stored; not used within this class
        :param reg: stored; not used within this class
        """
        super(DialogReinforceModel3, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        # Running-mean REINFORCE baselines, keyed by loss/length name.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg
        self.device = device

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Run both communication passes and return their optimized losses.

        :param sender_input: batch of target inputs
        :param labels: ground-truth labels forwarded to ``self.loss``
        :param receiver_input: optional extra input for the receiver
        :return: (optimized_loss_1, loss_1_imitation, optimized_loss_2,
            loss_2_imitation, rest) where ``rest`` holds weighted scalar
            statistics for logging
        """
        sender_input = sender_input.to(self.device)

        # ---- 1. Agent_1 -> Agent_2 ----
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
        message_lengths_1 = find_lengths(message_1)
        receiver_output_1, prob_r_1, _, log_prob_r_1, entropy_r_1 = self.agent_2.receive(message_1, receiver_input, message_lengths_1, imitate=True)
        message_reconstruction_1, prob_reconstruction_1, _ = self.agent_2.imitate(sender_input, imitate=True)
        loss_1_comm, loss_1_imitation, rest_1 = self.loss(sender_input, message_1, receiver_input, receiver_output_1, message_reconstruction_1, prob_reconstruction_1, labels)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_1_imitation = loss_1_imitation  # * prob_r_1.max(1).values
        loss_1_imitation = loss_1_imitation.mean()

        # Sender entropy / log-prob accumulated up to and including <eos>;
        # symbols after <eos> are irrelevant.
        effective_entropy_s_1 = torch.zeros_like(entropy_r_1)
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_1)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()

        weighted_entropy_1 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_1.mean() * self.receiver_entropy_coeff_1
        log_prob_1 = effective_log_prob_s_1 + log_prob_r_1

        # REINFORCE with running-mean baselines for both loss and length cost.
        length_loss_1 = message_lengths_1.float() * self.length_cost
        policy_length_loss_1 = ((length_loss_1.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
        policy_loss_1 = ((loss_1_comm.detach() - self.mean_baseline['loss_1']) * log_prob_1).mean()
        optimized_loss_1 = policy_length_loss_1 + policy_loss_1 - weighted_entropy_1
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_1 += loss_1_comm.mean()

        if self.training:
            self.update_baseline('loss_1', loss_1_comm)
            self.update_baseline('length_1', length_loss_1)

        # Convert auxiliary tensors to plain scalars for logging.
        for k, v in rest_1.items():
            rest_1[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_1['loss'] = optimized_loss_1.detach().item()
        rest_1['sender_entropy'] = entropy_s_1.mean().item()
        rest_1['receiver_entropy'] = entropy_r_1.mean().item()
        rest_1['original_loss'] = loss_1_comm.mean().item()
        rest_1['mean_length'] = message_lengths_1.float().mean().item()

        # ---- 2. Agent_2 -> Agent_1 (mirror of pass 1) ----
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
        message_lengths_2 = find_lengths(message_2)
        receiver_output_2, prob_r_2, _, log_prob_r_2, entropy_r_2 = self.agent_1.receive(message_2, receiver_input, message_lengths_2, imitate=True)
        message_reconstruction_2, prob_reconstruction_2, _ = self.agent_1.imitate(sender_input, imitate=True)
        loss_2_comm, loss_2_imitation, rest_2 = self.loss(sender_input, message_2, receiver_input, receiver_output_2, message_reconstruction_2, prob_reconstruction_2, labels)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_2_imitation = loss_2_imitation  # * prob_r_2.max(1).values
        loss_2_imitation = loss_2_imitation.mean()

        effective_entropy_s_2 = torch.zeros_like(entropy_r_2)
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_2)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()

        weighted_entropy_2 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
            entropy_r_2.mean() * self.receiver_entropy_coeff_2
        log_prob_2 = effective_log_prob_s_2 + log_prob_r_2

        length_loss_2 = message_lengths_2.float() * self.length_cost
        policy_length_loss_2 = ((length_loss_2.float() - self.mean_baseline['length_2']) * effective_log_prob_s_2).mean()
        policy_loss_2 = ((loss_2_comm.detach() - self.mean_baseline['loss_2']) * log_prob_2).mean()
        optimized_loss_2 = policy_length_loss_2 + policy_loss_2 - weighted_entropy_2
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_2 += loss_2_comm.mean()

        if self.training:
            self.update_baseline('loss_2', loss_2_comm)
            self.update_baseline('length_2', length_loss_2)

        for k, v in rest_2.items():
            rest_2[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_2['loss'] = optimized_loss_2.detach().item()
        rest_2['sender_entropy'] = entropy_s_2.mean().item()
        rest_2['receiver_entropy'] = entropy_r_2.mean().item()
        rest_2['original_loss'] = loss_2_comm.mean().item()
        rest_2['mean_length'] = message_lengths_2.float().mean().item()

        # ---- 3. Aggregate logging statistics across the two passes ----
        rest = {}
        for key in ('loss', 'sender_entropy', 'receiver_entropy', 'original_loss', 'mean_length', 'acc'):
            rest[key] = self.loss_weights[0] * rest_1[key] + self.loss_weights[1] * rest_2[key]

        return optimized_loss_1, loss_1_imitation, optimized_loss_2, loss_2_imitation, rest

    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel4(nn.Module):
    """
    Four-way dialog game trained with REINFORCE.

    Each agent speaks once; its message is decoded both by itself
    (self-listening: passes 1.1 and 2.2) and by the other agent
    (cross-listening: passes 1.2 and 2.1).  Each pass combines a
    communication loss with an imitation loss, optimized via REINFORCE with
    running-mean baselines, entropy bonuses, and an optional length penalty.

    Fix vs. previous revision: the policy length loss of pass 1.1 now reads
    the baseline under key ``'length_11'`` — the same key the baseline is
    updated under — instead of the never-updated key ``'length_1'``.
    """

    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 device,
                 loss_weights=((0.25, 0.25), (0.25, 0.25)),
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: first agent; must expose ``sender``, ``receive`` and ``imitate``
        :param Agent_2: second agent with the same interface
        :param loss: callable returning (communication loss, imitation loss, aux-stats dict)
        :param sender_entropy_coeff_1: entropy-bonus weight for Agent_1 as sender
        :param receiver_entropy_coeff_1: entropy-bonus weight for receivers of Agent_1's message
        :param sender_entropy_coeff_2: entropy-bonus weight for Agent_2 as sender
        :param receiver_entropy_coeff_2: entropy-bonus weight for receivers of Agent_2's message
        :param device: torch device that ``sender_input`` is moved to
        :param loss_weights: 2x2 weights [[w11, w12], [w21, w22]] used to aggregate
            logging statistics.  Nested tuples: a mutable default list would be
            shared across instances.
        :param length_cost: per-symbol cost penalizing long messages
        :param unigram_penalty: stored; not used within this class
        :param reg: stored; not used within this class
        """
        super(DialogReinforceModel4, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device = device
        # Running-mean REINFORCE baselines, keyed by loss/length name.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Run all four listening passes and return their optimized losses.

        :return: (optimized_loss_11, loss_11_imitation, optimized_loss_12,
            loss_12_imitation, optimized_loss_21, loss_21_imitation,
            optimized_loss_22, loss_22_imitation, rest) where ``rest`` holds
            weighted scalar statistics plus per-pass accuracies.
        """
        sender_input = sender_input.to(self.device)

        # Mixing weights: self-listening vs cross-listening vs imitation terms.
        a_self = 3.
        a_cross = 1.
        a_im = 1.

        # ---- 1. Agent 1 speaks ----
        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)

        # ---- 1.2 Agent_1 -> Agent_2 (cross-listening) ----
        receiver_output_12, prob_r_12, _, log_prob_r_12, entropy_r_12 = self.agent_2.receive(message_1, receiver_input, message_lengths_1, imitate=True)
        message_reconstruction_12, prob_reconstruction_12, _ = self.agent_2.imitate(sender_input, imitate=True)
        loss_12_comm, loss_12_imitation, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12, message_reconstruction_12, prob_reconstruction_12, labels, message_lengths_1)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_12_imitation = loss_12_imitation  # * prob_r_12.max(1).values

        # Sender entropy / log-prob accumulated up to and including <eos>;
        # symbols after <eos> are irrelevant.
        effective_entropy_s_1 = torch.zeros_like(entropy_r_12)
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_12)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()

        weighted_entropy_12 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_12.mean() * self.receiver_entropy_coeff_1
        log_prob_12 = effective_log_prob_s_1 + log_prob_r_12

        length_loss_12 = message_lengths_1.float() * self.length_cost
        policy_length_loss_12 = ((length_loss_12.float() - self.mean_baseline['length_12']) * effective_log_prob_s_1).mean()
        policy_loss_12 = ((loss_12_comm.detach() - self.mean_baseline['loss_12']) * log_prob_12).mean()
        optimized_loss_12 = policy_length_loss_12 + policy_loss_12 - weighted_entropy_12
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_12 += loss_12_comm.mean()

        if self.training:
            self.update_baseline('loss_12', loss_12_comm)
            self.update_baseline('length_12', length_loss_12)

        for k, v in rest_12.items():
            rest_12[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_12['loss'] = optimized_loss_12.detach().item()
        rest_12['sender_entropy'] = entropy_s_1.mean().item()
        rest_12['receiver_entropy'] = entropy_r_12.mean().item()
        rest_12['original_loss'] = loss_12_comm.mean().item()
        rest_12['mean_length'] = message_lengths_1.float().mean().item()

        # ---- 1.1 Agent_1 -> Agent_1 (self-listening) ----
        receiver_output_11, prob_r_11, _, log_prob_r_11, entropy_r_11 = self.agent_1.receive(message_1, receiver_input, message_lengths_1, imitate=True)
        message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(sender_input, imitate=True)
        loss_11_comm, loss_11_imitation, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11, message_reconstruction_11, prob_reconstruction_11, labels, message_lengths_1)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_11_imitation = loss_11_imitation  # * prob_r_11.max(1).values
        # Self-listening loss mixes in the cross-listening and imitation terms.
        loss_11_comm = a_self * loss_11_comm + a_cross * loss_12_comm + a_im * loss_12_imitation

        weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_11.mean() * self.receiver_entropy_coeff_1
        log_prob_11 = effective_log_prob_s_1 + log_prob_r_11

        length_loss_11 = message_lengths_1.float() * self.length_cost
        # BUGFIX: baseline read under 'length_11' (matching the update below);
        # previously read the never-updated key 'length_1'.
        policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_11']) * effective_log_prob_s_1).mean()
        policy_loss_11 = ((loss_11_comm.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
        optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_11 += loss_11_comm.mean()

        if self.training:
            self.update_baseline('loss_11', loss_11_comm)
            self.update_baseline('length_11', length_loss_11)

        for k, v in rest_11.items():
            rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_11['loss'] = optimized_loss_11.detach().item()
        rest_11['sender_entropy'] = entropy_s_1.mean().item()
        rest_11['receiver_entropy'] = entropy_r_11.mean().item()
        rest_11['original_loss'] = loss_11_comm.mean().item()
        rest_11['mean_length'] = message_lengths_1.float().mean().item()

        # ---- 2. Agent 2 speaks ----
        message_2, log_prob_s_2, entropy_s_2 = self.agent_2.sender(sender_input)
        message_lengths_2 = find_lengths(message_2)

        # ---- 2.1 Agent_2 -> Agent_1 (cross-listening) ----
        receiver_output_21, prob_r_21, _, log_prob_r_21, entropy_r_21 = self.agent_1.receive(message_2, receiver_input, message_lengths_2, imitate=True)
        message_reconstruction_21, prob_reconstruction_21, _ = self.agent_1.imitate(sender_input, imitate=True)
        loss_21_comm, loss_21_imitation, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21, message_reconstruction_21, prob_reconstruction_21, labels, message_lengths_2)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_21_imitation = loss_21_imitation  # * prob_r_21.max(1).values

        effective_entropy_s_2 = torch.zeros_like(entropy_r_21)
        effective_log_prob_s_2 = torch.zeros_like(log_prob_r_21)
        for i in range(message_2.size(1)):
            not_eosed_2 = (i < message_lengths_2).float()
            effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
            effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
        effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()

        weighted_entropy_21 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
            entropy_r_21.mean() * self.receiver_entropy_coeff_2
        log_prob_21 = effective_log_prob_s_2 + log_prob_r_21

        length_loss_21 = message_lengths_2.float() * self.length_cost
        policy_length_loss_21 = ((length_loss_21.float() - self.mean_baseline['length_21']) * effective_log_prob_s_2).mean()
        policy_loss_21 = ((loss_21_comm.detach() - self.mean_baseline['loss_21']) * log_prob_21).mean()
        optimized_loss_21 = policy_length_loss_21 + policy_loss_21 - weighted_entropy_21
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_21 += loss_21_comm.mean()

        if self.training:
            self.update_baseline('loss_21', loss_21_comm)
            self.update_baseline('length_21', length_loss_21)

        for k, v in rest_21.items():
            rest_21[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_21['loss'] = optimized_loss_21.detach().item()
        rest_21['sender_entropy'] = entropy_s_2.mean().item()
        rest_21['receiver_entropy'] = entropy_r_21.mean().item()
        rest_21['original_loss'] = loss_21_comm.mean().item()
        rest_21['mean_length'] = message_lengths_2.float().mean().item()

        # ---- 2.2 Agent_2 -> Agent_2 (self-listening) ----
        receiver_output_22, prob_r_22, _, log_prob_r_22, entropy_r_22 = self.agent_2.receive(message_2, receiver_input, message_lengths_2, imitate=True)
        message_reconstruction_22, prob_reconstruction_22, _ = self.agent_2.imitate(sender_input, imitate=True)
        loss_22_comm, loss_22_imitation, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22, message_reconstruction_22, prob_reconstruction_22, labels, message_lengths_2)
        # Imitation loss; weighting by the receiver's confidence is disabled.
        loss_22_imitation = loss_22_imitation  # * prob_r_22.max(1).values
        # Self-listening loss mixes in the cross-listening and imitation terms.
        loss_22_comm = a_self * loss_22_comm + a_cross * loss_21_comm + a_im * loss_21_imitation

        weighted_entropy_22 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
            entropy_r_22.mean() * self.receiver_entropy_coeff_2
        log_prob_22 = effective_log_prob_s_2 + log_prob_r_22

        length_loss_22 = message_lengths_2.float() * self.length_cost
        policy_length_loss_22 = ((length_loss_22.float() - self.mean_baseline['length_22']) * effective_log_prob_s_2).mean()
        policy_loss_22 = ((loss_22_comm.detach() - self.mean_baseline['loss_22']) * log_prob_22).mean()
        optimized_loss_22 = policy_length_loss_22 + policy_loss_22 - weighted_entropy_22
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_22 += loss_22_comm.mean()

        if self.training:
            self.update_baseline('loss_22', loss_22_comm)
            self.update_baseline('length_22', length_loss_22)

        for k, v in rest_22.items():
            rest_22[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_22['loss'] = optimized_loss_22.detach().item()
        rest_22['sender_entropy'] = entropy_s_2.mean().item()
        rest_22['receiver_entropy'] = entropy_r_22.mean().item()
        rest_22['original_loss'] = loss_22_comm.mean().item()
        rest_22['mean_length'] = message_lengths_2.float().mean().item()

        # ---- 3. Aggregate logging statistics across the four passes ----
        rest = {}
        for key in ('loss', 'sender_entropy', 'receiver_entropy', 'original_loss', 'mean_length', 'acc'):
            rest[key] = self.loss_weights[0][0] * rest_11[key] + self.loss_weights[0][1] * rest_12[key] + \
                self.loss_weights[1][0] * rest_21[key] + self.loss_weights[1][1] * rest_22[key]
        rest["acc_11"] = rest_11["acc"]
        rest["acc_12"] = rest_12["acc"]
        rest["acc_21"] = rest_21["acc"]
        rest["acc_22"] = rest_22["acc"]

        return optimized_loss_11, loss_11_imitation, optimized_loss_12, loss_12_imitation, \
            optimized_loss_21, loss_21_imitation, optimized_loss_22, loss_22_imitation, rest

    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class PretrainAgent(nn.Module):
    """
    Single-agent pretraining game trained with REINFORCE.

    The agent sends a message about ``sender_input`` and listens to its own
    message (self-listening); when pretrained messages are supplied, it is
    additionally trained to imitate them on one-hot identity inputs.

    Fix vs. previous revision: the policy length loss now reads the baseline
    under key ``'length_11'`` — the same key the baseline is updated under —
    instead of the never-updated key ``'length_1'``.
    """

    def __init__(self,
                 Agent_1,
                 loss,
                 pretrained_messages,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 device,
                 n_features,
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        :param Agent_1: agent; must expose ``sender``, ``receive`` and ``imitate``
        :param loss: callable returning (communication loss, imitation loss, aux-stats dict)
        :param pretrained_messages: reference messages to imitate, or ``None``
            to disable the imitation branch
        :param sender_entropy_coeff_1: entropy-bonus weight for the sender
        :param receiver_entropy_coeff_1: entropy-bonus weight for the receiver
        :param device: torch device inputs are moved to
        :param n_features: input dimensionality; used to build the one-hot
            identity batch fed to ``imitate``
        :param length_cost: per-symbol cost penalizing long messages
        :param unigram_penalty: stored; not used within this class
        :param reg: stored; not used within this class
        """
        super(PretrainAgent, self).__init__()
        self.agent_1 = Agent_1
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.pretrained_messages = pretrained_messages
        if self.pretrained_messages is not None:
            self.pretrained_messages = self.pretrained_messages.to(device)
        self.loss = loss
        self.n_features = n_features
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device = device
        # Running-mean REINFORCE baselines, keyed by loss/length name.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg

    def forward(self, sender_input, labels, receiver_input=None):
        """
        Run the self-listening (and optional imitation) pass.

        :return: (optimized_loss_11, loss_11_imitation, rest_11) where
            ``rest_11`` holds scalar statistics for logging
        """
        sender_input = sender_input.to(self.device)

        message_1, log_prob_s_1, entropy_s_1 = self.agent_1.sender(sender_input)
        message_lengths_1 = find_lengths(message_1)

        # ---- 1.1 Agent_1 -> Agent_1 (self-listening) ----
        receiver_output_11, prob_r_11, _, log_prob_r_11, entropy_r_11 = self.agent_1.receive(message_1, receiver_input, message_lengths_1, imitate=True)
        if self.pretrained_messages is not None:
            # Reconstruct the pretrained messages for one-hot identity inputs.
            pretrained_sender_input = torch.eye(self.n_features).to(self.device)
            message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(pretrained_sender_input, imitate=True)
        else:
            message_reconstruction_11 = None
            prob_reconstruction_11 = None
        loss_11_comm, loss_11_imitation, rest_11 = self.loss(sender_input, message_1, self.pretrained_messages, receiver_input, receiver_output_11, message_reconstruction_11, prob_reconstruction_11, labels)

        # Sender entropy / log-prob accumulated up to and including <eos>;
        # symbols after <eos> are irrelevant.
        effective_entropy_s_1 = torch.zeros_like(entropy_r_11)
        effective_log_prob_s_1 = torch.zeros_like(log_prob_r_11)
        for i in range(message_1.size(1)):
            not_eosed_1 = (i < message_lengths_1).float()
            effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
            effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
        effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()

        weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
            entropy_r_11.mean() * self.receiver_entropy_coeff_1
        log_prob_11 = effective_log_prob_s_1 + log_prob_r_11

        length_loss_11 = message_lengths_1.float() * self.length_cost
        # BUGFIX: baseline read under 'length_11' (matching the update below);
        # previously read the never-updated key 'length_1'.
        policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_11']) * effective_log_prob_s_1).mean()
        policy_loss_11 = ((loss_11_comm.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
        optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss_11 += loss_11_comm.mean()

        if self.training:
            self.update_baseline('loss_11', loss_11_comm)
            self.update_baseline('length_11', length_loss_11)

        for k, v in rest_11.items():
            rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
        rest_11['loss'] = optimized_loss_11.detach().item()
        rest_11['sender_entropy'] = entropy_s_1.mean().item()
        rest_11['receiver_entropy'] = entropy_r_11.mean().item()
        rest_11['original_loss'] = loss_11_comm.mean().item()
        rest_11['mean_length'] = message_lengths_1.float().mean().item()

        return optimized_loss_11, loss_11_imitation, rest_11

    def update_baseline(self, name, value):
        """Incrementally update the running-mean baseline stored under *name*."""
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class DialogReinforceModel6(nn.Module):
    """
    Two-agent dialog game trained with REINFORCE, with optional imitation.

    Each agent speaks and is decoded both by itself and by the other agent;
    when ``imitate`` is enabled, listeners additionally reconstruct the
    speaker's message and an imitation loss is mixed in.
    """
    def __init__(self,
                 Agent_1,
                 Agent_2,
                 loss,
                 sender_entropy_coeff_1,
                 receiver_entropy_coeff_1,
                 sender_entropy_coeff_2,
                 receiver_entropy_coeff_2,
                 imitate,
                 device,
                 loss_weights=[[0.25,0.25],[0.25,0.25]],
                 length_cost=0.0,
                 unigram_penalty=0.0,
                 reg=False):
        """
        Store the agents, the loss callable and the REINFORCE hyper-parameters.

        :param Agent_1: first agent (must expose ``send``, ``receive``; and
            ``imitate`` when the ``imitate`` flag is set)
        :param Agent_2: second agent with the same interface
        :param loss: callable computing the game loss(es) and auxiliary stats
        :param sender_entropy_coeff_1: entropy-bonus weight for Agent_1 as sender
        :param receiver_entropy_coeff_1: entropy-bonus weight for receivers of Agent_1's message
        :param sender_entropy_coeff_2: entropy-bonus weight for Agent_2 as sender
        :param receiver_entropy_coeff_2: entropy-bonus weight for receivers of Agent_2's message
        :param imitate: if True, listeners also reconstruct the message and an
            imitation loss is added
        :param device: torch device inputs are moved to
        :param loss_weights: 2x2 weights used to aggregate per-pass statistics.
            NOTE(review): mutable default list — shared across calls/instances;
            safe only if never mutated.
        :param length_cost: per-symbol cost penalizing long messages
        :param unigram_penalty: stored; usage not visible in this part of the file
        :param reg: stored; usage not visible in this part of the file
        """
        super(DialogReinforceModel6, self).__init__()
        self.agent_1 = Agent_1
        self.agent_2 = Agent_2
        self.sender_entropy_coeff_1 = sender_entropy_coeff_1
        self.receiver_entropy_coeff_1 = receiver_entropy_coeff_1
        self.sender_entropy_coeff_2 = sender_entropy_coeff_2
        self.receiver_entropy_coeff_2 = receiver_entropy_coeff_2
        self.loss = loss
        self.loss_weights = loss_weights
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.device=device
        # Running-mean REINFORCE baselines, keyed by loss/length name.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg=reg
        self.imitate=imitate
def forward(self, sender_input, labels, receiver_input=None):
sender_input=sender_input.to(self.device)
"1. Agent 1"
message_1, log_prob_s_1, entropy_s_1 = self.agent_1.send(sender_input)
message_lengths_1 = find_lengths(message_1)
a_self=3.
a_cross=1.
a_im=1.
"1.2 Agent_1 -> Agent_2"
#message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1
receiver_output_12, log_prob_r_12, entropy_r_12 = self.agent_2.receive(message_1, receiver_input, message_lengths_1)
if self.imitate:
candidates_12=receiver_output_12.argmax(dim=1)
message_reconstruction_12, prob_reconstruction_12, _ = self.agent_2.imitate(sender_input)
loss_12, loss_12_imitation, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12,message_reconstruction_12,prob_reconstruction_12, labels,message_lengths_1)
#loss_12_imitation=loss_12_imitation.mean()
else:
loss_12, rest_12 = self.loss(sender_input, message_1, receiver_input, receiver_output_12, labels)
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_1 = torch.zeros_like(entropy_r_12)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_1 = torch.zeros_like(log_prob_r_12)
for i in range(message_1.size(1)):
not_eosed_1 = (i < message_lengths_1).float()
effective_entropy_s_1 += entropy_s_1[:, i] * not_eosed_1
effective_log_prob_s_1 += log_prob_s_1[:, i] * not_eosed_1
effective_entropy_s_1 = effective_entropy_s_1 / message_lengths_1.float()
weighted_entropy_12 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
entropy_r_12.mean() * self.receiver_entropy_coeff_1
log_prob_12 = effective_log_prob_s_1 + log_prob_r_12
length_loss_12 = message_lengths_1.float() * self.length_cost
policy_length_loss_12 = ((length_loss_12.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
policy_loss_12 = ((loss_12.detach() - self.mean_baseline['loss_12']) * log_prob_12).mean()
optimized_loss_12 = policy_length_loss_12 + policy_loss_12 - weighted_entropy_12
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_12 += loss_12.mean()
if self.training:
self.update_baseline('loss_12', loss_12)
self.update_baseline('length_12', length_loss_12)
for k, v in rest_12.items():
rest_12[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_12['loss'] = optimized_loss_12.detach().item()
rest_12['sender_entropy'] = entropy_s_1.mean().item()
rest_12['receiver_entropy'] = entropy_r_12.mean().item()
rest_12['original_loss'] = loss_12.mean().item()
rest_12['mean_length'] = message_lengths_1.float().mean().item()
"1.1 Agent_1 -> Agent_1"
#message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1
receiver_output_11, log_prob_r_11, entropy_r_11 = self.agent_1.receive(message_1, receiver_input, message_lengths_1)
if self.imitate:
candidates_11=receiver_output_11.argmax(dim=1)
message_reconstruction_11, prob_reconstruction_11, _ = self.agent_1.imitate(sender_input)
loss_11, loss_11_imitation, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11,message_reconstruction_11,prob_reconstruction_11, labels,message_lengths_1)
#loss_11_imitation=loss_11_imitation.mean()
loss_11=a_self*loss_11+a_cross*loss_12+a_im*loss_12_imitation
else:
loss_11, rest_11 = self.loss(sender_input, message_1, receiver_input, receiver_output_11, labels)
weighted_entropy_11 = effective_entropy_s_1.mean() * self.sender_entropy_coeff_1 + \
entropy_r_11.mean() * self.receiver_entropy_coeff_1
log_prob_11 = effective_log_prob_s_1 + log_prob_r_11
length_loss_11 = message_lengths_1.float() * self.length_cost
policy_length_loss_11 = ((length_loss_11.float() - self.mean_baseline['length_1']) * effective_log_prob_s_1).mean()
policy_loss_11 = ((loss_11.detach() - self.mean_baseline['loss_11']) * log_prob_11).mean()
optimized_loss_11 = policy_length_loss_11 + policy_loss_11 - weighted_entropy_11
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_11 += loss_11.mean()
if self.training:
self.update_baseline('loss_11', loss_11)
self.update_baseline('length_11', length_loss_11)
for k, v in rest_11.items():
rest_11[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_11['loss'] = optimized_loss_11.detach().item()
rest_11['sender_entropy'] = entropy_s_1.mean().item()
rest_11['receiver_entropy'] = entropy_r_11.mean().item()
rest_11['original_loss'] = loss_11.mean().item()
rest_11['mean_length'] = message_lengths_1.float().mean().item()
"2. Agent 2"
message_2, log_prob_s_2, entropy_s_2 = self.agent_2.send(sender_input)
message_lengths_2 = find_lengths(message_2)
"2. Agent_2 -> Agent_1"
#message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2
receiver_output_21, log_prob_r_21, entropy_r_21 = self.agent_1.receive(message_2, receiver_input, message_lengths_2)
if self.imitate:
candidates_21=receiver_output_21.argmax(dim=1)
message_reconstruction_21, prob_reconstruction_21, _ = self.agent_1.imitate(sender_input)
loss_21, loss_21_imitation, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21,message_reconstruction_21,prob_reconstruction_21, labels,message_lengths_2)
#loss_21_imitation=loss_21_imitation.mean()
else:
loss_21, rest_21 = self.loss(sender_input, message_2, receiver_input, receiver_output_21, labels)
# the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
effective_entropy_s_2 = torch.zeros_like(entropy_r_21)
# the log prob of the choices made by S before and including the eos symbol - again, we don't
# care about the rest
effective_log_prob_s_2 = torch.zeros_like(log_prob_r_21)
for i in range(message_2.size(1)):
not_eosed_2 = (i < message_lengths_2).float()
effective_entropy_s_2 += entropy_s_2[:, i] * not_eosed_2
effective_log_prob_s_2 += log_prob_s_2[:, i] * not_eosed_2
effective_entropy_s_2 = effective_entropy_s_2 / message_lengths_2.float()
weighted_entropy_21 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
entropy_r_21.mean() * self.receiver_entropy_coeff_2
log_prob_21 = effective_log_prob_s_2 + log_prob_r_21
length_loss_21 = message_lengths_2.float() * self.length_cost
policy_length_loss_21 = ((length_loss_21.float() - self.mean_baseline['length_21']) * effective_log_prob_s_2).mean()
policy_loss_21 = ((loss_21.detach() - self.mean_baseline['loss_21']) * log_prob_21).mean()
optimized_loss_21 = policy_length_loss_21 + policy_loss_21 - weighted_entropy_21
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_21 += loss_21.mean()
if self.training:
self.update_baseline('loss_21', loss_21)
self.update_baseline('length_21', length_loss_21)
for k, v in rest_21.items():
rest_21[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_21['loss'] = optimized_loss_21.detach().item()
rest_21['sender_entropy'] = entropy_s_2.mean().item()
rest_21['receiver_entropy'] = entropy_r_21.mean().item()
rest_21['original_loss'] = loss_21.mean().item()
rest_21['mean_length'] = message_lengths_2.float().mean().item()
"2. Agent_2 -> Agent_2"
#message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2
#message_lengths_22 = find_lengths(message_22)
receiver_output_22, log_prob_r_22, entropy_r_22 = self.agent_2.receive(message_2, receiver_input, message_lengths_2)
if self.imitate:
candidates_22=receiver_output_22.argmax(dim=1)
message_reconstruction_22, prob_reconstruction_22, _ = self.agent_2.imitate(sender_input)
loss_22, loss_22_imitation, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22,message_reconstruction_22,prob_reconstruction_22, labels,message_lengths_2)
#loss_22_imitation=loss_22_imitation.mean()
loss_22=a_self*loss_22+a_cross*loss_21+a_im*loss_21_imitation
else:
loss_22, rest_22 = self.loss(sender_input, message_2, receiver_input, receiver_output_22, labels)
weighted_entropy_22 = effective_entropy_s_2.mean() * self.sender_entropy_coeff_2 + \
entropy_r_22.mean() * self.receiver_entropy_coeff_2
log_prob_22 = effective_log_prob_s_2 + log_prob_r_22
length_loss_22 = message_lengths_2.float() * self.length_cost
policy_length_loss_22 = ((length_loss_22.float() - self.mean_baseline['length_22']) * effective_log_prob_s_2).mean()
policy_loss_22 = ((loss_22.detach() - self.mean_baseline['loss_22']) * log_prob_22).mean()
optimized_loss_22 = policy_length_loss_22 + policy_loss_22 - weighted_entropy_22
# if the receiver is deterministic/differentiable, we apply the actual loss
optimized_loss_22 += loss_22.mean()
if self.training:
self.update_baseline('loss_22', loss_22)
self.update_baseline('length_22', length_loss_22)
for k, v in rest_22.items():
rest_22[k] = v.mean().item() if hasattr(v, 'mean') else v
rest_22['loss'] = optimized_loss_22.detach().item()
rest_22['sender_entropy'] = entropy_s_2.mean().item()
rest_22['receiver_entropy'] = entropy_r_22.mean().item()
rest_22['original_loss'] = loss_22.mean().item()
rest_22['mean_length'] = message_lengths_2.float().mean().item()
"3. Average loss"
optimized_loss_1 = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12
optimized_loss_2 = self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
optimized_loss = self.loss_weights[0][0]*optimized_loss_11 + self.loss_weights[0][1]*optimized_loss_12+ \
self.loss_weights[1][0]*optimized_loss_21 + self.loss_weights[1][1]*optimized_loss_22
rest={}
rest['loss']=self.loss_weights[0][0]*rest_11['loss'] + self.loss_weights[0][1]*rest_12['loss']+ \
self.loss_weights[1][0]*rest_21['loss'] + self.loss_weights[1][1]*rest_22['loss']
rest['sender_entropy']=self.loss_weights[0][0]*rest_11['sender_entropy'] + self.loss_weights[0][1]*rest_12['sender_entropy']+ \
self.loss_weights[1][0]*rest_21['sender_entropy'] + self.loss_weights[1][1]*rest_22['sender_entropy']
rest['receiver_entropy']=self.loss_weights[0][0]*rest_11['receiver_entropy'] + self.loss_weights[0][1]*rest_12['receiver_entropy']+ \
self.loss_weights[1][0]*rest_21['receiver_entropy'] + self.loss_weights[1][1]*rest_22['receiver_entropy']
rest['original_loss']=self.loss_weights[0][0]*rest_11['original_loss'] + self.loss_weights[0][1]*rest_12['original_loss']+ \
self.loss_weights[1][0]*rest_21['original_loss'] + self.loss_weights[1][1]*rest_22['original_loss']
rest['mean_length']=self.loss_weights[0][0]*rest_11['mean_length'] + self.loss_weights[0][1]*rest_12['mean_length']+ \
self.loss_weights[1][0]*rest_21['mean_length'] + self.loss_weights[1][1]*rest_22['mean_length']
rest['acc']=self.loss_weights[0][0]*rest_11['acc'] + self.loss_weights[0][1]*rest_12['acc']+ \
self.loss_weights[1][0]*rest_21['acc'] + self.loss_weights[1][1]*rest_22['acc']
rest['acc_21']=rest_21['acc']
rest['acc_12']=rest_12['acc']
if not self.imitate:
return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22, rest
else:
return optimized_loss_11, optimized_loss_12, optimized_loss_21, optimized_loss_22,loss_12_imitation,loss_21_imitation, rest
def update_baseline(self, name, value):
self.n_points[name] += 1
self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class SenderReceiverRnnReinforce(nn.Module):
    """
    Implements Sender/Receiver game with training done via Reinforce. Both agents are supposed to
    return 3-tuples of (output, log-prob of the output, entropy).
    The game implementation is responsible for handling the end-of-sequence term, so that the optimized loss
    corresponds either to the position of the eos term (assumed to be 0) or the end of sequence.

    Sender and Receiver can be obtained by applying the corresponding wrappers.
    `SenderReceiverRnnReinforce` also applies the mean baseline to the loss function to reduce the variance of the
    gradient estimate.

    >>> sender = nn.Linear(3, 10)
    >>> sender = RnnSenderReinforce(sender, vocab_size=15, embed_dim=5, hidden_size=10, max_len=10, cell='lstm')
    >>> class Receiver(nn.Module):
    ...     def __init__(self):
    ...         super().__init__()
    ...         self.fc = nn.Linear(5, 3)
    ...     def forward(self, rnn_output, _input = None):
    ...         return self.fc(rnn_output)
    >>> receiver = RnnReceiverDeterministic(Receiver(), vocab_size=15, embed_dim=10, hidden_size=5)
    >>> def loss(sender_input, _message, _receiver_input, receiver_output, _labels):
    ...     return F.mse_loss(sender_input, receiver_output, reduction='none').mean(dim=1), {'aux': 5.0}
    >>> game = SenderReceiverRnnReinforce(sender, receiver, loss, sender_entropy_coeff=0.0, receiver_entropy_coeff=0.0,
    ...                                   length_cost=1e-2)
    >>> input = torch.zeros((16, 3)).normal_()
    >>> optimized_loss, aux_info = game(input, labels=None)
    >>> sorted(list(aux_info.keys()))  # returns some debug info, such as entropies of the agents, message length etc
    ['aux', 'loss', 'mean_length', 'original_loss', 'receiver_entropy', 'sender_entropy']
    >>> aux_info['aux']
    5.0
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: the optimized loss that accepts
          sender_input: input of Sender
          message: the is sent by Sender
          receiver_input: input of Receiver from the dataset
          receiver_output: output of Receiver
          labels: labels assigned to Sender's input data
          and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information
          of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over
          all batches in the dataset.
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param reg: apply the regularization scheduling (Lazy Speaker)
        """
        super(SenderReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty

        # Running-mean baselines (one per key) for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)
        self.reg = reg

    def forward(self, sender_input, labels, receiver_input=None):
        # Sender emits a message plus per-symbol log-probs and entropies.
        message, log_prob_s, entropy_s = self.sender(sender_input)
        message_lengths = find_lengths(message)

        receiver_output, log_prob_r, entropy_r = self.receiver(message, receiver_input, message_lengths)

        loss, rest = self.loss(sender_input, message, receiver_input, receiver_output, labels)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r)

        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r)

        # Accumulate only over positions up to (and including) each example's eos.
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                entropy_r.mean() * self.receiver_entropy_coeff

        log_prob = effective_log_prob_s + log_prob_r

        if self.reg:
            # Lazy-Speaker regularization scheduling: the length penalty grows
            # steeply with the current mean accuracy, so the pressure towards
            # short messages only kicks in once the task is (nearly) solved.
            sc = rest["acc"].sum() / rest["acc"].size(0)
            # For n_features=100 (an alternative used in the paper was sc**45 / 10).
            self.length_cost = sc**(45) / 5

        length_loss = message_lengths.float() * self.length_cost

        # REINFORCE policy-gradient terms, each variance-reduced with its baseline.
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()

        optimized_loss = policy_length_loss + policy_loss - weighted_entropy

        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        # Convert auxiliary info to plain Python numbers for logging.
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v

        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean of the detached sample, used as the baseline.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class SenderImpatientReceiverRnnReinforce(nn.Module):
    """
    Implements Sender/ Impatient Receiver game with training done via Reinforce.
    It is equivalent to SenderReceiverRnnReinforce but takes into account the intermediate predictions of Impatient Listener:
    - the Impatient loss is used
    - tensor shapes are adapted for variance reduction.

    When reg is set to True, the regularization scheduling is applied (Lazy Speaker).
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: the optimized loss that accepts
          sender_input: input of Sender
          message: the is sent by Sender
          receiver_input: input of Receiver from the dataset
          receiver_output: output of Receiver
          labels: labels assigned to Sender's input data
          and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information
          of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over
          all batches in the dataset.
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param reg: apply the regularization scheduling (Lazy Speaker)
        """
        super(SenderImpatientReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg = reg

        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)

    def forward(self, sender_input, labels, receiver_input=None):
        message, log_prob_s, entropy_s = self.sender(sender_input)
        message_lengths = find_lengths(message)

        # Impatient Receiver: produces a prediction after every message prefix,
        # so its outputs carry an extra position dimension.
        receiver_output, log_prob_r, entropy_r = self.receiver(message, receiver_input, message_lengths)

        """ NOISE VERSION

        # Randomly takes a position
        rand_length=np.random.randint(0,message.size(1))

        # Loss by output
        loss, rest = self.loss(sender_input, message, receiver_input, receiver_output[:,rand_length,:], labels)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r[:,rand_length])

        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r[:,rand_length])
        """

        # Impatient loss averaged over prefix predictions; crible_acc appears to be a
        # per-example, per-position accuracy grid (indexed [example, position] below).
        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output, labels)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r.mean(1))

        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r.mean(1))

        # Accumulate only over positions up to (and including) each example's eos.
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                entropy_r.mean() * self.receiver_entropy_coeff

        # Receiver log-probs averaged over message positions (variance reduction).
        log_prob = effective_log_prob_s + log_prob_r.mean(1)

        if self.reg:
            # Mean accuracy measured at each message's eos position.
            sc = 0.
            for i in range(message_lengths.size(0)):
                sc += crible_acc[i, message_lengths[i]-1]
            sc /= message_lengths.size(0)
            # Lazy-Speaker scheduling: length penalty grows steeply with accuracy.
            # (The paper variant used sc**45 / 10; this is tuned for n_features=100.)
            self.length_cost = sc**(45) / 5

        length_loss = message_lengths.float() * self.length_cost

        # REINFORCE policy-gradient terms, each variance-reduced with its baseline.
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()

        optimized_loss = policy_length_loss + policy_loss - weighted_entropy

        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        # Convert auxiliary info to plain Python numbers for logging.
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v

        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean of the detached sample, used as the baseline.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class CompositionalitySenderReceiverRnnReinforce(nn.Module):
    """
    Adaptation of SenderReceiverRnnReinforce to inputs with several attributes.

    The Receiver outputs one distribution per attribute ([batch, n_attributes,
    n_values]); per-attribute log-probs and entropies are averaged over the
    attribute dimension before entering the REINFORCE terms. An optional noisy
    channel (noise_level, currently 0) replaces message symbols with random
    non-zero symbols before the Receiver reads them.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff, n_attributes, n_values,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: the optimized loss; accepts (sender_input, message,
          message_lengths, receiver_input, receiver_output, labels, n_attributes,
          n_values) and returns (loss tensor, aux-info dict, per-position accuracy
          grid). The loss will be minimized during training.
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param n_attributes: number of attributes describing each input
        :param n_values: number of possible values per attribute
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param unigram_penalty: kept for API compatibility; unused in this forward
        :param reg: Lazy-Speaker scheduling flag; the scheduling is currently
          disabled in this class (kept for API compatibility)
        """
        super(CompositionalitySenderReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg = reg
        self.n_attributes = n_attributes
        self.n_values = n_values

        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)

    def forward(self, sender_input, labels, receiver_input=None):
        message, log_prob_s, entropy_s = self.sender(sender_input)
        message_lengths = find_lengths(message)

        # Noisy channel: with probability noise_level, replace each symbol with a
        # random non-zero symbol. Fixed: sample directly on the message's device
        # instead of hard-coding "cuda", so the game also runs on CPU.
        noise_level = 0.
        device = message.device
        noise_map = (torch.rand(message.size(0), message.size(1), device=device) < noise_level).long()
        noise = torch.randint(1, self.sender.vocab_size, (message.size(0), message.size(1)), device=device)
        message_noise = message * (1 - noise_map) + noise_map * noise

        receiver_output_all_att, log_prob_r_all_att, entropy_r_all_att = self.receiver(message_noise, receiver_input, message_lengths)
        # receiver_output_all_att: [batch_size, n_att, n_val]

        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels, self.n_attributes, self.n_values)

        # Average the receiver quantities over attributes -> one value per example.
        log_prob_r = log_prob_r_all_att.mean(1)
        entropy_r = entropy_r_all_att.mean(1)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r)

        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r)

        # Accumulate only over positions up to (and including) each example's eos.
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                entropy_r.mean() * self.receiver_entropy_coeff

        log_prob = effective_log_prob_s + log_prob_r

        # NOTE: the Lazy-Speaker length_cost scheduling present in the sibling
        # classes is intentionally disabled here; self.length_cost stays fixed.
        length_loss = message_lengths.float() * self.length_cost

        # REINFORCE policy-gradient terms, each variance-reduced with its baseline.
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()

        optimized_loss = policy_length_loss + policy_loss - weighted_entropy

        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        # Convert auxiliary info to plain Python numbers for logging.
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v

        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean of the detached sample, used as the baseline.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class CompositionalitySenderImpatientReceiverRnnReinforce(nn.Module):
    """
    Combination of the compositional (multi-attribute) game and the Impatient
    Receiver: the Receiver predicts one distribution per attribute after every
    message prefix, and the Impatient loss aggregates these intermediate
    predictions. REINFORCE training with mean-baseline variance reduction, as in
    SenderReceiverRnnReinforce. When reg is True, the Lazy-Speaker length-cost
    scheduling is applied.
    """
    def __init__(self, sender, receiver, loss, sender_entropy_coeff, receiver_entropy_coeff, n_attributes, n_values, att_weights,
                 length_cost=0.0, unigram_penalty=0.0, reg=False):
        """
        :param sender: sender agent
        :param receiver: receiver agent
        :param loss: the optimized loss; accepts (sender_input, message,
          message_lengths, receiver_input, receiver_output, labels, n_attributes,
          n_values, att_weights) and returns (loss tensor, aux-info dict,
          per-position accuracy grid). The loss will be minimized during training.
        :param sender_entropy_coeff: entropy regularization coeff for sender
        :param receiver_entropy_coeff: entropy regularization coeff for receiver
        :param n_attributes: number of attributes describing each input
        :param n_values: number of possible values per attribute
        :param att_weights: per-attribute weights forwarded to the loss
        :param length_cost: the penalty applied to Sender for each symbol produced
        :param reg: apply the regularization scheduling (Lazy Speaker)
        """
        super(CompositionalitySenderImpatientReceiverRnnReinforce, self).__init__()
        self.sender = sender
        self.receiver = receiver
        self.sender_entropy_coeff = sender_entropy_coeff
        self.receiver_entropy_coeff = receiver_entropy_coeff
        self.loss = loss
        self.length_cost = length_cost
        self.unigram_penalty = unigram_penalty
        self.reg = reg
        self.n_attributes = n_attributes
        self.n_values = n_values
        self.att_weights = att_weights

        # Running-mean baselines for REINFORCE variance reduction.
        self.mean_baseline = defaultdict(float)
        self.n_points = defaultdict(float)

    def forward(self, sender_input, labels, receiver_input=None):
        # floor() strips any fractional part of the input before it reaches the
        # Sender — presumably the fractional digits encode auxiliary information
        # (e.g. a sampling flag); TODO confirm against the dataset/caller.
        message, log_prob_s, entropy_s = self.sender(torch.floor(sender_input))
        message_lengths = find_lengths(message)

        # Impatient Receiver over attributes: outputs carry both a position and an
        # attribute dimension.
        receiver_output_all_att, log_prob_r_all_att, entropy_r_all_att = self.receiver(message, receiver_input, message_lengths)

        sc = 0.

        # This loss variant takes into account the fact that an attribute may not
        # be sampled; crible_acc appears to be a per-example, per-position
        # accuracy grid (indexed [example, position] below).
        loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels, self.n_attributes, self.n_values, self.att_weights)

        if self.reg:
            # Accumulate accuracy measured at each message's eos position.
            for i in range(message_lengths.size(0)):
                sc += crible_acc[i, message_lengths[i]-1]

        # Average the receiver quantities over positions and attributes.
        log_prob_r = log_prob_r_all_att.mean(1).mean(1)
        entropy_r = entropy_r_all_att.mean(1).mean(1)

        # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after
        effective_entropy_s = torch.zeros_like(entropy_r)

        # the log prob of the choices made by S before and including the eos symbol - again, we don't
        # care about the rest
        effective_log_prob_s = torch.zeros_like(log_prob_r)

        # Accumulate only over positions up to (and including) each example's eos.
        for i in range(message.size(1)):
            not_eosed = (i < message_lengths).float()
            effective_entropy_s += entropy_s[:, i] * not_eosed
            effective_log_prob_s += log_prob_s[:, i] * not_eosed
        effective_entropy_s = effective_entropy_s / message_lengths.float()

        weighted_entropy = effective_entropy_s.mean() * self.sender_entropy_coeff + \
                entropy_r.mean() * self.receiver_entropy_coeff

        log_prob = effective_log_prob_s + log_prob_r

        if self.reg:
            # Threshold-based Lazy-Speaker schedule: no length pressure while
            # accuracy is between 0.9 and 0.99, then ramp the cost up by 0.01 per
            # step once accuracy exceeds 0.99.
            sc /= message_lengths.size(0)
            if sc > 0.9 and sc < 0.99:
                self.length_cost = 0.
            if sc > 0.99:
                self.length_cost += 0.01

        length_loss = message_lengths.float() * self.length_cost

        # REINFORCE policy-gradient terms, each variance-reduced with its baseline.
        policy_length_loss = ((length_loss.float() - self.mean_baseline['length']) * effective_log_prob_s).mean()
        policy_loss = ((loss.detach() - self.mean_baseline['loss']) * log_prob).mean()

        optimized_loss = policy_length_loss + policy_loss - weighted_entropy

        # if the receiver is deterministic/differentiable, we apply the actual loss
        optimized_loss += loss.mean()

        if self.training:
            self.update_baseline('loss', loss)
            self.update_baseline('length', length_loss)

        # Convert auxiliary info to plain Python numbers for logging.
        for k, v in rest.items():
            rest[k] = v.mean().item() if hasattr(v, 'mean') else v

        rest['loss'] = optimized_loss.detach().item()
        rest['sender_entropy'] = entropy_s.mean().item()
        rest['receiver_entropy'] = entropy_r.mean().item()
        rest['original_loss'] = loss.mean().item()
        rest['mean_length'] = message_lengths.float().mean().item()

        return optimized_loss, rest

    def update_baseline(self, name, value):
        # Incremental running mean of the detached sample, used as the baseline.
        self.n_points[name] += 1
        self.mean_baseline[name] += (value.detach().mean().item() - self.mean_baseline[name]) / self.n_points[name]
class TransformerReceiverDeterministic(nn.Module):
    """Deterministic Transformer-based Receiver wrapper.

    Encodes the incoming message with a TransformerEncoder, feeds the encoding
    to the wrapped agent, and returns zero log-probs and entropies so the agent
    plugs into REINFORCE-style games as a deterministic (differentiable) part.
    """
    def __init__(self, agent, vocab_size, max_len, embed_dim, num_heads, hidden_size, num_layers, positional_emb=True,
                 causal=True):
        super(TransformerReceiverDeterministic, self).__init__()
        self.agent = agent
        self.encoder = TransformerEncoder(vocab_size=vocab_size,
                                          max_len=max_len,
                                          embed_dim=embed_dim,
                                          num_heads=num_heads,
                                          num_layers=num_layers,
                                          hidden_size=hidden_size,
                                          positional_embedding=positional_emb,
                                          causal=causal)

    def forward(self, message, input=None, lengths=None):
        # Infer per-example message lengths when the caller does not supply them.
        if lengths is None:
            lengths = find_lengths(message)

        encoded = self.encoder(message, lengths)
        agent_output = self.agent(encoded, input)

        # Deterministic agent: log-prob and entropy are identically zero.
        zeros = torch.zeros(agent_output.size(0)).to(agent_output.device)
        return agent_output, zeros, zeros
class TransformerSenderReinforce(nn.Module):
    """Autoregressive Transformer Sender trained with REINFORCE.

    Unrolls the wrapped agent's "encoder" state into a message, sampling symbols
    during training and taking the argmax at evaluation time.
    """
    def __init__(self, agent, vocab_size, embed_dim, max_len, num_layers, num_heads, hidden_size,
                 generate_style='standard', causal=True, force_eos=True):
        """
        :param agent: the agent to be wrapped, returns the "encoder" state vector, which is the unrolled into a message
        :param vocab_size: vocab size of the message
        :param embed_dim: embedding dimensions
        :param max_len: maximal length of the message (including <eos>)
        :param num_layers: number of transformer layers
        :param num_heads: number of attention heads
        :param hidden_size: size of the FFN layers
        :param causal: whether embedding of a particular symbol should only depend on the symbols to the left
        :param generate_style: Two alternatives: 'standard' and 'in-place'. Suppose we are generating 4th symbol,
            after three symbols [s1 s2 s3] were generated.
            Then,
            'standard': [s1 s2 s3] -> embeddings [[e1] [e2] [e3]] -> (s4 = argmax(linear(e3)))
            'in-place': [s1 s2 s3] -> [s1 s2 s3 <need-symbol>] -> embeddings [[e1] [e2] [e3] [e4]] -> (s4 = argmax(linear(e4)))
        :param force_eos: <eos> added to the end of each sequence
        """
        super(TransformerSenderReinforce, self).__init__()
        self.agent = agent

        self.force_eos = force_eos

        assert generate_style in ['standard', 'in-place']
        self.generate_style = generate_style
        self.causal = causal

        self.max_len = max_len

        if force_eos:
            # Reserve the last position for the appended <eos> symbol.
            self.max_len -= 1

        self.transformer = TransformerDecoder(embed_dim=embed_dim,
                                              max_len=max_len, num_layers=num_layers,
                                              num_heads=num_heads, hidden_size=hidden_size)

        # Projects a decoder hidden state to a distribution over vocabulary symbols.
        self.embedding_to_vocab = nn.Linear(embed_dim, vocab_size)

        # Learned embedding of the "begin generation" pseudo-symbol.
        self.special_symbol_embedding = nn.Parameter(torch.zeros(embed_dim))
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size

        self.embed_tokens = torch.nn.Embedding(vocab_size, embed_dim)
        nn.init.normal_(self.embed_tokens.weight, mean=0, std=self.embed_dim ** -0.5)
        # Scale token embeddings by sqrt(d), as in the original Transformer.
        self.embed_scale = math.sqrt(embed_dim)

    def generate_standard(self, encoder_state):
        # 'standard' decoding: grow the embedded prefix one symbol at a time and
        # predict the next symbol from the embedding of the LAST generated symbol.
        batch_size = encoder_state.size(0)
        device = encoder_state.device

        sequence = []
        logits = []
        entropy = []

        special_symbol = self.special_symbol_embedding.expand(batch_size, -1).unsqueeze(1).to(device)
        input = special_symbol

        for step in range(self.max_len):
            if self.causal:
                # Upper-triangular -inf mask: position t only attends to positions <= t.
                attn_mask = torch.triu(torch.ones(step+1, step+1).byte(), diagonal=1).to(device)
                attn_mask = attn_mask.float().masked_fill(attn_mask == 1, float('-inf'))
            else:
                attn_mask = None

            output = self.transformer(embedded_input=input, encoder_out=encoder_state, attn_mask=attn_mask)
            step_logits = F.log_softmax(self.embedding_to_vocab(output[:, -1, :]), dim=1)

            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())

            # Sample while training (exploration); greedy argmax at eval time.
            if self.training:
                symbols = distr.sample()
            else:
                symbols = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            sequence.append(symbols)

            # Append the embedding of the chosen symbol to the decoder input.
            new_embedding = self.embed_tokens(symbols) * self.embed_scale
            input = torch.cat([input, new_embedding.unsqueeze(dim=1)], dim=1)

        return sequence, logits, entropy

    def generate_inplace(self, encoder_state):
        # 'in-place' decoding: append the <need-symbol> placeholder after the
        # generated prefix and predict the next symbol from the embedding at the
        # placeholder's own position.
        batch_size = encoder_state.size(0)
        device = encoder_state.device

        sequence = []
        logits = []
        entropy = []

        special_symbol = self.special_symbol_embedding.expand(batch_size, -1).unsqueeze(1).to(encoder_state.device)
        output = []
        for step in range(self.max_len):
            input = torch.cat(output + [special_symbol], dim=1)
            if self.causal:
                # Upper-triangular -inf mask: position t only attends to positions <= t.
                attn_mask = torch.triu(torch.ones(step+1, step+1).byte(), diagonal=1).to(device)
                attn_mask = attn_mask.float().masked_fill(attn_mask == 1, float('-inf'))
            else:
                attn_mask = None

            embedded = self.transformer(embedded_input=input, encoder_out=encoder_state, attn_mask=attn_mask)
            step_logits = F.log_softmax(self.embedding_to_vocab(embedded[:, -1, :]), dim=1)

            distr = Categorical(logits=step_logits)
            entropy.append(distr.entropy())

            # Sample while training (exploration); greedy argmax at eval time.
            if self.training:
                symbols = distr.sample()
            else:
                symbols = step_logits.argmax(dim=1)
            logits.append(distr.log_prob(symbols))
            sequence.append(symbols)

            new_embedding = self.embed_tokens(symbols) * self.embed_scale
            output.append(new_embedding.unsqueeze(dim=1))

        return sequence, logits, entropy

    def forward(self, x):
        encoder_state = self.agent(x)

        if self.generate_style == 'standard':
            sequence, logits, entropy = self.generate_standard(encoder_state)
        elif self.generate_style == 'in-place':
            sequence, logits, entropy = self.generate_inplace(encoder_state)
        else:
            assert False, 'Unknown generate style'

        # Stack the per-step lists into (batch, len) tensors.
        sequence = torch.stack(sequence).permute(1, 0)
        logits = torch.stack(logits).permute(1, 0)
        entropy = torch.stack(entropy).permute(1, 0)

        if self.force_eos:
            # Append a forced <eos> (symbol 0) with zero log-prob and entropy.
            zeros = torch.zeros((sequence.size(0), 1)).to(sequence.device)

            sequence = torch.cat([sequence, zeros.long()], dim=1)
            logits = torch.cat([logits, zeros], dim=1)
            entropy = torch.cat([entropy, zeros], dim=1)

        return sequence, logits, entropy
| en | 0.776204 | # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Reinforce Wrapper for an agent. Assumes that the during the forward, the wrapped agent returns log-probabilities over the potential outputs. During training, the wrapper transforms them into a tuple of (sample from the multinomial, log-prob of the sample, entropy for the multinomial). Eval-time the sample is replaced with argmax. >>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1)) >>> agent = ReinforceWrapper(agent) >>> sample, log_prob, entropy = agent(torch.ones(4, 10)) >>> sample.size() torch.Size([4]) >>> (log_prob < 0).all().item() 1 >>> (entropy > 0).all().item() 1 Simple wrapper that makes a deterministic agent (without sampling) compatible with Reinforce-based game, by adding zero log-probability and entropy values to the output. No sampling is run on top of the wrapped agent, it is passed as is. >>> agent = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1)) >>> agent = ReinforceDeterministicWrapper(agent) >>> sample, log_prob, entropy = agent(torch.ones(4, 10)) >>> sample.size() torch.Size([4, 3]) >>> (log_prob == 0).all().item() 1 >>> (entropy == 0).all().item() 1 A single-symbol Sender/Receiver game implemented with Reinforce. :param sender: Sender agent. On forward, returns a tuple of (message, log-prob of the message, entropy). :param receiver: Receiver agent. On forward, accepts a message and the dedicated receiver input. Returns a tuple of (output, log-probs, entropy). :param loss: The loss function that accepts: sender_input: input of Sender message: the is sent by Sender receiver_input: input of Receiver from the dataset receiver_output: output of Receiver labels: labels assigned to Sender's input data and outputs the end-to-end loss. 
Can be non-differentiable; if it is differentiable, this will be leveraged :param sender_entropy_coeff: The entropy regularization coefficient for Sender :param receiver_entropy_coeff: The entropy regularizatino coefficient for Receiver Reinforce Wrapper for Sender in variable-length message game. Assumes that during the forward, the wrapped agent returns the initial hidden state for a RNN cell. This cell is the unrolled by the wrapper. During training, the wrapper samples from the cell, getting the output message. Evaluation-time, the sampling is replaced by argmax. >>> agent = nn.Linear(10, 3) >>> agent = RnnSenderReinforce(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False) >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1) >>> message, logprob, entropy = agent(input) >>> message.size() torch.Size([16, 10]) >>> (entropy > 0).all().item() 1 >>> message.size() # batch size x max_len torch.Size([16, 10]) :param agent: the agent to be wrapped :param vocab_size: the communication vocabulary size :param embed_dim: the size of the embedding used to embed the output symbols :param hidden_size: the RNN cell's hidden state size :param max_len: maximal length of the output messages :param cell: type of the cell used (rnn, gru, lstm) :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS. # only used for LSTM Reinforce Wrapper for Sender in variable-length message game. Assumes that during the forward, the wrapped agent returns the initial hidden state for a RNN cell. This cell is the unrolled by the wrapper. During training, the wrapper samples from the cell, getting the output message. Evaluation-time, the sampling is replaced by argmax. 
>>> agent = nn.Linear(10, 3) >>> agent = RnnSenderReinforce(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False) >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1) >>> message, logprob, entropy = agent(input) >>> message.size() torch.Size([16, 10]) >>> (entropy > 0).all().item() 1 >>> message.size() # batch size x max_len torch.Size([16, 10]) :param agent: the agent to be wrapped :param vocab_size: the communication vocabulary size :param embed_dim: the size of the embedding used to embed the output symbols :param hidden_size: the RNN cell's hidden state size :param max_len: maximal length of the output messages :param cell: type of the cell used (rnn, gru, lstm) :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS. # only used for LSTM Reinforce Wrapper for Sender in variable-length message game. Assumes that during the forward, the wrapped agent returns the initial hidden state for a RNN cell. This cell is the unrolled by the wrapper. During training, the wrapper samples from the cell, getting the output message. Evaluation-time, the sampling is replaced by argmax. 
>>> agent = nn.Linear(10, 3) >>> agent = RnnSenderReinforce(agent, vocab_size=5, embed_dim=5, hidden_size=3, max_len=10, cell='lstm', force_eos=False) >>> input = torch.FloatTensor(16, 10).uniform_(-0.1, 0.1) >>> message, logprob, entropy = agent(input) >>> message.size() torch.Size([16, 10]) >>> (entropy > 0).all().item() 1 >>> message.size() # batch size x max_len torch.Size([16, 10]) :param agent: the agent to be wrapped :param vocab_size: the communication vocabulary size :param embed_dim: the size of the embedding used to embed the output symbols :param hidden_size: the RNN cell's hidden state size :param max_len: maximal length of the output messages :param cell: type of the cell used (rnn, gru, lstm) :param force_eos: if set to True, each message is extended by an EOS symbol. To ensure that no message goes beyond `max_len`, Sender only generates `max_len - 1` symbols from an RNN cell and appends EOS. # only used for LSTM Reinforce Wrapper for Receiver in variable-length message game. The wrapper logic feeds the message into the cell and calls the wrapped agent on the hidden state vector for the step that either corresponds to the EOS input to the input that reaches the maximal length of the sequence. This output is assumed to be the tuple of (output, logprob, entropy). Reinforce Wrapper for Receiver in variable-length message game with several attributes (for compositionality experiments). RnnReceiverCompositionality is equivalent to RnnReceiverReinforce but treated each attribute independently. This output is assumed to be the tuple of (output, logprob, entropy). #entropy=-torch.exp(logits)*logits Reinforce Wrapper for a deterministic Receiver in variable-length message game. The wrapper logic feeds the message into the cell and calls the wrapped agent with the hidden state that either corresponds to the end-of-sequence term or to the end of the sequence. 
The wrapper extends it with zero-valued log-prob and entropy tensors so that the agent becomes compatible with the SenderReceiverRnnReinforce game. As the wrapped agent does not sample, it has to be trained via regular back-propagation. This requires that both the the agent's output and loss function and the wrapped agent are differentiable. >>> class Agent(nn.Module): ... def __init__(self): ... super().__init__() ... self.fc = nn.Linear(5, 3) ... def forward(self, rnn_output, _input = None): ... return self.fc(rnn_output) >>> agent = RnnReceiverDeterministic(Agent(), vocab_size=10, embed_dim=10, hidden_size=5) >>> message = torch.zeros((16, 10)).long().random_(0, 10) # batch of 16, 10 symbol length >>> output, logits, entropy = agent(message) >>> (logits == 0).all().item() 1 >>> (entropy == 0).all().item() 1 >>> output.size() torch.Size([16, 3]) Reinforce Wrapper for a deterministic Receiver in variable-length message game. The wrapper logic feeds the message into the cell and calls the wrapped agent with the hidden state that either corresponds to the end-of-sequence term or to the end of the sequence. The wrapper extends it with zero-valued log-prob and entropy tensors so that the agent becomes compatible with the SenderReceiverRnnReinforce game. As the wrapped agent does not sample, it has to be trained via regular back-propagation. This requires that both the the agent's output and loss function and the wrapped agent are differentiable. >>> class Agent(nn.Module): ... def __init__(self): ... super().__init__() ... self.fc = nn.Linear(5, 3) ... def forward(self, rnn_output, _input = None): ... 
return self.fc(rnn_output) >>> agent = RnnReceiverDeterministic(Agent(), vocab_size=10, embed_dim=10, hidden_size=5) >>> message = torch.zeros((16, 10)).long().random_(0, 10) # batch of 16, 10 symbol length >>> output, logits, entropy = agent(message) >>> (logits == 0).all().item() 1 >>> (entropy == 0).all().item() 1 >>> output.size() torch.Size([16, 3]) Impatient Listener. The wrapper logic feeds the message into the cell and calls the wrapped agent. The wrapped agent has to returns the intermediate hidden states for every position. All the hidden states are mapped to a categorical distribution with a single Linear layer (hidden_to_ouput) followed by a softmax. Thess categorical probabilities (step_logits) will then be used to compute the Impatient loss function. # Sampling useless ? RnnReceiverImpatientCompositionality is an adaptation of RnnReceiverImpatientCompositionality for inputs with several attributes (compositionality experiments). Each attribute is treated independently. #logits = torch.stack(logits).permute(1, 0) #entropy = torch.stack(entropy).permute(1, 0) Impatient Listener. The wrapper logic feeds the message into the cell and calls the wrapped agent. The wrapped agent has to returns the intermediate hidden states for every position. All the hidden states are mapped to a categorical distribution with a single Linear layer (hidden_to_ouput) followed by a softmax. Thess categorical probabilities (step_logits) will then be used to compute the Impatient loss function. # Sampling useless ? 
AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent #entropy_lm.append(distr_lm.entropy()) AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent # New class agent AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent # Common to sender and receiver # Sender #nn.Linear(n_features, n_hidden) # Receiver #nn.Linear(n_hidden, n_features) #self.hidden_to_output = nn.Linear(hidden_size, vocab_size) #self.receiver_cells = nn.ModuleList([ # cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \ # cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.receiver_num_layers)]) # only used for LSTM # only used for LSTM #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1) # Here choose EOS #sequence=sequence[:,-1,:] #logits=logits[:,-1] #entropy=entropy[:,-1] #encoded=self.receiver_norm_h(encoded) # only used for LSTM AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent # Common to sender and receiver # Sender #nn.Linear(n_features, n_hidden) # Receiver #nn.Linear(n_hidden, n_features) #self.hidden_to_output = nn.Linear(hidden_size, vocab_size) #self.receiver_cells = nn.ModuleList([ # cell_type(input_size=embed_dim, hidden_size=hidden_size) if i == 0 else \ # cell_type(input_size=hidden_size, hidden_size=hidden_size) for i in range(self.receiver_num_layers)]) # only used for LSTM #x = distr.sample() AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent # Common to sender and receiver # Sender 
#nn.Linear(n_features, n_hidden) # Receiver #nn.Linear(n_hidden, n_features) # only used for LSTM # only used for LSTM #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1) # Here choose EOS #sequence=sequence[:,-1,:] #logits=logits[:,-1] #entropy=entropy[:,-1] # only used for LSTM AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent # Common to sender and receiver # Memory # Sender #nn.Linear(n_features, n_hidden) # Receiver #nn.Linear(n_hidden, n_features) # only used for LSTM # only used for LSTM #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1) # Here choose EOS #sequence=sequence[:,-1,:] #logits=logits[:,-1] #entropy=entropy[:,-1] # only used for LSTM AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent #nn.Linear(n_hidden, n_features) #nn.Linear(n_features, n_hidden) # only used for LSTM # only used for LSTM #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1) #distr = Categorical(logits=step_logits) #entropy.append(distr.entropy()) #x=step_logits.argmax(dim=1) # Here choose EOS #sequence=sequence[:,-1,:] #logits=logits[:,-1] #entropy=entropy[:,-1] # only used for LSTM AgentBaseline is composed of a couple of modalities: - sender - receiver In AgentBaseline, Sender and Receiver parts are independent #nn.Linear(n_hidden, n_features) # only used for LSTM #input = F.embedding(x,weight=self.FC_vocabulary.weight) # only used for LSTM #inputs = self.embedding(message) #step_logits = F.log_softmax(self.agent_receiver(h_t,None), dim=1) #distr = Categorical(logits=step_logits) #entropy.append(distr.entropy()) #x=step_logits.argmax(dim=1) # Here choose EOS #sequence=sequence[:,-1,:] #logits=logits[:,-1] #entropy=entropy[:,-1] # only used for LSTM #input = F.embedding(x,weight=self.FC_vocabulary.weight) DialogReinforce implements the Dialog game 
optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation #candidates_cross=receiver_output_cross.argmax(dim=1) #message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input) #loss_imitation, rest_imitation = self.loss_message_imitation(message,prob_reconstruction,message_lengths) # Average loss. Rk. Sortir loss_imitation de cette somme # Reward # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation #candidates_cross=receiver_output_cross.argmax(dim=1) #message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input) #loss_imitation, rest_imitation = self.loss_message_imitation(message,prob_reconstruction,message_lengths) # Average loss. Rk. 
Sortir loss_imitation de cette somme # Reward # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation # NO IMITATION # Average loss. Rk. Sortir loss_imitation de cette somme # Reward # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation # NO IMITATION # Average loss. Rk. 
Sortir loss_imitation de cette somme # Reward # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : N means "N->0" # Get sender_id and sender information # Get receiver information (receiver_id always 0) # Message sending # Self listening # Cross listening # Imitation # NO IMITATION # MEAN ACROSS AXIS # Average loss. Rk. Sortir loss_imitation de cette somme # Reward #for agent in agent_receivers: #reward_cross.append(1.*(restes_cross[agent]["acc"].sum(1)==self.n_attributes).detach()) #reward_cross=torch.stack(reward_cross) #reward_cross=reward_cross.mean(0) # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : N means "N->0" # Get sender_id and sender information # Message sending DialogReinforce implements the Dialog game 
optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation #candidates_cross=receiver_output_cross.argmax(dim=1) #message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input) # Average loss. Rk. Sortir loss_imitation de cette somme # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation #loss_imitation=loss_imitation-(sender_input*F.log_softmax(send_output,dim=1)).sum(1) #print(torch.exp((sender_input*F.log_softmax(send_output,dim=1)).sum(1))) # Average loss. Rk. 
Sortir loss_imitation de cette somme # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game optim_params={"length_cost":0., "sender_entropy_coeff_1":0., "receiver_entropy_coeff_1":0., "sender_entropy_coeff_2":0., "receiver_entropy_coeff_2":0.} loss_weights={"self":1., "cross":1., "imitation":1., "length_regularization":0., "entropy_regularization":1.} Inputs: - direction : "1->2" or "2->1" # Message sending # Cross listening # Self listening # Imitation #candidates_cross=receiver_output_cross.argmax(dim=1) #message_reconstruction, prob_reconstruction, _ = agent_receiver.imitate(sender_input) #loss_imitation, rest_imitation = self.loss_message_imitation(message,prob_reconstruction,message_lengths) #loss_imitation, rest_imitation = self.loss_message_imitation(message_to_imitate,prob_reconstruction,message_to_imitate_lengths) #_, rest_und_cross = self.loss_understanding(sender_input,send_output) #loss_imitation=loss_imitation*rest_und_cross["acc"] #loss_imitation=KL_div(whole_log_prob_s.reshape(whole_log_prob_s.size(0)*whole_log_prob_s.size(1)*whole_log_prob_s.size(2)),other_whole_log_prob_s.reshape(other_whole_log_prob_s.size(0)*other_whole_log_prob_s.size(1)*other_whole_log_prob_s.size(2))) # Average loss. Rk. 
Sortir loss_imitation de cette somme # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #+ entropy_r_12.mean() * self.receiver_entropy_coeff_1 #+ log_prob_r_12 #+ self.loss_weights["imitation"]*policy_loss_imitation #+self.loss_weights["imitation"]) # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game #message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1 # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1 # if the receiver is deterministic/differentiable, we apply the actual loss #message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2 # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we 
don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2 #message_lengths_22 = find_lengths(message_22) # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game # Take only the last => change to EOS position # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss # Average between task and imitation loss # Take only the last => change to EOS position # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game # Imitation loss weighted by likelihood of candidate #* prob_r_1.max(1).values # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss # Imitation loss weighted by likelihood of candidate #* prob_r_2.max(1).values # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game #message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, 
entropy_s_1 # Imitation loss weighted by likelihood of candidate #* prob_r_12.max(1).values #loss_12_imitation=loss_12_imitation.mean() # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1 # Imitation loss weighted by likelihood of candidate #* prob_r_11.max(1).values #loss_11_imitation=loss_11_imitation.mean() # if the receiver is deterministic/differentiable, we apply the actual loss #message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2 # Imitation loss weighted by likelihood of candidate #* prob_r_21.max(1).values #loss_21_imitation=loss_21_imitation.mean() # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2 #message_lengths_22 = find_lengths(message_22) # Imitation loss weighted by likelihood of candidate #* prob_r_22.max(1).values #loss_22_imitation=loss_22_imitation.mean() # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements the Dialog game #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1 # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss DialogReinforce implements 
the Dialog game #message_12, log_prob_s_12, entropy_s_12 = message_1, log_prob_s_1, entropy_s_1 #loss_12_imitation=loss_12_imitation.mean() # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_11, log_prob_s_11, entropy_s_11 = message_1, log_prob_s_1, entropy_s_1 #loss_11_imitation=loss_11_imitation.mean() # if the receiver is deterministic/differentiable, we apply the actual loss #message_21, log_prob_s_21, entropy_s_21 = message_2, log_prob_s_2, entropy_s_2 #loss_21_imitation=loss_21_imitation.mean() # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # if the receiver is deterministic/differentiable, we apply the actual loss #message_22, log_prob_s_22, entropy_s_22 = message_2, log_prob_s_2, entropy_s_2 #message_lengths_22 = find_lengths(message_22) #loss_22_imitation=loss_22_imitation.mean() # if the receiver is deterministic/differentiable, we apply the actual loss Implements Sender/Receiver game with training done via Reinforce. Both agents are supposed to return 3-tuples of (output, log-prob of the output, entropy). The game implementation is responsible for handling the end-of-sequence term, so that the optimized loss corresponds either to the position of the eos term (assumed to be 0) or the end of sequence. Sender and Receiver can be obtained by applying the corresponding wrappers. `SenderReceiverRnnReinforce` also applies the mean baseline to the loss function to reduce the variance of the gradient estimate. 
>>> sender = nn.Linear(3, 10) >>> sender = RnnSenderReinforce(sender, vocab_size=15, embed_dim=5, hidden_size=10, max_len=10, cell='lstm') >>> class Receiver(nn.Module): ... def __init__(self): ... super().__init__() ... self.fc = nn.Linear(5, 3) ... def forward(self, rnn_output, _input = None): ... return self.fc(rnn_output) >>> receiver = RnnReceiverDeterministic(Receiver(), vocab_size=15, embed_dim=10, hidden_size=5) >>> def loss(sender_input, _message, _receiver_input, receiver_output, _labels): ... return F.mse_loss(sender_input, receiver_output, reduction='none').mean(dim=1), {'aux': 5.0} >>> game = SenderReceiverRnnReinforce(sender, receiver, loss, sender_entropy_coeff=0.0, receiver_entropy_coeff=0.0, ... length_cost=1e-2) >>> input = torch.zeros((16, 3)).normal_() >>> optimized_loss, aux_info = game(input, labels=None) >>> sorted(list(aux_info.keys())) # returns some debug info, such as entropies of the agents, message length etc ['aux', 'loss', 'mean_length', 'original_loss', 'receiver_entropy', 'sender_entropy'] >>> aux_info['aux'] 5.0 :param sender: sender agent :param receiver: receiver agent :param loss: the optimized loss that accepts sender_input: input of Sender message: the is sent by Sender receiver_input: input of Receiver from the dataset receiver_output: output of Receiver labels: labels assigned to Sender's input data and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over all batches in the dataset. 
:param sender_entropy_coeff: entropy regularization coeff for sender :param receiver_entropy_coeff: entropy regularization coeff for receiver :param length_cost: the penalty applied to Sender for each symbol produced :param reg: apply the regularization scheduling (Lazy Speaker) # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # Pour n_features=100 #self.length_cost= sc**(45) / 10 #if sc>0.99: #self.length_cost=(sc-0.99)*100 +0.01 #else: #self.length_cost=0. #if sc>0.995: #self.length_cost+=0.01 #if self.length_cost==0.3: # self.length_cost-=0.01 #print(self.length_cost) #if sc<0.98: #self.length_cost=0. # if the receiver is deterministic/differentiable, we apply the actual loss Implements Sender/ Impatient Receiver game with training done via Reinforce. It is equivalent to SenderReceiverRnnReinforce but takes into account the intermediate predictions of Impatient Listener: - the Impatient loss is used - tensor shapes are adapted for variance reduction. When reg is set to True, the regularization scheduling is applied (Lazy Speaker). :param sender: sender agent :param receiver: receiver agent :param loss: the optimized loss that accepts sender_input: input of Sender message: the is sent by Sender receiver_input: input of Receiver from the dataset receiver_output: output of Receiver labels: labels assigned to Sender's input data and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over all batches in the dataset. 
:param sender_entropy_coeff: entropy regularization coeff for sender :param receiver_entropy_coeff: entropy regularization coeff for receiver :param length_cost: the penalty applied to Sender for each symbol produced :param reg: apply the regularization scheduling (Lazy Speaker) # If impatient 1 NOISE VERSION # Randomly takes a position rand_length=np.random.randint(0,message.size(1)) # Loss by output loss, rest = self.loss(sender_input, message, receiver_input, receiver_output[:,rand_length,:], labels) # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after effective_entropy_s = torch.zeros_like(entropy_r[:,rand_length]) # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest effective_log_prob_s = torch.zeros_like(log_prob_r[:,rand_length]) #Loss # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest # Regularization scheduling paper #self.length_cost= sc**(45) / 10 # Pour n_features=100 # if the receiver is deterministic/differentiable, we apply the actual loss Adaptation of SenderReceiverRnnReinforce to inputs with several attributes. :param sender: sender agent :param receiver: receiver agent :param loss: the optimized loss that accepts sender_input: input of Sender message: the is sent by Sender receiver_input: input of Receiver from the dataset receiver_output: output of Receiver labels: labels assigned to Sender's input data and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over all batches in the dataset. 
:param sender_entropy_coeff: entropy regularization coeff for sender :param receiver_entropy_coeff: entropy regularization coeff for receiver :param length_cost: the penalty applied to Sender for each symbol produced # Noisy channel # random symbols # Receiver normal #dim=[batch_size,n_att,n_val] # reg #if self.reg: # for i in range(message_lengths.size(0)): # sc+=crible_acc[i,message_lengths[i]-1] # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #if self.reg: # sc/=message_lengths.size(0) # if sc>0.98: # self.length_cost+=0.1 # else: # self.length_cost=0. #self.length_cost= sc**(60) / 2 # if the receiver is deterministic/differentiable, we apply the actual loss Implements Sender/Receiver game with training done via Reinforce. Both agents are supposed to return 3-tuples of (output, log-prob of the output, entropy). The game implementation is responsible for handling the end-of-sequence term, so that the optimized loss corresponds either to the position of the eos term (assumed to be 0) or the end of sequence. Sender and Receiver can be obtained by applying the corresponding wrappers. `SenderReceiverRnnReinforce` also applies the mean baseline to the loss function to reduce the variance of the gradient estimate. >>> sender = nn.Linear(3, 10) >>> sender = RnnSenderReinforce(sender, vocab_size=15, embed_dim=5, hidden_size=10, max_len=10, cell='lstm') >>> class Receiver(nn.Module): ... def __init__(self): ... super().__init__() ... self.fc = nn.Linear(5, 3) ... def forward(self, rnn_output, _input = None): ... return self.fc(rnn_output) >>> receiver = RnnReceiverDeterministic(Receiver(), vocab_size=15, embed_dim=10, hidden_size=5) >>> def loss(sender_input, _message, _receiver_input, receiver_output, _labels): ... 
return F.mse_loss(sender_input, receiver_output, reduction='none').mean(dim=1), {'aux': 5.0} >>> game = SenderReceiverRnnReinforce(sender, receiver, loss, sender_entropy_coeff=0.0, receiver_entropy_coeff=0.0, ... length_cost=1e-2) >>> input = torch.zeros((16, 3)).normal_() >>> optimized_loss, aux_info = game(input, labels=None) >>> sorted(list(aux_info.keys())) # returns some debug info, such as entropies of the agents, message length etc ['aux', 'loss', 'mean_length', 'original_loss', 'receiver_entropy', 'sender_entropy'] >>> aux_info['aux'] 5.0 :param sender: sender agent :param receiver: receiver agent :param loss: the optimized loss that accepts sender_input: input of Sender message: the is sent by Sender receiver_input: input of Receiver from the dataset receiver_output: output of Receiver labels: labels assigned to Sender's input data and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over all batches in the dataset. 
:param sender_entropy_coeff: entropy regularization coeff for sender :param receiver_entropy_coeff: entropy regularization coeff for receiver :param length_cost: the penalty applied to Sender for each symbol produced #print(sender_input[:,11:-1]) # If impatient 1 # reg # Version de base #loss, rest, crible_acc = self.loss(sender_input, message, message_lengths, receiver_input, receiver_output_all_att, labels,self.n_attributes,self.n_values,self.att_weights) # Take into account the fact that an attribute is not sampled # the entropy of the outputs of S before and including the eos symbol - as we don't care about what's after # the log prob of the choices made by S before and including the eos symbol - again, we don't # care about the rest #if sc<0.9: # self.length_cost=-0.1 #self.length_cost= sc**(60) / 2 # Penalty redundancy #counts_unigram=((message[:,1:]-message[:,:-1])==0).sum(axis=1).sum(axis=0) #unigram_loss = self.unigram_penalty*counts_unigram # if the receiver is deterministic/differentiable, we apply the actual loss :param agent: the agent to be wrapped, returns the "encoder" state vector, which is the unrolled into a message :param vocab_size: vocab size of the message :param embed_dim: embedding dimensions :param max_len: maximal length of the message (including <eos>) :param num_layers: number of transformer layers :param num_heads: number of attention heads :param hidden_size: size of the FFN layers :param causal: whether embedding of a particular symbol should only depend on the symbols to the left :param generate_style: Two alternatives: 'standard' and 'in-place'. Suppose we are generating 4th symbol, after three symbols [s1 s2 s3] were generated. Then, 'standard': [s1 s2 s3] -> embeddings [[e1] [e2] [e3]] -> (s4 = argmax(linear(e3))) 'in-place': [s1 s2 s3] -> [s1 s2 s3 <need-symbol>] -> embeddings [[e1] [e2] [e3] [e4]] -> (s4 = argmax(linear(e4))) :param force_eos: <eos> added to the end of each sequence | 2.391839 | 2 |
libs/models/networks/models.py | NinV/facial-landmark-detection | 1 | 6616877 | <reponame>NinV/facial-landmark-detection<gh_stars>1-10
import torch
import torch.nn as nn
from libs.models.networks.hourglass import HGLandmarkModel
from libs.models.networks.graph_connectivity_model import GCNLandmark
from libs.models.networks.HRNet import get_face_alignment_net as get_HR_model
class LandmarkModel(nn.Module):
    """Facial-landmark detector: heatmap backbone (hourglass or HRNet) plus a GCN head."""

    def __init__(self, hm_model_config, gcn_config, device="cuda", use_hrnet=False,
                 freeze_hm_model=False, hrnet_config='face_alignment_300w_hrnet_w18.yaml'):
        """
        :param hm_model_config: kwargs for ``HGLandmarkModel`` (ignored when
            ``use_hrnet`` is True)
        :param gcn_config: config passed to ``GCNLandmark``
        :param device: torch device string the sub-models are moved to
        :param use_hrnet: if True, build the HRNet backbone from ``hrnet_config``
            instead of the hourglass model
        :param freeze_hm_model: if True, the heatmap model is kept in eval mode
            and excluded from the autograd graph during ``forward``
        :param hrnet_config: HRNet YAML config path (only used when
            ``use_hrnet`` is True)
        """
        super(LandmarkModel, self).__init__()
        self.freeze_hm_model = freeze_hm_model
        self.device = device
        if use_hrnet:
            self.hm_model = get_HR_model(hrnet_config).to(self.device)
        else:
            self.hm_model = HGLandmarkModel(**hm_model_config, device=device).to(self.device)
        self.gcn_model = GCNLandmark(gcn_config).to(self.device)

    def forward(self, x):
        # Heatmap branch; optionally frozen (eval mode, no gradients).
        if self.freeze_hm_model:
            self.hm_model.eval()
            with torch.no_grad():
                hm = self.hm_model(x)
        else:
            hm = self.hm_model(x)
        kps_from_hm = self.hm_model.decode_heatmap(hm, confidence_threshold=0)  # (batch_size, num_classes, 3)
        batch_size, num_classes, h, w = hm.size()
        hm_size = torch.tensor([h, w])
        node_positions = kps_from_hm[:, :, :2]  # (batch_size, num_classes, 2)
        out = []
        for i in range(batch_size):
            # Pool a visual feature vector at each predicted landmark location.
            visual_features = []
            for loc in node_positions[i]:
                visual_features.append(self.hm_model.pooling_feature(i, loc))
            visual_features = torch.stack(visual_features, dim=0)
            xs = node_positions[i, :, 0]
            ys = node_positions[i, :, 1]
            classIds = torch.arange(num_classes)
            # Confidence of each landmark = heatmap value at its predicted (x, y).
            node_confidences = hm[i, classIds, ys, xs]
            # NOTE(review): positions are ordered (x, y) but hm_size is [h, w];
            # for non-square heatmaps x is divided by h and y by w — confirm intended.
            node_positions_normalized = (node_positions[i] / hm_size).to(self.device)
            out.append(self.gcn_model(node_positions_normalized, node_confidences, visual_features))
        return hm, torch.stack(out, dim=0)
| import torch
import torch.nn as nn
from libs.models.networks.hourglass import HGLandmarkModel
from libs.models.networks.graph_connectivity_model import GCNLandmark
from libs.models.networks.HRNet import get_face_alignment_net as get_HR_model
class LandmarkModel(nn.Module):
def __init__(self, hm_model_config, gcn_config, device="cuda", use_hrnet=False,
freeze_hm_model=False, hrnet_config='face_alignment_300w_hrnet_w18.yaml'):
"""
:param mode:
"fine_tune_graph": freeze heatmap model and train GCN model
"train_both": train both heatmap and GCN models
"inference": inference mode
"""
super(LandmarkModel, self).__init__()
self.freeze_hm_model = freeze_hm_model
self.device = device
if use_hrnet:
self.hm_model = get_HR_model(hrnet_config).to(self.device)
else:
self.hm_model = HGLandmarkModel(**hm_model_config, device=device).to(self.device)
self.gcn_model = GCNLandmark(gcn_config).to(self.device)
def forward(self, x):
if self.freeze_hm_model:
self.hm_model.eval()
with torch.no_grad():
hm = self.hm_model(x)
else:
hm = self.hm_model(x)
kps_from_hm = self.hm_model.decode_heatmap(hm, confidence_threshold=0) # (batch_size, num_classes, 3)
batch_size, num_classes, h, w = hm.size()
hm_size = torch.tensor([h, w])
node_positions = kps_from_hm[:, :, :2] # (batch_size, num_classes, 2)
out = []
for i in range(batch_size):
visual_features = []
for loc in node_positions[i]:
visual_features.append(self.hm_model.pooling_feature(i, loc))
visual_features = torch.stack(visual_features, dim=0)
xs = node_positions[i, :, 0]
ys = node_positions[i, :, 1]
classIds = torch.arange(num_classes)
node_confidences = hm[i, classIds, ys, xs]
node_positions_normalized = (node_positions[i] / hm_size).to(self.device)
out.append(self.gcn_model(node_positions_normalized, node_confidences, visual_features))
return hm, torch.stack(out, dim=0) | en | 0.801049 | :param mode: "fine_tune_graph": freeze heatmap model and train GCN model "train_both": train both heatmap and GCN models "inference": inference mode # (batch_size, num_classes, 3) # (batch_size, num_classes, 2) | 2.222512 | 2 |
migrations/versions/006_PseudoRandomID.py | LCBRU/identity | 0 | 6616878 | <filename>migrations/versions/006_PseudoRandomID.py
from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
NVARCHAR,
DateTime,
Boolean,
ForeignKey,
UniqueConstraint,
)
meta = MetaData()
def upgrade(migrate_engine):
    """Create the ``pseudo_random_id`` table.

    The table references ``user`` (audit column) and
    ``pseudo_random_id_provider``, and enforces uniqueness of
    (provider, ordinal), (provider, unique_code) and of ``full_code``.
    """
    meta.bind = migrate_engine
    # Reflect the existing tables so the foreign keys below can point at them.
    u = Table("user", meta, autoload=True)
    p = Table("pseudo_random_id_provider", meta, autoload=True)
    t = Table(
        "pseudo_random_id",
        meta,
        Column("id", Integer, primary_key=True),
        Column("pseudo_random_id_provider_id", Integer, ForeignKey(p.c.id), index=True, nullable=False),
        Column("ordinal", Integer),
        Column("unique_code", Integer),
        Column("check_character", NVARCHAR(1)),
        Column("full_code", NVARCHAR(20)),
        Column("last_updated_datetime", DateTime, nullable=False),
        Column("last_updated_by_user_id", Integer, ForeignKey(u.c.id), index=True, nullable=False),
        UniqueConstraint(
            'pseudo_random_id_provider_id',
            'ordinal',
            name='uix_pseudo_random_id_pseudo_random_id_provider_id_ordinal'
        ),
        UniqueConstraint(
            'pseudo_random_id_provider_id',
            'unique_code',
            name='uix_pseudo_random_id_pseudo_random_id_provider_id_unique_code'
        ),
        UniqueConstraint(
            'full_code',
            name='uix_pseudo_random_id_full_code'
        ),
    )
    t.create()
def downgrade(migrate_engine):
    """Drop the ``pseudo_random_id`` table created by :func:`upgrade`."""
    meta.bind = migrate_engine
    table = Table("pseudo_random_id", meta, autoload=True)
    table.drop()
| <filename>migrations/versions/006_PseudoRandomID.py
from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
NVARCHAR,
DateTime,
Boolean,
ForeignKey,
UniqueConstraint,
)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
u = Table("user", meta, autoload=True)
p = Table("pseudo_random_id_provider", meta, autoload=True)
t = Table(
"pseudo_random_id",
meta,
Column("id", Integer, primary_key=True),
Column("pseudo_random_id_provider_id", Integer, ForeignKey(p.c.id), index=True, nullable=False),
Column("ordinal", Integer),
Column("unique_code", Integer),
Column("check_character", NVARCHAR(1)),
Column("full_code", NVARCHAR(20)),
Column("last_updated_datetime", DateTime, nullable=False),
Column("last_updated_by_user_id", Integer, ForeignKey(u.c.id), index=True, nullable=False),
UniqueConstraint(
'pseudo_random_id_provider_id',
'ordinal',
name='uix_pseudo_random_id_pseudo_random_id_provider_id_ordinal'
),
UniqueConstraint(
'pseudo_random_id_provider_id',
'unique_code',
name='uix_pseudo_random_id_pseudo_random_id_provider_id_unique_code'
),
UniqueConstraint(
'full_code',
name='uix_pseudo_random_id_full_code'
),
)
t.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("pseudo_random_id", meta, autoload=True)
t.drop()
| none | 1 | 2.385385 | 2 | |
deep-learning-from-scratch/lib/fpga.py | tom01h/PYNQ | 2 | 6616879 | <reponame>tom01h/PYNQ<gh_stars>1-10
import numpy as np
import top as top
import struct
def float_to_int(f):
    """Reinterpret the bits of *f* (as a big-endian 32-bit float) as an unsigned int."""
    packed = struct.pack('>f', f)
    (bits,) = struct.unpack('>I', packed)
    return bits
def int_to_float(i):
    """Reinterpret the unsigned 32-bit integer *i* as a big-endian float."""
    packed = struct.pack('>I', i)
    (value,) = struct.unpack('>f', packed)
    return value
class _Fpga(object):
    """Driver that streams float32 data to/from a simulated FPGA via ``top``.

    Floats travel as raw 32-bit words packed side by side on a bus of
    ``top.bus_size()`` words per transfer.
    """

    def __init__(self, bit_file):
        # ``bit_file`` is currently unused; kept for interface compatibility.
        # Fixed: was a dict, but _evaluate() pops floats from the front of a queue.
        self._send_data = []
        self._bus_size = top.bus_size()

    def _evaluate(self):
        """Advance the simulation one step, pushing one bus word if data is pending."""
        if len(self._send_data) > 0:
            send_data = 0
            # Pack bus_size floats (as raw 32-bit patterns) into one bus word.
            for i in range(self._bus_size):
                send_data += float_to_int(self._send_data.pop(0)) << (32 * i)
            # Reinterpret as a signed 64-bit value for top.send().
            if send_data >= 1 << 63:
                send_data -= 1 << 64
            top.send(send_data)
        self._recv_list = {}
        top.evaluate()

    def alloc(self, shape, dtype):
        """No-op: the simulated device needs no explicit allocation."""
        pass

    def write(self, address, value):
        """Write ``value`` to the device register at ``address``."""
        top.write(address, value)

    def send(self, data):
        """Queue the flattened contents of array ``data`` for transmission."""
        top.send_start()
        self._send_data = data.flatten().tolist()

    def send_wait(self):
        """Step the simulation until the send queue drains, then finish the transfer."""
        while len(self._send_data) > 0:
            self._evaluate()
        top.send_fin()
        self._evaluate()

    def recv(self, data):
        """Begin receiving into array ``data`` (filled later by :meth:`recv_wait`)."""
        top.recv_start()
        self._recv_data = data
        self._recv_size = len(data.flatten().tolist())
        self._recv_list = {}

    def recv_wait(self):
        """Step the simulation until ``self._recv_data`` is completely filled."""
        data_flat = self._recv_data.ravel()
        n = len(data_flat)
        i = 0
        while i < n:
            recv_data = top.recv()
            if recv_data is not None:  # fixed: was ``not recv_data == None``
                for _ in range(self._bus_size):
                    # Guard: the buffer length may not be a multiple of the
                    # bus width; without this the original could index past
                    # the end of data_flat.
                    if i >= n:
                        break
                    data_flat[i] = int_to_float(recv_data % (1 << 32))
                    recv_data >>= 32
                    i += 1
            self._evaluate()
        top.recv_fin()
        self._evaluate()

    def fin(self):
        """Shut down the simulated device."""
        top.fin()
| import numpy as np
import top as top
import struct
def float_to_int(f):
return struct.unpack('>I', struct.pack('>f', f))[0]
def int_to_float(i):
return struct.unpack('>f', struct.pack('>I', i))[0]
class _Fpga(object):
def __init__(self, bit_file):
self._send_data = {}
self._bus_size = top.bus_size()
def _evaluate(self):
if len(self._send_data) > 0:
send_data = 0
for i in range(self._bus_size):
send_data += float_to_int(self._send_data.pop(0))<<(32*i)
if send_data >= 1<<63:
send_data -= 1<<64
top.send(send_data)
self._recv_list = {}
top.evaluate()
def alloc(self, shape, dtype):
pass
def write(self, address, value):
top.write(address, value)
def send(self, data):
top.send_start()
self._send_data = data.flatten().tolist()
def send_wait(self):
while len(self._send_data) > 0:
self._evaluate()
top.send_fin()
self._evaluate()
def recv(self, data):
top.recv_start()
self._recv_data = data
self._recv_size = len(data.flatten().tolist())
self._recv_list = {}
def recv_wait(self):
data_flat = self._recv_data.ravel()
i = 0
while i < len(data_flat):
recv_data = top.recv()
if not recv_data == None:
for j in range(self._bus_size):
data_flat[i] = int_to_float(recv_data%(1<<32))
recv_data >>= 32
i += 1
self._evaluate()
top.recv_fin()
self._evaluate()
def fin(self):
top.fin() | none | 1 | 2.896389 | 3 | |
serializejson/plugins/serializejson_numpy.py | SmartAudioTools/serializejson | 0 | 6616880 | <reponame>SmartAudioTools/serializejson
try:
import numpy
from numpy import frombuffer, unpackbits, uint8, ndarray, int32, int64
from numpy import dtype as numpy_dtype
except ModuleNotFoundError:
pass
else:
import blosc
from pybase64 import b64encode_as_string, b64decode_as_bytearray
import sys
try:
# from SmartFramework import numpyB64
from SmartFramework.serialize.tools import serializejson_, constructors, blosc_compressions, authorized_classes
from SmartFramework.serialize import serialize_parameters
except:
from serializejson import serialize_parameters
from serializejson.tools import serializejson_, constructors, blosc_compressions, authorized_classes
nb_bits = sys.maxsize.bit_length() + 1
authorized_classes.update(
{
"numpy.bool_",
"numpy.int8",
"numpy.int16",
"numpy.int32",
"numpy.int64",
"numpy.uint8",
"numpy.uint16",
"numpy.uint32",
"numpy.uint64",
"numpy.float16",
"numpy.float32",
"numpy.float64",
"numpy.dtype",
"numpy.ndarray",
"numpy.array",
"numpy.frombuffer",
"numpyB64",
"numpy.core.multiarray._reconstruct",
"numpy.core.multiarray.scalar",
}
)
    def numpyB64(str64, dtype=None, shape_len_compression=None, compression=None):
        """Rebuild a numpy array from a base64 string produced by serializejson.

        :param str64: base64-encoded raw buffer (optionally blosc-compressed)
        :param dtype: numpy dtype name, ``bool``, or a list of (name, type)
            pairs for structured arrays
        :param shape_len_compression: overloaded slot — either the
            shape/length of the array, or (when a str) the compression name
        :param compression: compression name; only "blosc" is supported
        """
        decoded_bytearray = b64decode_as_bytearray(str64, validate=True)
        # Third positional argument is overloaded: a str means "compression".
        if isinstance(shape_len_compression, str):
            compression = shape_len_compression
            shape_len = None
        else:
            shape_len = shape_len_compression
        if compression:
            if compression == "blosc":
                decoded_bytearray = blosc.decompress(decoded_bytearray, as_bytearray=True)
            else:
                raise Exception(f"unknow {compression} compression")
        if dtype in ("bool", bool):
            # Booleans were bit-packed on serialization; unpack 8 bools per byte.
            numpy_uint8_containing_8bits = frombuffer(decoded_bytearray, uint8)  # no copy -> read only
            numpy_uint8_containing_8bits = unpackbits(
                numpy_uint8_containing_8bits
            )  # copies into a mutable uint8 numpy array
            if shape_len is None:
                shape_len = len(numpy_uint8_containing_8bits)
            return ndarray(shape_len, dtype, numpy_uint8_containing_8bits)  # no copy
        else:
            if isinstance(dtype, list):
                # Structured dtype: field names may arrive as non-str; normalize.
                dtype = [(str(champName), champType) for champName, champType in dtype]
            if shape_len is None:
                array = frombuffer(decoded_bytearray, dtype)  # no copy
            else:
                array = ndarray(shape_len, dtype, decoded_bytearray)  # no copy
            if (
                nb_bits == 32 and serialize_parameters.numpyB64_convert_int64_to_int32_and_align_in_Python_32Bit
            ):  # allows deserializing classifiers under 32-bit Python
                if array.dtype in (int64, "int64"):
                    return array.astype(int32)
                elif isinstance(dtype, list):
                    # Rebuild the structured dtype with int64 fields downgraded
                    # to aligned int32, copying field by field.
                    newTypes = []
                    for champ in dtype:
                        champName, champType = champ
                        if champName:
                            champType = numpy_dtype(champType)
                            if champType in (int64, "int64"):
                                newTypes.append((champName, int32))
                            else:
                                newTypes.append((champName, champType))
                    newDtype = numpy_dtype(newTypes, align=True)
                    newN = ndarray(len(array), newDtype)
                    for champName, champType in newTypes:
                        if champName:
                            newN[champName][:] = array[champName]
                    return newN
            return array
constructors["numpyB64"] = numpyB64
    def serializejson_ndarray(inst):
        """Reduce a numpy array to a (constructor, args, state) triple for JSON.

        Small plain-dtype arrays (below ``numpy_array_readable_max_size``) are
        emitted as readable nested lists via ``numpy.array``; larger ones and
        structured arrays as base64 (optionally blosc-compressed) buffers via
        ``numpyB64``, or as raw bytes via ``numpy.ndarray`` when
        ``numpy_array_use_numpyB64`` is off.
        """
        # inst = numpy.ascontiguousarray(inst)
        dtype = inst.dtype
        # compression = serialize_parameters.bytes_compression
        if dtype.fields is None:
            dtype_str = str(dtype)
            # max_size may be a global threshold or a per-dtype dict of thresholds.
            max_size = serialize_parameters.numpy_array_readable_max_size
            if isinstance(max_size, dict):
                if dtype_str in max_size:
                    max_size = max_size[dtype_str]
                else:
                    max_size = 0
            if max_size is None or inst.size <= max_size:
                return (
                    "numpy.array",
                    (inst.tolist(), dtype_str),
                    None,
                )  # TO REVIEW: not great since numpy.array will later re-check that all elements share one type....
        else:
            # Structured arrays: describe the dtype field by field.
            dtype_str = dtype.descr
        # return ("numpy.array", (RawJSON(numpy.array2string(inst,separator =',')), dtype_str), None) is slower.
        if serialize_parameters.numpy_array_use_numpyB64:
            if dtype == bool:
                # Bit-pack booleans (8 per byte); the length is then required
                # to recover the exact element count on load.
                data = numpy.packbits(inst.astype(numpy.uint8))
                if inst.ndim == 1:
                    len_or_shape = len(inst)
                else:
                    len_or_shape = list(inst.shape)
            else:
                data = inst
                if inst.ndim == 1:
                    len_or_shape = None
                else:
                    len_or_shape = list(inst.shape)
            compression = serialize_parameters.bytes_compression
            if compression and data.nbytes >= serialize_parameters.bytes_size_compression_threshold:
                blosc_compression = blosc_compressions.get(compression, None)
                if blosc_compression:
                    compressed = blosc.compress(
                        numpy.ascontiguousarray(data),
                        data.itemsize,
                        cname=blosc_compression,
                        clevel=serialize_parameters.bytes_compression_level,
                    )
                    compression = "blosc"
                else:
                    raise Exception(f"{compression} compression unknow")
                # Only keep the compressed form if it actually saves space.
                if len(compressed) < data.nbytes:
                    if len_or_shape is None:
                        return (
                            "numpyB64",
                            (b64encode_as_string(compressed), dtype_str, compression),
                            None,
                        )
                    else:
                        return (
                            "numpyB64",
                            (b64encode_as_string(compressed), dtype_str, len_or_shape, compression),
                            None,
                        )
            if len_or_shape is None:
                return (
                    "numpyB64",
                    (b64encode_as_string(numpy.ascontiguousarray(data)), dtype_str),
                    None,
                )
            else:
                return (
                    "numpyB64",
                    (b64encode_as_string(numpy.ascontiguousarray(data)), dtype_str, len_or_shape),
                    None,
                )
        else:
            # if False :#inst.ndim == 1:
            #     return (numpy.frombuffer, (bytearray(inst), dtype_str), None)
            # else:
            return (
                "numpy.ndarray",
                (list(inst.shape), dtype_str, bytearray(inst)),
                None,
            )
serializejson_[numpy.ndarray] = serializejson_ndarray
def serializejson_dtype(inst):
initArgs = (str(inst),)
return (inst.__class__, initArgs, None)
serializejson_[numpy.dtype] = serializejson_dtype
def serializejson_bool_(inst):
return (inst.__class__, (bool(inst),), None)
serializejson_[numpy.bool_] = serializejson_bool_
def serializejson_int(inst):
return (inst.__class__, (int(inst),), None)
serializejson_[numpy.int8] = serializejson_int
serializejson_[numpy.int16] = serializejson_int
serializejson_[numpy.int32] = serializejson_int
serializejson_[numpy.int64] = serializejson_int
serializejson_[numpy.uint8] = serializejson_int
serializejson_[numpy.uint16] = serializejson_int
serializejson_[numpy.uint32] = serializejson_int
serializejson_[numpy.uint64] = serializejson_int
def serializejson_float(inst):
return (inst.__class__, (float(inst),), None)
serializejson_[numpy.float16] = serializejson_float
serializejson_[numpy.float32] = serializejson_float
serializejson_[numpy.float64] = serializejson_float
| try:
import numpy
from numpy import frombuffer, unpackbits, uint8, ndarray, int32, int64
from numpy import dtype as numpy_dtype
except ModuleNotFoundError:
pass
else:
import blosc
from pybase64 import b64encode_as_string, b64decode_as_bytearray
import sys
try:
# from SmartFramework import numpyB64
from SmartFramework.serialize.tools import serializejson_, constructors, blosc_compressions, authorized_classes
from SmartFramework.serialize import serialize_parameters
except:
from serializejson import serialize_parameters
from serializejson.tools import serializejson_, constructors, blosc_compressions, authorized_classes
nb_bits = sys.maxsize.bit_length() + 1
authorized_classes.update(
{
"numpy.bool_",
"numpy.int8",
"numpy.int16",
"numpy.int32",
"numpy.int64",
"numpy.uint8",
"numpy.uint16",
"numpy.uint32",
"numpy.uint64",
"numpy.float16",
"numpy.float32",
"numpy.float64",
"numpy.dtype",
"numpy.ndarray",
"numpy.array",
"numpy.frombuffer",
"numpyB64",
"numpy.core.multiarray._reconstruct",
"numpy.core.multiarray.scalar",
}
)
def numpyB64(str64, dtype=None, shape_len_compression=None, compression=None):
decoded_bytearray = b64decode_as_bytearray(str64, validate=True)
if isinstance(shape_len_compression, str):
compression = shape_len_compression
shape_len = None
else:
shape_len = shape_len_compression
if compression:
if compression == "blosc":
decoded_bytearray = blosc.decompress(decoded_bytearray, as_bytearray=True)
else:
raise Exception(f"unknow {compression} compression")
if dtype in ("bool", bool):
numpy_uint8_containing_8bits = frombuffer(decoded_bytearray, uint8) # pas de copie -> read only
numpy_uint8_containing_8bits = unpackbits(
numpy_uint8_containing_8bits
) # copie dans un numpy array de uint8 mutable
if shape_len is None:
shape_len = len(numpy_uint8_containing_8bits)
return ndarray(shape_len, dtype, numpy_uint8_containing_8bits) # pas de recopie
else:
if isinstance(dtype, list):
dtype = [(str(champName), champType) for champName, champType in dtype]
if shape_len is None:
array = frombuffer(decoded_bytearray, dtype) # pas de recopie
else:
array = ndarray(shape_len, dtype, decoded_bytearray) # pas de recopie
if (
nb_bits == 32 and serialize_parameters.numpyB64_convert_int64_to_int32_and_align_in_Python_32Bit
): # pour pouvoir deserialiser les classifiers en python 32 bit ?
if array.dtype in (int64, "int64"):
return array.astype(int32)
elif isinstance(dtype, list):
newTypes = []
for champ in dtype:
champName, champType = champ
if champName:
champType = numpy_dtype(champType)
if champType in (int64, "int64"):
newTypes.append((champName, int32))
else:
newTypes.append((champName, champType))
newDtype = numpy_dtype(newTypes, align=True)
newN = ndarray(len(array), newDtype)
for champName, champType in newTypes:
if champName:
newN[champName][:] = array[champName]
return newN
return array
constructors["numpyB64"] = numpyB64
def serializejson_ndarray(inst):
# inst = numpy.ascontiguousarray(inst)
dtype = inst.dtype
# compression = serialize_parameters.bytes_compression
if dtype.fields is None:
dtype_str = str(dtype)
max_size = serialize_parameters.numpy_array_readable_max_size
if isinstance(max_size, dict):
if dtype_str in max_size:
max_size = max_size[dtype_str]
else:
max_size = 0
if max_size is None or inst.size <= max_size:
return (
"numpy.array",
(inst.tolist(), dtype_str),
None,
) # A REVOIR : pass genial car va tester ultérieurement si tous les elements sont du même type....
else:
dtype_str = dtype.descr
# return ("numpy.array", (RawJSON(numpy.array2string(inst,separator =',')), dtype_str), None) plus lent.
if serialize_parameters.numpy_array_use_numpyB64:
if dtype == bool:
data = numpy.packbits(inst.astype(numpy.uint8))
if inst.ndim == 1:
len_or_shape = len(inst)
else:
len_or_shape = list(inst.shape)
else:
data = inst
if inst.ndim == 1:
len_or_shape = None
else:
len_or_shape = list(inst.shape)
compression = serialize_parameters.bytes_compression
if compression and data.nbytes >= serialize_parameters.bytes_size_compression_threshold:
blosc_compression = blosc_compressions.get(compression, None)
if blosc_compression:
compressed = blosc.compress(
numpy.ascontiguousarray(data),
data.itemsize,
cname=blosc_compression,
clevel=serialize_parameters.bytes_compression_level,
)
compression = "blosc"
else:
raise Exception(f"{compression} compression unknow")
if len(compressed) < data.nbytes:
if len_or_shape is None:
return (
"numpyB64",
(b64encode_as_string(compressed), dtype_str, compression),
None,
)
else:
return (
"numpyB64",
(b64encode_as_string(compressed), dtype_str, len_or_shape, compression),
None,
)
if len_or_shape is None:
return (
"numpyB64",
(b64encode_as_string(numpy.ascontiguousarray(data)), dtype_str),
None,
)
else:
return (
"numpyB64",
(b64encode_as_string(numpy.ascontiguousarray(data)), dtype_str, len_or_shape),
None,
)
else:
# if False :#inst.ndim == 1:
# return (numpy.frombuffer, (bytearray(inst), dtype_str), None)
# else:
return (
"numpy.ndarray",
(list(inst.shape), dtype_str, bytearray(inst)),
None,
)
serializejson_[numpy.ndarray] = serializejson_ndarray
def serializejson_dtype(inst):
initArgs = (str(inst),)
return (inst.__class__, initArgs, None)
serializejson_[numpy.dtype] = serializejson_dtype
def serializejson_bool_(inst):
return (inst.__class__, (bool(inst),), None)
serializejson_[numpy.bool_] = serializejson_bool_
def serializejson_int(inst):
return (inst.__class__, (int(inst),), None)
serializejson_[numpy.int8] = serializejson_int
serializejson_[numpy.int16] = serializejson_int
serializejson_[numpy.int32] = serializejson_int
serializejson_[numpy.int64] = serializejson_int
serializejson_[numpy.uint8] = serializejson_int
serializejson_[numpy.uint16] = serializejson_int
serializejson_[numpy.uint32] = serializejson_int
serializejson_[numpy.uint64] = serializejson_int
def serializejson_float(inst):
return (inst.__class__, (float(inst),), None)
serializejson_[numpy.float16] = serializejson_float
serializejson_[numpy.float32] = serializejson_float
serializejson_[numpy.float64] = serializejson_float | fr | 0.541526 | # from SmartFramework import numpyB64 # pas de copie -> read only # copie dans un numpy array de uint8 mutable # pas de recopie # pas de recopie # pas de recopie # pour pouvoir deserialiser les classifiers en python 32 bit ? # inst = numpy.ascontiguousarray(inst) # compression = serialize_parameters.bytes_compression # A REVOIR : pass genial car va tester ultérieurement si tous les elements sont du même type.... # return ("numpy.array", (RawJSON(numpy.array2string(inst,separator =',')), dtype_str), None) plus lent. # if False :#inst.ndim == 1: # return (numpy.frombuffer, (bytearray(inst), dtype_str), None) # else: | 2.242423 | 2 |
one/items.py | amosannn/OneSpider | 4 | 6616881 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SpiderItem(scrapy.Item):
    """Unused default item template generated by ``scrapy startproject``."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class OneQuoteItem(scrapy.Item):
    """A daily quote entry (issue image plus its sentence)."""
    # issue number
    vol = scrapy.Field()
    # image URL
    imageUrl = scrapy.Field()
    # image category
    imageCategory = scrapy.Field()
    # quote text
    quote = scrapy.Field()
    # publication date
    publishedDate = scrapy.Field()
class OneArticleItem(scrapy.Item):
    """An article entry scraped from its page."""
    # URL suffix identifying the page
    pageId = scrapy.Field()
    # source URL
    url = scrapy.Field()
    # title
    title = scrapy.Field()
    # author
    author = scrapy.Field()
    # editor
    editor = scrapy.Field()
    # summary / abstract
    description = scrapy.Field()
    # body text
    article = scrapy.Field()
class OneQuestionItem(scrapy.Item):
    """A question-and-answer entry scraped from its page."""
    # URL suffix identifying the page
    pageId = scrapy.Field()
    # source URL
    url = scrapy.Field()
    # question title
    question = scrapy.Field()
    # question body
    questionContent = scrapy.Field()
    # answer title
    answer = scrapy.Field()
    # answer body
    answerContent = scrapy.Field()
| # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SpiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class OneQuoteItem(scrapy.Item):
# 期号
vol = scrapy.Field()
# 图片
imageUrl = scrapy.Field()
# 图片类型
imageCategory = scrapy.Field()
# 句子
quote = scrapy.Field()
# 日期
publishedDate = scrapy.Field()
class OneArticleItem(scrapy.Item):
# 网址后缀
pageId = scrapy.Field()
# 来源url
url = scrapy.Field()
# 标题
title = scrapy.Field()
# 作者
author = scrapy.Field()
# 编辑
editor = scrapy.Field()
# 摘要
description = scrapy.Field()
# 正文
article = scrapy.Field()
class OneQuestionItem(scrapy.Item):
#网址后缀
pageId= scrapy.Field()
#url
url=scrapy.Field()
#问题
question=scrapy.Field()
#问题内容
questionContent=scrapy.Field()
#回答
answer=scrapy.Field()
#回答内容
answerContent=scrapy.Field()
| zh | 0.548446 | # -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html # define the fields for your item here like: # name = scrapy.Field() # 期号 # 图片 # 图片类型 # 句子 # 日期 # 网址后缀 # 来源url # 标题 # 作者 # 编辑 # 摘要 # 正文 #网址后缀 #url #问题 #问题内容 #回答 #回答内容 | 2.53093 | 3 |
measures/coverage.py | markanewman/corpustools | 0 | 6616882 | import pathlib
from ..utils.csvfile import read_dictionary, write_dictionary
from ..utils.tarfile import file_in_corpus, read_lines_from_tar_file
from statistics import mean
def coverage(corpus, tokens, tokenizer = None):
    """
    Calculates per-document Zipf's-law coverage: the fraction of tokens in
    each corpus document that belong to a given token list.

    Parameters
    ----------
    corpus : str
        The tarball containing the corpus
    tokens : str
        The file containing the list of tokens to get a % coverage on
    tokenizer : function
        Optional: function to take in a line (str) and output a list of tokens (str[])

    Returns
    -------
    (pathlib.Path, float)
        Path of the written per-document measures CSV and the mean coverage.

    Example
    ---------
    import corpustools.measure as ctm; ctm.coverage('d:/working/corpus.tar', 'd:/working/tokens.csv')
    """
    corpus = pathlib.Path(corpus)
    measures_file = corpus.parent.joinpath('./coverage.csv')
    # Re-running should overwrite, not append to, a previous result.
    if measures_file.exists():
        measures_file.unlink()
    # Fixed: identity-style ``== None`` comparison replaced with ``is None``.
    if tokenizer is None:
        # Default: whitespace tokenization, upper-cased for case-insensitive matching.
        tokenizer = lambda line: [token.upper() for token in line.strip().split() if len(token) > 0]
    tokens = set(read_dictionary(tokens).keys())
    measures = _measures(corpus, tokens, tokenizer)
    _write_measures(measures_file, measures)
    return (measures_file, mean(measures.values()))
def _measures(corpus, tokens, tokenizer):
    """Compute coverage (fraction of a document's tokens found in *tokens*).

    Returns a dict mapping each tar member name to its coverage rounded to
    8 decimal places (0.0 for a document with no tokens).
    """
    print('Measuring Coverage...')
    measures = {}
    for (tar_info, tar_file) in file_in_corpus(corpus):
        total_tokens = 0
        total_coverage = 0
        for line in read_lines_from_tar_file(tar_file):
            line_tokens = tokenizer(line)
            total_tokens += len(line_tokens)
            # Count this line's tokens that appear in the reference set.
            total_coverage += sum(1 for token in line_tokens if token in tokens)
        # Guard against empty documents (previously a ZeroDivisionError).
        if total_tokens:
            measures[tar_info.name] = round(total_coverage / total_tokens, 8)
        else:
            measures[tar_info.name] = 0.0
    return measures
def _write_measures(file_name, measures):
    """Persist the per-document coverage dict to *file_name* as CSV."""
    print('Writing Measures...')
    write_dictionary(file_name, measures)
| import pathlib
from ..utils.csvfile import read_dictionary, write_dictionary
from ..utils.tarfile import file_in_corpus, read_lines_from_tar_file
from statistics import mean
def coverage(corpus, tokens, tokenizer = None):
"""
Calculates the Zif's law coverage of a given set of tokens on the corpus an a document by document basis
Parameters
----------
corpus : str
The tarball containing the corpus
tokens: str
The file containing the list of tokens to get a % coverage on
tokenizer: function
Optional: function to take in a line (str) and output a list of tokens (str[])
Example
---------
import corpustools.measure as ctm; ctm.coverage('d:/working/corpus.tar', 'd:/working/tokens.csv')
"""
corpus = pathlib.Path(corpus)
measures_file = corpus.parent.joinpath('./coverage.csv')
if measures_file.exists():
measures_file.unlink()
if tokenizer == None:
tokenizer = lambda line: [token.upper() for token in line.strip().split() if len(token) > 0]
tokens = set(read_dictionary(tokens).keys())
measures = _measures(corpus, tokens, tokenizer)
_write_measures(measures_file, measures)
return (measures_file, mean(measures.values()))
def _measures(corpus, tokens, tokenizer):
print('Measuring Coverage...')
measures = {}
for (tar_info, tar_file) in file_in_corpus(corpus):
total_tokens = 0
total_coverage = 0
for line in read_lines_from_tar_file(tar_file):
line_tokens = tokenizer(line)
total_tokens = total_tokens + len(line_tokens)
for token in line_tokens:
if token in tokens:
total_coverage = total_coverage + 1
pass
pass
pass
measures[tar_info.name] = round(total_coverage/total_tokens, 8)
return measures
def _write_measures(file_name, measures):
print('Writing Measures...')
write_dictionary(file_name, measures)
| en | 0.592897 | Calculates the Zif's law coverage of a given set of tokens on the corpus an a document by document basis Parameters ---------- corpus : str The tarball containing the corpus tokens: str The file containing the list of tokens to get a % coverage on tokenizer: function Optional: function to take in a line (str) and output a list of tokens (str[]) Example --------- import corpustools.measure as ctm; ctm.coverage('d:/working/corpus.tar', 'd:/working/tokens.csv') | 2.92745 | 3 |
backend/backend/automation.py | bogdancarpusor/flight-price-predictions | 0 | 6616883 | <filename>backend/backend/automation.py
from multiprocessing import Process
from datetime import date, timedelta
import time
from redis import Redis
# Departure cities; each name doubles as a Redis pub/sub channel.
channels = [
    'New York',
    'Bucharest',
    'Berlin',
    'Barcelona',
    'Paris',
    'London',
    'Frankfurt',
    'Amsterdam'
]
def pub(myredis):
    """Publish every departure date from today up to 2019-06-10 on each city channel."""
    last_day = date(2019, 6, 10)
    today = date.today()
    remaining_days = (last_day - today).days
    for offset in range(remaining_days):
        day_text = (today + timedelta(days=offset)).strftime('%d/%m/%Y')
        for channel in channels:
            myredis.publish(channel, day_text)
def default_sub(myredis, name):
    """Subscribe to the channel *name* and print every departure date received."""
    subscription = myredis.pubsub()
    subscription.subscribe([name])
    for message in subscription.listen():
        print('Finding flights from {} on {}'.format(name, message['data']))
def start_automation_server():
    """Open the Redis connection; publisher/subscriber processes are currently disabled."""
    connection = Redis(host='redis', port=6379)
    # while True:
    # Process(target=pub, args=(connection,)).start()
    # Process(target=default_sub, args=(connection, 'New York')).start()
    # Process(target=default_sub, args=(connection, 'Barcelona')).start()
    # Process(target=default_sub, args=(connection, 'Berlin')).start()
    # Process(target=default_sub, args=(connection, 'London')).start()
    # Process(target=default_sub, args=(connection, 'Paris')).start()
    # Process(target=default_sub, args=(connection, 'Frankfurt')).start()
    # Process(target=default_sub, args=(connection, 'Amsterdam')).start()
from multiprocessing import Process
from datetime import date, timedelta
import time
from redis import Redis
# Departure cities; each name doubles as a Redis pub/sub channel.
channels = [
    'New York',
    'Bucharest',
    'Berlin',
    'Barcelona',
    'Paris',
    'London',
    'Frankfurt',
    'Amsterdam'
]
def pub(myredis):
    """Publish one message per channel for every day from today up to (but not
    including) 2019-06-10. If that date is already past, nothing is published."""
    last_day = date(2019, 6, 10)
    today = date.today()
    remaining_days = (last_day - today).days
    for offset in range(remaining_days):
        departure = today + timedelta(days=offset)
        stamp = departure.strftime('%d/%m/%Y')
        for channel in channels:
            myredis.publish(channel, stamp)
        # time.sleep(5)
def default_sub(myredis, name):
    """Subscribe to the channel *name* and print every departure date received."""
    subscription = myredis.pubsub()
    subscription.subscribe([name])
    for message in subscription.listen():
        print('Finding flights from {} on {}'.format(name, message['data']))
def start_automation_server():
    """Open the Redis connection; publisher/subscriber processes are currently disabled."""
    connection = Redis(host='redis', port=6379)
    # while True:
    # Process(target=pub, args=(connection,)).start()
    # Process(target=default_sub, args=(connection, 'New York')).start()
    # Process(target=default_sub, args=(connection, 'Barcelona')).start()
    # Process(target=default_sub, args=(connection, 'Berlin')).start()
    # Process(target=default_sub, args=(connection, 'London')).start()
    # Process(target=default_sub, args=(connection, 'Paris')).start()
    # Process(target=default_sub, args=(connection, 'Frankfurt')).start()
    # Process(target=default_sub, args=(connection, 'Amsterdam')).start()
Day_11/day_11.py | alex-d-boyd/Advent-of-Code-2021 | 0 | 6616884 | #! /usr/bin/env python3
# Advent of Code
# https://adventofcode.com/2021
# Day 11: Dumbo Octopus
import argparse
from pathlib import Path
# (row, col) offsets of the eight neighbouring cells, clockwise from north.
DELTAS = (
    (-1, 0),  # N
    (-1, 1),  # NE
    ( 0, 1),  # E
    ( 1, 1),  # SE
    ( 1, 0),  # S
    ( 1, -1), # SW
    ( 0, -1), # W
    (-1, -1), # NW
)
def parse_args():
    """Parse command-line options; -t/--test selects the sample input file."""
    # Fixed: description previously said 'Day 01' although this is Day 11.
    parser = argparse.ArgumentParser(description='AoC 2021 Day 11')
    parser.add_argument('-t', '--test', help='use test data', action='store_true')
    args = parser.parse_args()
    return args
def adjacents(r, c, rows, cols):
    """Return the in-bounds neighbour coordinates of (r, c) on a rows x cols grid."""
    neighbours = []
    for dr, dc in DELTAS:
        nr, nc = r + dr, c + dc
        if 0 <= nr < rows and 0 <= nc < cols:
            neighbours.append((nr, nc))
    return neighbours
def process_step(matrix):
    """Advance the octopus grid by one step in place; return the flash count."""
    pending = set()   # cells over 9 that still have to flash this step
    done = set()      # cells that have already flashed this step
    n_rows, n_cols = len(matrix), len(matrix[0])
    # Every octopus gains one energy at the start of the step.
    for row in range(n_rows):
        for col in range(n_cols):
            matrix[row][col] += 1
            if matrix[row][col] > 9:
                pending.add((row, col))
    # Flashes cascade: each flash raises its neighbours, possibly over 9.
    while pending:
        cell = pending.pop()
        done.add(cell)
        for nr, nc in adjacents(*cell, n_rows, n_cols):
            if (nr, nc) in done:
                continue
            matrix[nr][nc] += 1
            if matrix[nr][nc] > 9:
                pending.add((nr, nc))
    # Flashed octopuses reset to zero energy.
    for row, col in done:
        matrix[row][col] = 0
    return len(done)
if __name__ == '__main__':
    args = parse_args()
    # Select the puzzle input: sample data when run with -t/--test.
    if args.test:
        in_file = Path('test.txt')
    else:
        in_file = Path('input.txt')
    puzzle_input = in_file.read_text(encoding='utf-8')
    # Grid of single-digit octopus energy levels.
    octopus_matrix = [list(map(int,list(line)))
                      for line in puzzle_input.splitlines()]
    octopus_count = len(octopus_matrix) * len(octopus_matrix[0])
    total_flashes = 0
    # Part 1: count flashes over the first 100 steps.
    for i in range(100):
        total_flashes += process_step(octopus_matrix)
    # Part 2: keep stepping until every octopus flashes in the same step.
    while True:
        i += 1
        if process_step(octopus_matrix) == octopus_count:
            break
    print(f'Part 1: There were a total of {total_flashes} flashes')
    # Steps are 1-indexed, hence i+1.
    print(f'Part 2: The first step where all flash was {i+1}')
| #! /usr/bin/env python3
# Advent of Code
# https://adventofcode.com/2021
# Day 11: Dumbo Octopus
import argparse
from pathlib import Path
# (row, col) offsets of the eight neighbouring cells, clockwise from north.
DELTAS = (
    (-1, 0),  # N
    (-1, 1),  # NE
    ( 0, 1),  # E
    ( 1, 1),  # SE
    ( 1, 0),  # S
    ( 1, -1), # SW
    ( 0, -1), # W
    (-1, -1), # NW
)
def parse_args():
    """Parse command-line options; -t/--test selects the sample input file."""
    # Fixed: description previously said 'Day 01' although this is Day 11.
    parser = argparse.ArgumentParser(description='AoC 2021 Day 11')
    parser.add_argument('-t', '--test', help='use test data', action='store_true')
    args = parser.parse_args()
    return args
def adjacents(r, c, rows, cols):
    """Return the in-bounds neighbour coordinates of (r, c) on a rows x cols grid."""
    neighbours = []
    for dr, dc in DELTAS:
        nr, nc = r + dr, c + dc
        if 0 <= nr < rows and 0 <= nc < cols:
            neighbours.append((nr, nc))
    return neighbours
def process_step(matrix):
    """Advance the octopus grid by one step in place; return the flash count."""
    pending = set()   # cells over 9 that still have to flash this step
    done = set()      # cells that have already flashed this step
    n_rows, n_cols = len(matrix), len(matrix[0])
    # Every octopus gains one energy at the start of the step.
    for row in range(n_rows):
        for col in range(n_cols):
            matrix[row][col] += 1
            if matrix[row][col] > 9:
                pending.add((row, col))
    # Flashes cascade: each flash raises its neighbours, possibly over 9.
    while pending:
        cell = pending.pop()
        done.add(cell)
        for nr, nc in adjacents(*cell, n_rows, n_cols):
            if (nr, nc) in done:
                continue
            matrix[nr][nc] += 1
            if matrix[nr][nc] > 9:
                pending.add((nr, nc))
    # Flashed octopuses reset to zero energy.
    for row, col in done:
        matrix[row][col] = 0
    return len(done)
if __name__ == '__main__':
    args = parse_args()
    # Select the puzzle input: sample data when run with -t/--test.
    if args.test:
        in_file = Path('test.txt')
    else:
        in_file = Path('input.txt')
    puzzle_input = in_file.read_text(encoding='utf-8')
    # Grid of single-digit octopus energy levels.
    octopus_matrix = [list(map(int,list(line)))
                      for line in puzzle_input.splitlines()]
    octopus_count = len(octopus_matrix) * len(octopus_matrix[0])
    total_flashes = 0
    # Part 1: count flashes over the first 100 steps.
    for i in range(100):
        total_flashes += process_step(octopus_matrix)
    # Part 2: keep stepping until every octopus flashes in the same step.
    while True:
        i += 1
        if process_step(octopus_matrix) == octopus_count:
            break
    print(f'Part 1: There were a total of {total_flashes} flashes')
    # Steps are 1-indexed, hence i+1.
    print(f'Part 2: The first step where all flash was {i+1}')
| en | 0.598246 | #! /usr/bin/env python3 # Advent of Code # https://adventofcode.com/2021 # Day 11: Dumbo Octopus # N # NE # E # SE # S # SW # W # NW | 3.567271 | 4 |
API/client/python-client-generated/swagger_client/__init__.py | zhuofusong/machine-fault-diagnosis | 2 | 6616885 | # coding: utf-8
# flake8: noqa
"""
Machine fault diagnosis
List of top level server APIs # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.model_flow_api import ModelFlowApi
from swagger_client.api.model_flow_chart_api import ModelFlowChartApi
from swagger_client.api.model_flow_chart_edge_api import ModelFlowChartEdgeApi
from swagger_client.api.model_flow_chart_extension_api import ModelFlowChartExtensionApi
from swagger_client.api.model_flow_chart_node_api import ModelFlowChartNodeApi
from swagger_client.api.model_flow_chart_node_component_api import ModelFlowChartNodeComponentApi
from swagger_client.api.model_flow_chart_validation_api import ModelFlowChartValidationApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.model_flow_chart import ModelFlowChart
from swagger_client.models.model_flow_chart_edge import ModelFlowChartEdge
from swagger_client.models.model_flow_chart_edge_meta import ModelFlowChartEdgeMeta
from swagger_client.models.model_flow_chart_extension import ModelFlowChartExtension
from swagger_client.models.model_flow_chart_extension_fixed_config import ModelFlowChartExtensionFixedConfig
from swagger_client.models.model_flow_chart_extension_meta import ModelFlowChartExtensionMeta
from swagger_client.models.model_flow_chart_graph import ModelFlowChartGraph
from swagger_client.models.model_flow_chart_meta import ModelFlowChartMeta
from swagger_client.models.model_flow_chart_node import ModelFlowChartNode
from swagger_client.models.model_flow_chart_node_component import ModelFlowChartNodeComponent
from swagger_client.models.model_flow_chart_node_component_meta import ModelFlowChartNodeComponentMeta
from swagger_client.models.model_flow_chart_node_linked_edges import ModelFlowChartNodeLinkedEdges
from swagger_client.models.model_flow_chart_node_meta import ModelFlowChartNodeMeta
| # coding: utf-8
# flake8: noqa
"""
Machine fault diagnosis
List of top level server APIs # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.model_flow_api import ModelFlowApi
from swagger_client.api.model_flow_chart_api import ModelFlowChartApi
from swagger_client.api.model_flow_chart_edge_api import ModelFlowChartEdgeApi
from swagger_client.api.model_flow_chart_extension_api import ModelFlowChartExtensionApi
from swagger_client.api.model_flow_chart_node_api import ModelFlowChartNodeApi
from swagger_client.api.model_flow_chart_node_component_api import ModelFlowChartNodeComponentApi
from swagger_client.api.model_flow_chart_validation_api import ModelFlowChartValidationApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.model_flow_chart import ModelFlowChart
from swagger_client.models.model_flow_chart_edge import ModelFlowChartEdge
from swagger_client.models.model_flow_chart_edge_meta import ModelFlowChartEdgeMeta
from swagger_client.models.model_flow_chart_extension import ModelFlowChartExtension
from swagger_client.models.model_flow_chart_extension_fixed_config import ModelFlowChartExtensionFixedConfig
from swagger_client.models.model_flow_chart_extension_meta import ModelFlowChartExtensionMeta
from swagger_client.models.model_flow_chart_graph import ModelFlowChartGraph
from swagger_client.models.model_flow_chart_meta import ModelFlowChartMeta
from swagger_client.models.model_flow_chart_node import ModelFlowChartNode
from swagger_client.models.model_flow_chart_node_component import ModelFlowChartNodeComponent
from swagger_client.models.model_flow_chart_node_component_meta import ModelFlowChartNodeComponentMeta
from swagger_client.models.model_flow_chart_node_linked_edges import ModelFlowChartNodeLinkedEdges
from swagger_client.models.model_flow_chart_node_meta import ModelFlowChartNodeMeta
| en | 0.615566 | # coding: utf-8 # flake8: noqa Machine fault diagnosis List of top level server APIs # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git # import apis into sdk package # import ApiClient # import models into sdk package | 1.053815 | 1 |
src/pyrin/database/model/component.py | wilsonGmn/pyrin | 0 | 6616886 | <reponame>wilsonGmn/pyrin<filename>src/pyrin/database/model/component.py
# -*- coding: utf-8 -*-
"""
model component module.
"""
from pyrin.application.decorators import component
from pyrin.database.model import ModelPackage
from pyrin.database.model.manager import ModelManager
from pyrin.application.structs import Component
@component(ModelPackage.COMPONENT_NAME)
class ModelComponent(Component, ModelManager):
    """
    model component class.

    registers the database model manager as an application component
    under the model package's component name.
    """
| # -*- coding: utf-8 -*-
"""
model component module.
"""
from pyrin.application.decorators import component
from pyrin.database.model import ModelPackage
from pyrin.database.model.manager import ModelManager
from pyrin.application.structs import Component
@component(ModelPackage.COMPONENT_NAME)
class ModelComponent(Component, ModelManager):
    """
    model component class.

    registers the database model manager as an application component
    under the model package's component name.
    """
src/factory/evaluate/evaluator.py | i-pan/kaggle-melanoma | 68 | 6616887 | <reponame>i-pan/kaggle-melanoma<filename>src/factory/evaluate/evaluator.py
import torch
import pickle
import pandas as pd
import numpy as np
import os, os.path as osp
import re
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from .metrics import *
from ..data import cudaify
class Predictor(object):
    """Runs inference over a loader and returns targets, predictions and per-batch losses.

    ``predict`` dispatches on the model's class name:
      * 'arcnet'     - embeddings scored by cosine similarity to the centre of
                       the melanoma reference embeddings (from ``arc_loader``);
      * 'siamesenet' - pairwise similarity head scored against melanoma references;
      * otherwise    - a plain classifier (softmax / sigmoid scores).
    """

    def __init__(self,
                 loader,
                 labels_available=True,
                 cuda=True,
                 debug=False,
                 arc_loader=None):
        # loader: the evaluation DataLoader.
        # arc_loader: loader over melanoma-only training images; used as the
        #             reference set by the arcnet/siamese branches of predict().
        self.loader = loader
        self.arc_loader = arc_loader
        self.labels_available = labels_available
        self.cuda = cuda
        self.debug = debug
        # Square TTA expands a single image into several crops, so it only
        # works with a batch size of 1.
        if self.loader.dataset.square_tta:
            assert self.loader.batch_size == 1

    @staticmethod
    def get_center(vectors):
        # Mean of the embeddings, re-normalised to unit length
        # (row-wise when the mean is still 2-D).
        avg = np.mean(vectors, axis=0)
        if avg.ndim == 1:
            avg = avg / np.linalg.norm(avg)
        elif avg.ndim == 2:
            avg = avg / np.linalg.norm(avg, axis=1, keepdims=True)
        return avg

    def predict(self, model, criterion, epoch):
        """Return (y_true, y_pred, losses) for the evaluation loader at this epoch."""
        self.epoch = epoch
        y_pred = []
        y_true = []
        losses = []
        if 'arcnet' in str(model).lower():
            melanoma = []
            features = []
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.arc_loader), total=len(self.arc_loader)):
                    # arc_loader should be a loader of MELANOMA IMAGES ONLY from training set
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    # Get feature
                    melanoma += [model(batch).cpu().numpy()]
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    # Validation loader
                    if self.debug:
                        if i > 10:
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    features += [model(batch).cpu().numpy()]
                    # Losses are not computed for metric-learning models.
                    losses += [0]
                    y_true += list(labels.cpu().numpy())
            if self.debug:
                # Force both classes to be present so downstream metrics work.
                y_true[-1] = 0
                y_true[-2] = 1
            melanoma = np.vstack(melanoma)
            features = np.vstack(features)
            # Get center of melanoma features
            melanoma = self.get_center(melanoma).reshape(1, -1)
            # Compute distances
            distances = cosine_similarity(features, melanoma)
            if len(self.arc_loader) > 10000:
                # NOTE(review): a large arc_loader appears to mean the references
                # are benign, so the score is negated -- confirm this heuristic.
                print('Using distance from benign ...')
                distances = -distances
            return y_true, distances, losses
        elif 'siamesenet' in str(model).lower():
            melanoma = []
            features = []
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.arc_loader), total=len(self.arc_loader)):
                    # arc_loader should be a loader of MELANOMA IMAGES ONLY from training set
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    # Get feature
                    melanoma += [model.extract_features(batch)]
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    # Validation loader
                    if self.debug:
                        if i > 10:
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    features += [model.extract_features(batch)]
                    losses += [0]
                    y_true += list(labels.cpu().numpy())
            # Now that features are extracted, we must pass them to the head
            melanoma = torch.cat(melanoma)
            features = torch.cat(features)
            # For each evaluation image, average the head's similarity against
            # every melanoma reference.
            similarities = [model.forward_head(melanoma, torch.stack([features[i]]*melanoma.size(0), dim=0)).mean().item() for i in tqdm(range(features.size(0)), total=features.size(0))]
            # with open('/root/melanoma/src/sims.pkl', 'wb') as f:
            #     pickle.dump(similarities, f)
            # print(len(similarities))
            if self.debug:
                # Force both classes to be present so downstream metrics work.
                y_true[-1] = 0
                y_true[-2] = 1
            return y_true, similarities, losses
        else:
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    if self.debug:
                        if i > 10:
                            # Ensure both classes appear in the truncated debug run.
                            y_true[0] = 1
                            y_true[1] = 0
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    output = model(batch)
                    if criterion:
                        # One-hot style criteria are not evaluated here.
                        if 'onehot' in str(criterion).lower():
                            losses += [0]
                        else:
                            losses += [criterion(output, labels).item()]
                    # Unwrap DataParallel (model.module) to reach the final layer.
                    if hasattr(model, 'module'):
                        num_classes = model.module.fc.out_features
                    else:
                        num_classes = model.fc.out_features
                    if num_classes == 2:
                        # Keep only the positive-class probability.
                        output = torch.softmax(output, dim=1)[:,1]
                    elif num_classes == 3:
                        output = torch.softmax(output, dim=1)
                    elif num_classes == 1:
                        output = torch.sigmoid(output)
                    output = output.cpu().numpy()
                    y_pred += list(output)
                    if self.labels_available:
                        y_true += list(labels.cpu().numpy())
            y_pred = np.asarray(y_pred)
            y_true = np.asarray(y_true)
            return y_true, y_pred, losses
class Evaluator(Predictor):
    """Predictor that also computes metrics, tracks the best validation score,
    supports early stopping, and writes checkpoints plus prediction dumps."""

    def __init__(self,
                 loader,
                 metrics,
                 valid_metric,
                 mode,
                 improve_thresh,
                 prefix,
                 save_checkpoint_dir,
                 save_best,
                 early_stopping=np.inf,
                 thresholds=np.arange(0.05, 1.05, 0.05),
                 cuda=True,
                 debug=False,
                 arc_loader=None):
        super(Evaluator, self).__init__(
            loader=loader,
            cuda=cuda,
            debug=debug,
            arc_loader=arc_loader)

        if type(metrics) is not list: metrics = list(metrics)
        # Names of metric functions defined in metrics.py (imported via *).
        self.metrics = metrics
        # Metric (or list of metrics, averaged) tracked for improvement.
        self.valid_metric = valid_metric
        # 'min' or 'max': whether a lower or higher valid_metric is better.
        self.mode = mode
        # Minimum gain in valid_metric to count as an improvement.
        self.improve_thresh = improve_thresh
        # Checkpoint file name prefix.
        self.prefix = prefix
        self.save_checkpoint_dir = save_checkpoint_dir
        # If True, keep only the best checkpoint; otherwise keep every epoch's.
        self.save_best = save_best
        self.metrics_file = os.path.join(save_checkpoint_dir, 'metrics.csv')
        # Start each run with a fresh metrics log.
        if os.path.exists(self.metrics_file): os.system('rm {}'.format(self.metrics_file))
        # Epochs without improvement tolerated before training stops.
        self.early_stopping = early_stopping
        self.stopping = 0
        self.thresholds = thresholds
        self.history = []
        self.epoch = None
        self.reset_best()

    def reset_best(self):
        """Forget the best checkpoint/score seen so far."""
        self.best_model = None
        self.best_score = -np.inf

    def set_logger(self, logger):
        self.logger = logger
        self.print = self.logger.info

    def validate(self, model, criterion, epoch):
        """Run inference, compute metrics, save a checkpoint; return the tracked metric."""
        y_true, y_pred, losses = self.predict(model, criterion, epoch)
        valid_metric = self.calculate_metrics(y_true, y_pred, losses)
        self.save_checkpoint(model, valid_metric, y_true, y_pred)
        return valid_metric

    def generate_metrics_df(self):
        """Dump the accumulated per-epoch metrics history to metrics.csv."""
        df = pd.concat([pd.DataFrame(d, index=[0]) for d in self.history])
        df.to_csv(self.metrics_file, index=False)

    # Used by Trainer class
    def check_stopping(self):
        return self.stopping >= self.early_stopping

    def check_improvement(self, score):
        """Update best score / patience counter; return True if *score* improved."""
        # If mode is 'min', negate so that higher is always better
        # (i.e., -0.01 > -0.02).
        multiplier = -1 if self.mode == 'min' else 1
        score = multiplier * score
        improved = score >= (self.best_score + self.improve_thresh)
        if improved:
            self.stopping = 0
            self.best_score = score
        else:
            self.stopping += 1
        return improved

    def save_checkpoint(self, model, valid_metric, y_true, y_pred):
        """Save model weights plus a pickle of the validation predictions."""
        save_file = '{}_{}_VM-{:.4f}.pth'.format(self.prefix, str(self.epoch).zfill(3), valid_metric).upper()
        save_file = os.path.join(self.save_checkpoint_dir, save_file)
        if self.save_best:
            if not self.check_improvement(valid_metric):
                return
            # Keep only one checkpoint: drop the previous best.
            if self.best_model is not None:
                os.system('rm {}'.format(self.best_model))
            self.best_model = save_file
        torch.save(model.state_dict(), save_file)
        # Save predictions
        with open(os.path.join(self.save_checkpoint_dir, 'valid_predictions.pkl'), 'wb') as f:
            pickle.dump({'y_true': y_true, 'y_pred': y_pred}, f)

    def calculate_metrics(self, y_true, y_pred, losses):
        """Evaluate every configured metric, log results, return the tracked metric."""
        metrics_dict = {}
        metrics_dict['loss'] = np.mean(losses)
        for metric in self.metrics:
            if metric == 'loss': continue
            # NOTE(review): eval() resolves the metric name against the names
            # imported from metrics.py; only trusted config strings should
            # ever reach this point.
            metric = eval(metric)
            metrics_dict.update(metric(y_true, y_pred, thresholds=self.thresholds))
        if type(self.valid_metric) == list:
            # Average when several metrics are tracked jointly.
            valid_metric = np.mean([metrics_dict[vm] for vm in self.valid_metric])
        else:
            valid_metric = metrics_dict[self.valid_metric]
        metrics_dict['vm'] = valid_metric
        # Pad metric names so the log output lines up.
        max_str_len = np.max([len(k) for k in metrics_dict.keys()])
        for key in metrics_dict.keys():
            self.print('{key} | {value:.5g}'.format(key=key.ljust(max_str_len), value=metrics_dict[key]))
        metrics_dict['epoch'] = int(self.epoch)
        self.history += [metrics_dict]
        self.generate_metrics_df()
        return valid_metric
| import torch
import pickle
import pandas as pd
import numpy as np
import os, os.path as osp
import re
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from .metrics import *
from ..data import cudaify
class Predictor(object):
    """Runs inference over a loader and returns targets, predictions and per-batch losses.

    ``predict`` dispatches on the model's class name:
      * 'arcnet'     - embeddings scored by cosine similarity to the centre of
                       the melanoma reference embeddings (from ``arc_loader``);
      * 'siamesenet' - pairwise similarity head scored against melanoma references;
      * otherwise    - a plain classifier (softmax / sigmoid scores).
    """

    def __init__(self,
                 loader,
                 labels_available=True,
                 cuda=True,
                 debug=False,
                 arc_loader=None):
        # loader: the evaluation DataLoader.
        # arc_loader: loader over melanoma-only training images; used as the
        #             reference set by the arcnet/siamese branches of predict().
        self.loader = loader
        self.arc_loader = arc_loader
        self.labels_available = labels_available
        self.cuda = cuda
        self.debug = debug
        # Square TTA expands a single image into several crops, so it only
        # works with a batch size of 1.
        if self.loader.dataset.square_tta:
            assert self.loader.batch_size == 1

    @staticmethod
    def get_center(vectors):
        # Mean of the embeddings, re-normalised to unit length
        # (row-wise when the mean is still 2-D).
        avg = np.mean(vectors, axis=0)
        if avg.ndim == 1:
            avg = avg / np.linalg.norm(avg)
        elif avg.ndim == 2:
            avg = avg / np.linalg.norm(avg, axis=1, keepdims=True)
        return avg

    def predict(self, model, criterion, epoch):
        """Return (y_true, y_pred, losses) for the evaluation loader at this epoch."""
        self.epoch = epoch
        y_pred = []
        y_true = []
        losses = []
        if 'arcnet' in str(model).lower():
            melanoma = []
            features = []
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.arc_loader), total=len(self.arc_loader)):
                    # arc_loader should be a loader of MELANOMA IMAGES ONLY from training set
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    # Get feature
                    melanoma += [model(batch).cpu().numpy()]
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    # Validation loader
                    if self.debug:
                        if i > 10:
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    features += [model(batch).cpu().numpy()]
                    # Losses are not computed for metric-learning models.
                    losses += [0]
                    y_true += list(labels.cpu().numpy())
            if self.debug:
                # Force both classes to be present so downstream metrics work.
                y_true[-1] = 0
                y_true[-2] = 1
            melanoma = np.vstack(melanoma)
            features = np.vstack(features)
            # Get center of melanoma features
            melanoma = self.get_center(melanoma).reshape(1, -1)
            # Compute distances
            distances = cosine_similarity(features, melanoma)
            if len(self.arc_loader) > 10000:
                # NOTE(review): a large arc_loader appears to mean the references
                # are benign, so the score is negated -- confirm this heuristic.
                print('Using distance from benign ...')
                distances = -distances
            return y_true, distances, losses
        elif 'siamesenet' in str(model).lower():
            melanoma = []
            features = []
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.arc_loader), total=len(self.arc_loader)):
                    # arc_loader should be a loader of MELANOMA IMAGES ONLY from training set
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    # Get feature
                    melanoma += [model.extract_features(batch)]
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    # Validation loader
                    if self.debug:
                        if i > 10:
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    features += [model.extract_features(batch)]
                    losses += [0]
                    y_true += list(labels.cpu().numpy())
            # Now that features are extracted, we must pass them to the head
            melanoma = torch.cat(melanoma)
            features = torch.cat(features)
            # For each evaluation image, average the head's similarity against
            # every melanoma reference.
            similarities = [model.forward_head(melanoma, torch.stack([features[i]]*melanoma.size(0), dim=0)).mean().item() for i in tqdm(range(features.size(0)), total=features.size(0))]
            # with open('/root/melanoma/src/sims.pkl', 'wb') as f:
            #     pickle.dump(similarities, f)
            # print(len(similarities))
            if self.debug:
                # Force both classes to be present so downstream metrics work.
                y_true[-1] = 0
                y_true[-2] = 1
            return y_true, similarities, losses
        else:
            with torch.no_grad():
                for i, data in tqdm(enumerate(self.loader), total=len(self.loader)):
                    if self.debug:
                        if i > 10:
                            # Ensure both classes appear in the truncated debug run.
                            y_true[0] = 1
                            y_true[1] = 0
                            break
                    batch, labels = data
                    if self.cuda:
                        batch, labels = cudaify(batch, labels)
                    output = model(batch)
                    if criterion:
                        # One-hot style criteria are not evaluated here.
                        if 'onehot' in str(criterion).lower():
                            losses += [0]
                        else:
                            losses += [criterion(output, labels).item()]
                    # Unwrap DataParallel (model.module) to reach the final layer.
                    if hasattr(model, 'module'):
                        num_classes = model.module.fc.out_features
                    else:
                        num_classes = model.fc.out_features
                    if num_classes == 2:
                        # Keep only the positive-class probability.
                        output = torch.softmax(output, dim=1)[:,1]
                    elif num_classes == 3:
                        output = torch.softmax(output, dim=1)
                    elif num_classes == 1:
                        output = torch.sigmoid(output)
                    output = output.cpu().numpy()
                    y_pred += list(output)
                    if self.labels_available:
                        y_true += list(labels.cpu().numpy())
            y_pred = np.asarray(y_pred)
            y_true = np.asarray(y_true)
            return y_true, y_pred, losses
class Evaluator(Predictor):
    """Predictor that also computes metrics, tracks the best validation score,
    supports early stopping, and writes checkpoints plus prediction dumps."""

    def __init__(self,
                 loader,
                 metrics,
                 valid_metric,
                 mode,
                 improve_thresh,
                 prefix,
                 save_checkpoint_dir,
                 save_best,
                 early_stopping=np.inf,
                 thresholds=np.arange(0.05, 1.05, 0.05),
                 cuda=True,
                 debug=False,
                 arc_loader=None):
        super(Evaluator, self).__init__(
            loader=loader,
            cuda=cuda,
            debug=debug,
            arc_loader=arc_loader)

        if type(metrics) is not list: metrics = list(metrics)
        # Names of metric functions defined in metrics.py (imported via *).
        self.metrics = metrics
        # Metric (or list of metrics, averaged) tracked for improvement.
        self.valid_metric = valid_metric
        # 'min' or 'max': whether a lower or higher valid_metric is better.
        self.mode = mode
        # Minimum gain in valid_metric to count as an improvement.
        self.improve_thresh = improve_thresh
        # Checkpoint file name prefix.
        self.prefix = prefix
        self.save_checkpoint_dir = save_checkpoint_dir
        # If True, keep only the best checkpoint; otherwise keep every epoch's.
        self.save_best = save_best
        self.metrics_file = os.path.join(save_checkpoint_dir, 'metrics.csv')
        # Start each run with a fresh metrics log.
        if os.path.exists(self.metrics_file): os.system('rm {}'.format(self.metrics_file))
        # Epochs without improvement tolerated before training stops.
        self.early_stopping = early_stopping
        self.stopping = 0
        self.thresholds = thresholds
        self.history = []
        self.epoch = None
        self.reset_best()

    def reset_best(self):
        """Forget the best checkpoint/score seen so far."""
        self.best_model = None
        self.best_score = -np.inf

    def set_logger(self, logger):
        self.logger = logger
        self.print = self.logger.info

    def validate(self, model, criterion, epoch):
        """Run inference, compute metrics, save a checkpoint; return the tracked metric."""
        y_true, y_pred, losses = self.predict(model, criterion, epoch)
        valid_metric = self.calculate_metrics(y_true, y_pred, losses)
        self.save_checkpoint(model, valid_metric, y_true, y_pred)
        return valid_metric

    def generate_metrics_df(self):
        """Dump the accumulated per-epoch metrics history to metrics.csv."""
        df = pd.concat([pd.DataFrame(d, index=[0]) for d in self.history])
        df.to_csv(self.metrics_file, index=False)

    # Used by Trainer class
    def check_stopping(self):
        return self.stopping >= self.early_stopping

    def check_improvement(self, score):
        """Update best score / patience counter; return True if *score* improved."""
        # If mode is 'min', negate so that higher is always better
        # (i.e., -0.01 > -0.02).
        multiplier = -1 if self.mode == 'min' else 1
        score = multiplier * score
        improved = score >= (self.best_score + self.improve_thresh)
        if improved:
            self.stopping = 0
            self.best_score = score
        else:
            self.stopping += 1
        return improved

    def save_checkpoint(self, model, valid_metric, y_true, y_pred):
        """Save model weights plus a pickle of the validation predictions."""
        save_file = '{}_{}_VM-{:.4f}.pth'.format(self.prefix, str(self.epoch).zfill(3), valid_metric).upper()
        save_file = os.path.join(self.save_checkpoint_dir, save_file)
        if self.save_best:
            if not self.check_improvement(valid_metric):
                return
            # Keep only one checkpoint: drop the previous best.
            if self.best_model is not None:
                os.system('rm {}'.format(self.best_model))
            self.best_model = save_file
        torch.save(model.state_dict(), save_file)
        # Save predictions
        with open(os.path.join(self.save_checkpoint_dir, 'valid_predictions.pkl'), 'wb') as f:
            pickle.dump({'y_true': y_true, 'y_pred': y_pred}, f)

    def calculate_metrics(self, y_true, y_pred, losses):
        """Evaluate every configured metric, log results, return the tracked metric."""
        metrics_dict = {}
        metrics_dict['loss'] = np.mean(losses)
        for metric in self.metrics:
            if metric == 'loss': continue
            # NOTE(review): eval() resolves the metric name against the names
            # imported from metrics.py; only trusted config strings should
            # ever reach this point.
            metric = eval(metric)
            metrics_dict.update(metric(y_true, y_pred, thresholds=self.thresholds))
        if type(self.valid_metric) == list:
            # Average when several metrics are tracked jointly.
            valid_metric = np.mean([metrics_dict[vm] for vm in self.valid_metric])
        else:
            valid_metric = metrics_dict[self.valid_metric]
        metrics_dict['vm'] = valid_metric
        # Pad metric names so the log output lines up.
        max_str_len = np.max([len(k) for k in metrics_dict.keys()])
        for key in metrics_dict.keys():
            self.print('{key} | {value:.5g}'.format(key=key.ljust(max_str_len), value=metrics_dict[key]))
        metrics_dict['epoch'] = int(self.epoch)
        self.history += [metrics_dict]
        self.generate_metrics_df()
        return valid_metric
tests/py310_only.py | ariebovenberg/canned | 4 | 6616888 | """Module containing python3.10-only syntax.
A separate module is needed so its import can be skipped completely
in older python versions.
"""
import pytest
from cans import Just, Maybe, Nothing
def _myfunc(m):
match m:
case Just(n):
return str(n)
case Nothing():
return 'Nothing!'
case _:
return 'what?'
def test_maybe_pattern_match():
    """_myfunc distinguishes Just payloads, Nothing, and unmatched values."""
    cases = [
        (Just(5), '5'),
        (Nothing(), 'Nothing!'),
        (5, 'what?'),
    ]
    for value, expected in cases:
        assert _myfunc(value) == expected
| """Module containing python3.10-only syntax.
A separate module is needed so its import can be skipped completely
in older python versions.
"""
import pytest
from cans import Just, Maybe, Nothing
def _myfunc(m):
match m:
case Just(n):
return str(n)
case Nothing():
return 'Nothing!'
case _:
return 'what?'
def test_maybe_pattern_match():
    """_myfunc distinguishes Just payloads, Nothing, and unmatched values."""
    cases = [
        (Just(5), '5'),
        (Nothing(), 'Nothing!'),
        (5, 'what?'),
    ]
    for value, expected in cases:
        assert _myfunc(value) == expected
| en | 0.826929 | Module containing python3.10-only syntax. A separate module is needed so its import can be skipped completely in older python versions. | 2.526087 | 3 |
CoincidenceIndex/frequencyPlotter.py | AD9000/Security | 0 | 6616889 | #!/usr/bin/python3
import string
import matplotlib.pyplot as plt
from sampleData import text1, formatText
'''
Finds the letter frequency in the given piece of text.
Formats the text using the formatText() method.
'''
def findLetterFrequency(text: str):
    """Count occurrences of each letter A-Z in *text* after formatText() normalisation."""
    counts = [0] * 26
    for character in formatText(text):
        counts[ord(character) - ord('A')] += 1
    return counts
'''
Plots the frequency of each letter of the alphabet as a bar chart.
'''
def frequencyPlotter(text: str):
    """Render a bar chart of A-Z letter frequencies found in *text*."""
    plt.bar(list(string.ascii_uppercase), findLetterFrequency(text))
    plt.title('Letter Frequency of each letter of the alphabet')
    plt.ylabel('Letter frequency')
    plt.xlabel('letters')
    plt.show()
frequencyPlotter(text1) | #!/usr/bin/python3
import string
import matplotlib.pyplot as plt
from sampleData import text1, formatText
'''
Finds the letter frequency in the given piece of
Formats text using formatText() method
'''
def findLetterFrequency(text: str):
    """Count occurrences of each letter A-Z in *text*.

    Returns a 26-element list where index 0 counts 'A' ... index 25 counts 'Z'.
    NOTE(review): assumes formatText() returns uppercase A-Z only; any other
    character would make the index go out of range -- confirm against
    sampleData.formatText.
    """
    frequencyList = 26 * [0]
    text = formatText(text)
    baseAsciiValue = 65  # ord('A')
    for letter in text:
        frequencyList[ord(letter) - baseAsciiValue] += 1
    return frequencyList
'''
Plots frequency of letters to
'''
def frequencyPlotter(text: str):
    """Show a bar chart of per-letter (A-Z) frequencies for *text*.

    Blocks until the matplotlib window is closed.
    """
    baseAsciiValue = 65  # NOTE(review): unused in this function
    # frequency = list(zip(findLetterFrequency(text), list(string.ascii_uppercase)))
    # print(frequency)
    fig = plt.bar(list(string.ascii_uppercase), findLetterFrequency(text))
    plt.title('Letter Frequency of each letter of the alphabet')
    plt.ylabel('Letter frequency')
    plt.xlabel('letters')
    # langs = ['C', 'C++', 'Java', 'Python', 'PHP']
    # students = [23,17,35,29,12]
    plt.show()
frequencyPlotter(text1) | en | 0.534978 | #!/usr/bin/python3 Finds the letter frequency in the given piece of Formats text using formatText() method Plots frequency of letters to # frequency = list(zip(findLetterFrequency(text), list(string.ascii_uppercase))) # print(frequency) # langs = ['C', 'C++', 'Java', 'Python', 'PHP'] # students = [23,17,35,29,12] | 4.050535 | 4 |
dictfile/api/exceptions.py | iliapolo/fileconfig | 4 | 6616890 | #############################################################################
# Copyright (c) 2018 <NAME>. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
class ApiException(BaseException):
    """Base class for all dictfile API errors.

    NOTE(review): inherits from BaseException rather than Exception, so a
    plain ``except Exception`` will NOT catch these -- confirm intended.
    """
    pass


class VersionNotFoundException(ApiException):
    """Raised when a requested version does not exist for an alias."""
    def __init__(self, alias, version):
        self.alias = alias
        self.version = version
        # pass the formatted message so str()/args are populated
        super(VersionNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'Version {0} not found for alias: {1}'.format(self.version, self.alias)


class AliasNotFoundException(ApiException):
    """Raised when the given alias is not registered."""
    def __init__(self, alias):
        self.alias = alias
        super(AliasNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'Alias {0} not found'.format(self.alias)


class FileIsDirectoryException(ApiException):
    """Raised when a path expected to be a file is a directory."""
    def __init__(self, file_path):
        self.file_path = file_path
        super(FileIsDirectoryException, self).__init__(self.__str__())

    def __str__(self):
        return '{0} is a directory, not a file'.format(self.file_path)


class FileNotFoundException(ApiException):
    """Raised when the target file does not exist on disk."""
    def __init__(self, file_path):
        self.file_path = file_path
        super(FileNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'File {0} does not exist'.format(self.file_path)
class AliasAlreadyExistsException(ApiException):
    """Raised when creating an alias whose name is already taken."""
    def __init__(self, alias):
        # Fix: the value is an alias, not a path -- store it under the
        # accurate attribute name, keeping the historical ``file_path``
        # attribute so existing callers keep working.
        self.alias = alias
        self.file_path = alias
        super(AliasAlreadyExistsException, self).__init__(self.__str__())

    def __str__(self):
        return 'Alias {0} already exists'.format(self.alias)
class KeyNotFoundException(ApiException):
    """Raised when a lookup key is absent from the file."""
    def __init__(self, key):
        self.key = key
        super(KeyNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return "Key '{0}' does not exist".format(self.key)


class InvalidKeyTypeException(ApiException):
    """Raised when a key's value has a type other than the expected ones."""
    def __init__(self, key, expected_types, actual_type):
        self.expected_types = expected_types
        self.actual_type = actual_type
        self.key = key
        super(InvalidKeyTypeException, self).__init__(self.__str__())

    def __str__(self):
        return "Invalid key type: {0}. Expected: {1}, Actual: {2}".format(
            self.key,
            ','.join([str(t) for t in self.expected_types]),
            self.actual_type)


class InvalidValueTypeException(ApiException):
    """Raised when a value to be written has an unsupported type."""
    def __init__(self, expected_types, actual_type):
        self.expected_types = expected_types
        self.actual_type = actual_type
        super(InvalidValueTypeException, self).__init__(self.__str__())

    def __str__(self):
        return "Invalid value type. Expected: {0}, Actual: {1}".format(
            ','.join([str(t) for t in self.expected_types]),
            self.actual_type)


class UnsupportedFormatException(ApiException):
    """Raised for file formats the library cannot parse."""
    def __init__(self, fmt):
        self.fmt = fmt
        super(UnsupportedFormatException, self).__init__(self.__str__())

    def __str__(self):
        return 'Unsupported Format: {0}'.format(self.fmt)


class UnsupportedOperationException(ApiException):
    """Raised when an operation is unavailable for a given format."""
    def __init__(self, fmt, operation):
        self.operation = operation
        self.fmt = fmt
        super(UnsupportedOperationException, self).__init__(self.__str__())

    def __str__(self):
        return "Unsupported operation: {0} (format={1}) ".format(self.operation, self.fmt)


class CorruptFileException(ApiException):
    """Raised when a file exists but cannot be parsed.

    ``alias`` is optional context (which alias the file belongs to);
    it is stored but not included in the message.
    """
    def __init__(self, file_path, message, alias=None):
        self.alias = alias
        self.file_path = file_path
        self.message = message
        super(CorruptFileException, self).__init__(self.__str__())

    def __str__(self):
        return 'Corrupted File ({0}): {1}'.format(self.file_path, self.message)


class IllegalAliasException(ApiException):
    """Raised for alias names containing spaces or path separators."""
    def __init__(self, alias):
        self.alias = alias
        super(IllegalAliasException, self).__init__(self.__str__())

    def __str__(self):
        # NOTE(review): the offending alias is stored but not shown here.
        return 'Alias is illegal (Must not contain spaces nor path separators)'


class InvalidArgumentsException(ApiException):
    """Generic bad-arguments error carrying a free-form message."""
    def __init__(self, message):
        self.message = message
        super(InvalidArgumentsException, self).__init__(self.__str__())

    def __str__(self):
        return self.message
| #############################################################################
# Copyright (c) 2018 <NAME>. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
class ApiException(BaseException):
    """Base class for all dictfile API errors.

    NOTE(review): inherits from BaseException rather than Exception, so a
    plain ``except Exception`` will NOT catch these -- confirm intended.
    """
    pass


class VersionNotFoundException(ApiException):
    """Raised when a requested version does not exist for an alias."""
    def __init__(self, alias, version):
        self.alias = alias
        self.version = version
        # pass the formatted message so str()/args are populated
        super(VersionNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'Version {0} not found for alias: {1}'.format(self.version, self.alias)


class AliasNotFoundException(ApiException):
    """Raised when the given alias is not registered."""
    def __init__(self, alias):
        self.alias = alias
        super(AliasNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'Alias {0} not found'.format(self.alias)


class FileIsDirectoryException(ApiException):
    """Raised when a path expected to be a file is a directory."""
    def __init__(self, file_path):
        self.file_path = file_path
        super(FileIsDirectoryException, self).__init__(self.__str__())

    def __str__(self):
        return '{0} is a directory, not a file'.format(self.file_path)


class FileNotFoundException(ApiException):
    """Raised when the target file does not exist on disk."""
    def __init__(self, file_path):
        self.file_path = file_path
        super(FileNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return 'File {0} does not exist'.format(self.file_path)
class AliasAlreadyExistsException(ApiException):
    """Raised when creating an alias whose name is already taken."""
    def __init__(self, alias):
        # Fix: the value is an alias, not a path -- store it under the
        # accurate attribute name, keeping the historical ``file_path``
        # attribute so existing callers keep working.
        self.alias = alias
        self.file_path = alias
        super(AliasAlreadyExistsException, self).__init__(self.__str__())

    def __str__(self):
        return 'Alias {0} already exists'.format(self.alias)
class KeyNotFoundException(ApiException):
    """Raised when a lookup key is absent from the file."""
    def __init__(self, key):
        self.key = key
        super(KeyNotFoundException, self).__init__(self.__str__())

    def __str__(self):
        return "Key '{0}' does not exist".format(self.key)


class InvalidKeyTypeException(ApiException):
    """Raised when a key's value has a type other than the expected ones."""
    def __init__(self, key, expected_types, actual_type):
        self.expected_types = expected_types
        self.actual_type = actual_type
        self.key = key
        super(InvalidKeyTypeException, self).__init__(self.__str__())

    def __str__(self):
        return "Invalid key type: {0}. Expected: {1}, Actual: {2}".format(
            self.key,
            ','.join([str(t) for t in self.expected_types]),
            self.actual_type)


class InvalidValueTypeException(ApiException):
    """Raised when a value to be written has an unsupported type."""
    def __init__(self, expected_types, actual_type):
        self.expected_types = expected_types
        self.actual_type = actual_type
        super(InvalidValueTypeException, self).__init__(self.__str__())

    def __str__(self):
        return "Invalid value type. Expected: {0}, Actual: {1}".format(
            ','.join([str(t) for t in self.expected_types]),
            self.actual_type)


class UnsupportedFormatException(ApiException):
    """Raised for file formats the library cannot parse."""
    def __init__(self, fmt):
        self.fmt = fmt
        super(UnsupportedFormatException, self).__init__(self.__str__())

    def __str__(self):
        return 'Unsupported Format: {0}'.format(self.fmt)


class UnsupportedOperationException(ApiException):
    """Raised when an operation is unavailable for a given format."""
    def __init__(self, fmt, operation):
        self.operation = operation
        self.fmt = fmt
        super(UnsupportedOperationException, self).__init__(self.__str__())

    def __str__(self):
        return "Unsupported operation: {0} (format={1}) ".format(self.operation, self.fmt)


class CorruptFileException(ApiException):
    """Raised when a file exists but cannot be parsed.

    ``alias`` is optional context (which alias the file belongs to);
    it is stored but not included in the message.
    """
    def __init__(self, file_path, message, alias=None):
        self.alias = alias
        self.file_path = file_path
        self.message = message
        super(CorruptFileException, self).__init__(self.__str__())

    def __str__(self):
        return 'Corrupted File ({0}): {1}'.format(self.file_path, self.message)


class IllegalAliasException(ApiException):
    """Raised for alias names containing spaces or path separators."""
    def __init__(self, alias):
        self.alias = alias
        super(IllegalAliasException, self).__init__(self.__str__())

    def __str__(self):
        # NOTE(review): the offending alias is stored but not shown here.
        return 'Alias is illegal (Must not contain spaces nor path separators)'


class InvalidArgumentsException(ApiException):
    """Generic bad-arguments error carrying a free-form message."""
    def __init__(self, message):
        self.message = message
        super(InvalidArgumentsException, self).__init__(self.__str__())

    def __str__(self):
        return self.message
| en | 0.621554 | ############################################################################# # Copyright (c) 2018 <NAME>. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. # ############################################################################# | 2.08487 | 2 |
src/lib/idol/mar/codegen/schema/type_struct.py | lyric-com/idol | 0 | 6616891 | <reponame>lyric-com/idol
# DO NOT EDIT
# This file was generated by idol_mar, any changes will be lost when idol_mar is rerun again
from marshmallow import Schema
from .literal import SchemaLiteralField
from .primitive_type import SchemaPrimitiveTypeField
from .reference import SchemaReferenceField
from .struct_kind import SchemaStructKindField
from importlib import import_module
from ..__idol__ import wrap_field
from marshmallow.fields import Nested
# Generated by idol_mar ("DO NOT EDIT" header above) -- comments only added here.
class SchemaTypeStructSchema(Schema):
    """Marshmallow schema for the idol TypeStruct record."""
    literal = SchemaLiteralField(
        dump_to="literal", load_from="literal", allow_none=True
    )
    primitive_type = SchemaPrimitiveTypeField(
        dump_to="primitive_type", load_from="primitive_type", allow_none=False
    )
    reference = SchemaReferenceField(
        dump_to="reference", load_from="reference", allow_none=False
    )
    struct_kind = SchemaStructKindField(
        dump_to="struct_kind", load_from="struct_kind", allow_none=False
    )


# Nested field wrapper; the lambda defers the import to avoid a circular
# dependency between generated modules.
SchemaTypeStructField = wrap_field(
    Nested,
    "SchemaTypeStructField",
    (lambda: import_module("...schema.type_struct", __package__).TypeStructSchema),
)
| # DO NOT EDIT
# This file was generated by idol_mar, any changes will be lost when idol_mar is rerun again
from marshmallow import Schema
from .literal import SchemaLiteralField
from .primitive_type import SchemaPrimitiveTypeField
from .reference import SchemaReferenceField
from .struct_kind import SchemaStructKindField
from importlib import import_module
from ..__idol__ import wrap_field
from marshmallow.fields import Nested
# Generated by idol_mar ("DO NOT EDIT" header above) -- comments only added here.
class SchemaTypeStructSchema(Schema):
    """Marshmallow schema for the idol TypeStruct record."""
    literal = SchemaLiteralField(
        dump_to="literal", load_from="literal", allow_none=True
    )
    primitive_type = SchemaPrimitiveTypeField(
        dump_to="primitive_type", load_from="primitive_type", allow_none=False
    )
    reference = SchemaReferenceField(
        dump_to="reference", load_from="reference", allow_none=False
    )
    struct_kind = SchemaStructKindField(
        dump_to="struct_kind", load_from="struct_kind", allow_none=False
    )


# Nested field wrapper; the lambda defers the import to avoid a circular
# dependency between generated modules.
SchemaTypeStructField = wrap_field(
    Nested,
    "SchemaTypeStructField",
    (lambda: import_module("...schema.type_struct", __package__).TypeStructSchema),
)
contrib/liveupdate/cluster_state_to_json.py | stevec7/gpfs | 2 | 6616892 | <gh_stars>1-10
#!/usr/bin/env python
import argparse
import json
import operator
import sys
import time
from collections import defaultdict
from gpfs.cluster import GPFSCluster
from gpfs.node import Node
from fabric.api import run, execute, env
from fabric.context_managers import settings, hide, show
from fabric.operations import reboot
def tree():
    # Autovivifying nested dict: reading a missing key creates another tree().
    return defaultdict(tree)
def main(args):
    """Collect GPFS cluster state from ``args.destnode`` via fabric and
    dump it as JSON to ``args.jsonfile``.
    """
    state = tree()
    env.hosts = [ args.destnode, ]
    env.use_hostbased = True
    cluster = GPFSCluster(state)
    # this builds a complete GPFS cluster state defaultdict
    with settings(
        hide('running'),
        output_prefix='',
        warn_only=True
    ):
        execute(cluster.build_cluster_state)
        execute(cluster.get_managers)
        execute(cluster.get_all_kernel_and_arch)
        execute(cluster.get_all_gpfs_baserpm)
    # write all of this to a json dump
    # Fix: use a context manager so the file handle is flushed and closed
    # (the original json.dump(state, open(...)) leaked the handle).
    with open(args.jsonfile, 'w') as fh:
        json.dump(state, fh)
if __name__ == '__main__':
    # CLI entry point: -f/--file output json, -n/--node source node.
    parser = argparse.ArgumentParser(description='grabs GPFS cluster state \
and dumps to a json file')
    parser.add_argument('-f', '--file', dest='jsonfile', required=True,
                        help='json file to dump to')
    parser.add_argument('-n', '--node', dest='destnode', required=True,
                        help='node to grab cluster state from')
    args = parser.parse_args()
    main(args)
| #!/usr/bin/env python
import argparse
import json
import operator
import sys
import time
from collections import defaultdict
from gpfs.cluster import GPFSCluster
from gpfs.node import Node
from fabric.api import run, execute, env
from fabric.context_managers import settings, hide, show
from fabric.operations import reboot
def tree():
    # Autovivifying nested dict: reading a missing key creates another tree().
    return defaultdict(tree)
def main(args):
    """Collect GPFS cluster state from ``args.destnode`` via fabric and
    dump it as JSON to ``args.jsonfile``.
    """
    state = tree()
    env.hosts = [ args.destnode, ]
    env.use_hostbased = True
    cluster = GPFSCluster(state)
    # this builds a complete GPFS cluster state defaultdict
    with settings(
        hide('running'),
        output_prefix='',
        warn_only=True
    ):
        execute(cluster.build_cluster_state)
        execute(cluster.get_managers)
        execute(cluster.get_all_kernel_and_arch)
        execute(cluster.get_all_gpfs_baserpm)
    # write all of this to a json dump
    # Fix: use a context manager so the file handle is flushed and closed
    # (the original json.dump(state, open(...)) leaked the handle).
    with open(args.jsonfile, 'w') as fh:
        json.dump(state, fh)
if __name__ == '__main__':
    # CLI entry point: -f/--file output json, -n/--node source node.
    parser = argparse.ArgumentParser(description='grabs GPFS cluster state \
and dumps to a json file')
    parser.add_argument('-f', '--file', dest='jsonfile', required=True,
                        help='json file to dump to')
    parser.add_argument('-n', '--node', dest='destnode', required=True,
                        help='node to grab cluster state from')
    args = parser.parse_args()
main(args) | en | 0.551238 | #!/usr/bin/env python # this builds a complete GPFS cluster state defaultdict # write all of this to a json dump | 2.183463 | 2 |
service.py | evox95/OpenTextExpander | 0 | 6616893 | <gh_stars>0
import keyboard
_ote = {}
def _watch_for_word(lookout, replace_with):
    """Register a hotstring: typing *lookout* followed by space expands it.

    The callback erases len(lookout) + 1 characters (the word plus the
    triggering space) and types *replace_with* instead.
    """
    def word_typed():
        # Expansion can be toggled globally via the shared _ote state dict.
        if not _ote.get('enabled', False):
            print('- word \'%s\' typed but OTE is disabled' % lookout)
            return
        for i in range(len(lookout) + 1):
            # NOTE(review): press() without a matching release() -- confirm
            # the keyboard library auto-taps here, or keys may stay "down".
            keyboard.press('backspace')
        keyboard.write(replace_with)
        print('+ \'%s\' - \'%s\'' % (lookout, replace_with))
    keyboard.add_word_listener(lookout, callback=word_typed, timeout=2, triggers=['space'])


def _add_word_listeners():
    """Drop all existing hooks and re-register listeners from the config."""
    keyboard.unhook_all()
    for lookout, replace_with in _ote['config']['word_listeners'].items():
        _watch_for_word(lookout, replace_with)


def run(child_conn):
    """Service loop: receive fresh state dicts from the parent process.

    Loops until 'exit' is set in the received state or the pipe closes
    (EOFError). Re-registers listeners whenever 'reload_config' is set.
    """
    global _ote
    try:
        while not _ote.get('exit', False):
            _ote = child_conn.recv()
            if _ote['reload_config']:
                _add_word_listeners()
    except EOFError:
        pass
keyboard.unhook_all() | import keyboard
_ote = {}
def _watch_for_word(lookout, replace_with):
    """Register a hotstring: typing *lookout* followed by space expands it.

    The callback erases len(lookout) + 1 characters (the word plus the
    triggering space) and types *replace_with* instead.
    """
    def word_typed():
        # Expansion can be toggled globally via the shared _ote state dict.
        if not _ote.get('enabled', False):
            print('- word \'%s\' typed but OTE is disabled' % lookout)
            return
        for i in range(len(lookout) + 1):
            # NOTE(review): press() without a matching release() -- confirm
            # the keyboard library auto-taps here, or keys may stay "down".
            keyboard.press('backspace')
        keyboard.write(replace_with)
        print('+ \'%s\' - \'%s\'' % (lookout, replace_with))
    keyboard.add_word_listener(lookout, callback=word_typed, timeout=2, triggers=['space'])


def _add_word_listeners():
    """Drop all existing hooks and re-register listeners from the config."""
    keyboard.unhook_all()
    for lookout, replace_with in _ote['config']['word_listeners'].items():
        _watch_for_word(lookout, replace_with)


def run(child_conn):
    """Service loop: receive fresh state dicts from the parent process.

    Loops until 'exit' is set in the received state or the pipe closes
    (EOFError). Re-registers listeners whenever 'reload_config' is set.
    """
    global _ote
    try:
        while not _ote.get('exit', False):
            _ote = child_conn.recv()
            if _ote['reload_config']:
                _add_word_listeners()
    except EOFError:
        pass
keyboard.unhook_all() | none | 1 | 2.504956 | 3 | |
tests/__init__.py | trampfox/nimbus-phantom-rest-client | 0 | 6616894 | __author__ = 'trampfox'
| __author__ = 'trampfox'
| none | 1 | 0.973959 | 1 | |
compile_commands-files.py | yantaozhao/json2cmakelists | 5 | 6616895 | import sys
import os
import json
import re
import subprocess
import argparse
def loadCompilecommandsJson(jsonfile: str) -> dict:
    """Read and parse a compile_commands.json file."""
    with open(jsonfile, encoding='utf-8') as handle:
        return json.load(handle)
def changeCompilerCommand(cmdline: str) -> str:
    """Rewrite a compile command for dependency generation.

    Inserts ``-MM`` (emit a make-style dependency rule, skipping system
    headers) right after the compiler executable and removes the
    ``-o <file>`` output option, so running the command prints the rule
    instead of producing an object file.

    Raises:
        Exception: on multiple ``-o`` options, a leading ``-o``, or a
            trailing ``-o`` with no argument (the original code crashed
            with IndexError in that last case).
    """
    parts = cmdline.split()
    o_positions = [i for i, p in enumerate(parts) if p == '-o']
    if len(o_positions) > 1:
        raise Exception('multi -o found: {}'.format(cmdline))
    if o_positions:
        o_idx = o_positions[0]
        assert o_idx != 0, '-o at the head of: {}'.format(cmdline)
        if o_idx == len(parts) - 1:
            raise Exception('-o has no argument: {}'.format(cmdline))
        # drop the flag together with its argument
        del parts[o_idx:o_idx + 2]
    parts.insert(1, '-MM')
    return ' '.join(parts)
def runCmd(cmdline: str, env: dict = None) -> str:
    """Run *cmdline* through the shell and return its captured stdout.

    A non-empty *env* replaces the subprocess environment. Raises
    subprocess.CalledProcessError on a non-zero exit status.
    """
    kwargs = dict(shell=True, check=True, capture_output=True, text=True)
    if env and len(env):
        kwargs['env'] = env
    return subprocess.run(cmdline, **kwargs).stdout
def extractFilesFromMakeRule(rule: str) -> dict:
    """Parse a single make dependency rule into its components.

    ``'t.o: a.c a.h'`` -> ``{'target': 't.o', 'src': 'a.c', 'include': ['a.h']}``

    The first prerequisite is assumed to be the source file; the rest are
    treated as included headers. A rule with no prerequisites now yields
    an empty ``src``/``include`` instead of crashing with IndexError.
    """
    dic = {
        'target': '',
        'src': '',
        'include': []
    }
    assert len(re.findall(':', rule)) == 1, rule
    colon = rule.find(':')
    dic['target'] = rule[:colon].strip()
    others = rule[colon + 1:].strip()
    # split on whitespace and on the backslash line-continuations make emits
    parts = [p.strip() for p in re.split(r'\s+|\\', others)]
    parts = [p for p in parts if p]
    if parts:  # guard: rule body may be empty
        dic['src'] = parts[0]  # FIXME: is the 1st file really the source code?
        dic['include'] = parts[1:]
    return dic
def mainImpl(cwd: str, cc_json_file: str, output_file: str,
             paths_unique: bool = True, paths_compact: bool = True, path_abs: bool = True):
    """For every entry in compile_commands.json, run the compiler with -MM
    and write the source + included header paths to *output_file*.

    Side effect: chdir()s into each entry's build directory while running.
    NOTE(review): ``path_abs`` is accepted but never used (relative-path
    output is not implemented yet; see the TODO below).
    """
    cwd0 = cwd
    os.chdir(cwd)
    exists = set()  # paths already written (used when paths_unique)
    exts = set()  # file extension
    js = loadCompilecommandsJson(cc_json_file)
    with open(output_file, mode='w+', encoding='utf-8') as fd:
        for ji, dic in enumerate(js, start=1):
            print('{}/{}'.format(ji, len(js)))
            cur_dir = dic['directory']
            cur_fil = dic['file']
            cur_cmd = dic.get('command')
            if not cur_cmd:
                # entries may use the "arguments" list form instead of "command"
                cur_cmd = ' '.join(dic.get('arguments'))
            if not os.path.isabs(cur_dir):
                cur_dir = os.path.abspath(os.path.join(cwd0, cur_dir))
            if cwd != cur_dir:
                os.chdir(cur_dir)
                cwd = cur_dir
            if not os.path.isabs(cur_fil):
                cur_fil = os.path.abspath(os.path.join(cur_dir, cur_fil))
            if cur_fil.find('\\') >= 0:
                print('Warning: \\ found in path, result maybe incorrect: {}'.format(cur_fil))
            cur_fil_dir = os.path.dirname(cur_fil)
            cmdline = changeCompilerCommand(cur_cmd)
            rule = runCmd(cmdline)
            rule_dic = extractFilesFromMakeRule(rule)
            # get src and include files
            srcs: list = [cur_fil, rule_dic['src']]
            includes: list = rule_dic['include']
            srcs = map(lambda s: s if os.path.isabs(s) else os.path.abspath(os.path.join(cur_dir, s)), srcs)
            srcs = list(set(srcs))
            assert len(srcs) == 1, '{} duplicated!'.format(srcs)  # to check or not?
            if includes:
                includes = list(map(lambda h: h if os.path.isabs(h) else os.path.abspath(os.path.join(cur_fil_dir, h)), includes))
            # write path of src and include files
            for f in srcs+includes:
                ext = os.path.splitext(f)[-1]
                if ext:
                    exts.add(ext)
                if paths_unique:
                    if f in exists:
                        continue
                    else:
                        exists.add(f)
                # TODO: relative path
                print(f, file=fd)
            if not paths_compact:
                print('', file=fd)  # empty line
    print('file extensions: {}'.format(sorted(exts)))
def main(args):
    """Validate CLI arguments, confirm overwrites interactively, then
    delegate to mainImpl() with paths resolved to absolute.
    """
    cwd = os.getcwd()
    compile_commands_json_file = args.input
    output_file = args.output
    paths_unique = True
    paths_compact = True
    path_abs = True
    compile_commands_json_file = os.path.abspath(os.path.join(cwd, compile_commands_json_file))
    if os.path.exists(output_file) and os.path.isfile(output_file):
        # interactive overwrite confirmation; empty input defaults to "no"
        while True:
            yn = input('{} already exist! Overwrite?[y/N]:'.format(output_file))
            if yn in ('y', 'Y',):
                break
            if yn in ('n', 'N', '',):
                print('exit.')
                sys.exit()
            print('make a choice...')
    output_file = os.path.abspath(os.path.join(cwd, output_file))
    if args.paths == 'unique':
        paths_unique = True
    elif args.paths == 'full':
        paths_unique = False
    else:
        raise Exception('unknown value: {}'.format(args.paths))
    if args.no_compact_paths:
        paths_compact = False
    else:
        paths_compact = True
    if args.path_style == 'absolute':
        path_abs = True
    elif args.path_style == 'relative':
        path_abs = False
    else:
        raise Exception('unknown value: {}'.format(args.path_style))
    # run relative to the json's own directory so entries resolve correctly
    json_cwd = os.path.dirname(compile_commands_json_file)
    print('input:', compile_commands_json_file)
    mainImpl(cwd=json_cwd, cc_json_file=compile_commands_json_file, output_file=output_file,
             paths_unique=paths_unique, paths_compact=paths_compact, path_abs=path_abs)
    print('output:', output_file)
def parse_args():
    """Build the argparse CLI and parse sys.argv."""
    desc = r"""
SYNOPSIS: get all src and included files, by adding `-MM` options to compiler and parse the output.
Supported compilers: gcc/g++, clang/clang++"""
    ap = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawDescriptionHelpFormatter)
    ap.add_argument('input', type=str, default='compile_commands.json', nargs='?',
                    help='path to {0}. [default: {0}]'.format('compile_commands.json'))
    ap.add_argument('output', type=str, default='compile_commands_filelist.txt', nargs='?',
                    help='path to result file. [default: compile_commands_filelist.txt]')
    ap.add_argument('--paths', type=str, choices=['unique', 'full'], default='unique',
                    help='control if the output content paths can be duplicated. [default: unique]')
    ap.add_argument('--no-compact-paths', action='store_true',
                    help='insert an empty line between path groups in content.')
    ap.add_argument('--path-style', type=str, choices=['absolute', 'relative'], default='absolute',
                    help="the style file's path in content. [default: absolute]. (NOT implemented)")
    args = ap.parse_args()
    return args


if __name__ == '__main__':
    main(parse_args())
| import sys
import os
import json
import re
import subprocess
import argparse
def loadCompilecommandsJson(jsonfile: str) -> dict:
    """Read and parse a compile_commands.json file."""
    with open(jsonfile, encoding='utf-8') as handle:
        return json.load(handle)
def changeCompilerCommand(cmdline: str) -> str:
    """Rewrite a compile command for dependency generation.

    Inserts ``-MM`` (emit a make-style dependency rule, skipping system
    headers) right after the compiler executable and removes the
    ``-o <file>`` output option, so running the command prints the rule
    instead of producing an object file.

    Raises:
        Exception: on multiple ``-o`` options, a leading ``-o``, or a
            trailing ``-o`` with no argument (the original code crashed
            with IndexError in that last case).
    """
    parts = cmdline.split()
    o_positions = [i for i, p in enumerate(parts) if p == '-o']
    if len(o_positions) > 1:
        raise Exception('multi -o found: {}'.format(cmdline))
    if o_positions:
        o_idx = o_positions[0]
        assert o_idx != 0, '-o at the head of: {}'.format(cmdline)
        if o_idx == len(parts) - 1:
            raise Exception('-o has no argument: {}'.format(cmdline))
        # drop the flag together with its argument
        del parts[o_idx:o_idx + 2]
    parts.insert(1, '-MM')
    return ' '.join(parts)
def runCmd(cmdline: str, env: dict = None) -> str:
    """Run *cmdline* through the shell and return its captured stdout.

    A non-empty *env* replaces the subprocess environment. Raises
    subprocess.CalledProcessError on a non-zero exit status.
    """
    kwargs = dict(shell=True, check=True, capture_output=True, text=True)
    if env and len(env):
        kwargs['env'] = env
    return subprocess.run(cmdline, **kwargs).stdout
def extractFilesFromMakeRule(rule: str) -> dict:
    """Parse a single make dependency rule into its components.

    ``'t.o: a.c a.h'`` -> ``{'target': 't.o', 'src': 'a.c', 'include': ['a.h']}``

    The first prerequisite is assumed to be the source file; the rest are
    treated as included headers. A rule with no prerequisites now yields
    an empty ``src``/``include`` instead of crashing with IndexError.
    """
    dic = {
        'target': '',
        'src': '',
        'include': []
    }
    assert len(re.findall(':', rule)) == 1, rule
    colon = rule.find(':')
    dic['target'] = rule[:colon].strip()
    others = rule[colon + 1:].strip()
    # split on whitespace and on the backslash line-continuations make emits
    parts = [p.strip() for p in re.split(r'\s+|\\', others)]
    parts = [p for p in parts if p]
    if parts:  # guard: rule body may be empty
        dic['src'] = parts[0]  # FIXME: is the 1st file really the source code?
        dic['include'] = parts[1:]
    return dic
def mainImpl(cwd: str, cc_json_file: str, output_file: str,
             paths_unique: bool = True, paths_compact: bool = True, path_abs: bool = True):
    """For every entry in compile_commands.json, run the compiler with -MM
    and write the source + included header paths to *output_file*.

    Side effect: chdir()s into each entry's build directory while running.
    NOTE(review): ``path_abs`` is accepted but never used (relative-path
    output is not implemented yet; see the TODO below).
    """
    cwd0 = cwd
    os.chdir(cwd)
    exists = set()  # paths already written (used when paths_unique)
    exts = set()  # file extension
    js = loadCompilecommandsJson(cc_json_file)
    with open(output_file, mode='w+', encoding='utf-8') as fd:
        for ji, dic in enumerate(js, start=1):
            print('{}/{}'.format(ji, len(js)))
            cur_dir = dic['directory']
            cur_fil = dic['file']
            cur_cmd = dic.get('command')
            if not cur_cmd:
                # entries may use the "arguments" list form instead of "command"
                cur_cmd = ' '.join(dic.get('arguments'))
            if not os.path.isabs(cur_dir):
                cur_dir = os.path.abspath(os.path.join(cwd0, cur_dir))
            if cwd != cur_dir:
                os.chdir(cur_dir)
                cwd = cur_dir
            if not os.path.isabs(cur_fil):
                cur_fil = os.path.abspath(os.path.join(cur_dir, cur_fil))
            if cur_fil.find('\\') >= 0:
                print('Warning: \\ found in path, result maybe incorrect: {}'.format(cur_fil))
            cur_fil_dir = os.path.dirname(cur_fil)
            cmdline = changeCompilerCommand(cur_cmd)
            rule = runCmd(cmdline)
            rule_dic = extractFilesFromMakeRule(rule)
            # get src and include files
            srcs: list = [cur_fil, rule_dic['src']]
            includes: list = rule_dic['include']
            srcs = map(lambda s: s if os.path.isabs(s) else os.path.abspath(os.path.join(cur_dir, s)), srcs)
            srcs = list(set(srcs))
            assert len(srcs) == 1, '{} duplicated!'.format(srcs)  # to check or not?
            if includes:
                includes = list(map(lambda h: h if os.path.isabs(h) else os.path.abspath(os.path.join(cur_fil_dir, h)), includes))
            # write path of src and include files
            for f in srcs+includes:
                ext = os.path.splitext(f)[-1]
                if ext:
                    exts.add(ext)
                if paths_unique:
                    if f in exists:
                        continue
                    else:
                        exists.add(f)
                # TODO: relative path
                print(f, file=fd)
            if not paths_compact:
                print('', file=fd)  # empty line
    print('file extensions: {}'.format(sorted(exts)))
def main(args):
    """Validate CLI arguments, confirm overwrites interactively, then
    delegate to mainImpl() with paths resolved to absolute.
    """
    cwd = os.getcwd()
    compile_commands_json_file = args.input
    output_file = args.output
    paths_unique = True
    paths_compact = True
    path_abs = True
    compile_commands_json_file = os.path.abspath(os.path.join(cwd, compile_commands_json_file))
    if os.path.exists(output_file) and os.path.isfile(output_file):
        # interactive overwrite confirmation; empty input defaults to "no"
        while True:
            yn = input('{} already exist! Overwrite?[y/N]:'.format(output_file))
            if yn in ('y', 'Y',):
                break
            if yn in ('n', 'N', '',):
                print('exit.')
                sys.exit()
            print('make a choice...')
    output_file = os.path.abspath(os.path.join(cwd, output_file))
    if args.paths == 'unique':
        paths_unique = True
    elif args.paths == 'full':
        paths_unique = False
    else:
        raise Exception('unknown value: {}'.format(args.paths))
    if args.no_compact_paths:
        paths_compact = False
    else:
        paths_compact = True
    if args.path_style == 'absolute':
        path_abs = True
    elif args.path_style == 'relative':
        path_abs = False
    else:
        raise Exception('unknown value: {}'.format(args.path_style))
    # run relative to the json's own directory so entries resolve correctly
    json_cwd = os.path.dirname(compile_commands_json_file)
    print('input:', compile_commands_json_file)
    mainImpl(cwd=json_cwd, cc_json_file=compile_commands_json_file, output_file=output_file,
             paths_unique=paths_unique, paths_compact=paths_compact, path_abs=path_abs)
    print('output:', output_file)
def parse_args():
    """Build the argparse CLI and parse sys.argv."""
    desc = r"""
SYNOPSIS: get all src and included files, by adding `-MM` options to compiler and parse the output.
Supported compilers: gcc/g++, clang/clang++"""
    ap = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawDescriptionHelpFormatter)
    ap.add_argument('input', type=str, default='compile_commands.json', nargs='?',
                    help='path to {0}. [default: {0}]'.format('compile_commands.json'))
    ap.add_argument('output', type=str, default='compile_commands_filelist.txt', nargs='?',
                    help='path to result file. [default: compile_commands_filelist.txt]')
    ap.add_argument('--paths', type=str, choices=['unique', 'full'], default='unique',
                    help='control if the output content paths can be duplicated. [default: unique]')
    ap.add_argument('--no-compact-paths', action='store_true',
                    help='insert an empty line between path groups in content.')
    ap.add_argument('--path-style', type=str, choices=['absolute', 'relative'], default='absolute',
                    help="the style file's path in content. [default: absolute]. (NOT implemented)")
    args = ap.parse_args()
    return args


if __name__ == '__main__':
    main(parse_args())
| en | 0.816324 | add -MM, delete -o # if cmdline.find('\\') >= 0: # raise Exception('\\ found in cmdline: {}'.format(cmdline)) make's rule -> dict # FIXME: is the 1st file really the source code? # file extension # get src and include files # to check or not? # write path of src and include files # TODO: relative path # empty line SYNOPSIS: get all src and included files, by adding `-MM` options to compiler and parse the output. Supported compilers: gcc/g++, clang/clang++ | 2.372978 | 2 |
wavespin/scattering1d/frontend/tensorflow_frontend.py | OverLordGoldDragon/dev_tg | 2 | 6616896 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- <NAME>
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
import tensorflow as tf
import math
from ...frontend.tensorflow_frontend import ScatteringTensorFlow
from ..core.scattering1d import scattering1d
from ..core.timefrequency_scattering1d import timefrequency_scattering1d
from ...toolkit import pack_coeffs_jtfs
from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase1D
from ..filter_bank_jtfs import _check_runtime_args_jtfs, _handle_args_jtfs
class ScatteringTensorFlow1D(ScatteringTensorFlow, ScatteringBase1D):
    """
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/frontend/
    tensorflow_frontend.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    def __init__(self, J, shape, Q=1, T=None, max_order=2, average=True,
                 oversampling=0, out_type='array', pad_mode='reflect',
                 max_pad_factor=1, analytic=False, normalize='l1-energy',
                 r_psi=math.sqrt(.5), backend='tensorflow', name='Scattering1D'):
        # Delegate construction: TF module setup, then the backend-agnostic
        # base (builds padding, filter banks, etc.).
        ScatteringTensorFlow.__init__(self, name=name)
        ScatteringBase1D.__init__(
            self, J, shape, Q, T, max_order, average, oversampling, out_type,
            pad_mode, max_pad_factor, analytic, normalize, r_psi, backend)
        ScatteringBase1D._instantiate_backend(self,
                                              'wavespin.scattering1d.backend.')
        ScatteringBase1D.build(self)
        ScatteringBase1D.create_filters(self)

    def scattering(self, x):
        """Compute the 1D scattering transform of ``x`` (time on last axis)."""
        # basic checking, should be improved
        if len(x.shape) < 1:
            raise ValueError(
                'Input tensor x should have at least one axis, got {}'.format(
                    len(x.shape)))
        if self.out_type not in ('array', 'list'):
            raise RuntimeError("`out_type` must be one of: 'array', 'list'.")
        if self.out_type == 'array' and not self.average:
            raise ValueError("out_type=='array' and average==False are mutually "
                             "incompatible. Please set out_type='list'.")
        # Collapse leading batch dims to a single batch axis for the core.
        batch_shape = tf.shape(x)[:-1]
        signal_shape = tf.shape(x)[-1:]
        x = tf.reshape(x, tf.concat(((-1, 1), signal_shape), 0))
        S = scattering1d(x, self.pad_fn, self.backend.unpad, self.backend,
                         self.J, self.log2_T, self.psi1_f, self.psi2_f,
                         self.phi_f, max_order=self.max_order,
                         average=self.average, ind_start=self.ind_start,
                         ind_end=self.ind_end, oversampling=self.oversampling,
                         out_type=self.out_type)
        # Restore the original batch dimensions on the output coefficients.
        if self.out_type == 'array':
            scattering_shape = tf.shape(S)[-2:]
            new_shape = tf.concat((batch_shape, scattering_shape), 0)
            S = tf.reshape(S, new_shape)
        else:
            for x in S:
                scattering_shape = tf.shape(x['coef'])[-1:]
                new_shape = tf.concat((batch_shape, scattering_shape), 0)
                x['coef'] = tf.reshape(x['coef'], new_shape)
        return S
ScatteringTensorFlow1D._document()
class TimeFrequencyScatteringTensorFlow1D(TimeFrequencyScatteringBase1D,
                                          ScatteringTensorFlow1D):
    """TensorFlow frontend of the joint time-frequency scattering (JTFS)
    transform, layered on top of the 1D time-scattering frontend."""
    def __init__(self, J, shape, Q, J_fr=None, Q_fr=2, T=None, F=None,
                 average=True, average_fr=False, oversampling=0, out_type="array",
                 pad_mode='reflect', max_pad_factor=1, analytic=True,
                 normalize='l1-energy', r_psi=math.sqrt(.5), implementation=None,
                 backend="tensorflow", name='TimeFrequencyScattering1D',
                 **kwargs):
        # JTFS constrains the time-scattering order and output type of the
        # underlying subclass; `_handle_args_jtfs` derives both from `out_type`.
        max_order_tm, subcls_out_type = _handle_args_jtfs(out_type)
        # Second-order scattering object for the time variable
        ScatteringTensorFlow1D.__init__(
            self, J, shape, Q, T, max_order_tm, average, oversampling,
            subcls_out_type, pad_mode, max_pad_factor, analytic,
            normalize, r_psi, backend=backend)
        # Frequential scattering object
        TimeFrequencyScatteringBase1D.__init__(
            self, J_fr, Q_fr, F, average_fr, out_type, implementation, **kwargs)
        TimeFrequencyScatteringBase1D.build(self)
    def scattering(self, x, Tx=None):
        """Compute the JTFS of `x`; `Tx` is forwarded to the core routine."""
        if len(x.shape) < 1:
            raise ValueError(
                'Input tensor x should have at least one axis, got {}'.format(
                    len(x.shape)))
        # Validate runtime-configurable options before doing any work.
        _check_runtime_args_jtfs(self.average, self.average_fr, self.out_type,
                                 self.out_3D)
        # Collapse batch dims: (..., N) -> (-1, 1, N).
        signal_shape = tf.shape(x)[-1:]
        x = tf.reshape(x, tf.concat(((-1, 1), signal_shape), 0))
        S = timefrequency_scattering1d(
            x,
            Tx,
            self.backend.unpad,
            self.backend,
            self.J,
            self.log2_T,
            self.psi1_f, self.psi2_f, self.phi_f,
            self.scf,
            self.pad_fn,
            average=self.average,
            average_global=self.average_global,
            average_global_phi=self.average_global_phi,
            pad_left=self.pad_left, pad_right=self.pad_right,
            ind_start=self.ind_start, ind_end=self.ind_end,
            oversampling=self.oversampling,
            oversampling_fr=self.oversampling_fr,
            aligned=self.aligned,
            F_kind=self.F_kind,
            out_type=self.out_type,
            out_3D=self.out_3D,
            out_exclude=self.out_exclude,
            paths_exclude=self.paths_exclude,
            pad_mode=self.pad_mode)
        # Optionally repack coefficients into the requested output structure.
        if self.out_structure is not None:
            S = pack_coeffs_jtfs(S, self.meta(), self.out_structure,
                                 separate_lowpass=True,
                                 sampling_psi_fr=self.sampling_psi_fr)
        return S
    def scf_compute_padding_fr(self):
        # Documentation stub only; the real implementation lives elsewhere.
        raise NotImplementedError("Here for docs; implemented in "
                                  "`_FrequencyScatteringBase`.")
    def scf_compute_J_pad_fr(self):
        # Documentation stub only; the real implementation lives elsewhere.
        raise NotImplementedError("Here for docs; implemented in "
                                  "`_FrequencyScatteringBase`.")
# Populate the class docstrings (see note on ScatteringTensorFlow1D above).
TimeFrequencyScatteringTensorFlow1D._document()
# Public API of this module.
__all__ = ['ScatteringTensorFlow1D', 'TimeFrequencyScatteringTensorFlow1D']
| # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- <NAME>
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
import tensorflow as tf
import math
from ...frontend.tensorflow_frontend import ScatteringTensorFlow
from ..core.scattering1d import scattering1d
from ..core.timefrequency_scattering1d import timefrequency_scattering1d
from ...toolkit import pack_coeffs_jtfs
from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase1D
from ..filter_bank_jtfs import _check_runtime_args_jtfs, _handle_args_jtfs
class ScatteringTensorFlow1D(ScatteringTensorFlow, ScatteringBase1D):
    """TensorFlow frontend of the 1D wavelet scattering transform.

    Combines the TF-specific frontend (`ScatteringTensorFlow`) with the
    backend-agnostic 1D scattering machinery (`ScatteringBase1D`).

    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/frontend/
    tensorflow_frontend.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    def __init__(self, J, shape, Q=1, T=None, max_order=2, average=True,
                 oversampling=0, out_type='array', pad_mode='reflect',
                 max_pad_factor=1, analytic=False, normalize='l1-energy',
                 r_psi=math.sqrt(.5), backend='tensorflow', name='Scattering1D'):
        # Both parents are initialized explicitly (no cooperative super()).
        ScatteringTensorFlow.__init__(self, name=name)
        ScatteringBase1D.__init__(
            self, J, shape, Q, T, max_order, average, oversampling, out_type,
            pad_mode, max_pad_factor, analytic, normalize, r_psi, backend)
        # Resolve the concrete backend module, then build padding/index
        # bookkeeping and instantiate the filterbank.
        ScatteringBase1D._instantiate_backend(self,
                                              'wavespin.scattering1d.backend.')
        ScatteringBase1D.build(self)
        ScatteringBase1D.create_filters(self)
    def scattering(self, x):
        """Compute the scattering transform of `x` (time along last axis).

        Returns a tensor when `out_type == 'array'`, else a list of dicts
        whose 'coef' entries hold the coefficient tensors.
        """
        # basic checking, should be improved
        if len(x.shape) < 1:
            raise ValueError(
                'Input tensor x should have at least one axis, got {}'.format(
                    len(x.shape)))
        if self.out_type not in ('array', 'list'):
            raise RuntimeError("`out_type` must be one of: 'array', 'list'.")
        if self.out_type == 'array' and not self.average:
            raise ValueError("out_type=='array' and average==False are mutually "
                             "incompatible. Please set out_type='list'.")
        # Collapse all leading batch dims into one: (..., N) -> (-1, 1, N).
        batch_shape = tf.shape(x)[:-1]
        signal_shape = tf.shape(x)[-1:]
        x = tf.reshape(x, tf.concat(((-1, 1), signal_shape), 0))
        S = scattering1d(x, self.pad_fn, self.backend.unpad, self.backend,
                         self.J, self.log2_T, self.psi1_f, self.psi2_f,
                         self.phi_f, max_order=self.max_order,
                         average=self.average, ind_start=self.ind_start,
                         ind_end=self.ind_end, oversampling=self.oversampling,
                         out_type=self.out_type)
        # Restore the caller's original batch dimensions on the output(s).
        if self.out_type == 'array':
            scattering_shape = tf.shape(S)[-2:]
            new_shape = tf.concat((batch_shape, scattering_shape), 0)
            S = tf.reshape(S, new_shape)
        else:
            # 'list' output: reshape each path's coefficient tensor in place.
            for x in S:
                scattering_shape = tf.shape(x['coef'])[-1:]
                new_shape = tf.concat((batch_shape, scattering_shape), 0)
                x['coef'] = tf.reshape(x['coef'], new_shape)
        return S
# Populate the class docstrings (presumably from shared templates in the
# base frontend) — `_document` is defined outside this module.
ScatteringTensorFlow1D._document()
class TimeFrequencyScatteringTensorFlow1D(TimeFrequencyScatteringBase1D,
                                          ScatteringTensorFlow1D):
    """TensorFlow frontend of the joint time-frequency scattering (JTFS)
    transform, layered on top of the 1D time-scattering frontend."""
    def __init__(self, J, shape, Q, J_fr=None, Q_fr=2, T=None, F=None,
                 average=True, average_fr=False, oversampling=0, out_type="array",
                 pad_mode='reflect', max_pad_factor=1, analytic=True,
                 normalize='l1-energy', r_psi=math.sqrt(.5), implementation=None,
                 backend="tensorflow", name='TimeFrequencyScattering1D',
                 **kwargs):
        # JTFS constrains the time-scattering order and output type of the
        # underlying subclass; `_handle_args_jtfs` derives both from `out_type`.
        max_order_tm, subcls_out_type = _handle_args_jtfs(out_type)
        # Second-order scattering object for the time variable
        ScatteringTensorFlow1D.__init__(
            self, J, shape, Q, T, max_order_tm, average, oversampling,
            subcls_out_type, pad_mode, max_pad_factor, analytic,
            normalize, r_psi, backend=backend)
        # Frequential scattering object
        TimeFrequencyScatteringBase1D.__init__(
            self, J_fr, Q_fr, F, average_fr, out_type, implementation, **kwargs)
        TimeFrequencyScatteringBase1D.build(self)
    def scattering(self, x, Tx=None):
        """Compute the JTFS of `x`; `Tx` is forwarded to the core routine."""
        if len(x.shape) < 1:
            raise ValueError(
                'Input tensor x should have at least one axis, got {}'.format(
                    len(x.shape)))
        # Validate runtime-configurable options before doing any work.
        _check_runtime_args_jtfs(self.average, self.average_fr, self.out_type,
                                 self.out_3D)
        # Collapse batch dims: (..., N) -> (-1, 1, N).
        signal_shape = tf.shape(x)[-1:]
        x = tf.reshape(x, tf.concat(((-1, 1), signal_shape), 0))
        S = timefrequency_scattering1d(
            x,
            Tx,
            self.backend.unpad,
            self.backend,
            self.J,
            self.log2_T,
            self.psi1_f, self.psi2_f, self.phi_f,
            self.scf,
            self.pad_fn,
            average=self.average,
            average_global=self.average_global,
            average_global_phi=self.average_global_phi,
            pad_left=self.pad_left, pad_right=self.pad_right,
            ind_start=self.ind_start, ind_end=self.ind_end,
            oversampling=self.oversampling,
            oversampling_fr=self.oversampling_fr,
            aligned=self.aligned,
            F_kind=self.F_kind,
            out_type=self.out_type,
            out_3D=self.out_3D,
            out_exclude=self.out_exclude,
            paths_exclude=self.paths_exclude,
            pad_mode=self.pad_mode)
        # Optionally repack coefficients into the requested output structure.
        if self.out_structure is not None:
            S = pack_coeffs_jtfs(S, self.meta(), self.out_structure,
                                 separate_lowpass=True,
                                 sampling_psi_fr=self.sampling_psi_fr)
        return S
    def scf_compute_padding_fr(self):
        # Documentation stub only; the real implementation lives elsewhere.
        raise NotImplementedError("Here for docs; implemented in "
                                  "`_FrequencyScatteringBase`.")
    def scf_compute_J_pad_fr(self):
        # Documentation stub only; the real implementation lives elsewhere.
        raise NotImplementedError("Here for docs; implemented in "
                                  "`_FrequencyScatteringBase`.")
# Populate the class docstrings (see note on ScatteringTensorFlow1D above).
TimeFrequencyScatteringTensorFlow1D._document()
__all__ = ['ScatteringTensorFlow1D', 'TimeFrequencyScatteringTensorFlow1D'] | en | 0.513259 | # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2022- <NAME> # # Distributed under the terms of the MIT License # (see wavespin/__init__.py for details) # ----------------------------------------------------------------------------- This is a modification of https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/frontend/ tensorflow_frontend.py Kymatio, (C) 2018-present. The Kymatio developers. # basic checking, should be improved # Second-order scattering object for the time variable # Frequential scattering object | 1.822664 | 2 |
examples/2_application_placement.py | ihsan1852/edge | 0 | 6616897 | import logging
import random
import simpy
from leaf.application import Application, SourceTask, ProcessingTask, SinkTask
from leaf.infrastructure import Node, Link, Infrastructure
from leaf.orchestrator import Orchestrator
from leaf.power import PowerModelNode, PowerModelNodeShared, PowerModelLink, PowerMeter
RANDOM_SEED = 1
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s\t%(message)s')
def simple_example():
    """Simple example without any behaviour over time.
    Read the explanations of :func:`create_infrastructure`, :func:`create_application` and :class:`SimpleOrchestrator`
    for details on the scenario setup.
    The PowerMeters can be configured to periodically measure the power consumption of one or more PowerAware entities
    such as applications, tasks, data flows, compute nodes, network links or the entire infrastructure.
    The scenario is running for 5 time steps.
    """
    infrastructure = create_infrastructure()
    application = create_application(source_node=infrastructure.node("sensor"), sink_node=infrastructure.node("cloud"))
    # Place the application's tasks onto infrastructure nodes.
    orchestrator = SimpleOrchestrator(infrastructure)
    orchestrator.place(application)
    env = simpy.Environment()
    # Three meters: application power (measured from t=0.5 until t=3),
    # the two named nodes, and the whole infrastructure every 2 time steps.
    PowerMeter(env, name="application_meter", entities=application, delay=0.5, end_time=3)
    PowerMeter(env, name="cloud_and_fog_meter", entities=[infrastructure.node("cloud"), infrastructure.node("fog")])
    PowerMeter(env, name="infrastructure_meter", entities=infrastructure, measurement_interval=2)
    env.run(until=5)
def create_infrastructure():
    """Create the scenario's infrastructure graph.
    It consists of three nodes:
    - A sensor that can compute up to 1000 million instructions per second (MIPS).
      It has a maximum power usage of 1.8 Watt and a power usage of 0.2 Watt when being idle.
    - A fog node which can compute up to 400000 MIPS; 200 Watt max and 30 Watt static power usage
    - A node representing a cloud data center with unlimited processing power that consumes 700 W/MIPS
    And two network links that connect the nodes:
    - A WiFi connection between the sensor and fog node that consumes 300 J/bit
    - A wide area network (WAN) connection between the fog node and cloud that consumes 6000 J/bit
    """
    infrastructure = Infrastructure()
    sensor = Node("sensor", mips=1000, power_model=PowerModelNode(max_power=1.8, static_power=0.2))
    fog_node = Node("fog", mips=400000, power_model=PowerModelNode(max_power=200, static_power=30))
    # No `mips` argument -> unbounded capacity; power scales with used MIPS.
    cloud = Node("cloud", power_model=PowerModelNodeShared(power_per_mips=700))
    wifi_link_up = Link(sensor, fog_node, latency=10, bandwidth=30e6, power_model=PowerModelLink(300))
    wan_link_up = Link(fog_node, cloud, latency=5, bandwidth=1e9, power_model=PowerModelLink(6000))
    # Nodes are never added explicitly — presumably add_link registers the
    # link's endpoints as well; verify against leaf.infrastructure.
    infrastructure.add_link(wifi_link_up)
    infrastructure.add_link(wan_link_up)
    return infrastructure
def create_application(source_node: Node, sink_node: Node):
    """Create the application running in the scenario.
    It consists of three tasks and two data flows between these tasks:
    - A source task that is bound to the sensor node and requires 100 MIPS (for measuring data)
    - A processing task that receives 1000 bit/s from the source task, requires 5000 MIPS (for aggregating the data)
      and returns 200 bit/s to the sink task
    - A sink task that is bound to the cloud node and requires 100 MIPS (for storing the data)
    """
    application = Application()
    source_task = SourceTask(mips=100, bound_node=source_node)
    processing_task = ProcessingTask(mips=5000)
    sink_task = SinkTask(mips=100, bound_node=sink_node)
    # Data flows are declared together with the downstream task (bit/s).
    application.add_task(source_task)
    application.add_task(processing_task, incoming_data_flows=[(source_task, 1000)])
    application.add_task(sink_task, incoming_data_flows=[(processing_task, 200)])
    return application
class SimpleOrchestrator(Orchestrator):
    """Very simple orchestrator that places the processing task on the fog node.
    You can try out other placements here and see how the placement may consume more energy ("cloud")
    or fail because there are not enough resources available ("sensor").
    """
    def _processing_task_placement(self, processing_task: ProcessingTask, application: Application) -> Node:
        # Source/sink tasks are bound to nodes already; only the unbound
        # processing task needs a placement decision.
        return self.infrastructure.node("fog")
if __name__ == '__main__':
    # Fixed seed for reproducible simulation runs.
    random.seed(RANDOM_SEED)
    simple_example()
| import logging
import random
import simpy
from leaf.application import Application, SourceTask, ProcessingTask, SinkTask
from leaf.infrastructure import Node, Link, Infrastructure
from leaf.orchestrator import Orchestrator
from leaf.power import PowerModelNode, PowerModelNodeShared, PowerModelLink, PowerMeter
RANDOM_SEED = 1
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s\t%(message)s')
def simple_example():
    """Simple example without any behaviour over time.
    Read the explanations of :func:`create_infrastructure`, :func:`create_application` and :class:`SimpleOrchestrator`
    for details on the scenario setup.
    The PowerMeters can be configured to periodically measure the power consumption of one or more PowerAware entities
    such as applications, tasks, data flows, compute nodes, network links or the entire infrastructure.
    The scenario is running for 5 time steps.
    """
    infrastructure = create_infrastructure()
    application = create_application(source_node=infrastructure.node("sensor"), sink_node=infrastructure.node("cloud"))
    # Place the application's tasks onto infrastructure nodes.
    orchestrator = SimpleOrchestrator(infrastructure)
    orchestrator.place(application)
    env = simpy.Environment()
    # Three meters: application power (measured from t=0.5 until t=3),
    # the two named nodes, and the whole infrastructure every 2 time steps.
    PowerMeter(env, name="application_meter", entities=application, delay=0.5, end_time=3)
    PowerMeter(env, name="cloud_and_fog_meter", entities=[infrastructure.node("cloud"), infrastructure.node("fog")])
    PowerMeter(env, name="infrastructure_meter", entities=infrastructure, measurement_interval=2)
    env.run(until=5)
def create_infrastructure():
    """Create the scenario's infrastructure graph.
    It consists of three nodes:
    - A sensor that can compute up to 1000 million instructions per second (MIPS).
      It has a maximum power usage of 1.8 Watt and a power usage of 0.2 Watt when being idle.
    - A fog node which can compute up to 400000 MIPS; 200 Watt max and 30 Watt static power usage
    - A node representing a cloud data center with unlimited processing power that consumes 700 W/MIPS
    And two network links that connect the nodes:
    - A WiFi connection between the sensor and fog node that consumes 300 J/bit
    - A wide area network (WAN) connection between the fog node and cloud that consumes 6000 J/bit
    """
    infrastructure = Infrastructure()
    sensor = Node("sensor", mips=1000, power_model=PowerModelNode(max_power=1.8, static_power=0.2))
    fog_node = Node("fog", mips=400000, power_model=PowerModelNode(max_power=200, static_power=30))
    # No `mips` argument -> unbounded capacity; power scales with used MIPS.
    cloud = Node("cloud", power_model=PowerModelNodeShared(power_per_mips=700))
    wifi_link_up = Link(sensor, fog_node, latency=10, bandwidth=30e6, power_model=PowerModelLink(300))
    wan_link_up = Link(fog_node, cloud, latency=5, bandwidth=1e9, power_model=PowerModelLink(6000))
    # Nodes are never added explicitly — presumably add_link registers the
    # link's endpoints as well; verify against leaf.infrastructure.
    infrastructure.add_link(wifi_link_up)
    infrastructure.add_link(wan_link_up)
    return infrastructure
def create_application(source_node: Node, sink_node: Node):
    """Create the application running in the scenario.
    It consists of three tasks and two data flows between these tasks:
    - A source task that is bound to the sensor node and requires 100 MIPS (for measuring data)
    - A processing task that receives 1000 bit/s from the source task, requires 5000 MIPS (for aggregating the data)
      and returns 200 bit/s to the sink task
    - A sink task that is bound to the cloud node and requires 100 MIPS (for storing the data)
    """
    application = Application()
    source_task = SourceTask(mips=100, bound_node=source_node)
    processing_task = ProcessingTask(mips=5000)
    sink_task = SinkTask(mips=100, bound_node=sink_node)
    # Data flows are declared together with the downstream task (bit/s).
    application.add_task(source_task)
    application.add_task(processing_task, incoming_data_flows=[(source_task, 1000)])
    application.add_task(sink_task, incoming_data_flows=[(processing_task, 200)])
    return application
class SimpleOrchestrator(Orchestrator):
    """Very simple orchestrator that places the processing task on the fog node.
    You can try out other placements here and see how the placement may consume more energy ("cloud")
    or fail because there are not enough resources available ("sensor").
    """
    def _processing_task_placement(self, processing_task: ProcessingTask, application: Application) -> Node:
        # Source/sink tasks are bound to nodes already; only the unbound
        # processing task needs a placement decision.
        return self.infrastructure.node("fog")
if __name__ == '__main__':
    # Fixed seed for reproducible simulation runs.
    random.seed(RANDOM_SEED)
    simple_example()
| en | 0.864984 | Simple example without any behaviour over time. Read the explanations of :func:`create_infrastructure`, :func:`create_application` and :class:`SimpleOrchestrator` for details on the scenario setup. The PowerMeters can be configured to periodically measure the power consumption of one or more PowerAware entities such as applications, tasks, data flows, compute nodes, network links or the entire infrastructure. The scenario is running for 5 time steps. Create the scenario's infrastructure graph. It consists of three nodes: - A sensor that can compute up to 1000 million instructions per second (MIPS). It has a maximum power usage of 1.8 Watt and a power usage of 0.2 Watt when being idle. - A fog node which can compute up to 400000 MIPS; 200 Watt max and 30 Watt static power usage - A node representing a cloud data center with unlimited processing power that consumes 700 W/MIPS And two network links that connect the nodes: - A WiFi connection between the sensor and fog node that consumes 300 J/bit - A wide are network (WAN) connection between the fog node and cloud that consumes 6000 J/bit Create the application running in the scenario. It consists of three tasks and two data flows between these tasks: - A source task that is bound to the sensor node and requires 100 MIPS (for measuring data) - A processing task that receives 1000 bit/s from the source task, requires 5000 MIPS (for aggregating the data) and returns 200 bit/s to the sink task - A sink task that is bound to the cloud node and requires 500 MIPS (for storing the data) Very simple orchestrator that places the processing task on the fog node. You can try out other placements here and see how the placement may consume more energy ("cloud") or fail because there are not enough resources available ("sensor"). | 2.598078 | 3 |
tests/test_marathon/test_marathon_migrator.py | Ganasagar/migration-tools-repo | 0 | 6616898 | <gh_stars>0
from dcos_migrate.plugins.marathon import MarathonMigrator, NodeLabelTracker
from dcos_migrate.system import Manifest, ManifestList
from kubernetes.client.models import V1Deployment, V1ObjectMeta, V1Secret # type: ignore
import json
import pytest
def test_simple():
    """A plain Marathon app migrates into a manifest named `<group>.<app>`."""
    with open('tests/examples/simple.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data)
    mres = m.migrate()
    assert mres is not None
    # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1'
    assert m.manifest[0].metadata.name == 'group1.predictionio-server'
@pytest.mark.xfail
def test_simple_portmapping():
    """Port-mapping migration — expected to fail (xfail).

    NOTE(review): the assertion below subscripts the manifest like a dict,
    unlike the attribute access used elsewhere; presumably this is why the
    test is marked xfail — confirm before un-marking.
    """
    with open('tests/examples/simplePortmapping.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data)
    mres = m.migrate()
    assert mres is not None
    # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1'
    assert m.manifest[0]['metadata']['name'] == 'group1.predictionio-server'
def test_simple_with_secret():
    """An app with secret env vars yields Deployment + Service + Secret."""
    ml = ManifestList(path='tests/examples/simpleWithSecret')
    ml.load()
    assert len(ml) == 2
    with open('tests/examples/simpleWithSecret.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data, manifest_list=ml)
    mres = m.migrate()
    assert len(m.manifest) == 3
    assert mres is not None
    app_id = 'group1.predictionio-server'
    app_label = 'group1-predictionio-server'
    assert m.manifest[0].metadata.name == app_id
    assert m.manifest[1].metadata.name == app_label
    assert m.manifest[2].metadata.name == 'marathonsecret-group1.predictionio-server'
    assert m.manifest[0].metadata.labels['app'] == app_label
    # Each secret-backed env var must reference the migrated Secret by name/key.
    assert m.manifest[0].spec.template.spec.containers[0].env[
        0].value_from.secret_key_ref.name == "marathonsecret-group1.predictionio-server"
    assert m.manifest[0].spec.template.spec.containers[0].env[0].value_from.secret_key_ref.key == "secret1"
    assert m.manifest[0].spec.template.spec.containers[0].env[
        1].value_from.secret_key_ref.name == "marathonsecret-group1.predictionio-server"
    assert m.manifest[0].spec.template.spec.containers[0].env[1].value_from.secret_key_ref.key == "test.secret2"
    assert m.manifest[1].kind == 'Service'
    assert m.manifest[1].spec.selector['app'] == app_label
    assert 'secret1' in m.manifest[2].data
    assert 'test.secret2' in m.manifest[2].data
def test_docker_pull_config_secret():
    """A Docker pullConfig secret becomes a kubernetes.io/dockerconfigjson Secret."""
    pull_config_str = '{"auths":{"example.com":{"username":"jd","password":"<PASSWORD>",'\
                      '"email":"<EMAIL>","auth":"f00BA7"}}}'
    migrated_dcos_secret = V1Secret(kind='Secret',
                                    api_version='v1',
                                    metadata=V1ObjectMeta(name='nothing-depends-on-this-name'),
                                    data={'nothing-depends-on-the-name-of-this-key': pull_config_str})
    input_manifest_list = ManifestList()
    input_manifest_list.append(
        Manifest(pluginName="secret", manifestName="foo.docker-c_nfig", data=[migrated_dcos_secret]))
    app = {
        "id": "/foo/barify",
        "container": {
            "docker": {
                "pullConfig": {
                    "secret": "pull-config"
                }
            }
        },
        "env": {
            "BAR": {
                "secret": "pull-config"
            }
        },  # See the NOTE below
        "secrets": {
            "pull-config": {
                "source": "/foo/docker-c@nfig"
            },
            "unused": {
                "source": "unused"
            },
        },
    }
    migrator = MarathonMigrator(object=app, manifest_list=input_manifest_list)
    manifest = migrator.migrate()
    # NOTE: Thit test expects that two secrets will be created:
    # one for the image pull config and another for everything else.
    # This might be not the optimal migration startegy.
    [deployment] = [m for m in manifest if isinstance(m, V1Deployment)]
    [pull_secret] = [m for m in manifest \
                     if isinstance(m, V1Secret) and m.type == "kubernetes.io/dockerconfigjson"]
    [generic_secret] = [m for m in manifest \
                        if isinstance(m, V1Secret) and m.type != "kubernetes.io/dockerconfigjson"]
    assert deployment.spec.template.spec.image_pull_secrets[0].name == pull_secret.metadata.name
    assert pull_secret.data[".dockerconfigjson"] == pull_config_str
    assert generic_secret.data["foo.docker-c_nfig"] == pull_config_str
def test_constraint_node_labels():
    """Placement constraints are tracked as node labels per app."""
    apps = [{
        "id": "/foo",
        "constraints": [["@hostname", "IS", "10.123.45.67"], ["baz", "UNIQUE"]]
    }, {
        "id": "/bar",
        "constraints": [["@zone", "LIKE", "antarctic1"], ["baz", "UNIQUE"]]
    }]
    tracker = NodeLabelTracker()
    for app in apps:
        MarathonMigrator(node_label_tracker=tracker, object=app).migrate()
    apps_by_label = tracker.get_apps_by_label()
    # @hostname/@zone map to well-known k8s labels; custom fields pass through.
    assert apps_by_label == {
        "baz": {'/foo', '/bar'},
        "topology.kubernetes.io/zone": {'/bar'},
        "dcos.io/former-dcos-hostname": {'/foo'}
    }
def test_dcos_apps_are_skipped():
    """Apps installed by DC/OS packages (framework label set) are not migrated."""
    app = {"id": "/foo", "labels": {"DCOS_PACKAGE_FRAMEWORK_NAME": "dcos-foo"}}
    migrator = MarathonMigrator(object=app)
    manifest = migrator.migrate()
    assert not manifest
| from dcos_migrate.plugins.marathon import MarathonMigrator, NodeLabelTracker
from dcos_migrate.system import Manifest, ManifestList
from kubernetes.client.models import V1Deployment, V1ObjectMeta, V1Secret # type: ignore
import json
import pytest
def test_simple():
    """A plain Marathon app migrates into a manifest named `<group>.<app>`."""
    with open('tests/examples/simple.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data)
    mres = m.migrate()
    assert mres is not None
    # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1'
    assert m.manifest[0].metadata.name == 'group1.predictionio-server'
@pytest.mark.xfail
def test_simple_portmapping():
    """Port-mapping migration — expected to fail (xfail).

    NOTE(review): the assertion below subscripts the manifest like a dict,
    unlike the attribute access used elsewhere; presumably this is why the
    test is marked xfail — confirm before un-marking.
    """
    with open('tests/examples/simplePortmapping.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data)
    mres = m.migrate()
    assert mres is not None
    # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1'
    assert m.manifest[0]['metadata']['name'] == 'group1.predictionio-server'
def test_simple_with_secret():
    """An app with secret env vars yields Deployment + Service + Secret."""
    ml = ManifestList(path='tests/examples/simpleWithSecret')
    ml.load()
    assert len(ml) == 2
    with open('tests/examples/simpleWithSecret.json') as json_file:
        data = json.load(json_file)
    m = MarathonMigrator(object=data, manifest_list=ml)
    mres = m.migrate()
    assert len(m.manifest) == 3
    assert mres is not None
    app_id = 'group1.predictionio-server'
    app_label = 'group1-predictionio-server'
    assert m.manifest[0].metadata.name == app_id
    assert m.manifest[1].metadata.name == app_label
    assert m.manifest[2].metadata.name == 'marathonsecret-group1.predictionio-server'
    assert m.manifest[0].metadata.labels['app'] == app_label
    # Each secret-backed env var must reference the migrated Secret by name/key.
    assert m.manifest[0].spec.template.spec.containers[0].env[
        0].value_from.secret_key_ref.name == "marathonsecret-group1.predictionio-server"
    assert m.manifest[0].spec.template.spec.containers[0].env[0].value_from.secret_key_ref.key == "secret1"
    assert m.manifest[0].spec.template.spec.containers[0].env[
        1].value_from.secret_key_ref.name == "marathonsecret-group1.predictionio-server"
    assert m.manifest[0].spec.template.spec.containers[0].env[1].value_from.secret_key_ref.key == "test.secret2"
    assert m.manifest[1].kind == 'Service'
    assert m.manifest[1].spec.selector['app'] == app_label
    assert 'secret1' in m.manifest[2].data
    assert 'test.secret2' in m.manifest[2].data
def test_docker_pull_config_secret():
    """A Docker pullConfig secret becomes a kubernetes.io/dockerconfigjson Secret."""
    pull_config_str = '{"auths":{"example.com":{"username":"jd","password":"<PASSWORD>",'\
                      '"email":"<EMAIL>","auth":"f00BA7"}}}'
    migrated_dcos_secret = V1Secret(kind='Secret',
                                    api_version='v1',
                                    metadata=V1ObjectMeta(name='nothing-depends-on-this-name'),
                                    data={'nothing-depends-on-the-name-of-this-key': pull_config_str})
    input_manifest_list = ManifestList()
    input_manifest_list.append(
        Manifest(pluginName="secret", manifestName="foo.docker-c_nfig", data=[migrated_dcos_secret]))
    app = {
        "id": "/foo/barify",
        "container": {
            "docker": {
                "pullConfig": {
                    "secret": "pull-config"
                }
            }
        },
        "env": {
            "BAR": {
                "secret": "pull-config"
            }
        },  # See the NOTE below
        "secrets": {
            "pull-config": {
                "source": "/foo/docker-c@nfig"
            },
            "unused": {
                "source": "unused"
            },
        },
    }
    migrator = MarathonMigrator(object=app, manifest_list=input_manifest_list)
    manifest = migrator.migrate()
    # NOTE: Thit test expects that two secrets will be created:
    # one for the image pull config and another for everything else.
    # This might be not the optimal migration startegy.
    [deployment] = [m for m in manifest if isinstance(m, V1Deployment)]
    [pull_secret] = [m for m in manifest \
                     if isinstance(m, V1Secret) and m.type == "kubernetes.io/dockerconfigjson"]
    [generic_secret] = [m for m in manifest \
                        if isinstance(m, V1Secret) and m.type != "kubernetes.io/dockerconfigjson"]
    assert deployment.spec.template.spec.image_pull_secrets[0].name == pull_secret.metadata.name
    assert pull_secret.data[".dockerconfigjson"] == pull_config_str
    assert generic_secret.data["foo.docker-c_nfig"] == pull_config_str
def test_constraint_node_labels():
    """Placement constraints are tracked as node labels per app."""
    apps = [{
        "id": "/foo",
        "constraints": [["@hostname", "IS", "10.123.45.67"], ["baz", "UNIQUE"]]
    }, {
        "id": "/bar",
        "constraints": [["@zone", "LIKE", "antarctic1"], ["baz", "UNIQUE"]]
    }]
    tracker = NodeLabelTracker()
    for app in apps:
        MarathonMigrator(node_label_tracker=tracker, object=app).migrate()
    apps_by_label = tracker.get_apps_by_label()
    # @hostname/@zone map to well-known k8s labels; custom fields pass through.
    assert apps_by_label == {
        "baz": {'/foo', '/bar'},
        "topology.kubernetes.io/zone": {'/bar'},
        "dcos.io/former-dcos-hostname": {'/foo'}
    }
def test_dcos_apps_are_skipped():
app = {"id": "/foo", "labels": {"DCOS_PACKAGE_FRAMEWORK_NAME": "dcos-foo"}}
migrator = MarathonMigrator(object=app)
manifest = migrator.migrate()
assert not manifest | en | 0.495326 | # type: ignore # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1' # assert m.manifest[0]['metadata']['name'] == 'predictionio-server.group1' # See the NOTE below # NOTE: Thit test expects that two secrets will be created: # one for the image pull config and another for everything else. # This might be not the optimal migration startegy. | 1.984997 | 2 |
static2/ida/ida.py | pAplakidis/qira | 0 | 6616899 | <reponame>pAplakidis/qira
#!/usr/bin/env python2.7
import sys
import os
import struct
from ida_consts import *
import time
# ugh, qira_base
def ghex(a):
  """Return `a` as a hex string without Python 2's trailing 'L' long
  suffix, or None if `a` is None (mirrors fhex below)."""
  # `is None` instead of `== None`: identity test, immune to odd __eq__.
  if a is None:
    return None
  return hex(a).strip("L")
def fhex(a):
  """Parse `a` as a base-16 string; return None if it cannot be parsed."""
  try:
    return int(a, 16)
  # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt
  # and SystemExit. int() raises ValueError for bad digits and TypeError
  # for non-string input (e.g. None); those are the only expected failures.
  except (TypeError, ValueError):
    return None
# fixes the help issue
# Make IDA's installation dir visible to the dynamic loader and to IDA
# itself before libida is dlopen'd below.
os.environ['PATH'] += ":"+IDAPATH
os.environ['LD_LIBRARY_PATH'] = IDAPATH
os.environ['IDADIR'] = IDAPATH
# On a 64-bit interpreter, re-exec under a bundled 32-bit Python (apparently
# because libida here is 32-bit) and proxy all ctypes calls remotely.
if sys.maxsize > 2**32:
  if __name__ == "__main__":
    print "relaunching as 32-bit python"
    os.system("python32/Python/python "+__file__+" "+" ".join(sys.argv[1:]))
    exit(0)
  from remotectypes32 import *
else:
  from ctypes import *
  # In-process case: remote_func is the identity passthrough.
  def remote_func(x):
    return x
# Globals shared with the IDA UI callback below.
done = False      # set True when the callback sees auto_empty_finally
argc = 1
argv = None
idle_fxn = None   # IDA-supplied idle callback, pumped by run_ida()
# ctypes prototype for IDA's UI callback: int cb(9 opaque pointer args).
CALLUI = CFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p)
def set_done(b):
  # Setter so the UI callback can flip module-level `done` (ends run_ida).
  global done
  done = b
def set_idle_fxn(f):
  # Store the idle callback IDA hands us (ui message 18, see uicallback).
  global idle_fxn
  idle_fxn = f
def uicallback(a,b,c,d,e,f,g,h,i):
  """UI hook installed into the IDA kernel (see init_with_binary).

  `c` is the ui_* message code; `b` points to the kernel's integer result
  slot, written through `b_ptr`. Unhandled codes fall through to the
  debug print near the bottom.
  """
  b_ptr = cast(b, POINTER(c_long))
  b_ptr[0] = 0
  if c == 17: # ui_banner
    b_ptr[0] = 1
    return 0
  elif c == 28: # ui_clearbreak
    return 0
  elif c == 29: # ui_wasbreak
    # ui_wasbreak, always return 0
    return 0
    #print "callback",a,b,c,d,e,f
    #return 0
  elif c == 23:
    # Message output: forward IDA's printf-style format + va_list to libc.
    #st = cast(d, c_char_p).value.strip()
    #print st
    """
    if "%s" in st and f != None:
      print cast(f, c_char_p).value.strip()
    """
    #print cast(f, c_char_p).value
    libc.vprintf(d, e)
    return 0
  elif c == 21:
    # MBOX
    libc.vprintf(e, f)
    print ""
    return 0
  elif c == 50:
    # Processor-module notification; `d` is the idp event number.
    if d == None:
      d = 0
    if d == 527:
      # WTF USELESS?
      return 0
    if d == 53: # auto_empty_finally
      # Auto-analysis finished — tell run_ida() to stop pumping.
      set_done(True)
      return 0
    if d < len(idp_notify):
      #print "idp_notify",d,idp_notify[d]
      pass
    else:
      return 0
    #print "idp_notify",d
    #st = struct.unpack("I", cast(e, c_char_p).value[0:4])[0]
    #print cast(st, c_char_p).value.strip()
    #ret = ida.invoke_callbacks(0, d, e)
    #print "RETURN 0"
    # ugh hacks
    b_ptr[0] = 0
    """
    if d == 2 or d == 3:
      print "returning 1"
      libc.memset(b, 1, 1)
    #if d == 0 or d == None:
      libc.memset(b, 0, 4)
    elif d == 4:
      print "newfile",cast(e, c_char_p).value.strip()
    """
    #print cast(b, POINTER(c_int)).contents
    #print cast(b, POINTER(c_int)).contents
    return 1
  # Anything not handled above is logged with its symbolic name.
  print "callback", ui_msgs[c], c,d,e,f,g,h,i
  if c == 43:
    # ui load_file: pick a loader list and load the non-binary file.
    print "load_file:",cast(d, c_char_p).value.strip(), hex(e), hex(f)
    b_ptr[0] = 1
    lst = ida.build_loaders_list(e)
    print "loaders_list", hex(lst)
    ret = ida.load_nonbinary_file(FILE, e, ".", NEF_FIRST, lst)
    print ret
    #ida.init_loader_options(e, lst)
  if c == 18:
    # IDA registers its idle function; keep it for run_ida().
    print "got set idle",d
    set_idle_fxn(CFUNCTYPE(c_int)(d))
  if c == 25:
    # ask_file: always answer with our copied input binary path.
    print "ask_file:",cast(e, c_char_p).value.strip(),cast(f, c_char_p).value.strip()
    global buf # OMG GC
    buf = create_string_buffer(FILE)
    b_ptr[0] = addressof(buf)
    #b_ptr[0] = 0xAABBCCDD
    return 0
def run_ida():
  # Pump IDA's idle function until the UI callback observes
  # auto_empty_finally (analysis complete) and sets `done`.
  global done
  done = False
  while not done:
    idle_fxn()
  print "*** run_ida finished"
def fetch_tags():
  """Walk the IDA database and return {address: tag-dict}.

  Tags include symbol names (from the name list), per-instruction flags,
  length, semantics ("call"/"ret"/"endbb"), enclosing function scope and
  intra-function code-flow targets.
  """
  import collections
  tags = collections.defaultdict(dict)
  # Symbol names from IDA's name list.
  for i in range(0, ida.get_nlist_size()):
    ea = ida.get_nlist_ea(i)
    name = c_char_p(ida.get_nlist_name(i)).value.strip()
    #print hex(ea), name
    tags[ea]['name'] = name
  def parse_addr(i):
    # Tag address i if it holds code; returns its raw flags either way.
    flags = ida.get_flags_ex(i, 0)
    # is code
    if (flags&0x600) == 0x600:
      #print ghex(i)
      tags[i]['flags'] = flags
      tags[i]['flow'] = []
      tags[i]['semantics'] = []
      tags[i]['len'] = ida.decode_insn(i)
      if ida.is_call_insn(i):
        tags[i]['semantics'].append("call")
      if ida.is_ret_insn(i, 1):
        tags[i]['semantics'].append("ret")
      if ida.is_basic_block_end(0):
        tags[i]['semantics'].append("endbb")
      #print ghex(i), tags[ghex(i)]['len']
    return flags
  # First pass: every defined address in the database.
  i = 0
  while 1:
    i = ida.nextaddr(i)
    if i == -0x1:
      break
    parse_addr(i)
  # Second pass: per-function scope and code references.
  fxn_count = ida.get_func_qty()
  for i in range(0, fxn_count):
    #print i
    # func_t starts with (start_ea, end_ea); read just those two fields.
    fxn = cast(ida.getn_func(i), POINTER(c_long))
    fxn = [fxn[0], fxn[1]]
    tags[fxn[0]]['funclength'] = fxn[1]-fxn[0]
    #print hex(fxn[0]), hex(fxn[1])
    # get the flags for each address in the function
    for i in range(fxn[0], fxn[1]):
      # this should be the only thing set here
      #flags = parse_addr(i)
      flags = ida.get_flags_ex(i, 0)
      if (flags&0x600) == 0x600:
        tags[i]['scope'] = ghex(fxn[0])
        # Record intra-function code references as flow targets.
        cref = ida.get_first_fcref_from(i)
        while cref != -1:
          if cref >= fxn[0] and cref < fxn[1]:
            tags[i]['flow'].append(ghex(cref))
            #print " ",ghex(cref)
          cref = ida.get_next_fcref_from(i, cref)
  return tags
def set_name(ea, name):
ida.set_name(ea, create_string_buffer(name), 0)
def set_comment(ea, text):
# all repeatable
ida.set_cmt(ea, create_string_buffer(text), 1)
def get_name(ea):
    """Return the symbol name at address *ea*, or None if unnamed."""
    # TODO(ryan): why do i have to malloc here?
    tmp = libc.malloc(80)
    #tmp = create_string_buffer(80)
    ida.get_name.restype = c_char_p
    # With restype=c_char_p ctypes copies the C string into a Python
    # string at call time, so the scratch buffer can be freed right away.
    ret = ida.get_name(BADADDR, ea, tmp, 80)
    libc.free(tmp)  # fix: buffer was previously leaked on every call
    # fix: `if ret != None: return ret / return None` collapsed; also the
    # non-idiomatic `!= None` comparison is gone.
    return ret
def get_name_ea(name):
    """Resolve symbol *name* to its address, or None when undefined."""
    ea = ida.get_name_ea(BADADDR, create_string_buffer(name))
    return None if ea == BADADDR else ea
def init_with_binary(filename):
    """Boot the headless IDA kernel on a copy of *filename*.

    Copies the target into /tmp/qida, loads libida + libc, installs the
    ctypes UI callback via a small shellcode thunk, then initialises the
    kernel/database and pumps the idle loop until analysis finishes.
    """
    global ida, libc, FILE
    FILE = "/tmp/qida/ida_binary"
    # NOTE(review): filename is interpolated into a shell command
    # unquoted -- spaces or shell metacharacters in the path will
    # break/inject.  Consider subprocess with a list argv.
    os.system("rm -rf /tmp/qida; mkdir -p /tmp/qida")
    os.system("cp "+filename+" "+FILE)
    if sys.platform == 'darwin':
        ida = cdll.LoadLibrary(IDAPATH+"/libida.dylib")
        libc = cdll.LoadLibrary("libc.dylib")
    elif sys.platform == 'win32':
        print 'TODO: windows support'
        return False
    else:
        # Linux
        ida = cdll.LoadLibrary(IDAPATH+"/libida.so")
        libc = cdll.LoadLibrary("libc.so.6")
    fxn = CALLUI(remote_func(uicallback))
    # how hack is that, KFC
    # x86 thunk bytes: mov ecx,<cb>; call ecx; pop ecx; add esp,4; jmp ecx
    # (32-bit only, hence the relaunch-as-32-bit dance at import time).
    rsc = "\xB9"+struct.pack("I", cast(fxn, c_void_p).value)+"\xFF\xD1\x59\x83\xC4\x04\xFF\xE1"
    sc = create_string_buffer(rsc)
    # Make the thunk's page executable (RWX = 7).
    libc.mprotect(addressof(sc) & 0xFFFFF000, 0x1000, 7)
    print "*** ida.init_kernel", ida.init_kernel(sc, argc, argv)
    newfile = c_int(0)
    print "*** ida.init_database", ida.init_database(argc, argv, pointer(newfile))
    run_ida()


if __name__ == "__main__":
    # Usage: <script> <binary> -- analyze and dump the tag dictionary.
    init_with_binary(sys.argv[1])
    print fetch_tags()
| #!/usr/bin/env python2.7
import sys
import os
import struct
from ida_consts import *
import time
# ugh, qira_base
def ghex(a):
    """Return *a* as a hex string ("0x..") or None when *a* is None.

    The trailing "L" that Python 2 appends to long literals is stripped.
    """
    # fix: identity check instead of the non-idiomatic `== None`.
    if a is None:
        return None
    return hex(a).strip("L")
def fhex(a):
    """Parse *a* as a base-16 string; return int, or None when unparseable."""
    try:
        return int(a, 16)
    # fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # int() only raises these two for bad input.
    except (TypeError, ValueError):
        return None
# fixes the help issue
os.environ['PATH'] += ":"+IDAPATH
os.environ['LD_LIBRARY_PATH'] = IDAPATH
os.environ['IDADIR'] = IDAPATH
if sys.maxsize > 2**32:
if __name__ == "__main__":
print "relaunching as 32-bit python"
os.system("python32/Python/python "+__file__+" "+" ".join(sys.argv[1:]))
exit(0)
from remotectypes32 import *
else:
from ctypes import *
def remote_func(x):
return x
done = False
argc = 1
argv = None
idle_fxn = None
CALLUI = CFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p)
def set_done(b):
    """Setter used by the UI callback to stop run_ida()'s loop."""
    global done
    done = b


def set_idle_fxn(f):
    """Store the idle callback IDA hands us via UI event 18."""
    global idle_fxn
    idle_fxn = f
def uicallback(a,b,c,d,e,f,g,h,i):
    """ctypes trampoline for IDA's callui UI dispatcher.

    *c* is the ui_* event number (see ui_msgs); *b* points at a long
    used as an out-parameter.  Remaining args are event-specific.
    Returns an int status back to IDA.
    """
    b_ptr = cast(b, POINTER(c_long))
    b_ptr[0] = 0
    if c == 17: # ui_banner
        b_ptr[0] = 1
        return 0
    elif c == 28: # ui_clearbreak
        return 0
    elif c == 29: # ui_wasbreak
        # ui_wasbreak, always return 0
        return 0
        #print "callback",a,b,c,d,e,f
        #return 0
    elif c == 23:
        # printf-style output: let libc render format *d* with va_list *e*.
        #st = cast(d, c_char_p).value.strip()
        #print st
        """
        if "%s" in st and f != None:
            print cast(f, c_char_p).value.strip()
        """
        #print cast(f, c_char_p).value
        libc.vprintf(d, e)
        return 0
    elif c == 21:
        # MBOX
        libc.vprintf(e, f)
        print ""
        return 0
    elif c == 50:
        # idp notification forwarded through the UI callback; *d* is the
        # notification code.
        if d == None:
            d = 0
        if d == 527:
            # WTF USELESS?
            return 0
        if d == 53: # auto_empty_finally
            # Auto-analysis complete: let run_ida() fall out of its loop.
            set_done(True)
            return 0
        if d < len(idp_notify):
            #print "idp_notify",d,idp_notify[d]
            pass
        else:
            return 0
        #print "idp_notify",d
        #st = struct.unpack("I", cast(e, c_char_p).value[0:4])[0]
        #print cast(st, c_char_p).value.strip()
        #ret = ida.invoke_callbacks(0, d, e)
        #print "RETURN 0"
        # ugh hacks
        b_ptr[0] = 0
        """
        if d == 2 or d == 3:
            print "returning 1"
            libc.memset(b, 1, 1)
        #if d == 0 or d == None:
        libc.memset(b, 0, 4)
        elif d == 4:
            print "newfile",cast(e, c_char_p).value.strip()
        """
        #print cast(b, POINTER(c_int)).contents
        #print cast(b, POINTER(c_int)).contents
        return 1
    # Unhandled events: log them, then special-case a few by number.
    print "callback", ui_msgs[c], c,d,e,f,g,h,i
    if c == 43:
        # Drive the non-binary loader on our fixed input copy (FILE).
        print "load_file:",cast(d, c_char_p).value.strip(), hex(e), hex(f)
        b_ptr[0] = 1
        lst = ida.build_loaders_list(e)
        print "loaders_list", hex(lst)
        ret = ida.load_nonbinary_file(FILE, e, ".", NEF_FIRST, lst)
        print ret
        #ida.init_loader_options(e, lst)
    if c == 18:
        # IDA hands us the idle function pointer to pump in run_ida().
        print "got set idle",d
        set_idle_fxn(CFUNCTYPE(c_int)(d))
    if c == 25:
        # File-prompt: always answer with our fixed input path.
        print "ask_file:",cast(e, c_char_p).value.strip(),cast(f, c_char_p).value.strip()
        # Keep a module-level reference so the buffer outlives this call.
        global buf # OMG GC
        buf = create_string_buffer(FILE)
        b_ptr[0] = addressof(buf)
        #b_ptr[0] = 0xAABBCCDD
        return 0
def run_ida():
    """Pump IDA's idle callback until auto-analysis signals completion.

    `done` is flipped by set_done() when the auto_empty_finally (53)
    idp notification arrives in uicallback.
    """
    global done
    done = False
    while not done:
        idle_fxn()
    print "*** run_ida finished"


def fetch_tags():
    """Walk the analyzed database and return {address: tag-dict}.

    Tags collected: symbol 'name', instruction 'flags'/'len', coarse
    'semantics' ("call"/"ret"/"endbb"), intra-function code-ref 'flow',
    plus 'scope'/'funclength' for addresses inside functions.
    """
    import collections
    tags = collections.defaultdict(dict)

    # Names from IDA's name list.
    for i in range(0, ida.get_nlist_size()):
        ea = ida.get_nlist_ea(i)
        name = c_char_p(ida.get_nlist_name(i)).value.strip()
        #print hex(ea), name
        tags[ea]['name'] = name

    def parse_addr(i):
        # Populate per-address tags for code bytes; returns the raw flags.
        flags = ida.get_flags_ex(i, 0)
        # is code -- 0x600 looks like the code-flag mask; TODO confirm
        if (flags&0x600) == 0x600:
            #print ghex(i)
            tags[i]['flags'] = flags
            tags[i]['flow'] = []
            tags[i]['semantics'] = []
            tags[i]['len'] = ida.decode_insn(i)
            if ida.is_call_insn(i):
                tags[i]['semantics'].append("call")
            if ida.is_ret_insn(i, 1):
                tags[i]['semantics'].append("ret")
            if ida.is_basic_block_end(0):
                tags[i]['semantics'].append("endbb")
            #print ghex(i), tags[ghex(i)]['len']
        return flags

    # Sweep every address in the database.
    i = 0
    while 1:
        i = ida.nextaddr(i)
        if i == -0x1:
            break
        parse_addr(i)

    # Function table: record length, scope and in-function code refs.
    fxn_count = ida.get_func_qty()
    for i in range(0, fxn_count):
        #print i
        # func_t* treated as two longs: presumably [start_ea, end_ea] -- TODO confirm
        fxn = cast(ida.getn_func(i), POINTER(c_long))
        fxn = [fxn[0], fxn[1]]
        tags[fxn[0]]['funclength'] = fxn[1]-fxn[0]
        #print hex(fxn[0]), hex(fxn[1])
        # get the flags for each address in the function
        for i in range(fxn[0], fxn[1]):
            # this should be the only thing set here
            #flags = parse_addr(i)
            flags = ida.get_flags_ex(i, 0)
            if (flags&0x600) == 0x600:
                tags[i]['scope'] = ghex(fxn[0])
                cref = ida.get_first_fcref_from(i)
                while cref != -1:
                    # Keep only flow targets inside this function.
                    if cref >= fxn[0] and cref < fxn[1]:
                        tags[i]['flow'].append(ghex(cref))
                        #print " ",ghex(cref)
                    cref = ida.get_next_fcref_from(i, cref)
    return tags
def set_name(ea, name):
    """Name address *ea* (flags argument 0)."""
    ida.set_name(ea, create_string_buffer(name), 0)


def set_comment(ea, text):
    """Attach a comment to address *ea*."""
    # all repeatable
    ida.set_cmt(ea, create_string_buffer(text), 1)
def get_name(ea):
    """Return the symbol name at address *ea*, or None if unnamed."""
    # TODO(ryan): why do i have to malloc here?
    tmp = libc.malloc(80)
    #tmp = create_string_buffer(80)
    ida.get_name.restype = c_char_p
    # With restype=c_char_p ctypes copies the C string into a Python
    # string at call time, so the scratch buffer can be freed right away.
    ret = ida.get_name(BADADDR, ea, tmp, 80)
    libc.free(tmp)  # fix: buffer was previously leaked on every call
    # fix: `if ret != None: return ret / return None` collapsed; also the
    # non-idiomatic `!= None` comparison is gone.
    return ret
def get_name_ea(name):
    """Resolve symbol *name* to its address, or None when undefined."""
    ea = ida.get_name_ea(BADADDR, create_string_buffer(name))
    return None if ea == BADADDR else ea
def init_with_binary(filename):
    """Boot the headless IDA kernel on a copy of *filename*.

    Copies the target into /tmp/qida, loads libida + libc, installs the
    ctypes UI callback via a small shellcode thunk, then initialises the
    kernel/database and pumps the idle loop until analysis finishes.
    """
    global ida, libc, FILE
    FILE = "/tmp/qida/ida_binary"
    # NOTE(review): filename is interpolated into a shell command
    # unquoted -- spaces or shell metacharacters in the path will
    # break/inject.  Consider subprocess with a list argv.
    os.system("rm -rf /tmp/qida; mkdir -p /tmp/qida")
    os.system("cp "+filename+" "+FILE)
    if sys.platform == 'darwin':
        ida = cdll.LoadLibrary(IDAPATH+"/libida.dylib")
        libc = cdll.LoadLibrary("libc.dylib")
    elif sys.platform == 'win32':
        print 'TODO: windows support'
        return False
    else:
        # Linux
        ida = cdll.LoadLibrary(IDAPATH+"/libida.so")
        libc = cdll.LoadLibrary("libc.so.6")
    fxn = CALLUI(remote_func(uicallback))
    # how hack is that, KFC
    # x86 thunk bytes: mov ecx,<cb>; call ecx; pop ecx; add esp,4; jmp ecx
    # (32-bit only, hence the relaunch-as-32-bit dance at import time).
    rsc = "\xB9"+struct.pack("I", cast(fxn, c_void_p).value)+"\xFF\xD1\x59\x83\xC4\x04\xFF\xE1"
    sc = create_string_buffer(rsc)
    # Make the thunk's page executable (RWX = 7).
    libc.mprotect(addressof(sc) & 0xFFFFF000, 0x1000, 7)
    print "*** ida.init_kernel", ida.init_kernel(sc, argc, argv)
    newfile = c_int(0)
    print "*** ida.init_database", ida.init_database(argc, argv, pointer(newfile))
    run_ida()
if __name__ == "__main__":
init_with_binary(sys.argv[1])
print fetch_tags() | en | 0.38252 | #!/usr/bin/env python2.7 # ugh, qira_base # fixes the help issue # ui_banner # ui_clearbreak # ui_wasbreak # ui_wasbreak, always return 0 #print "callback",a,b,c,d,e,f #return 0 #st = cast(d, c_char_p).value.strip() #print st if "%s" in st and f != None: print cast(f, c_char_p).value.strip() #print cast(f, c_char_p).value # MBOX # WTF USELESS? # auto_empty_finally #print "idp_notify",d,idp_notify[d] #print "idp_notify",d #st = struct.unpack("I", cast(e, c_char_p).value[0:4])[0] #print cast(st, c_char_p).value.strip() #ret = ida.invoke_callbacks(0, d, e) #print "RETURN 0" # ugh hacks if d == 2 or d == 3: print "returning 1" libc.memset(b, 1, 1) #if d == 0 or d == None: libc.memset(b, 0, 4) elif d == 4: print "newfile",cast(e, c_char_p).value.strip() #print cast(b, POINTER(c_int)).contents #print cast(b, POINTER(c_int)).contents #ida.init_loader_options(e, lst) # OMG GC #b_ptr[0] = 0xAABBCCDD #print hex(ea), name # is code #print ghex(i) #print ghex(i), tags[ghex(i)]['len'] #print i #print hex(fxn[0]), hex(fxn[1]) # get the flags for each address in the function # this should be the only thing set here #flags = parse_addr(i) #print " ",ghex(cref) # all repeatable # TODO(ryan): why do i have to malloc here? #tmp = create_string_buffer(80) # Linux # how hack is that, KFC | 1.941378 | 2 |
02_data_structures/c09_a01.py | jar211/py4e | 0 | 6616900 | # 9.4 Write a program to read through the mbox-short.txt and figure out who has the sent the greatest number of
# mail messages. The program looks for 'From ' lines and takes the second word of those lines as the person who
# sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of the
# number of times they appear in the file. After the dictionary is produced, the program reads through the dictionary
# using a maximum loop to find the most prolific committer.
file_name = 'mbox-short.txt'
file_handle = open(file_name)

# Tally how many 'From ' lines each sender address appears on; the
# sender is the second whitespace-separated token of the line.
froms = dict()
for line in file_handle:
    if not line.startswith('From '):
        continue
    from_address = line.strip().split()[1]
    froms[from_address] = froms.get(from_address, 0) + 1

# Pick the sender with the highest count.  max() over insertion-ordered
# items keeps the first-seen address on ties, exactly like the original
# strictly-greater scan; (None, None) when no 'From ' lines were found.
bigaddr, bigcount = max(froms.items(), key=lambda item: item[1], default=(None, None))
print(bigaddr, bigcount) | # 9.4 Write a program to read through the mbox-short.txt and figure out who has the sent the greatest number of
# mail messages. The program looks for 'From ' lines and takes the second word of those lines as the person who
# sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of the
# number of times they appear in the file. After the dictionary is produced, the program reads through the dictionary
# using a maximum loop to find the most prolific committer.
file_name = 'mbox-short.txt'
file_handle = open(file_name)

froms = dict()
# get all of the "from" addresses and their count
for line in file_handle:
    if line.startswith('From '):
        # Second whitespace-separated token of a 'From ' line is the sender.
        from_address = line.strip().split()[1]
        # Add one to the value matching the key 'from_address' or create the k/v pair if one is not found.
        froms[from_address] = froms.get(from_address, 0) + 1

# find the from address with the highest occurrence
bigaddr = None
bigcount = None
for addr, count in froms.items():
    # Strictly-greater comparison keeps the first-seen address on ties.
    if bigcount is None or bigcount < count:
        bigaddr = addr
        bigcount = count
print(bigaddr, bigcount) | en | 0.924132 | # 9.4 Write a program to read through the mbox-short.txt and figure out who has the sent the greatest number of # mail messages. The program looks for 'From ' lines and takes the second word of those lines as the person who # sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of the # number of times they appear in the file. After the dictionary is produced, the program reads through the dictionary # using a maximum loop to find the most prolific committer. # get all of the "from" addresses and their count # Add one to the value matching the key 'from_address' or create the k/v pair if one is not found. # find the from address with the highest occurrence | 3.695491 | 4 |
emma/interface/admin.py | djangowebstudio/emma | 0 | 6616901 | from django.contrib import admin
from emma.interface.models import *
from models import *
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.admin import FlatPageAdmin
# from django.contrib.auth.models import User
# from django.contrib.auth.admin import UserAdmin
def _bulk_set_active(queryset, value):
    """Shared helper: bulk-update the is_active flag on *queryset*."""
    queryset.update(is_active=value)


def set_to_active(modeladmin, request, queryset):
    """Admin action: activate every selected user."""
    _bulk_set_active(queryset, True)


set_to_active.short_description = "Set selected users to active"


def set_to_inactive(modeladmin, request, queryset):
    """Admin action: deactivate every selected user."""
    _bulk_set_active(queryset, False)


set_to_inactive.short_description = "Set selected users to inactive"
# class UserAdmin(UserAdmin):
# actions = [set_to_active, set_to_inactive]
# admin.site.unregister(User)
# admin.site.register(User, UserAdmin)
class FlatPageAdmin(FlatPageAdmin):
    """Flatpage admin with TinyMCE wired onto its textareas."""

    class Media:
        # Served from the '/s/' static alias.
        js = ('/s/js/tiny_mce/tiny_mce.js',
              '/s/js/tiny_mce/textareas.js',)


# We have to unregister it, and then reregister
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
class ImageAdmin(admin.ModelAdmin):
    """Image records: list by LNID/category and timestamps."""
    list_display = ['image_LNID', 'image_category', 'date_created', 'date_modified']
    search_fields = ['image_LNID', 'image_category']


admin.site.register(Image, ImageAdmin)


class OrderAdmin(admin.ModelAdmin):
    """Orders, searchable by client."""
    list_display = ('image_LNID', 'client', 'project', 'is_published', 'status', 'album_identifier', 'ts')
    search_fields = ['client']


admin.site.register(Order, OrderAdmin)


class KeywordAdmin(admin.ModelAdmin):
    """Per-image keyword rows."""
    list_display = ['image_LNID', 'subject', 'keywords', 'source']
    search_fields = ['image_LNID', 'subject', 'keywords', 'source']


admin.site.register(Keyword, KeywordAdmin)
class MetadataAdmin(admin.ModelAdmin):
    """IPTC-style metadata editor, grouped into collapsible fieldsets."""
    save_on_top = True
    search_fields = ['image_LNID', 'subject', 'album', 'mime_type']
    fieldsets = (
        ('About you', {'fields': ('caption_writer',)}),
        ('Image information', {
            'classes': ('collapse',),
            'fields': ('subject', 'copyright', 'profile', 'keywords', 'description', 'instructions', 'source', 'location', 'city', 'provincestate', 'country', 'datetimeoriginal')}),
        ('Attachments', { 'classes': ('collapse',),
            'fields': ('document',)}),
        ('Author information', {
            'classes': ('collapse',),
            'fields':('author', 'creator', 'credit')}),
        ('Album information', {
            'classes': ('collapse',),
            'fields':('album', 'headline')})
    )
    list_display = ('thumb','image_LNID','subject','copyright','profile',
        'has_attachment', 'keywords', 'mime_type')
    # Inline editing of these columns directly on the changelist.
    list_editable = ('subject', 'copyright', 'keywords')


admin.site.register(Metadata, MetadataAdmin)


class KeywordCountAdmin(admin.ModelAdmin):
    """Aggregated keyword usage counts."""
    search_fields = ['keyword']


admin.site.register(KeywordCount, KeywordCountAdmin)
class ContractAdmin(admin.ModelAdmin):
    """Signed user contracts."""
    list_display = ('username', 'contract', 'date_signed')
    search_fields = ['username']


admin.site.register(Contract, ContractAdmin)
admin.site.register(Favorite)


class AlbumAdmin(admin.ModelAdmin):
    """Album editor with a dual-listbox picker for member images."""
    search_fields = ['album_name', 'album_identifier']
    fieldsets = (
        ('Album information', {
            'fields': ('album_name',)}),
        ('Attachments', {
            'fields': ('document', )},),
        ('Items', {'classes': ('collapse',), 'fields': ('image',)})
    )
    # Horizontal dual-listbox widget for the Album<->Image M2M.
    filter_horizontal = ['image']
    list_display = ('album_name', 'album_identifier', 'document')


admin.site.register(Album, AlbumAdmin)
class UserAdmin(admin.ModelAdmin):
    """User admin hiding the internal 'setstr2' field from the form."""
    # Bug fix: the attribute was misspelled "exlude"; Django silently
    # ignores unknown attributes, so 'setstr2' was still being shown.
    exclude = ['setstr2']


admin.site.register(User, UserAdmin)
admin.site.register(Query)


class ProjectAdmin(admin.ModelAdmin):
    """Projects; slug auto-filled from the name while typing."""
    prepopulated_fields = {"slug": ("name",)}
    list_display = ['number','name', 'slug', 'active', 'is_complete',]


admin.site.register(Project, ProjectAdmin)
admin.site.register(ImageCount) | from django.contrib import admin
from emma.interface.models import *
from models import *
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.admin import FlatPageAdmin
# from django.contrib.auth.models import User
# from django.contrib.auth.admin import UserAdmin
def _bulk_set_active(queryset, value):
    """Shared helper: bulk-update the is_active flag on *queryset*."""
    queryset.update(is_active=value)


def set_to_active(modeladmin, request, queryset):
    """Admin action: activate every selected user."""
    _bulk_set_active(queryset, True)


set_to_active.short_description = "Set selected users to active"


def set_to_inactive(modeladmin, request, queryset):
    """Admin action: deactivate every selected user."""
    _bulk_set_active(queryset, False)


set_to_inactive.short_description = "Set selected users to inactive"
# class UserAdmin(UserAdmin):
# actions = [set_to_active, set_to_inactive]
# admin.site.unregister(User)
# admin.site.register(User, UserAdmin)
class FlatPageAdmin(FlatPageAdmin):
    """Flatpage admin with TinyMCE wired onto its textareas."""

    class Media:
        # Served from the '/s/' static alias.
        js = ('/s/js/tiny_mce/tiny_mce.js',
              '/s/js/tiny_mce/textareas.js',)


# We have to unregister it, and then reregister
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
class ImageAdmin(admin.ModelAdmin):
    """Image records: list by LNID/category and timestamps."""
    list_display = ['image_LNID', 'image_category', 'date_created', 'date_modified']
    search_fields = ['image_LNID', 'image_category']


admin.site.register(Image, ImageAdmin)


class OrderAdmin(admin.ModelAdmin):
    """Orders, searchable by client."""
    list_display = ('image_LNID', 'client', 'project', 'is_published', 'status', 'album_identifier', 'ts')
    search_fields = ['client']


admin.site.register(Order, OrderAdmin)


class KeywordAdmin(admin.ModelAdmin):
    """Per-image keyword rows."""
    list_display = ['image_LNID', 'subject', 'keywords', 'source']
    search_fields = ['image_LNID', 'subject', 'keywords', 'source']


admin.site.register(Keyword, KeywordAdmin)
class MetadataAdmin(admin.ModelAdmin):
    """IPTC-style metadata editor, grouped into collapsible fieldsets."""
    save_on_top = True
    search_fields = ['image_LNID', 'subject', 'album', 'mime_type']
    fieldsets = (
        ('About you', {'fields': ('caption_writer',)}),
        ('Image information', {
            'classes': ('collapse',),
            'fields': ('subject', 'copyright', 'profile', 'keywords', 'description', 'instructions', 'source', 'location', 'city', 'provincestate', 'country', 'datetimeoriginal')}),
        ('Attachments', { 'classes': ('collapse',),
            'fields': ('document',)}),
        ('Author information', {
            'classes': ('collapse',),
            'fields':('author', 'creator', 'credit')}),
        ('Album information', {
            'classes': ('collapse',),
            'fields':('album', 'headline')})
    )
    list_display = ('thumb','image_LNID','subject','copyright','profile',
        'has_attachment', 'keywords', 'mime_type')
    # Inline editing of these columns directly on the changelist.
    list_editable = ('subject', 'copyright', 'keywords')


admin.site.register(Metadata, MetadataAdmin)


class KeywordCountAdmin(admin.ModelAdmin):
    """Aggregated keyword usage counts."""
    search_fields = ['keyword']


admin.site.register(KeywordCount, KeywordCountAdmin)
class ContractAdmin(admin.ModelAdmin):
    """Signed user contracts."""
    list_display = ('username', 'contract', 'date_signed')
    search_fields = ['username']


admin.site.register(Contract, ContractAdmin)
admin.site.register(Favorite)


class AlbumAdmin(admin.ModelAdmin):
    """Album editor with a dual-listbox picker for member images."""
    search_fields = ['album_name', 'album_identifier']
    fieldsets = (
        ('Album information', {
            'fields': ('album_name',)}),
        ('Attachments', {
            'fields': ('document', )},),
        ('Items', {'classes': ('collapse',), 'fields': ('image',)})
    )
    # Horizontal dual-listbox widget for the Album<->Image M2M.
    filter_horizontal = ['image']
    list_display = ('album_name', 'album_identifier', 'document')


admin.site.register(Album, AlbumAdmin)
class UserAdmin(admin.ModelAdmin):
    """User admin hiding the internal 'setstr2' field from the form."""
    # Bug fix: the attribute was misspelled "exlude"; Django silently
    # ignores unknown attributes, so 'setstr2' was still being shown.
    exclude = ['setstr2']


admin.site.register(User, UserAdmin)
admin.site.register(Query)


class ProjectAdmin(admin.ModelAdmin):
    """Projects; slug auto-filled from the name while typing."""
    prepopulated_fields = {"slug": ("name",)}
    list_display = ['number','name', 'slug', 'active', 'is_complete',]


admin.site.register(Project, ProjectAdmin)
admin.site.register(ImageCount) | en | 0.768845 | # from django.contrib.auth.models import User # from django.contrib.auth.admin import UserAdmin # class UserAdmin(UserAdmin): # actions = [set_to_active, set_to_inactive] # admin.site.unregister(User) # admin.site.register(User, UserAdmin) # We have to unregister it, and then reregister | 1.904015 | 2 |
src/captioning/experiment.py | rubencart/es-img-captioning | 0 | 6616902 | from collections import namedtuple
import torch
from algorithm.tools.experiment import Experiment
from algorithm.tools.utils import Config
# Caption-dataset keys forwarded from the experiment config; every field
# is optional and defaults to None.
_opt_fields = ['input_json', 'input_fc_dir', 'input_att_dir', 'input_label_h5', 'use_att', 'use_box',
               'norm_att_feat', 'norm_box_feat', 'input_box_dir', 'train_only', 'seq_per_img', 'fitness']
CaptionOptions = namedtuple('CaptionOptions', field_names=_opt_fields, defaults=(None,) * len(_opt_fields))
class MSCocoExperiment(Experiment):
    """
    Subclass for MSCOCO experiment
    """

    def __init__(self, exp, config: Config, master=True):
        # Parse caption-specific options before the base class runs
        # (init_loaders below reads self.opt).
        self.opt: CaptionOptions = CaptionOptions(**exp['caption_options'])
        super().__init__(exp, config, master=master)

        # Propagate vocab/sequence sizes from the dataset into the
        # policy/model options so the model is built to match the data.
        self.vocab_size = self.trainloader.loader.vocab_size
        self.seq_length = self.trainloader.loader.seq_length
        exp['policy_options']['model_options'].update({
            'vocab_size': self.vocab_size,
            'seq_length': self.seq_length,
        })

    def init_loaders(self, config=None, batch_size=None, workers=None, _=None):
        """Build train/val/test wrappers around the MSCOCO DataLoader.

        Either *config* or *batch_size* must be given.  val and test
        share one underlying loader, distinguished only by split name.
        """
        assert not (config is None and batch_size is None)
        from captioning.dataloader import DataLoader
        tloader = DataLoader(opt=self.opt, config=config, batch_size=batch_size)

        # Validation batch size falls back to the training one.
        val_bs = config.val_batch_size if config and config.val_batch_size else batch_size
        vloader = DataLoader(opt=self.opt, config=config, batch_size=val_bs)

        trainloader = MSCocoDataLdrWrapper(loader=tloader, split='train')
        valloader = MSCocoDataLdrWrapper(loader=vloader, split='val')
        testloader = MSCocoDataLdrWrapper(loader=vloader, split='test')

        self.trainloader, self.valloader, self.testloader = trainloader, valloader, testloader
        self._orig_trainloader_lth = len(self.trainloader)

    def take_ref_batch(self, batch_size):
        """Return one reference batch of training features."""
        return self.trainloader.take_ref_batch(bs=batch_size)
class MSCocoDataLdrWrapper:
    """
    Wrapper for to map API from dataloader from https://github.com/ruotianluo/self-critical.pytorch
    to expected API
    """

    def __init__(self, loader, split):
        from captioning.dataloader import DataLoader
        self.loader: DataLoader = loader
        self.split = split
        # Mirror a few loader attributes for direct access by callers.
        self.batch_size = loader.batch_size
        self.seq_per_img = loader.seq_per_img
        self.get_vocab = loader.get_vocab

    def reset(self):
        # Rewind the underlying iterator for this split.
        self.loader.reset_iterator(split=self.split)

    def __iter__(self):
        return self

    def __next__(self):
        # todo raise stopiter
        # NOTE(review): never raises StopIteration, so iteration is
        # endless -- callers must bound their loops with len(self).
        # Presumably the wrapped loader wraps around; confirm before
        # "fixing" this.
        return self.loader.get_batch(self.split)

    def __len__(self):
        # Number of whole batches in this split (floor division).
        return self.loader.length_of_split(self.split) // self.loader.batch_size

    def take_ref_batch(self, bs):
        # 'fc_feats' come back as numpy; hand the caller a torch tensor.
        return torch.from_numpy(self.loader.get_batch(self.split, batch_size=bs)['fc_feats'])
| from collections import namedtuple
import torch
from algorithm.tools.experiment import Experiment
from algorithm.tools.utils import Config
# Caption-dataset keys forwarded from the experiment config; every field
# is optional and defaults to None.
_opt_fields = ['input_json', 'input_fc_dir', 'input_att_dir', 'input_label_h5', 'use_att', 'use_box',
               'norm_att_feat', 'norm_box_feat', 'input_box_dir', 'train_only', 'seq_per_img', 'fitness']
CaptionOptions = namedtuple('CaptionOptions', field_names=_opt_fields, defaults=(None,) * len(_opt_fields))
class MSCocoExperiment(Experiment):
    """
    Subclass for MSCOCO experiment
    """

    def __init__(self, exp, config: Config, master=True):
        # Parse caption-specific options before the base class runs
        # (init_loaders below reads self.opt).
        self.opt: CaptionOptions = CaptionOptions(**exp['caption_options'])
        super().__init__(exp, config, master=master)

        # Propagate vocab/sequence sizes from the dataset into the
        # policy/model options so the model is built to match the data.
        self.vocab_size = self.trainloader.loader.vocab_size
        self.seq_length = self.trainloader.loader.seq_length
        exp['policy_options']['model_options'].update({
            'vocab_size': self.vocab_size,
            'seq_length': self.seq_length,
        })

    def init_loaders(self, config=None, batch_size=None, workers=None, _=None):
        """Build train/val/test wrappers around the MSCOCO DataLoader.

        Either *config* or *batch_size* must be given.  val and test
        share one underlying loader, distinguished only by split name.
        """
        assert not (config is None and batch_size is None)
        from captioning.dataloader import DataLoader
        tloader = DataLoader(opt=self.opt, config=config, batch_size=batch_size)

        # Validation batch size falls back to the training one.
        val_bs = config.val_batch_size if config and config.val_batch_size else batch_size
        vloader = DataLoader(opt=self.opt, config=config, batch_size=val_bs)

        trainloader = MSCocoDataLdrWrapper(loader=tloader, split='train')
        valloader = MSCocoDataLdrWrapper(loader=vloader, split='val')
        testloader = MSCocoDataLdrWrapper(loader=vloader, split='test')

        self.trainloader, self.valloader, self.testloader = trainloader, valloader, testloader
        self._orig_trainloader_lth = len(self.trainloader)

    def take_ref_batch(self, batch_size):
        """Return one reference batch of training features."""
        return self.trainloader.take_ref_batch(bs=batch_size)
class MSCocoDataLdrWrapper:
    """
    Wrapper for to map API from dataloader from https://github.com/ruotianluo/self-critical.pytorch
    to expected API
    """

    def __init__(self, loader, split):
        from captioning.dataloader import DataLoader
        self.loader: DataLoader = loader
        self.split = split
        # Mirror a few loader attributes for direct access by callers.
        self.batch_size = loader.batch_size
        self.seq_per_img = loader.seq_per_img
        self.get_vocab = loader.get_vocab

    def reset(self):
        # Rewind the underlying iterator for this split.
        self.loader.reset_iterator(split=self.split)

    def __iter__(self):
        return self

    def __next__(self):
        # todo raise stopiter
        # NOTE(review): never raises StopIteration, so iteration is
        # endless -- callers must bound their loops with len(self).
        # Presumably the wrapped loader wraps around; confirm before
        # "fixing" this.
        return self.loader.get_batch(self.split)

    def __len__(self):
        # Number of whole batches in this split (floor division).
        return self.loader.length_of_split(self.split) // self.loader.batch_size

    def take_ref_batch(self, bs):
        # 'fc_feats' come back as numpy; hand the caller a torch tensor.
        return torch.from_numpy(self.loader.get_batch(self.split, batch_size=bs)['fc_feats'])
| en | 0.706296 | Subclass for MSCOCO experiment Wrapper for to map API from dataloader from https://github.com/ruotianluo/self-critical.pytorch to expected API # todo raise stopiter | 2.108527 | 2 |
bot/modules/sed.py | zeroone2numeral2/regex-bot | 1 | 6616903 | import logging
from html import escape as html_escape
from telegram.error import BadRequest
from telegram.constants import MAX_MESSAGE_LENGTH
from bot.customfilters import Filters
from bot.regexer import Regex
from bot.filteredregexhandler import FilteredRegexHandler
logger = logging.getLogger(__name__)
COMMAND_REGEX = r"^([s*]?/?)/((?:\\/|[^/])+)/((?:\\/|[^/])*)(?:/(.*))?"
# Reply templates keyed by the slash-stripped mode prefix of the command.
MODES = {
    "": "<b>Did you mean:</b>\n{}",
    "s": "{}",
    "*": "*{}"
}


def get_response(mode, string):
    """Render *string* (HTML-escaped) into the reply template for *mode*."""
    template = MODES[mode.replace("/", "")]
    return template.format(html_escape(string))
def on_sed(_, update, groups):
    """Handle an s/pattern/replacement/flags command sent as a reply.

    *groups* is (mode, pattern, replacement, flags) as captured by
    COMMAND_REGEX.  Applies the substitution to the replied-to message's
    text/caption and answers with the rendered result; bad patterns are
    logged and silently ignored.
    """
    text = update.message.reply_to_message.text or update.message.reply_to_message.caption
    mode = groups[0]
    pattern = groups[1]
    # Unescape literal slashes inside the replacement.
    replacement = groups[2].replace('\\/', '/') # ??? https://github.com/SijmenSchoon/regexbot/blob/master/regexbot.py#L25
    flags = groups[3] if len(groups) > 3 else None
    logger.info(
        "\nmode: %s\ntext: %s\npattern: %s\nreplacement: %s\nflags: %s",
        mode,
        text,
        pattern,
        replacement,
        flags
    )

    regex = Regex(text, pattern, replacement, flags)
    try:
        new_string, n_subs = regex.subn()
        logger.info("re.subn result:\nnew_string: %s\nn_subs: %d", new_string, n_subs)
    except Exception as e:
        # User-supplied regexes routinely fail to compile.
        logger.info("re.subn exception: %s", str(e), exc_info=True)
        # update.message.reply_text(s.oopsie_woopsie) # might be the user who fucked up the regex
        return # don't proceed further

    if n_subs > 0:
        if len(new_string) > MAX_MESSAGE_LENGTH:
            # Clamp so the template still fits within Telegram's limit.
            logger.info("result too long: substringing...")
            new_string = new_string[:MAX_MESSAGE_LENGTH-16] # -16: "*Did you mean:*\n"
        update.message.reply_to_message.reply_html(get_response(mode, new_string), disable_web_page_preview=True)
    if mode.endswith("/"): # try to delete the command
        try:
            update.message.delete()
        except BadRequest as e: # the bot doesn't have the permission to delete the message
            logger.info("exception while trying to delete a message: %s", e)
class module:
    """Plugin descriptor picked up by the bot's module loader."""
    name = "sed"
    handlers = (
        # Only fire on replies that carry text.
        FilteredRegexHandler(COMMAND_REGEX, on_sed, pass_groups=True, filters=Filters.reply_text),
    )
| import logging
from html import escape as html_escape
from telegram.error import BadRequest
from telegram.constants import MAX_MESSAGE_LENGTH
from bot.customfilters import Filters
from bot.regexer import Regex
from bot.filteredregexhandler import FilteredRegexHandler
logger = logging.getLogger(__name__)
COMMAND_REGEX = r"^([s*]?/?)/((?:\\/|[^/])+)/((?:\\/|[^/])*)(?:/(.*))?"
# Reply templates keyed by the slash-stripped mode prefix of the command.
MODES = {
    "": "<b>Did you mean:</b>\n{}",
    "s": "{}",
    "*": "*{}"
}


def get_response(mode, string):
    """Render *string* (HTML-escaped) into the reply template for *mode*."""
    template = MODES[mode.replace("/", "")]
    return template.format(html_escape(string))
def on_sed(_, update, groups):
    """Handle an s/pattern/replacement/flags command sent as a reply.

    *groups* is (mode, pattern, replacement, flags) as captured by
    COMMAND_REGEX.  Applies the substitution to the replied-to message's
    text/caption and answers with the rendered result; bad patterns are
    logged and silently ignored.
    """
    text = update.message.reply_to_message.text or update.message.reply_to_message.caption
    mode = groups[0]
    pattern = groups[1]
    # Unescape literal slashes inside the replacement.
    replacement = groups[2].replace('\\/', '/') # ??? https://github.com/SijmenSchoon/regexbot/blob/master/regexbot.py#L25
    flags = groups[3] if len(groups) > 3 else None
    logger.info(
        "\nmode: %s\ntext: %s\npattern: %s\nreplacement: %s\nflags: %s",
        mode,
        text,
        pattern,
        replacement,
        flags
    )

    regex = Regex(text, pattern, replacement, flags)
    try:
        new_string, n_subs = regex.subn()
        logger.info("re.subn result:\nnew_string: %s\nn_subs: %d", new_string, n_subs)
    except Exception as e:
        # User-supplied regexes routinely fail to compile.
        logger.info("re.subn exception: %s", str(e), exc_info=True)
        # update.message.reply_text(s.oopsie_woopsie) # might be the user who fucked up the regex
        return # don't proceed further

    if n_subs > 0:
        if len(new_string) > MAX_MESSAGE_LENGTH:
            # Clamp so the template still fits within Telegram's limit.
            logger.info("result too long: substringing...")
            new_string = new_string[:MAX_MESSAGE_LENGTH-16] # -16: "*Did you mean:*\n"
        update.message.reply_to_message.reply_html(get_response(mode, new_string), disable_web_page_preview=True)
    if mode.endswith("/"): # try to delete the command
        try:
            update.message.delete()
        except BadRequest as e: # the bot doesn't have the permission to delete the message
            logger.info("exception while trying to delete a message: %s", e)
class module:
    """Plugin descriptor picked up by the bot's module loader."""
    name = "sed"
    handlers = (
        # Only fire on replies that carry text.
        FilteredRegexHandler(COMMAND_REGEX, on_sed, pass_groups=True, filters=Filters.reply_text),
    )
| en | 0.827893 | # ??? https://github.com/SijmenSchoon/regexbot/blob/master/regexbot.py#L25 # update.message.reply_text(s.oopsie_woopsie) # might be the user who fucked up the regex # don't proceed further # -16: "*Did you mean:*\n" # try to delete the command # the bot doesn't have the permission to delete the message | 2.35788 | 2 |
pset_pandas1_wine_reviews/selecting_data/solutions/p5.py | mottaquikarim/pydev-psets | 5 | 6616904 | """
Selecting Data V - Subsets
"""
import numpy as np
import pandas as pd

# NOTE(review): numpy is imported but never used in this exercise.
wine_reviews = pd.read_csv('../../winemag-data-130k.csv')

# Create a new dataframe called "wine_ratings" that is a subset of wine_reviews. It should have these columns in this order:
### title
### country
### points
### price
wine_ratings = wine_reviews[['title', 'country', 'points', 'price']]

# Print out the number of rows and columns in wine_ratings.
print(wine_ratings.shape) # (129971, 4)

# Print out the first 10 rows of wine_ratings.
print(wine_ratings.head(10))
"""
title country points price
0 Nicosia 2013 <NAME> (Etna) Italy 87 NaN
1 Quinta dos Avidagos 2011 Avidagos Red (Douro) Portugal 87 15.0
2 Rainstorm 2013 Pinot Gris (Willamette Valley) US 87 14.0
3 St. Julian 2013 Reserve Late Harvest Riesling ... US 87 13.0
4 Sweet Cheeks 2012 Vintner's Reserve Wild Child... US 87 65.0
5 Tandem 2011 Ars In Vitro Tempranillo-Merlot (N... Spain 87 15.0
6 <NAME> 2013 <NAME> (Vittoria) Italy 87 16.0
7 Trimbach 2012 Gewurztraminer (Alsace) France 87 24.0
8 <NAME> 2013 <NAME>er (Rheinhe... Germany 87 12.0
9 <NAME> 2012 Les Natures Pinot Gris... France 87 27.0
"""
| """
Selecting Data V - Subsets
"""
import numpy as np
import pandas as pd

# NOTE(review): numpy is imported but never used in this exercise.
wine_reviews = pd.read_csv('../../winemag-data-130k.csv')

# Create a new dataframe called "wine_ratings" that is a subset of wine_reviews. It should have these columns in this order:
### title
### country
### points
### price
wine_ratings = wine_reviews[['title', 'country', 'points', 'price']]

# Print out the number of rows and columns in wine_ratings.
print(wine_ratings.shape) # (129971, 4)

# Print out the first 10 rows of wine_ratings.
print(wine_ratings.head(10))
"""
title country points price
0 Nicosia 2013 <NAME> (Etna) Italy 87 NaN
1 Quinta dos Avidagos 2011 Avidagos Red (Douro) Portugal 87 15.0
2 Rainstorm 2013 Pinot Gris (Willamette Valley) US 87 14.0
3 St. Julian 2013 Reserve Late Harvest Riesling ... US 87 13.0
4 Sweet Cheeks 2012 Vintner's Reserve Wild Child... US 87 65.0
5 Tandem 2011 Ars In Vitro Tempranillo-Merlot (N... Spain 87 15.0
6 <NAME> 2013 <NAME> (Vittoria) Italy 87 16.0
7 Trimbach 2012 Gewurztraminer (Alsace) France 87 24.0
8 <NAME> 2013 <NAME>er (Rheinhe... Germany 87 12.0
9 <NAME> 2012 Les Natures Pinot Gris... France 87 27.0
"""
| en | 0.603977 | Selecting Data V - Subsets # Create a new dataframe called "wine_ratings" that is a subset of wine_reviews. It should have these columns in this order: ### title ### country ### points ### price # Print out the number of rows and columns in wine_ratings. # (129971, 4) # Print out the first 10 rows of wine_ratings. title country points price 0 Nicosia 2013 <NAME> (Etna) Italy 87 NaN 1 Quinta dos Avidagos 2011 Avidagos Red (Douro) Portugal 87 15.0 2 Rainstorm 2013 Pinot Gris (Willamette Valley) US 87 14.0 3 St. Julian 2013 Reserve Late Harvest Riesling ... US 87 13.0 4 Sweet Cheeks 2012 Vintner's Reserve Wild Child... US 87 65.0 5 Tandem 2011 Ars In Vitro Tempranillo-Merlot (N... Spain 87 15.0 6 <NAME> 2013 <NAME> (Vittoria) Italy 87 16.0 7 Trimbach 2012 Gewurztraminer (Alsace) France 87 24.0 8 <NAME> 2013 <NAME>er (Rheinhe... Germany 87 12.0 9 <NAME> 2012 Les Natures Pinot Gris... France 87 27.0 | 4.016652 | 4 |
src/swell/deployment/prep_exp_dirs.py | danholdaway/swell | 4 | 6616905 | # (C) Copyright 2021-2022 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# --------------------------------------------------------------------------------------------------
import importlib
import os
import pathlib
import shutil
from swell.install_path import swell_install_path
from swell.suites.suites import return_suite_path
from swell.utilities.string_utils import replace_vars
# --------------------------------------------------------------------------------------------------
def add_dir_to_conf_mkdir(logger, experiment_dict, experiment_dict_key, experiment_sub_dir,
                          make_dir=True):
    """Register an experiment sub directory in the experiment dictionary.

    Joins ``experiment_sub_dir`` onto the experiment's base directory,
    optionally creates it on disk, and stores the resulting path in
    ``experiment_dict`` under ``experiment_dict_key``.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration; must contain the
            'experiment_dir' key. Updated in place.
        experiment_dict_key (str): Key under which the new path is stored.
        experiment_sub_dir (str): Sub directory relative to experiment_dir.
        make_dir (bool): When True, create the directory (mode 0o755).
    """
    full_path = os.path.join(experiment_dict['experiment_dir'], experiment_sub_dir)

    if make_dir:
        # Create the directory idempotently and make it world readable.
        os.makedirs(full_path, exist_ok=True)
        os.chmod(full_path, 0o755)

    # Record the path so downstream tasks can look it up by key.
    experiment_dict[experiment_dict_key] = full_path
# --------------------------------------------------------------------------------------------------
def copy_suite_files(logger, experiment_dict):
    """Stage suite and platform files into the suite directory.

    Copies the suite's ``jedi_config.yaml`` and ``flow.cylc`` from the swell
    install tree and, when a platform is configured, the platform's
    ``modules`` and ``r2d2_config.yaml`` files. Missing source files are
    silently skipped.

    Args:
        logger: Logger used for trace output.
        experiment_dict (dict): Experiment configuration containing
            'suite_dir' and the 'suite' sub dictionary.
    """
    suite_dir = experiment_dict['suite_dir']
    suite_dict = experiment_dict['suite']
    suite_name = suite_dict['suite name']

    def _stage(source_root, relative_paths):
        # Copy each existing file under source_root into suite_dir,
        # flattening the relative path down to the bare file name.
        for relative in relative_paths:
            sub_dir, file_name = os.path.split(relative)
            source = os.path.join(source_root, sub_dir, file_name)
            destination = os.path.join(suite_dir, '{}'.format(file_name))
            if os.path.exists(source):
                logger.trace('Copying {} to {}'.format(source, destination))
                shutil.copy(source, destination)

    # Suite level files (JEDI configuration and cylc flow definition).
    _stage(return_suite_path(),
           [os.path.join(suite_name, 'jedi_config.yaml'),
            os.path.join(suite_name, 'flow.cylc')])

    # Platform level files, resolved through the platform's install module.
    if 'platform' in suite_dict:
        platform = suite_dict['platform']
        plat_mod = importlib.import_module('swell.deployment.platforms.' + platform + '.install_path')
        platform_path = getattr(plat_mod, 'return_platform_install_path')()
        _stage(platform_path, ['modules', 'r2d2_config.yaml'])
# --------------------------------------------------------------------------------------------------
def set_swell_path_in_modules(logger, experiment_dict):
    """Resolve swell install paths inside the suite's modules file.

    Replaces the ``swell_bin_path``, ``swell_lib_path`` and
    ``swell_sui_path`` placeholder variables in ``<suite_dir>/modules``
    with the paths of the current swell installation. A no-op when the
    suite ships no modules file.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration containing
            'suite_dir'.
    """
    modules_file = os.path.join(experiment_dict['suite_dir'], 'modules')

    # Nothing to do for suites that do not use modules.
    if not os.path.exists(modules_file):
        return

    # Directory holding the swell executables (derived from swell_task).
    bin_path = os.path.split(shutil.which("swell_task"))[0]
    # Parent directory of the installed swell package.
    lib_path = os.path.split(swell_install_path())[0]
    # Location of the suite definitions shipped with swell.
    sui_path = return_suite_path()

    replacements = {
        'swell_bin_path': bin_path,
        'swell_lib_path': lib_path,
        'swell_sui_path': sui_path,
    }

    with open(modules_file, 'r') as modules_handle:
        contents = modules_handle.read()

    contents = replace_vars(contents, **replacements)

    # Overwrite the modules file with the substituted content.
    with open(modules_file, 'w') as modules_handle:
        modules_handle.write(contents)
# --------------------------------------------------------------------------------------------------
def create_modules_csh(logger, experiment_dict):
    """Generate a csh flavored copy of the suite's modules file.

    Reads ``<suite_dir>/modules`` (bash syntax) and writes
    ``<suite_dir>/modules-csh`` with bash constructs translated to csh:
    'bash' -> 'csh', 'export VAR=x' -> 'setenv VAR x', and PYTHONPATH /
    PATH assignments rewritten to their csh equivalents. A no-op when the
    suite ships no modules file.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration containing
            'suite_dir'.
    """
    modules_file = os.path.join(experiment_dict['suite_dir'], 'modules')

    # Nothing to do for suites that do not use modules.
    if not os.path.exists(modules_file):
        return

    with open(modules_file, 'r') as modules_handle:
        bash_lines = modules_handle.readlines()

    csh_lines = []
    for bash_line in bash_lines:
        csh_line = bash_line
        # Shebang / shell references.
        if 'bash' in bash_line:
            csh_line = csh_line.replace('bash', 'csh')
        # 'export VAR=value' becomes 'setenv VAR value'.
        if 'export' in bash_line:
            csh_line = csh_line.replace('export', 'setenv')
            csh_line = csh_line.replace('=', ' ')
        # PYTHONPATH assignment uses setenv in csh.
        if 'PYTHONPATH=' in bash_line:
            csh_line = csh_line.replace('PYTHONPATH=', 'setenv PYTHONPATH ')
        # PATH assignment uses 'set path = (...)' in csh. Conditions test
        # the original line, so a PYTHONPATH line (which contains 'PATH=')
        # also reaches here after being rewritten; the replacements below
        # are then harmless no-ops.
        if 'PATH=' in bash_line:
            csh_line = csh_line.replace('PATH=', 'set path = (')
            csh_line = csh_line.replace(':$PATH', ' $path)')
        csh_lines.append(csh_line)

    # Write the translated file alongside the original.
    with open(modules_file + '-csh', 'w') as csh_handle:
        csh_handle.writelines(csh_lines)
# --------------------------------------------------------------------------------------------------
| # (C) Copyright 2021-2022 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# --------------------------------------------------------------------------------------------------
import importlib
import os
import pathlib
import shutil
from swell.install_path import swell_install_path
from swell.suites.suites import return_suite_path
from swell.utilities.string_utils import replace_vars
# --------------------------------------------------------------------------------------------------
def add_dir_to_conf_mkdir(logger, experiment_dict, experiment_dict_key, experiment_sub_dir,
                          make_dir=True):
    """Register an experiment sub directory in the experiment dictionary.

    Joins ``experiment_sub_dir`` onto the experiment's base directory,
    optionally creates it on disk, and stores the resulting path in
    ``experiment_dict`` under ``experiment_dict_key``.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration; must contain the
            'experiment_dir' key. Updated in place.
        experiment_dict_key (str): Key under which the new path is stored.
        experiment_sub_dir (str): Sub directory relative to experiment_dir.
        make_dir (bool): When True, create the directory (mode 0o755).
    """
    full_path = os.path.join(experiment_dict['experiment_dir'], experiment_sub_dir)

    if make_dir:
        # Create the directory idempotently and make it world readable.
        os.makedirs(full_path, exist_ok=True)
        os.chmod(full_path, 0o755)

    # Record the path so downstream tasks can look it up by key.
    experiment_dict[experiment_dict_key] = full_path
# --------------------------------------------------------------------------------------------------
def copy_suite_files(logger, experiment_dict):
    """Stage suite and platform files into the suite directory.

    Copies the suite's ``jedi_config.yaml`` and ``flow.cylc`` from the swell
    install tree and, when a platform is configured, the platform's
    ``modules`` and ``r2d2_config.yaml`` files. Missing source files are
    silently skipped.

    Args:
        logger: Logger used for trace output.
        experiment_dict (dict): Experiment configuration containing
            'suite_dir' and the 'suite' sub dictionary.
    """
    suite_dir = experiment_dict['suite_dir']
    suite_dict = experiment_dict['suite']
    suite_name = suite_dict['suite name']

    def _stage(source_root, relative_paths):
        # Copy each existing file under source_root into suite_dir,
        # flattening the relative path down to the bare file name.
        for relative in relative_paths:
            sub_dir, file_name = os.path.split(relative)
            source = os.path.join(source_root, sub_dir, file_name)
            destination = os.path.join(suite_dir, '{}'.format(file_name))
            if os.path.exists(source):
                logger.trace('Copying {} to {}'.format(source, destination))
                shutil.copy(source, destination)

    # Suite level files (JEDI configuration and cylc flow definition).
    _stage(return_suite_path(),
           [os.path.join(suite_name, 'jedi_config.yaml'),
            os.path.join(suite_name, 'flow.cylc')])

    # Platform level files, resolved through the platform's install module.
    if 'platform' in suite_dict:
        platform = suite_dict['platform']
        plat_mod = importlib.import_module('swell.deployment.platforms.' + platform + '.install_path')
        platform_path = getattr(plat_mod, 'return_platform_install_path')()
        _stage(platform_path, ['modules', 'r2d2_config.yaml'])
# --------------------------------------------------------------------------------------------------
def set_swell_path_in_modules(logger, experiment_dict):
    """Resolve swell install paths inside the suite's modules file.

    Replaces the ``swell_bin_path``, ``swell_lib_path`` and
    ``swell_sui_path`` placeholder variables in ``<suite_dir>/modules``
    with the paths of the current swell installation. A no-op when the
    suite ships no modules file.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration containing
            'suite_dir'.
    """
    modules_file = os.path.join(experiment_dict['suite_dir'], 'modules')

    # Nothing to do for suites that do not use modules.
    if not os.path.exists(modules_file):
        return

    # Directory holding the swell executables (derived from swell_task).
    bin_path = os.path.split(shutil.which("swell_task"))[0]
    # Parent directory of the installed swell package.
    lib_path = os.path.split(swell_install_path())[0]
    # Location of the suite definitions shipped with swell.
    sui_path = return_suite_path()

    replacements = {
        'swell_bin_path': bin_path,
        'swell_lib_path': lib_path,
        'swell_sui_path': sui_path,
    }

    with open(modules_file, 'r') as modules_handle:
        contents = modules_handle.read()

    contents = replace_vars(contents, **replacements)

    # Overwrite the modules file with the substituted content.
    with open(modules_file, 'w') as modules_handle:
        modules_handle.write(contents)
# --------------------------------------------------------------------------------------------------
def create_modules_csh(logger, experiment_dict):
    """Generate a csh flavored copy of the suite's modules file.

    Reads ``<suite_dir>/modules`` (bash syntax) and writes
    ``<suite_dir>/modules-csh`` with bash constructs translated to csh:
    'bash' -> 'csh', 'export VAR=x' -> 'setenv VAR x', and PYTHONPATH /
    PATH assignments rewritten to their csh equivalents. A no-op when the
    suite ships no modules file.

    Args:
        logger: Logger instance (currently unused here).
        experiment_dict (dict): Experiment configuration containing
            'suite_dir'.
    """
    modules_file = os.path.join(experiment_dict['suite_dir'], 'modules')

    # Nothing to do for suites that do not use modules.
    if not os.path.exists(modules_file):
        return

    with open(modules_file, 'r') as modules_handle:
        bash_lines = modules_handle.readlines()

    csh_lines = []
    for bash_line in bash_lines:
        csh_line = bash_line
        # Shebang / shell references.
        if 'bash' in bash_line:
            csh_line = csh_line.replace('bash', 'csh')
        # 'export VAR=value' becomes 'setenv VAR value'.
        if 'export' in bash_line:
            csh_line = csh_line.replace('export', 'setenv')
            csh_line = csh_line.replace('=', ' ')
        # PYTHONPATH assignment uses setenv in csh.
        if 'PYTHONPATH=' in bash_line:
            csh_line = csh_line.replace('PYTHONPATH=', 'setenv PYTHONPATH ')
        # PATH assignment uses 'set path = (...)' in csh. Conditions test
        # the original line, so a PYTHONPATH line (which contains 'PATH=')
        # also reaches here after being rewritten; the replacements below
        # are then harmless no-ops.
        if 'PATH=' in bash_line:
            csh_line = csh_line.replace('PATH=', 'set path = (')
            csh_line = csh_line.replace(':$PATH', ' $path)')
        csh_lines.append(csh_line)

    # Write the translated file alongside the original.
    with open(modules_file + '-csh', 'w') as csh_handle:
        csh_handle.writelines(csh_lines)
# --------------------------------------------------------------------------------------------------
| en | 0.393854 | # (C) Copyright 2021-2022 United States Government as represented by the Administrator of the # National Aeronautics and Space Administration. All Rights Reserved. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. # -------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------------- # Get experiment directory # Make the new directory # Set permissions # Add the associated key to the dictionary # -------------------------------------------------------------------------------------------------- # Extract config # -------------- # Copy suite related files to the suite directory # ----------------------------------------------- # Copy platform related files to the suite directory # -------------------------------------------------- # -------------------------------------------------------------------------------------------------- # Extract config # -------------- # Modules file # ------------ # Only do if the suite needs modules # ---------------------------------- # Swell bin path # -------------- # Swell lib path # -------------- # Swell suite path # ---------------- # Dictionary of definitions # ------------------------- # Open the file # ------------- # Overwrite the file # ------------------ # -------------------------------------------------------------------------------------------------- # Extract config # -------------- # Modules file # ------------ # Only do if the suite needs modules # ---------------------------------- # Open the file # ------------- # Replace some things # ------------------- # 'bash' to 'csh' # Export to setenv # Set PYTHONPATH # Set path # Overwrite the file # ------------------ # -------------------------------------------------------------------------------------------------- | 1.978353 | 
2 |
scripts/goref_parser/goref.py | kltm/go-site | 0 | 6616906 | import os
import io
from typing import Dict, Optional, Tuple, Union
import yamldown
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class GoRef:
    """Wrapper around a GO-REF file mixing YAML front matter and markdown."""

    def __init__(self, goref_path: str) -> None:
        # Path of the underlying yamldown file.
        self.yd_path = goref_path

    @property
    def goref_path(self):
        # Single-argument join simply normalizes/returns the stored path.
        return os.path.join(self.yd_path)

    @goref_path.setter
    def goref_path(self, goref_path: str):
        self.yd_path = goref_path

    def parse(
        self, portion: Optional[str] = None
    ) -> Union[Tuple[Dict, str], Dict, str]:
        """Parse the file into its YAML and markdown parts.

        Args:
            portion: 'yaml' for only the YAML mapping, 'md' for only the
                markdown text; any other value returns the (yaml, md) pair.
        """
        with open(self.yd_path, "r") as goref_file:
            stream = io.StringIO(goref_file.read())
        yaml, md = yamldown.load(stream)
        if portion == "md":
            return md
        if portion == "yaml":
            return yaml
        return (yaml, md)
| import os
import io
from typing import Dict, Optional, Tuple, Union
import yamldown
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class GoRef:
    """Wrapper around a GO-REF file mixing YAML front matter and markdown."""

    def __init__(self, goref_path: str) -> None:
        # Path of the underlying yamldown file.
        self.yd_path = goref_path

    @property
    def goref_path(self):
        # Single-argument join simply normalizes/returns the stored path.
        return os.path.join(self.yd_path)

    @goref_path.setter
    def goref_path(self, goref_path: str):
        self.yd_path = goref_path

    def parse(
        self, portion: Optional[str] = None
    ) -> Union[Tuple[Dict, str], Dict, str]:
        """Parse the file into its YAML and markdown parts.

        Args:
            portion: 'yaml' for only the YAML mapping, 'md' for only the
                markdown text; any other value returns the (yaml, md) pair.
        """
        with open(self.yd_path, "r") as goref_file:
            stream = io.StringIO(goref_file.read())
        yaml, md = yamldown.load(stream)
        if portion == "md":
            return md
        if portion == "yaml":
            return yaml
        return (yaml, md)
| none | 1 | 2.773463 | 3 | |
nt_functions/congruences.py | KiyonoKara/Number-Theory-Functions | 0 | 6616907 | <reponame>KiyonoKara/Number-Theory-Functions<filename>nt_functions/congruences.py<gh_stars>0
# Use the Euclidean algorithm since this applies
from .euclidean_algorithm import *
def axEqualsCModM(a, c, m):
    """
    Returns the integer x to solve for ax being equivalent to c(mod m)
    ax ≡ c(mod m)

    Uses the extended Euclidean algorithm to obtain g = gcd(a, m) and a
    Bezout coefficient u (a*u + m*v = g); a solution is then
    x = u * (c / g) mod m.

    :param a: coefficient of x
    :param c: right hand side of the congruence
    :param m: modulus
    :return: an integer x with a*x ≡ c (mod m)
    :raises ValueError: if gcd(a, m) does not divide c, in which case the
        congruence has no solution (previously a meaningless value was
        silently returned)
    """
    g, u, v = euclideanAlgorithm1(a, m)
    # The linear congruence is solvable iff g = gcd(a, m) divides c.
    if c % g != 0:
        raise ValueError("ax = c (mod m) has no solution: gcd(a, m) does not divide c")
    x = ((u * c) // g) % m
    return x
| # Use the Euclidean algorithm since this applies
from .euclidean_algorithm import *
def axEqualsCModM(a, c, m):
    """
    Returns the integer x to solve for ax being equivalent to c(mod m)
    ax ≡ c(mod m)

    Uses the extended Euclidean algorithm to obtain g = gcd(a, m) and a
    Bezout coefficient u (a*u + m*v = g); a solution is then
    x = u * (c / g) mod m.

    :param a: coefficient of x
    :param c: right hand side of the congruence
    :param m: modulus
    :return: an integer x with a*x ≡ c (mod m)
    :raises ValueError: if gcd(a, m) does not divide c, in which case the
        congruence has no solution (previously a meaningless value was
        silently returned)
    """
    g, u, v = euclideanAlgorithm1(a, m)
    # The linear congruence is solvable iff g = gcd(a, m) divides c.
    if c % g != 0:
        raise ValueError("ax = c (mod m) has no solution: gcd(a, m) does not divide c")
    x = ((u * c) // g) % m
    return x
examples/pylot/carla_operator.py | yujialuo/erdos | 0 | 6616908 | <reponame>yujialuo/erdos
import numpy as np
from std_msgs.msg import Float64
from carla.client import CarlaClient
from carla.sensor import Camera, Lidar
from carla.settings import CarlaSettings
from carla.transform import Transform
from erdos.data_stream import DataStream
from erdos.message import Message
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import frequency, setup_logging
import messages
import ray
class CarlaOperator(Op):
    """Provides an ERDOS interface to the CARLA simulator.

    On construction the operator builds the CARLA scene settings (traffic,
    weather, quality) and attaches the requested camera and LIDAR sensors.
    Each step it reads the simulator measurements, publishes player state,
    non-player agent info and raw sensor data on its output streams, and
    forwards the most recent control command back to the simulator.

    Args:
        name: Name of the operator.
        flags: Object carrying the carla_* configuration flags.
        camera_setups: List of (stream name, postprocessing type) tuples,
            one per camera sensor to attach.
        lidar_stream_names: List of output stream names, one per LIDAR
            sensor to attach.
        log_file_name: Optional path of a file to log to.
        synchronous_mode (bool): whether the simulator will wait for control
            input from the client.
    """
    def __init__(self,
                 name,
                 flags,
                 camera_setups=None,
                 lidar_stream_names=None,
                 log_file_name=None):
        super(CarlaOperator, self).__init__(name)
        # Bug fix: the defaults used to be mutable lists ([]), which are
        # evaluated once and shared across every instance of the class.
        if camera_setups is None:
            camera_setups = []
        if lidar_stream_names is None:
            lidar_stream_names = []
        self._flags = flags
        self._logger = setup_logging(self.name, log_file_name)
        # Monotonically increasing counter used to build message timestamps.
        self.message_num = 0
        if self._flags.carla_high_quality:
            quality = 'Epic'
        else:
            quality = 'Low'
        self.settings = CarlaSettings()
        self.settings.set(
            SynchronousMode=self._flags.carla_synchronous_mode,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self._flags.carla_num_vehicles,
            NumberOfPedestrians=self._flags.carla_num_pedestrians,
            WeatherId=self._flags.carla_weather,
            QualityLevel=quality)
        self.settings.randomize_seeds()
        self.lidar_streams = []
        for (camera_stream_name, camera_type) in camera_setups:
            self.__add_camera(name=camera_stream_name,
                              postprocessing=camera_type)
        for lidar_stream_name in lidar_stream_names:
            self.__add_lidar(name=lidar_stream_name)

    @staticmethod
    def setup_streams(input_streams, camera_setups, lidar_stream_names):
        """Declare the operator's input callback and output streams.

        Returns the fixed measurement streams plus one stream per
        configured camera and LIDAR sensor.
        """
        input_streams.add_callback(CarlaOperator.update_control)
        camera_streams = [DataStream(name=camera,
                                     labels={'sensor_type': 'camera',
                                             'camera_type': camera_type})
                          for (camera, camera_type) in camera_setups]
        lidar_streams = [DataStream(name=lidar)
                         for lidar in lidar_stream_names]
        return [
            DataStream(name='world_transform'),
            DataStream(name='vehicle_pos'),
            DataStream(name='acceleration'),
            DataStream(data_type=Float64, name='forward_speed'),
            DataStream(data_type=Float64, name='vehicle_collisions'),
            DataStream(data_type=Float64, name='pedestrian_collisions'),
            DataStream(data_type=Float64, name='other_collisions'),
            DataStream(data_type=Float64, name='other_lane'),
            DataStream(data_type=Float64, name='offroad'),
            DataStream(name='traffic_lights'),
            DataStream(name='pedestrians'),
            DataStream(name='vehicles'),
            DataStream(name='traffic_signs'),
        ] + camera_streams + lidar_streams

    def __add_camera(self,
                     name,
                     postprocessing,
                     field_of_view=90.0,
                     image_size=(800, 600),
                     position=(0.3, 0, 1.3),
                     rotation_pitch=0,
                     rotation_roll=0,
                     rotation_yaw=0):
        """Adds a camera sensor to the CARLA settings.

        Args:
            name: A string naming the camera.
            postprocessing: "SceneFinal", "Depth", "SemanticSegmentation".
            field_of_view: Horizontal field of view in degrees.
            image_size: (width, height) of the produced images.
            position: (x, y, z) of the camera relative to the vehicle.
            rotation_pitch/rotation_roll/rotation_yaw: Camera orientation.
        """
        camera = Camera(
            name,
            PostProcessing=postprocessing,
            FOV=field_of_view,
            ImageSizeX=image_size[0],
            ImageSizeY=image_size[1],
            PositionX=position[0],
            PositionY=position[1],
            PositionZ=position[2],
            RotationPitch=rotation_pitch,
            RotationRoll=rotation_roll,
            RotationYaw=rotation_yaw)
        self.settings.add_sensor(camera)

    def __add_lidar(self,
                    name,
                    channels=32,
                    max_range=50,
                    points_per_second=100000,
                    rotation_frequency=10,
                    upper_fov_limit=10,
                    lower_fov_limit=-30,
                    position=(0, 0, 1.4),
                    rotation_pitch=0,
                    rotation_yaw=0,
                    rotation_roll=0):
        """Adds a LIDAR sensor and a corresponding output stream.

        Args:
            name: A string naming the LIDAR (and its output stream).
        """
        lidar = Lidar(
            name,
            Channels=channels,
            Range=max_range,
            PointsPerSecond=points_per_second,
            RotationFrequency=rotation_frequency,
            UpperFovLimit=upper_fov_limit,
            LowerFovLimit=lower_fov_limit,
            PositionX=position[0],
            PositionY=position[1],
            PositionZ=position[2],
            RotationPitch=rotation_pitch,
            RotationYaw=rotation_yaw,
            RotationRoll=rotation_roll)
        self.settings.add_sensor(lidar)
        output_stream = DataStream(name=name, labels={"sensor_type": "lidar"})
        self.lidar_streams.append(output_stream)

    # TODO(ionel): Set the frequency programmatically.
    @frequency(10)
    def step(self):
        """Read one batch of simulator data, publish it, send control."""
        measurements, sensor_data = self.client.read_data()

        # Send measurements
        player_measurements = measurements.player_measurements
        vehicle_pos = ((player_measurements.transform.location.x,
                        player_measurements.transform.location.y,
                        player_measurements.transform.location.z),
                       (player_measurements.transform.orientation.x,
                        player_measurements.transform.orientation.y,
                        player_measurements.transform.orientation.z))

        world_transform = Transform(player_measurements.transform)

        timestamp = Timestamp(coordinates=[self.message_num])
        self.message_num += 1
        # NOTE(review): registering the serializer on every step looks
        # redundant; consider registering once before the first step —
        # confirm against ray's semantics before moving.
        ray.register_custom_serializer(Message, use_pickle=True)

        self.get_output_stream('world_transform').send(
            Message(world_transform, timestamp))

        self.get_output_stream('vehicle_pos').send(
            Message(vehicle_pos, timestamp))
        acceleration = (player_measurements.acceleration.x,
                        player_measurements.acceleration.y,
                        player_measurements.acceleration.z)
        self.get_output_stream('acceleration').send(
            Message(acceleration, timestamp))
        self.get_output_stream('forward_speed').send(
            Message(player_measurements.forward_speed, timestamp))
        self.get_output_stream('vehicle_collisions').send(
            Message(player_measurements.collision_vehicles, timestamp))
        self.get_output_stream('pedestrian_collisions').send(
            Message(player_measurements.collision_pedestrians, timestamp))
        self.get_output_stream('other_collisions').send(
            Message(player_measurements.collision_other, timestamp))
        self.get_output_stream('other_lane').send(
            Message(player_measurements.intersection_otherlane, timestamp))
        self.get_output_stream('offroad').send(
            Message(player_measurements.intersection_offroad, timestamp))

        # Sort the non-player agents into their respective categories.
        vehicles = []
        pedestrians = []
        traffic_lights = []
        speed_limit_signs = []

        for agent in measurements.non_player_agents:
            if agent.HasField('vehicle'):
                pos = messages.Transform(agent.vehicle.transform)
                bb = messages.BoundingBox(agent.vehicle.bounding_box)
                forward_speed = agent.vehicle.forward_speed
                vehicle = messages.Vehicle(pos, bb, forward_speed)
                vehicles.append(vehicle)
            elif agent.HasField('pedestrian'):
                pos = messages.Transform(agent.pedestrian.transform)
                bb = messages.BoundingBox(agent.pedestrian.bounding_box)
                forward_speed = agent.pedestrian.forward_speed
                pedestrian = messages.Pedestrian(pos, bb, forward_speed)
                pedestrians.append(pedestrian)
            elif agent.HasField('traffic_light'):
                transform = messages.Transform(agent.traffic_light.transform)
                traffic_light = messages.TrafficLight(
                    transform, agent.traffic_light.state)
                traffic_lights.append(traffic_light)
            elif agent.HasField('speed_limit_sign'):
                transform = messages.Transform(agent.speed_limit_sign.transform)
                speed_sign = messages.SpeedLimitSign(
                    transform, agent.speed_limit_sign.speed_limit)
                speed_limit_signs.append(speed_sign)

        vehicles_msg = Message(vehicles, timestamp)
        self.get_output_stream('vehicles').send(vehicles_msg)
        pedestrians_msg = Message(pedestrians, timestamp)
        self.get_output_stream('pedestrians').send(pedestrians_msg)
        traffic_lights_msg = Message(traffic_lights, timestamp)
        self.get_output_stream('traffic_lights').send(traffic_lights_msg)
        traffic_sings_msg = Message(speed_limit_signs, timestamp)
        self.get_output_stream('traffic_signs').send(traffic_sings_msg)

        # Send sensor data
        for name, measurement in sensor_data.items():
            self.get_output_stream(name).send(Message(measurement, timestamp))

        self.client.send_control(**self.control)

    def update_control(self, msg):
        """Updates the control dict with the latest commands received."""
        self.control.update(msg.data)

    def execute(self):
        """Connect to CARLA, start an episode and drive the step loop."""
        # Subscribe to control streams
        # NOTE(review): CARLA's control fields are named 'brake' and
        # 'hand_brake'; the 'break'/'hand_break' keys below look like
        # typos but are kept because upstream operators may send these
        # exact keys — confirm against the control-producing operators.
        self.control = {
            'steer': 0.0,
            'throttle': 0.0,
            'break': 0.0,
            'hand_break': False,
            'reverse': False
        }
        self.client = CarlaClient(self._flags.carla_host,
                                  self._flags.carla_port,
                                  timeout=10)
        self.client.connect()
        scene = self.client.load_settings(self.settings)

        # Choose one player start at random.
        # NOTE(review): randint's upper bound is exclusive, so the last
        # spawn point is never chosen and a scene with a single spawn
        # point raises ValueError — verify intended behavior.
        number_of_player_starts = len(scene.player_start_spots)
        player_start = self._flags.carla_start_player_num
        if self._flags.carla_random_player_start:
            player_start = np.random.randint(
                0, max(0, number_of_player_starts - 1))

        self.client.start_episode(player_start)

        self.step()
        self.spin()
| import numpy as np
from std_msgs.msg import Float64
from carla.client import CarlaClient
from carla.sensor import Camera, Lidar
from carla.settings import CarlaSettings
from carla.transform import Transform
from erdos.data_stream import DataStream
from erdos.message import Message
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import frequency, setup_logging
import messages
import ray
class CarlaOperator(Op):
    """Provides an ERDOS interface to the CARLA simulator.

    On construction the operator builds the CARLA scene settings (traffic,
    weather, quality) and attaches the requested camera and LIDAR sensors.
    Each step it reads the simulator measurements, publishes player state,
    non-player agent info and raw sensor data on its output streams, and
    forwards the most recent control command back to the simulator.

    Args:
        name: Name of the operator.
        flags: Object carrying the carla_* configuration flags.
        camera_setups: List of (stream name, postprocessing type) tuples,
            one per camera sensor to attach.
        lidar_stream_names: List of output stream names, one per LIDAR
            sensor to attach.
        log_file_name: Optional path of a file to log to.
        synchronous_mode (bool): whether the simulator will wait for control
            input from the client.
    """
    def __init__(self,
                 name,
                 flags,
                 camera_setups=None,
                 lidar_stream_names=None,
                 log_file_name=None):
        super(CarlaOperator, self).__init__(name)
        # Bug fix: the defaults used to be mutable lists ([]), which are
        # evaluated once and shared across every instance of the class.
        if camera_setups is None:
            camera_setups = []
        if lidar_stream_names is None:
            lidar_stream_names = []
        self._flags = flags
        self._logger = setup_logging(self.name, log_file_name)
        # Monotonically increasing counter used to build message timestamps.
        self.message_num = 0
        if self._flags.carla_high_quality:
            quality = 'Epic'
        else:
            quality = 'Low'
        self.settings = CarlaSettings()
        self.settings.set(
            SynchronousMode=self._flags.carla_synchronous_mode,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self._flags.carla_num_vehicles,
            NumberOfPedestrians=self._flags.carla_num_pedestrians,
            WeatherId=self._flags.carla_weather,
            QualityLevel=quality)
        self.settings.randomize_seeds()
        self.lidar_streams = []
        for (camera_stream_name, camera_type) in camera_setups:
            self.__add_camera(name=camera_stream_name,
                              postprocessing=camera_type)
        for lidar_stream_name in lidar_stream_names:
            self.__add_lidar(name=lidar_stream_name)

    @staticmethod
    def setup_streams(input_streams, camera_setups, lidar_stream_names):
        """Declare the operator's input callback and output streams.

        Returns the fixed measurement streams plus one stream per
        configured camera and LIDAR sensor.
        """
        input_streams.add_callback(CarlaOperator.update_control)
        camera_streams = [DataStream(name=camera,
                                     labels={'sensor_type': 'camera',
                                             'camera_type': camera_type})
                          for (camera, camera_type) in camera_setups]
        lidar_streams = [DataStream(name=lidar)
                         for lidar in lidar_stream_names]
        return [
            DataStream(name='world_transform'),
            DataStream(name='vehicle_pos'),
            DataStream(name='acceleration'),
            DataStream(data_type=Float64, name='forward_speed'),
            DataStream(data_type=Float64, name='vehicle_collisions'),
            DataStream(data_type=Float64, name='pedestrian_collisions'),
            DataStream(data_type=Float64, name='other_collisions'),
            DataStream(data_type=Float64, name='other_lane'),
            DataStream(data_type=Float64, name='offroad'),
            DataStream(name='traffic_lights'),
            DataStream(name='pedestrians'),
            DataStream(name='vehicles'),
            DataStream(name='traffic_signs'),
        ] + camera_streams + lidar_streams

    def __add_camera(self,
                     name,
                     postprocessing,
                     field_of_view=90.0,
                     image_size=(800, 600),
                     position=(0.3, 0, 1.3),
                     rotation_pitch=0,
                     rotation_roll=0,
                     rotation_yaw=0):
        """Adds a camera sensor to the CARLA settings.

        Args:
            name: A string naming the camera.
            postprocessing: "SceneFinal", "Depth", "SemanticSegmentation".
            field_of_view: Horizontal field of view in degrees.
            image_size: (width, height) of the produced images.
            position: (x, y, z) of the camera relative to the vehicle.
            rotation_pitch/rotation_roll/rotation_yaw: Camera orientation.
        """
        camera = Camera(
            name,
            PostProcessing=postprocessing,
            FOV=field_of_view,
            ImageSizeX=image_size[0],
            ImageSizeY=image_size[1],
            PositionX=position[0],
            PositionY=position[1],
            PositionZ=position[2],
            RotationPitch=rotation_pitch,
            RotationRoll=rotation_roll,
            RotationYaw=rotation_yaw)
        self.settings.add_sensor(camera)

    def __add_lidar(self,
                    name,
                    channels=32,
                    max_range=50,
                    points_per_second=100000,
                    rotation_frequency=10,
                    upper_fov_limit=10,
                    lower_fov_limit=-30,
                    position=(0, 0, 1.4),
                    rotation_pitch=0,
                    rotation_yaw=0,
                    rotation_roll=0):
        """Adds a LIDAR sensor and a corresponding output stream.

        Args:
            name: A string naming the LIDAR (and its output stream).
        """
        lidar = Lidar(
            name,
            Channels=channels,
            Range=max_range,
            PointsPerSecond=points_per_second,
            RotationFrequency=rotation_frequency,
            UpperFovLimit=upper_fov_limit,
            LowerFovLimit=lower_fov_limit,
            PositionX=position[0],
            PositionY=position[1],
            PositionZ=position[2],
            RotationPitch=rotation_pitch,
            RotationYaw=rotation_yaw,
            RotationRoll=rotation_roll)
        self.settings.add_sensor(lidar)
        output_stream = DataStream(name=name, labels={"sensor_type": "lidar"})
        self.lidar_streams.append(output_stream)

    # TODO(ionel): Set the frequency programmatically.
    @frequency(10)
    def step(self):
        """Read one batch of simulator data, publish it, send control."""
        measurements, sensor_data = self.client.read_data()

        # Send measurements
        player_measurements = measurements.player_measurements
        vehicle_pos = ((player_measurements.transform.location.x,
                        player_measurements.transform.location.y,
                        player_measurements.transform.location.z),
                       (player_measurements.transform.orientation.x,
                        player_measurements.transform.orientation.y,
                        player_measurements.transform.orientation.z))

        world_transform = Transform(player_measurements.transform)

        timestamp = Timestamp(coordinates=[self.message_num])
        self.message_num += 1
        # NOTE(review): registering the serializer on every step looks
        # redundant; consider registering once before the first step —
        # confirm against ray's semantics before moving.
        ray.register_custom_serializer(Message, use_pickle=True)

        self.get_output_stream('world_transform').send(
            Message(world_transform, timestamp))

        self.get_output_stream('vehicle_pos').send(
            Message(vehicle_pos, timestamp))
        acceleration = (player_measurements.acceleration.x,
                        player_measurements.acceleration.y,
                        player_measurements.acceleration.z)
        self.get_output_stream('acceleration').send(
            Message(acceleration, timestamp))
        self.get_output_stream('forward_speed').send(
            Message(player_measurements.forward_speed, timestamp))
        self.get_output_stream('vehicle_collisions').send(
            Message(player_measurements.collision_vehicles, timestamp))
        self.get_output_stream('pedestrian_collisions').send(
            Message(player_measurements.collision_pedestrians, timestamp))
        self.get_output_stream('other_collisions').send(
            Message(player_measurements.collision_other, timestamp))
        self.get_output_stream('other_lane').send(
            Message(player_measurements.intersection_otherlane, timestamp))
        self.get_output_stream('offroad').send(
            Message(player_measurements.intersection_offroad, timestamp))

        # Sort the non-player agents into their respective categories.
        vehicles = []
        pedestrians = []
        traffic_lights = []
        speed_limit_signs = []

        for agent in measurements.non_player_agents:
            if agent.HasField('vehicle'):
                pos = messages.Transform(agent.vehicle.transform)
                bb = messages.BoundingBox(agent.vehicle.bounding_box)
                forward_speed = agent.vehicle.forward_speed
                vehicle = messages.Vehicle(pos, bb, forward_speed)
                vehicles.append(vehicle)
            elif agent.HasField('pedestrian'):
                pos = messages.Transform(agent.pedestrian.transform)
                bb = messages.BoundingBox(agent.pedestrian.bounding_box)
                forward_speed = agent.pedestrian.forward_speed
                pedestrian = messages.Pedestrian(pos, bb, forward_speed)
                pedestrians.append(pedestrian)
            elif agent.HasField('traffic_light'):
                transform = messages.Transform(agent.traffic_light.transform)
                traffic_light = messages.TrafficLight(
                    transform, agent.traffic_light.state)
                traffic_lights.append(traffic_light)
            elif agent.HasField('speed_limit_sign'):
                transform = messages.Transform(agent.speed_limit_sign.transform)
                speed_sign = messages.SpeedLimitSign(
                    transform, agent.speed_limit_sign.speed_limit)
                speed_limit_signs.append(speed_sign)

        vehicles_msg = Message(vehicles, timestamp)
        self.get_output_stream('vehicles').send(vehicles_msg)
        pedestrians_msg = Message(pedestrians, timestamp)
        self.get_output_stream('pedestrians').send(pedestrians_msg)
        traffic_lights_msg = Message(traffic_lights, timestamp)
        self.get_output_stream('traffic_lights').send(traffic_lights_msg)
        traffic_sings_msg = Message(speed_limit_signs, timestamp)
        self.get_output_stream('traffic_signs').send(traffic_sings_msg)

        # Send sensor data
        for name, measurement in sensor_data.items():
            self.get_output_stream(name).send(Message(measurement, timestamp))

        self.client.send_control(**self.control)

    def update_control(self, msg):
        """Updates the control dict with the latest commands received."""
        self.control.update(msg.data)

    def execute(self):
        """Connect to CARLA, start an episode and drive the step loop."""
        # Subscribe to control streams
        # NOTE(review): CARLA's control fields are named 'brake' and
        # 'hand_brake'; the 'break'/'hand_break' keys below look like
        # typos but are kept because upstream operators may send these
        # exact keys — confirm against the control-producing operators.
        self.control = {
            'steer': 0.0,
            'throttle': 0.0,
            'break': 0.0,
            'hand_break': False,
            'reverse': False
        }
        self.client = CarlaClient(self._flags.carla_host,
                                  self._flags.carla_port,
                                  timeout=10)
        self.client.connect()
        scene = self.client.load_settings(self.settings)

        # Choose one player start at random.
        # NOTE(review): randint's upper bound is exclusive, so the last
        # spawn point is never chosen and a scene with a single spawn
        # point raises ValueError — verify intended behavior.
        number_of_player_starts = len(scene.player_start_spots)
        player_start = self._flags.carla_start_player_num
        if self._flags.carla_random_player_start:
            player_start = np.random.randint(
                0, max(0, number_of_player_starts - 1))

        self.client.start_episode(player_start)

        self.step()
        self.spin()
actionnetwork_activist_sync/actionnetwork.py | afitts/actionnetwork_activist_sync | 0 | 6616909 | <filename>actionnetwork_activist_sync/actionnetwork.py
# -*- coding: utf-8 -*-
"""Interacts with ActionNetwork API
https://actionnetwork.org/docs
"""
from pyactionnetwork import ActionNetworkApi
from actionnetwork_activist_sync.osdi import Person
class ActionNetwork(ActionNetworkApi):
"""Helper class to interact with the ActionNetwork API
Expects env var to be set
"""
def __init__(self,api_key):
super().__init__(api_key=api_key)
def remove_member_by_email(self, email):
"""Update custom field that flags membership (is_member)
Args:
email (str): email address to update
Returns:
list of Person objects with updated data
"""
updated_people = []
people = self.get_people_by_email(email)
for person in people:
response = self.update_person(
person_id=person.get_actionnetwork_id(),
custom_fields={'is_member': 'False'}
)
updated_people.append(Person(**response))
return updated_people
def get_people_by_email(self, email):
"""Search for people by email
Args:
email (str): email address to update
Returns:
list of Person objects with updated data
"""
response = self.get_person(search_string=email)
return [Person(**p) for p in response['_embedded']['osdi:people']]
| <filename>actionnetwork_activist_sync/actionnetwork.py
# -*- coding: utf-8 -*-
"""Interacts with ActionNetwork API
https://actionnetwork.org/docs
"""
from pyactionnetwork import ActionNetworkApi
from actionnetwork_activist_sync.osdi import Person
class ActionNetwork(ActionNetworkApi):
"""Helper class to interact with the ActionNetwork API
Expects env var to be set
"""
def __init__(self,api_key):
super().__init__(api_key=api_key)
def remove_member_by_email(self, email):
"""Update custom field that flags membership (is_member)
Args:
email (str): email address to update
Returns:
list of Person objects with updated data
"""
updated_people = []
people = self.get_people_by_email(email)
for person in people:
response = self.update_person(
person_id=person.get_actionnetwork_id(),
custom_fields={'is_member': 'False'}
)
updated_people.append(Person(**response))
return updated_people
def get_people_by_email(self, email):
"""Search for people by email
Args:
email (str): email address to update
Returns:
list of Person objects with updated data
"""
response = self.get_person(search_string=email)
return [Person(**p) for p in response['_embedded']['osdi:people']]
| en | 0.753484 | # -*- coding: utf-8 -*- Interacts with ActionNetwork API https://actionnetwork.org/docs Helper class to interact with the ActionNetwork API Expects env var to be set Update custom field that flags membership (is_member) Args: email (str): email address to update Returns: list of Person objects with updated data Search for people by email Args: email (str): email address to update Returns: list of Person objects with updated data | 2.347635 | 2 |
reits.py | alxfed/real | 0 | 6616910 | <filename>reits.py
"""
REITs
"""
#https://www.investor.gov/introduction-investing/basics/investment-products/real-estate-investment-trusts-reits
| <filename>reits.py
"""
REITs
"""
#https://www.investor.gov/introduction-investing/basics/investment-products/real-estate-investment-trusts-reits
| en | 0.750138 | REITs #https://www.investor.gov/introduction-investing/basics/investment-products/real-estate-investment-trusts-reits | 1.179099 | 1 |
src/hpccm_containers/cfdem/cfdem.py | 0luhancheng0/hpccm-containers | 0 | 6616911 | <filename>src/hpccm_containers/cfdem/cfdem.py
from os import environ
from hpccm import config, Stage
from hpccm.building_blocks import gnu, openmpi, packages, boost, python, generic_build
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.primitives import label, baseimage, comment
from fire import Fire
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
from hpccm_containers.utils import from_library, from_prefix, shell_with_log, add_flags, add_library_path, add_include_path
def build(container_format='singularity', openmpi_version='2.0.4', gnu_version='10', cfdem_prefix='/usr/local/cfdem',
cfdem_version='3.8.0', liggghts_prefix='/usr/local/ligghts', lpp_prefix='/usr/local/lpp', image='ubuntu:20.04',
mlton_version='on-20210117-release', gmp_version='6.2.1'):
config.set_container_format(container_format)
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += shell(commands=['rm /usr/bin/sh', 'ln -s /usr/bin/bash /usr/bin/sh', '/usr/bin/bash'])
stage0 += packages(apt=['locales', 'wget', 'software-properties-common', 'git', 'build-essential', 'flex',
'bison', 'cmake', 'zlib1g-dev', 'gnuplot', 'libreadline-dev', 'libncurses-dev',
'libxt-dev', 'libscotch-dev', 'libptscotch-dev', 'libvtk6-dev', 'python-numpy',
'python-dev', 'qt5-default', 'git-core', 'libboost-system-dev', 'libboost-thread-dev',
'libqt5x11extras5-dev', 'qttools5-dev', 'curl', 'libgl1-mesa-dev', 'libosmesa6-dev', 'libssh2-1',
'libtool'])
compilers = gnu(version=gnu_version)
stage0 += compilers
openmpi_building_block = openmpi(version=openmpi_version, toolchain=compilers.toolchain, cuda=False)
stage0 += openmpi_building_block
stage0 += generic_autotools(
url=f'https://gmplib.org/download/gmp/gmp-{gmp_version}.tar.xz',
prefix='/usr/local/gmp',
directory=f'gmp-{gmp_version}/',
)
stage0 += environment(variables=from_library('/usr/local/gmp'))
stage0 += generic_build(
repository='https://github.com/MLton/mlton.git',
branch=mlton_version,
build=['make -j'],
install=['make PREFIX=/usr/local/mlton']
)
if cfdem_version == '3.8.0':
OF_release = '5.x'
OF_commitHashtag = '538044ac05c4672b37c7df607dca1116fa88df88'
else:
raise Exception('Check https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC/blob/master/src/lagrangian/cfdemParticle/cfdTools/versionInfo.H')
stage0 += comment('Obtain CFDEM source')
stage0 += shell(commands=[
f'mkdir -p {cfdem_prefix} {liggghts_prefix} {lpp_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC.git {cfdem_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/LIGGGHTS-PUBLIC.git {liggghts_prefix}',
f'git clone https://github.com/CFDEMproject/LPP.git {lpp_prefix}'
])
stage0 += comment('Install OpenFoam')
openfoam_prefix = f'/usr/local/OpenFOAM-{OF_release}'
thirdparty_prefix = f'/usr/local/ThirdParty-{OF_release}'
stage0 += shell(commands=[
f'mkdir -p {openfoam_prefix} {thirdparty_prefix}',
f'git clone https://github.com/OpenFOAM/OpenFOAM-{OF_release}.git {openfoam_prefix} && cd {openfoam_prefix} && git checkout {OF_commitHashtag}',
f'git clone https://github.com/OpenFOAM/ThirdParty-{OF_release}.git {thirdparty_prefix}',
])
stage0 += shell(commands=[
f'echo "source {openfoam_prefix}/etc/bashrc" >> ~/.bashrc',
])
# DLIB_PATH = '/usr/lib/x86_64-linux-gnu'
# INCLUDE_PATH = '/usr/include'
stage0 += shell_with_log(commands=[
f'{thirdparty_prefix}/Allwmake -j', # this breaks with openmpi >= 3, error: static assertion failed: "MPI_Type_extent was removed in MPI-3.0. Use MPI_Type_get_extent instead."
# f'{thirdparty_prefix}/makeParaView -mpi -mesa -mesa-lib {DLIB_PATH}/libOSMesa.so -mesa-include {INCLUDE_PATH}/GL -verbose',
f'{thirdparty_prefix}/makeParaView -mpi'
'wmRefresh'
])
stage0 += shell(commands=[
f'{openfoam_prefix}/Allwmake -j',
])
# /usr/bin/g++ -fPIC -O3 -DNDEBUG -Wl,--no-undefined -lc -shared -Wl,-soname,libvtkCommonSystem-pv5.4.so.1 -o ../../../lib/libvtkCommonSystem-pv5.4.so.1 CMakeFiles/vtkCommonSystem.dir/vtkClientSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkDirectory.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkServerSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocketCollection.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkThreadMessager.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkTimerLog.cxx.o -Wl,-rpath,/usr/local/ThirdParty-5.x/build/linux64Gcc/ParaView-5.4.0/lib: ../../../lib/libvtkCommonCore-pv5.4.so.1 ../../../lib/libvtksys-pv5.4.so.1 -lpthread -ldl
return stage0
if __name__ == '__main__':
Fire(build)
| <filename>src/hpccm_containers/cfdem/cfdem.py
from os import environ
from hpccm import config, Stage
from hpccm.building_blocks import gnu, openmpi, packages, boost, python, generic_build
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.primitives import label, baseimage, comment
from fire import Fire
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
from hpccm_containers.utils import from_library, from_prefix, shell_with_log, add_flags, add_library_path, add_include_path
def build(container_format='singularity', openmpi_version='2.0.4', gnu_version='10', cfdem_prefix='/usr/local/cfdem',
cfdem_version='3.8.0', liggghts_prefix='/usr/local/ligghts', lpp_prefix='/usr/local/lpp', image='ubuntu:20.04',
mlton_version='on-20210117-release', gmp_version='6.2.1'):
config.set_container_format(container_format)
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += shell(commands=['rm /usr/bin/sh', 'ln -s /usr/bin/bash /usr/bin/sh', '/usr/bin/bash'])
stage0 += packages(apt=['locales', 'wget', 'software-properties-common', 'git', 'build-essential', 'flex',
'bison', 'cmake', 'zlib1g-dev', 'gnuplot', 'libreadline-dev', 'libncurses-dev',
'libxt-dev', 'libscotch-dev', 'libptscotch-dev', 'libvtk6-dev', 'python-numpy',
'python-dev', 'qt5-default', 'git-core', 'libboost-system-dev', 'libboost-thread-dev',
'libqt5x11extras5-dev', 'qttools5-dev', 'curl', 'libgl1-mesa-dev', 'libosmesa6-dev', 'libssh2-1',
'libtool'])
compilers = gnu(version=gnu_version)
stage0 += compilers
openmpi_building_block = openmpi(version=openmpi_version, toolchain=compilers.toolchain, cuda=False)
stage0 += openmpi_building_block
stage0 += generic_autotools(
url=f'https://gmplib.org/download/gmp/gmp-{gmp_version}.tar.xz',
prefix='/usr/local/gmp',
directory=f'gmp-{gmp_version}/',
)
stage0 += environment(variables=from_library('/usr/local/gmp'))
stage0 += generic_build(
repository='https://github.com/MLton/mlton.git',
branch=mlton_version,
build=['make -j'],
install=['make PREFIX=/usr/local/mlton']
)
if cfdem_version == '3.8.0':
OF_release = '5.x'
OF_commitHashtag = '538044ac05c4672b37c7df607dca1116fa88df88'
else:
raise Exception('Check https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC/blob/master/src/lagrangian/cfdemParticle/cfdTools/versionInfo.H')
stage0 += comment('Obtain CFDEM source')
stage0 += shell(commands=[
f'mkdir -p {cfdem_prefix} {liggghts_prefix} {lpp_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC.git {cfdem_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/LIGGGHTS-PUBLIC.git {liggghts_prefix}',
f'git clone https://github.com/CFDEMproject/LPP.git {lpp_prefix}'
])
stage0 += comment('Install OpenFoam')
openfoam_prefix = f'/usr/local/OpenFOAM-{OF_release}'
thirdparty_prefix = f'/usr/local/ThirdParty-{OF_release}'
stage0 += shell(commands=[
f'mkdir -p {openfoam_prefix} {thirdparty_prefix}',
f'git clone https://github.com/OpenFOAM/OpenFOAM-{OF_release}.git {openfoam_prefix} && cd {openfoam_prefix} && git checkout {OF_commitHashtag}',
f'git clone https://github.com/OpenFOAM/ThirdParty-{OF_release}.git {thirdparty_prefix}',
])
stage0 += shell(commands=[
f'echo "source {openfoam_prefix}/etc/bashrc" >> ~/.bashrc',
])
# DLIB_PATH = '/usr/lib/x86_64-linux-gnu'
# INCLUDE_PATH = '/usr/include'
stage0 += shell_with_log(commands=[
f'{thirdparty_prefix}/Allwmake -j', # this breaks with openmpi >= 3, error: static assertion failed: "MPI_Type_extent was removed in MPI-3.0. Use MPI_Type_get_extent instead."
# f'{thirdparty_prefix}/makeParaView -mpi -mesa -mesa-lib {DLIB_PATH}/libOSMesa.so -mesa-include {INCLUDE_PATH}/GL -verbose',
f'{thirdparty_prefix}/makeParaView -mpi'
'wmRefresh'
])
stage0 += shell(commands=[
f'{openfoam_prefix}/Allwmake -j',
])
# /usr/bin/g++ -fPIC -O3 -DNDEBUG -Wl,--no-undefined -lc -shared -Wl,-soname,libvtkCommonSystem-pv5.4.so.1 -o ../../../lib/libvtkCommonSystem-pv5.4.so.1 CMakeFiles/vtkCommonSystem.dir/vtkClientSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkDirectory.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkServerSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocketCollection.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkThreadMessager.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkTimerLog.cxx.o -Wl,-rpath,/usr/local/ThirdParty-5.x/build/linux64Gcc/ParaView-5.4.0/lib: ../../../lib/libvtkCommonCore-pv5.4.so.1 ../../../lib/libvtksys-pv5.4.so.1 -lpthread -ldl
return stage0
if __name__ == '__main__':
Fire(build)
| en | 0.397145 | # DLIB_PATH = '/usr/lib/x86_64-linux-gnu' # INCLUDE_PATH = '/usr/include' # this breaks with openmpi >= 3, error: static assertion failed: "MPI_Type_extent was removed in MPI-3.0. Use MPI_Type_get_extent instead." # f'{thirdparty_prefix}/makeParaView -mpi -mesa -mesa-lib {DLIB_PATH}/libOSMesa.so -mesa-include {INCLUDE_PATH}/GL -verbose', # /usr/bin/g++ -fPIC -O3 -DNDEBUG -Wl,--no-undefined -lc -shared -Wl,-soname,libvtkCommonSystem-pv5.4.so.1 -o ../../../lib/libvtkCommonSystem-pv5.4.so.1 CMakeFiles/vtkCommonSystem.dir/vtkClientSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkDirectory.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkServerSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocketCollection.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkThreadMessager.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkTimerLog.cxx.o -Wl,-rpath,/usr/local/ThirdParty-5.x/build/linux64Gcc/ParaView-5.4.0/lib: ../../../lib/libvtkCommonCore-pv5.4.so.1 ../../../lib/libvtksys-pv5.4.so.1 -lpthread -ldl | 1.508178 | 2 |
shapefile/process_mt.py | dgketchum/itype | 0 | 6616912 | <gh_stars>0
import os
from collections import OrderedDict
import fiona
def process_mt(shp, year, out_file=None):
features, centroids = [], []
with fiona.open(shp) as src:
[features.append(f) for f in src]
meta = src.meta
meta['schema']['properties'] = OrderedDict([('FID', 'int:10'),
('YEAR', 'int:10'),
('IType', 'str:10')])
ct = 0
with fiona.open(out_file, 'w', **meta) as dst:
for f in features:
if not f['properties']['IType']:
continue
if f['geometry']['type'] != 'Polygon':
continue
out_feat = {'type': 'Feature',
'properties': OrderedDict([('FID', ct),
('YEAR', year),
('IType', f['properties']['IType'])]),
'geometry': f['geometry']}
dst.write(out_feat)
ct += 1
if __name__ == '__main__':
in_ = '/media/hdisk/itype/mt_flu/raw_flu_wgs'
out_ = '/media/hdisk/itype/mt_flu/flu_itype'
_shapes = sorted([os.path.join(in_, x) for x in os.listdir(in_) if x.endswith('.shp')])
for s in _shapes:
yr = int(s.split('.')[0][-4:])
outfile = os.path.join(out_, 'mt_itype_{}.shp'.format(yr))
print(outfile)
process_mt(s, yr, out_file=outfile)
# ========================= EOF ====================================================================
| import os
from collections import OrderedDict
import fiona
def process_mt(shp, year, out_file=None):
features, centroids = [], []
with fiona.open(shp) as src:
[features.append(f) for f in src]
meta = src.meta
meta['schema']['properties'] = OrderedDict([('FID', 'int:10'),
('YEAR', 'int:10'),
('IType', 'str:10')])
ct = 0
with fiona.open(out_file, 'w', **meta) as dst:
for f in features:
if not f['properties']['IType']:
continue
if f['geometry']['type'] != 'Polygon':
continue
out_feat = {'type': 'Feature',
'properties': OrderedDict([('FID', ct),
('YEAR', year),
('IType', f['properties']['IType'])]),
'geometry': f['geometry']}
dst.write(out_feat)
ct += 1
if __name__ == '__main__':
in_ = '/media/hdisk/itype/mt_flu/raw_flu_wgs'
out_ = '/media/hdisk/itype/mt_flu/flu_itype'
_shapes = sorted([os.path.join(in_, x) for x in os.listdir(in_) if x.endswith('.shp')])
for s in _shapes:
yr = int(s.split('.')[0][-4:])
outfile = os.path.join(out_, 'mt_itype_{}.shp'.format(yr))
print(outfile)
process_mt(s, yr, out_file=outfile)
# ========================= EOF ==================================================================== | en | 0.354309 | # ========================= EOF ==================================================================== | 2.464518 | 2 |
controlling/Dummies/DummyMovementEngine.py | wtjerry/prenNetworkConnection | 0 | 6616913 | from concurrent.futures import ThreadPoolExecutor
from time import sleep
from controlling.AsyncProcessor import AsyncProcessor
class DummyMovementEngine:
x_pos = 0
speed = 0
def __init__(self):
self.is_moving = False
self._executor = AsyncProcessor(ThreadPoolExecutor(max_workers=2))
def start(self, speed):
print("Started to move at speed", speed)
self.set_speed(speed)
self.is_moving = True
self._executor.enqueue(self._calc_x)
def stop(self):
print("stopped moving")
self.is_moving = False
def set_speed(self, speed):
self.speed = (speed*10) - 7
def _calc_x(self):
while self.is_moving:
self.x_pos += self.speed
sleep(0.025)
def get_x(self):
return self.x_pos
| from concurrent.futures import ThreadPoolExecutor
from time import sleep
from controlling.AsyncProcessor import AsyncProcessor
class DummyMovementEngine:
x_pos = 0
speed = 0
def __init__(self):
self.is_moving = False
self._executor = AsyncProcessor(ThreadPoolExecutor(max_workers=2))
def start(self, speed):
print("Started to move at speed", speed)
self.set_speed(speed)
self.is_moving = True
self._executor.enqueue(self._calc_x)
def stop(self):
print("stopped moving")
self.is_moving = False
def set_speed(self, speed):
self.speed = (speed*10) - 7
def _calc_x(self):
while self.is_moving:
self.x_pos += self.speed
sleep(0.025)
def get_x(self):
return self.x_pos
| none | 1 | 3.353285 | 3 | |
Stats.py | matteozavatteri/cstnud-miner | 0 | 6616914 | import math
from statistics import mean
MetricNames = ['MT', 'NC', 'NM', 'CR', 'TWC', 'TDC']
if __name__ == "__main__":
for network in ["stn","stnd","stnu","cstn","stnud","cstnd","cstnu","cstnud"]:
Metrics = dict()
Metrics['MT'] = list()
Metrics['NC'] = list()
Metrics['NM'] = list()
Metrics['TWC'] = list()
Metrics['TDC'] = list()
Metrics['CR'] = list()
with open(f"mined/stats/{network}", "r") as f:
for line in f:
(log,S,MT,NC,NM,WC,TWC,DC,TDC) = tuple(line.strip().split())
S = int(S)
Metrics['MT'].append(float(MT))
NC = int(NC)
Metrics['NC'].append(NC)
NM = int(NM)
Metrics['NM'].append(NM)
WC = int(WC)
Metrics['TWC'].append(float(TWC))
DC = int(DC)
Metrics['TDC'].append(float(TDC))
assert(S == 1)
assert(WC == 1)
assert(DC == 1)
Metrics['CR'].append(100 - ((NM * 100) / NC))
#print(f"{log}")
print(f"{network}")
for m in MetricNames:
print("{}=({},{},{})".format(m, round(min(Metrics[m]),3), round(mean(Metrics[m]),3),round(max(Metrics[m]),3)))
| import math
from statistics import mean
MetricNames = ['MT', 'NC', 'NM', 'CR', 'TWC', 'TDC']
if __name__ == "__main__":
for network in ["stn","stnd","stnu","cstn","stnud","cstnd","cstnu","cstnud"]:
Metrics = dict()
Metrics['MT'] = list()
Metrics['NC'] = list()
Metrics['NM'] = list()
Metrics['TWC'] = list()
Metrics['TDC'] = list()
Metrics['CR'] = list()
with open(f"mined/stats/{network}", "r") as f:
for line in f:
(log,S,MT,NC,NM,WC,TWC,DC,TDC) = tuple(line.strip().split())
S = int(S)
Metrics['MT'].append(float(MT))
NC = int(NC)
Metrics['NC'].append(NC)
NM = int(NM)
Metrics['NM'].append(NM)
WC = int(WC)
Metrics['TWC'].append(float(TWC))
DC = int(DC)
Metrics['TDC'].append(float(TDC))
assert(S == 1)
assert(WC == 1)
assert(DC == 1)
Metrics['CR'].append(100 - ((NM * 100) / NC))
#print(f"{log}")
print(f"{network}")
for m in MetricNames:
print("{}=({},{},{})".format(m, round(min(Metrics[m]),3), round(mean(Metrics[m]),3),round(max(Metrics[m]),3)))
| en | 0.352739 | #print(f"{log}") | 3.101378 | 3 |
cycle_2018/migrations/0031_auto_20180813_1622.py | RobBickel/nyt-fec | 17 | 6616915 | # Generated by Django 2.0.1 on 2018-08-13 16:22
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cycle_2018', '0030_filing_computed_ie_total_for_f24'),
]
operations = [
migrations.AddField(
model_name='scheduleb',
name='address_search',
field=django.contrib.postgres.search.SearchVectorField(null=True),
),
migrations.AddIndex(
model_name='scheduleb',
index=django.contrib.postgres.indexes.GinIndex(fields=['address_search'], name='fec_schedul_address_5ea139_gin'),
),
migrations.RunSQL(
sql='''
CREATE TRIGGER expenditure_address_update_trigger
BEFORE INSERT OR UPDATE OF payee_street_1, payee_street_2, payee_city, payee_zip, address_search
ON fec_scheduleb
FOR EACH ROW EXECUTE PROCEDURE
tsvector_update_trigger(
address_search, 'pg_catalog.english', payee_street_1, payee_street_2, payee_city, payee_zip);
UPDATE fec_scheduleb SET address_search = NULL;
''',
reverse_sql='''
DROP TRIGGER IF EXISTS expenditure_address_update_trigger
ON fec_scheduleb;
'''),
]
| # Generated by Django 2.0.1 on 2018-08-13 16:22
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cycle_2018', '0030_filing_computed_ie_total_for_f24'),
]
operations = [
migrations.AddField(
model_name='scheduleb',
name='address_search',
field=django.contrib.postgres.search.SearchVectorField(null=True),
),
migrations.AddIndex(
model_name='scheduleb',
index=django.contrib.postgres.indexes.GinIndex(fields=['address_search'], name='fec_schedul_address_5ea139_gin'),
),
migrations.RunSQL(
sql='''
CREATE TRIGGER expenditure_address_update_trigger
BEFORE INSERT OR UPDATE OF payee_street_1, payee_street_2, payee_city, payee_zip, address_search
ON fec_scheduleb
FOR EACH ROW EXECUTE PROCEDURE
tsvector_update_trigger(
address_search, 'pg_catalog.english', payee_street_1, payee_street_2, payee_city, payee_zip);
UPDATE fec_scheduleb SET address_search = NULL;
''',
reverse_sql='''
DROP TRIGGER IF EXISTS expenditure_address_update_trigger
ON fec_scheduleb;
'''),
]
| en | 0.557996 | # Generated by Django 2.0.1 on 2018-08-13 16:22 CREATE TRIGGER expenditure_address_update_trigger BEFORE INSERT OR UPDATE OF payee_street_1, payee_street_2, payee_city, payee_zip, address_search ON fec_scheduleb FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger( address_search, 'pg_catalog.english', payee_street_1, payee_street_2, payee_city, payee_zip); UPDATE fec_scheduleb SET address_search = NULL; DROP TRIGGER IF EXISTS expenditure_address_update_trigger ON fec_scheduleb; | 1.862768 | 2 |
gtrends/gtrends.py | amccreevey/GTrends | 0 | 6616916 | <gh_stars>0
from pytrends.request import TrendReq
import pandas as pd
def returndict(kw_list):
pytrends = TrendReq(hl='en-US', tz=360)
df_kw_list = dict()
for item in kw_list:
pytrends.build_payload([item], cat=0, timeframe='today 3-m', geo='', gprop='')
df_kw_list[item] = pytrends.interest_over_time().drop(columns="isPartial")
return df_kw_list
dFrames = returndict(["Electroneum", "Bitcoin", "Ethereum"])
print(dFrames["Electroneum"].loc[dFrames["Electroneum"].idxmax()].index.get_values())
print(dFrames["Bitcoin"].loc[dFrames["Bitcoin"].idxmax()])
print(dFrames["Bitcoin"].loc[dFrames["Bitcoin"].idxmax()].index.get_values())
print(dFrames["Ethereum"].loc[dFrames["Ethereum"].idxmax()])
print(dFrames["Ethereum"].loc[dFrames["Ethereum"].idxmax()].index.get_values())
| from pytrends.request import TrendReq
import pandas as pd
def returndict(kw_list):
pytrends = TrendReq(hl='en-US', tz=360)
df_kw_list = dict()
for item in kw_list:
pytrends.build_payload([item], cat=0, timeframe='today 3-m', geo='', gprop='')
df_kw_list[item] = pytrends.interest_over_time().drop(columns="isPartial")
return df_kw_list
dFrames = returndict(["Electroneum", "Bitcoin", "Ethereum"])
print(dFrames["Electroneum"].loc[dFrames["Electroneum"].idxmax()].index.get_values())
print(dFrames["Bitcoin"].loc[dFrames["Bitcoin"].idxmax()])
print(dFrames["Bitcoin"].loc[dFrames["Bitcoin"].idxmax()].index.get_values())
print(dFrames["Ethereum"].loc[dFrames["Ethereum"].idxmax()])
print(dFrames["Ethereum"].loc[dFrames["Ethereum"].idxmax()].index.get_values()) | none | 1 | 2.50447 | 3 | |
macronizer.py | triethuynh2301/macronizer-project | 0 | 6616917 | <reponame>triethuynh2301/macronizer-project<filename>macronizer.py
from macronizer_cores import create_app
from config import ProductionConfig, DevelopmentConfig, FLASK_ENV
# determine the environment to run the app (development by default)
app_config = DevelopmentConfig
if FLASK_ENV == 'production':
app_config = ProductionConfig
app = create_app(app_config)
if __name__ == '__main__':
app.run() | from macronizer_cores import create_app
from config import ProductionConfig, DevelopmentConfig, FLASK_ENV
# determine the environment to run the app (development by default)
app_config = DevelopmentConfig
if FLASK_ENV == 'production':
app_config = ProductionConfig
app = create_app(app_config)
if __name__ == '__main__':
app.run() | en | 0.891654 | # determine the environment to run the app (development by default) | 1.768343 | 2 |
widgetbox/migrations/0008_auto_20150710_1450.py | logithr/djangocms-widgetbox | 0 | 6616918 | <reponame>logithr/djangocms-widgetbox
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0011_auto_20150419_1006'),
('widgetbox', '0007_auto_20150522_1634'),
]
operations = [
migrations.AddField(
model_name='galleryimage',
name='link_custom',
field=models.CharField(max_length=400, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='galleryimage',
name='link_to_page',
field=cms.models.fields.PageField(blank=True, to='cms.Page', null=True),
preserve_default=True,
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0011_auto_20150419_1006'),
('widgetbox', '0007_auto_20150522_1634'),
]
operations = [
migrations.AddField(
model_name='galleryimage',
name='link_custom',
field=models.CharField(max_length=400, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='galleryimage',
name='link_to_page',
field=cms.models.fields.PageField(blank=True, to='cms.Page', null=True),
preserve_default=True,
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.58421 | 2 |
meshreg/models/manoutils.py | pgrady3/handobjectconsist | 103 | 6616919 | <reponame>pgrady3/handobjectconsist
import torch
from manopth import manolayer
def get_closed_faces():
mano_layer = manolayer.ManoLayer(
joint_rot_mode="axisang", use_pca=False, mano_root="assets/mano", center_idx=None, flat_hand_mean=True
)
close_faces = torch.Tensor(
[
[92, 38, 122],
[234, 92, 122],
[239, 234, 122],
[279, 239, 122],
[215, 279, 122],
[215, 122, 118],
[215, 118, 117],
[215, 117, 119],
[215, 119, 120],
[215, 120, 108],
[215, 108, 79],
[215, 79, 78],
[215, 78, 121],
[214, 215, 121],
]
)
closed_faces = torch.cat([mano_layer.th_faces, close_faces.long()])
# Indices of faces added during closing --> should be ignored as they match the wrist
# part of the hand, which is not an external surface of the human
# Valid because added closed faces are at the end
hand_ignore_faces = [1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551]
return closed_faces, hand_ignore_faces
| import torch
from manopth import manolayer
def get_closed_faces():
mano_layer = manolayer.ManoLayer(
joint_rot_mode="axisang", use_pca=False, mano_root="assets/mano", center_idx=None, flat_hand_mean=True
)
close_faces = torch.Tensor(
[
[92, 38, 122],
[234, 92, 122],
[239, 234, 122],
[279, 239, 122],
[215, 279, 122],
[215, 122, 118],
[215, 118, 117],
[215, 117, 119],
[215, 119, 120],
[215, 120, 108],
[215, 108, 79],
[215, 79, 78],
[215, 78, 121],
[214, 215, 121],
]
)
closed_faces = torch.cat([mano_layer.th_faces, close_faces.long()])
# Indices of faces added during closing --> should be ignored as they match the wrist
# part of the hand, which is not an external surface of the human
# Valid because added closed faces are at the end
hand_ignore_faces = [1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551]
return closed_faces, hand_ignore_faces | en | 0.985493 | # Indices of faces added during closing --> should be ignored as they match the wrist # part of the hand, which is not an external surface of the human # Valid because added closed faces are at the end | 2.347466 | 2 |
tilse/evaluation/rouge.py | arianpasquali/tilse | 0 | 6616920 | from __future__ import division
import collections
import os
import nltk
import numpy
from scipy import optimize
# import pyrouge
from tilse.evaluation import util
from rouge import Rouge
class TimelineRougeEvaluator:
""" Evaluate timelines with respect to a set of reference timelines.
This class implements several evaluation metrics based on ROUGE to
compare predicted timelines with a set of reference timelines. The
measures are described in Martschat and Markert (2017).
References:
<NAME> and <NAME> (2017).
Improving ROUGE for Timeline Summarization.
In Proceedings of the 15th Conference of the European Chapter of the
Association for Computational Linguistics, volume 2: Short Papers,
Valencia, Spain, 3-7 April 2017.
Attributes:
measures (set(str)): ROUGE measures to use when computing scores.
rouge (pyrouge.Rouge155 or RougeReimplementation): Object to perform
ROUGE computation.
beta (float): Value controlling the recall/precision trade-off when
computing F_beta scores. Defaults to 1.
"""
def __init__(self, measures={"rouge_1"}, beta=1):
""" Initialize the evaluator.
Args:
measures (set(str)): ROUGE measures to use when computing scores.
Defaults to `rouge_1`.
rouge_computation (str): Whether to use the original ROUGE perl
script ("original") or an approximate Python reimplementation
("reimpl"). Defaults to "reimpl".
beta (float): Value controlling the recall/precision trade-off when
computing F_beta scores. Defaults to 1.
"""
self.measures = measures
self.beta = beta
self.rouge = RougeReimplementation()
def evaluate_concat(self, predicted_timeline, reference_timelines):
""" Evaluate a predicted timeline w.r.t. a set of reference timelines using the
'concat' ROUGE variant.
This variant first concatenates all daily summaries of the respective timelines. The
resulting documents are then evaluated using the ROUGE measure.
Args:
predicted_timeline (data.timelines.Timeline): A timeline.
reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.
Returns:
A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
e.g.
{"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
"""
pred_sents = []
for date in sorted(list(predicted_timeline.get_dates())):
pred_sents.extend([sent.split() for sent in predicted_timeline[date]])
ref_sents = {}
for i, timeline in enumerate(reference_timelines.timelines):
ref_sents[str(i)] = []
timeline_dates = sorted(list(timeline.get_dates()))
for date in timeline_dates:
ref_sents[str(i)].extend([sent.split() for sent in timeline[date]])
scores = self._get_rouge_counts(pred_sents, ref_sents)
output_scores = {}
for measure in self.measures:
prec = scores[measure]["prec_num"]
rec = scores[measure]["rec_num"]
if(scores[measure]["prec_denom"] > 0):
prec = scores[measure]["prec_num"] / scores[measure]["prec_denom"]
if(scores[measure]["rec_denom"] > 0):
rec = scores[measure]["rec_num"] / scores[measure]["rec_denom"]
output_scores[measure] = {
"precision": prec,
"recall": rec,
"f_score": util.get_f_score(prec, rec, beta=self.beta)
}
return output_scores
def evaluate_agreement(self, predicted_timeline, reference_timelines):
    """ Evaluate a predicted timeline w.r.t. a set of reference timelines using the
    'agreement' ROUGE variant.

    This variant compares the daily summaries of a date if the date appears in both the
    predicted timeline and in one of the reference timelines.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

    Returns:
        A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
        to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
        e.g.
            {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
    """
    # Per-measure lists of per-date counts; summed at the end (micro average).
    precision_numerator = collections.defaultdict(list)
    precision_denominator = collections.defaultdict(list)
    recall_numerator = collections.defaultdict(list)
    recall_denominator = collections.defaultdict(list)
    pred_dates = predicted_timeline.get_dates()
    ref_dates = reference_timelines.get_dates()
    # Every date that occurs in either side is scored; membership below
    # decides whether it contributes to precision, recall, or both.
    all_dates = pred_dates.union(ref_dates)
    for date in all_dates:
        # Indexing the ground truth with a date yields per-timeline daily
        # summaries (assumed empty for timelines lacking that date —
        # TODO confirm against data.timelines.GroundTruth).
        temp_groundtruth = reference_timelines[date]
        groundtruth = {}
        for name in temp_groundtruth:
            groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
        scores = self._get_rouge_counts(
            [sent.split() for sent in predicted_timeline[date]],
            groundtruth
        )
        for measure in self.measures:
            # Precision only counts dates the system actually predicted;
            # recall only counts dates present in some reference.
            if date in pred_dates:
                precision_numerator[measure].append(scores[measure]["prec_num"])
                precision_denominator[measure].append(scores[measure]["prec_denom"])
            if date in ref_dates:
                recall_numerator[measure].append(scores[measure]["rec_num"])
                recall_denominator[measure].append(scores[measure]["rec_denom"])
    output_scores = {}
    for measure in self.measures:
        prec_denom_sum = sum(precision_denominator[measure])
        if prec_denom_sum == 0:
            prec = 0
        else:
            prec = sum(precision_numerator[measure]) / prec_denom_sum
        rec_denom_sum = sum(recall_denominator[measure])
        if rec_denom_sum == 0:
            rec = 0
        else:
            rec = sum(recall_numerator[measure]) / rec_denom_sum
        output_scores[measure] = {
            "precision": prec,
            "recall": rec,
            "f_score": util.get_f_score(prec, rec, beta=self.beta)
        }
    return output_scores
def evaluate_align_date_costs(self, predicted_timeline, reference_timelines):
    """ Evaluate a predicted timeline against reference timelines using an
    injective date alignment with date-distance costs.

    Dates of the predicted and reference timelines are first aligned
    one-to-one, where the cost of aligning two dates grows with their
    distance. The summaries of aligned dates are then compared with ROUGE
    and weighted by date distance. This is the variant called 'align' in
    the EACL'17 paper.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

    Returns:
        A dict(str, dict(str, str)) mapping each ROUGE measure in
        `self.measures` to a dict with keys 'precision', 'recall' and
        'f_score', e.g.
            {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
    """
    return self._evaluate_per_day_mapping_micro(
        predicted_timeline,
        reference_timelines,
        compute_costs=TimelineRougeEvaluator._get_date_costs,
        optimize_assignment=optimize.linear_sum_assignment,
    )
def evaluate_align_date_content_costs(self, predicted_timeline, reference_timelines):
    """ Evaluate a predicted timeline against reference timelines using an
    injective date alignment with date-distance and content-overlap costs.

    Dates are aligned one-to-one, where the cost of aligning two dates
    depends both on their distance and on the (approximate ROUGE-1)
    overlap of their summaries. The summaries of aligned dates are then
    compared with ROUGE and weighted by date distance. This is the
    variant called 'align+' in the EACL'17 paper.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

    Returns:
        A dict(str, dict(str, str)) mapping each ROUGE measure in
        `self.measures` to a dict with keys 'precision', 'recall' and
        'f_score', e.g.
            {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
    """
    return self._evaluate_per_day_mapping_micro(
        predicted_timeline,
        reference_timelines,
        compute_costs=TimelineRougeEvaluator._get_date_content_costs,
        optimize_assignment=optimize.linear_sum_assignment,
    )
def evaluate_align_date_content_costs_many_to_one(
        self,
        predicted_timeline,
        reference_timelines):
    """ Evaluate a predicted timeline against reference timelines using a
    many-to-one date alignment with date-distance and content-overlap costs.

    Unlike the injective variant, several dates may be aligned to the same
    counterpart: each date is simply mapped to its cheapest partner, where
    the cost depends on date distance and (approximate ROUGE-1) content
    overlap. Aligned summaries are then compared with ROUGE and weighted
    by date distance. This is the variant called 'align+ m:1' in the
    EACL'17 paper.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

    Returns:
        A dict(str, dict(str, str)) mapping each ROUGE measure in
        `self.measures` to a dict with keys 'precision', 'recall' and
        'f_score', e.g.
            {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
    """
    return self._evaluate_per_day_mapping_micro(
        predicted_timeline,
        reference_timelines,
        compute_costs=TimelineRougeEvaluator._get_date_content_costs,
        optimize_assignment=TimelineRougeEvaluator._assign_to_min_cost,
    )
def evaluate_all(self, predicted_timeline, reference_timelines):
    """ Evaluate a predicted timeline against reference timelines with all
    metrics: 'concat', 'agreement', 'align', 'align+' and 'align+ m:1'.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

    Returns:
        A collections.OrderedDict mapping a metric identifier to the
        corresponding dict(str, dict(str, str)) of precision/recall/f
        scores for each ROUGE measure in `self.measures`.

        Identifiers (in order):
            concat, agreement, align_date_costs, align_date_content_costs,
            align_date_content_costs_many_to_one

        One example entry is
            {"concat": {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}}
    """
    # Metric identifier -> bound evaluation method; order defines the
    # order of the returned OrderedDict.
    metric_functions = [
        ("concat", self.evaluate_concat),
        ("agreement", self.evaluate_agreement),
        ("align_date_costs", self.evaluate_align_date_costs),
        ("align_date_content_costs", self.evaluate_align_date_content_costs),
        ("align_date_content_costs_many_to_one",
         self.evaluate_align_date_content_costs_many_to_one),
    ]
    return collections.OrderedDict(
        (metric_name, evaluate(predicted_timeline, reference_timelines))
        for metric_name, evaluate in metric_functions
    )
def _evaluate_per_day_mapping_micro(
        self,
        predicted_timeline,
        reference_timelines,
        compute_costs,
        optimize_assignment):
    """ Evaluate a predicted timeline by aligning its dates to reference dates.

    Cost matrices for both evaluation directions are computed via
    `compute_costs` and turned into date alignments via
    `optimize_assignment`. ROUGE counts of aligned daily summaries are
    weighted by inverse date distance and aggregated micro-average style;
    unaligned dates are scored against empty summaries so that they still
    contribute to the denominators.

    Args:
        predicted_timeline (data.timelines.Timeline): A timeline.
        reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.
        compute_costs (func): Maps (pred_dates, ref_dates, timeline,
            ground_truth, axis) to a cost matrix (axis=0: predicted dates
            in rows for precision; axis=1: reference dates in rows for recall).
        optimize_assignment (func): Maps a cost matrix to a pair of aligned
            (row_indices, column_indices) arrays.

    Returns:
        A dict(str, dict(str, str)) object mapping each ROUGE measure in
        `self.measures` to a dict that maps 'precision', 'recall' and
        'f_score' to the corresponding values.
    """
    precision_numerator = collections.defaultdict(list)
    precision_denominator = collections.defaultdict(list)
    recall_numerator = collections.defaultdict(list)
    recall_denominator = collections.defaultdict(list)
    pred_dates = sorted(list(predicted_timeline.get_dates()))
    ref_dates = sorted(list(reference_timelines.get_dates()))
    # Fix: removed leftover debug print statements (one of which even
    # printed rec_costs under the label "prec_costs").
    prec_costs = compute_costs(pred_dates, ref_dates, predicted_timeline,
                               reference_timelines, axis=0)
    rec_costs = compute_costs(pred_dates, ref_dates, predicted_timeline,
                              reference_timelines, axis=1)
    prec_row, prec_col = optimize_assignment(prec_costs)
    rec_row, rec_col = optimize_assignment(rec_costs)
    # precision: aligned predicted dates, weighted by inverse date distance
    for row, col in zip(prec_row, prec_col):
        pred_date = pred_dates[row]
        ref_date = ref_dates[col]
        temp_groundtruth = reference_timelines[ref_date]
        groundtruth = {}
        for name in temp_groundtruth:
            groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
        scores = self._get_rouge_counts(
            [sent.split() for sent in predicted_timeline[pred_date]],
            groundtruth
        )
        for measure in self.measures:
            precision_numerator[measure].append(
                (1 / (abs(pred_date.toordinal() - ref_date.toordinal()) + 1)) * scores[measure]["prec_num"])
            precision_denominator[measure].append(scores[measure]["prec_denom"])
    # unaligned predicted dates are scored against empty references
    matched_prec = set(list(prec_row))
    for i, date in enumerate(pred_dates):
        if i not in matched_prec:
            pred_date = pred_dates[i]
            scores = self._get_rouge_counts(
                [sent.split() for sent in predicted_timeline[pred_date]],
                {str(i): [[""]] for i, _ in enumerate(reference_timelines.timelines)}
            )
            for measure in self.measures:
                precision_numerator[measure].append(scores[measure]["prec_num"])
                precision_denominator[measure].append(scores[measure]["prec_denom"])
    # recall: aligned reference dates (note: rows are reference dates here)
    for row, col in zip(rec_row, rec_col):
        pred_date = pred_dates[col]
        ref_date = ref_dates[row]
        temp_groundtruth = reference_timelines[ref_date]
        groundtruth = {}
        for name in temp_groundtruth:
            groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
        scores = self._get_rouge_counts(
            [sent.split() for sent in predicted_timeline[pred_date]],
            groundtruth
        )
        for measure in self.measures:
            recall_numerator[measure].append(
                (1 / (abs(pred_date.toordinal() - ref_date.toordinal()) + 1)) * scores[measure]["rec_num"])
            recall_denominator[measure].append(scores[measure]["rec_denom"])
    # unaligned reference dates are scored against an empty prediction
    matched_rec = set(list(rec_row))
    for i, date in enumerate(ref_dates):
        if i not in matched_rec:
            ref_date = ref_dates[i]
            temp_groundtruth = reference_timelines[ref_date]
            groundtruth = {}
            for name in temp_groundtruth:
                groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
            scores = self._get_rouge_counts(
                [[""]],
                groundtruth
            )
            for measure in self.measures:
                recall_numerator[measure].append(scores[measure]["rec_num"])
                recall_denominator[measure].append(scores[measure]["rec_denom"])
    output_scores = {}
    for measure in self.measures:
        prec_denom_sum = sum(precision_denominator[measure])
        if prec_denom_sum == 0:
            prec = 0
        else:
            prec = sum(precision_numerator[measure]) / prec_denom_sum
        rec_denom_sum = sum(recall_denominator[measure])
        if rec_denom_sum == 0:
            rec = 0
        else:
            rec = sum(recall_numerator[measure]) / rec_denom_sum
        output_scores[measure] = {
            "precision": prec,
            "recall": rec,
            "f_score": util.get_f_score(prec, rec, beta=self.beta)
        }
    return output_scores
@staticmethod
def _get_date_costs(source_dates, target_dates, tl, ref_tls, axis=0):
costs = []
if axis == 0:
(a, b) = (source_dates, target_dates)
elif axis == 1:
(a, b) = (target_dates, source_dates)
for s_date in a:
to_add = []
for t_date in b:
to_add.append(1 - 1 / (abs(s_date.toordinal() - t_date.toordinal()) + 1))
costs.append(to_add)
return numpy.array(costs)
@staticmethod
def _get_date_content_costs(
        source_dates,
        target_dates,
        tl,
        ref_tls,
        axis=0):
    """ Compute an alignment cost matrix combining date distance and content overlap.

    Each entry is the product of a date-distance factor
    `1 - 1 / (day difference + 1)` and a content factor
    `1 - util.compute_rouge_approximation(...)` (lower cost for more
    similar summaries — semantics of the helper assumed; confirm against
    tilse.evaluation.util).

    Args:
        source_dates: Dates of the predicted timeline.
        target_dates: Dates of the reference timelines.
        tl (data.timelines.Timeline): The predicted timeline.
        ref_tls (data.timelines.GroundTruth): The reference timelines.
        axis (int): 0 puts source dates in rows, 1 puts target dates in rows.
            Defaults to 0. NOTE(review): any other value leads to an
            UnboundLocalError below.

    Returns:
        numpy.ndarray: The cost matrix.
    """
    costs = []
    if axis == 0:
        (a, b) = (source_dates, target_dates)
    elif axis == 1:
        (a, b) = (target_dates, source_dates)
    for s_date in a:
        to_add = []
        for t_date in b:
            date_factor = 1 - 1 / (abs(s_date.toordinal() - t_date.toordinal()) + 1)
            # Regardless of the row/column orientation, the content factor
            # always compares predicted summary vs. reference summaries.
            date_pred = s_date
            date_ref = t_date
            if axis == 1:
                date_pred = t_date
                date_ref = s_date
            content_factor = 1 - util.compute_rouge_approximation(
                tl[date_pred],
                [ref_tls[date_ref][name] for name in ref_tls[date_ref]]
            )
            to_add.append(date_factor * content_factor)
        costs.append(to_add)
    return numpy.array(costs)
@staticmethod
def _assign_to_min_cost(cost_matrix):
row_indices = []
column_indices = []
for i, row in enumerate(cost_matrix):
row_indices.append(i)
column_indices.append(row.argmin())
return numpy.array(row_indices), numpy.array(column_indices)
def _get_rouge_counts(self, pred, ref):
scores = {}
temp_scores = self.rouge.score_summary(pred, ref)
for measure in self.measures:
scores[measure] = {}
scores[measure]["prec_num"] = temp_scores[measure + "_h_count"]
scores[measure]["prec_denom"] = temp_scores[measure + "_p_count"]
scores[measure]["rec_num"] = temp_scores[measure + "_h_count"]
scores[measure]["rec_denom"] = temp_scores[measure + "_m_count"]
return scores
class RougeReimplementation:
    """
    An approximate reimplementation of ROUGE-1 and ROUGE-2.

    It does not exactly match scores from the Perl script. It therefore
    should not be used for computing scores on development and test
    sets when preparing results for papers or for comparison to other
    systems. However, due to improved speed it is useful during development
    (scores also should not differ too much from the original
    implementation).

    Attributes:
        stem (bool): Whether to stem words before evaluation.
        ignore_stopwords (bool): Whether to ignore stopwords before
            evaluation.
        porter_stemmer (PorterStemmer): nltk's implementation of the
            Porter stemmer.
        stem_function (func): Utility function for performing stemming.
        stopwords (set(str)): Stopwords, set to the list used in
            ROUGE's Perl evaluation script.
    """

    def __init__(self, stem=True, ignore_stopwords=True):
        """
        Initializes ROUGE reimplementation.

        Params:
            stem (bool): Whether to stem words before evaluation. Defaults
                to True.
            ignore_stopwords (bool): Whether to ignore stopwords before
                evaluation. Defaults to True.
        """
        self.stem = stem
        self.ignore_stopwords = ignore_stopwords
        self.stopwords = set()
        self.porter_stemmer = nltk.stem.PorterStemmer()
        self.stem_function = self._identity
        if stem:
            self.stem_function = self._robust_porter_stemmer
        if ignore_stopwords:
            # Stopword list shipped next to this module (the SMART list, as
            # used by the ROUGE Perl script).
            dir_path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(dir_path, "smart_common_words.txt"),
                      encoding="utf-8") as my_file:
                self.stopwords = set(my_file.read().splitlines())

    def score_summary(self, summary, references):
        """
        Scores a summary with ROUGE-1 and ROUGE-2.

        Params:
            summary (list(list(str))): A list of tokenized sentences,
                representing a predicted summary.
            references dict(int, list(list(str))): A mapping of integers
                to lists of tokenized sentences, representing reference
                summaries.

        Returns:
            A mapping from strings to integers, with the
            following meaning (same representation as pyrouge):
                "rouge_1_h_count": ROUGE-1 recall/precision numerator,
                "rouge_1_p_count": ROUGE-1 precision denominator,
                "rouge_1_m_count": ROUGE-1 recall denominator.
            Analogous for ROUGE-2.
        """
        punctuation = [".", ",", ";", ":", "``", "''", "-", '"']
        to_ignore = self.stopwords.union(punctuation)
        # Lowercase, drop stopwords/punctuation, then (optionally) stem.
        pred_tokens_lowercased = [self.stem_function(k.lower()) for sent in summary for k in sent
                                  if k.lower() not in to_ignore]
        ref_tokens_lowercased = {}
        for i, ref_summary in references.items():
            ref_tokens_lowercased[i] = [self.stem_function(k.lower()) for sent in ref_summary for k
                                        in sent if k.lower() not in to_ignore]
        eval_scores = {}
        eval_scores.update(
            self._rouge_1(pred_tokens_lowercased, ref_tokens_lowercased))
        eval_scores.update(
            self._rouge_2(pred_tokens_lowercased, ref_tokens_lowercased))
        return eval_scores

    def _identity(self, x):
        # No-op stand-in for the stemmer when stemming is disabled.
        return x

    def _robust_porter_stemmer(self, x):
        # The nltk Porter stemmer raises IndexError on some inputs; fall
        # back to the unstemmed token in that case.
        stem = x
        try:
            stem = self.porter_stemmer.stem(x)
        except IndexError:
            pass
        return stem

    @staticmethod
    def _overlap_counts(pred_counts, ref_counts):
        # Shared clipped-match computation for ROUGE-1 and ROUGE-2
        # (previously duplicated in both methods). `match` sums, over all
        # references, the clipped count of each predicted n-gram.
        match = 0
        for unit in pred_counts:
            match += sum([min(pred_counts[unit], ref_counts[x][unit]) for x in
                          ref_counts.keys()])
        prec_denom = (len(ref_counts.keys()) * sum(pred_counts.values()))
        recall_denom = sum([sum(ref_counts[x].values()) for x in ref_counts])
        return match, prec_denom, recall_denom

    def _rouge_1(self, pred_tokens, ref_tokens):
        """Approximate ROUGE-1 (unigram) counts; see score_summary for keys."""
        pred_counts = collections.Counter(pred_tokens)
        ref_counts = {}
        for i, tokens in ref_tokens.items():
            ref_counts[i] = collections.Counter(tokens)
        match, prec_denom, recall_denom = self._overlap_counts(pred_counts, ref_counts)
        return {
            "rouge_1_h_count": match,
            "rouge_1_p_count": prec_denom,
            "rouge_1_m_count": recall_denom,
        }

    def _rouge_2(self, pred_tokens, ref_tokens):
        """Approximate ROUGE-2 (bigram) counts; see score_summary for keys."""
        # Fix: the old inline comment wrongly said "ROUGE-1" here.
        pred_counts = collections.Counter(zip(pred_tokens, pred_tokens[1:]))
        ref_counts = {}
        for i, tokens in ref_tokens.items():
            ref_counts[i] = collections.Counter(zip(tokens, tokens[1:]))
        match, prec_denom, recall_denom = self._overlap_counts(pred_counts, ref_counts)
        return {
            "rouge_2_h_count": match,
            "rouge_2_p_count": prec_denom,
            "rouge_2_m_count": recall_denom,
        }
from __future__ import division
import collections
import os
import nltk
import numpy
from scipy import optimize
# import pyrouge
from tilse.evaluation import util
from rouge import Rouge
class TimelineRougeEvaluator:
    """ Evaluate timelines with respect to a set of reference timelines.

    This class implements several evaluation metrics based on ROUGE to
    compare predicted timelines with a set of reference timelines. The
    measures are described in Martschat and Markert (2017).

    References:
        <NAME> and <NAME> (2017).
        Improving ROUGE for Timeline Summarization.
        In Proceedings of the 15th Conference of the European Chapter of the
        Association for Computational Linguistics, volume 2: Short Papers,
        Valencia, Spain, 3-7 April 2017.

    Attributes:
        measures (set(str)): ROUGE measures to use when computing scores.
        rouge (pyrouge.Rouge155 or RougeReimplementation): Object to perform
            ROUGE computation.
        beta (float): Value controlling the recall/precision trade-off when
            computing F_beta scores. Defaults to 1.
    """
    def __init__(self, measures={"rouge_1"}, beta=1):
        """ Initialize the evaluator.

        Args:
            measures (set(str)): ROUGE measures to use when computing scores.
                Defaults to `rouge_1`.
                NOTE(review): mutable default argument — the same set object
                is shared across calls; consider a None sentinel.
            rouge_computation (str): Whether to use the original ROUGE perl
                script ("original") or an approximate Python reimplementation
                ("reimpl"). Defaults to "reimpl".
                NOTE(review): this documented parameter does not exist in the
                signature; only the reimplementation is used.
            beta (float): Value controlling the recall/precision trade-off when
                computing F_beta scores. Defaults to 1.
        """
        self.measures = measures
        self.beta = beta
        self.rouge = RougeReimplementation()
    def evaluate_concat(self, predicted_timeline, reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using the
        'concat' ROUGE variant.

        This variant first concatenates all daily summaries of the respective timelines. The
        resulting documents are then evaluated using the ROUGE measure.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
            to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
            e.g.
                {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
        """
        # Concatenate all daily summaries of the prediction, in date order.
        pred_sents = []
        for date in sorted(list(predicted_timeline.get_dates())):
            pred_sents.extend([sent.split() for sent in predicted_timeline[date]])
        # One concatenated document per reference timeline, keyed by index.
        ref_sents = {}
        for i, timeline in enumerate(reference_timelines.timelines):
            ref_sents[str(i)] = []
            timeline_dates = sorted(list(timeline.get_dates()))
            for date in timeline_dates:
                ref_sents[str(i)].extend([sent.split() for sent in timeline[date]])
        scores = self._get_rouge_counts(pred_sents, ref_sents)
        output_scores = {}
        for measure in self.measures:
            # Fall back to the raw numerator when a denominator is zero.
            prec = scores[measure]["prec_num"]
            rec = scores[measure]["rec_num"]
            if(scores[measure]["prec_denom"] > 0):
                prec = scores[measure]["prec_num"] / scores[measure]["prec_denom"]
            if(scores[measure]["rec_denom"] > 0):
                rec = scores[measure]["rec_num"] / scores[measure]["rec_denom"]
            output_scores[measure] = {
                "precision": prec,
                "recall": rec,
                "f_score": util.get_f_score(prec, rec, beta=self.beta)
            }
        return output_scores
    def evaluate_agreement(self, predicted_timeline, reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using the
        'agreement' ROUGE variant.

        This variant compares the daily summaries of a date if the date appears in both the
        predicted timeline and in one of the reference timelines.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
            to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
            e.g.
                {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
        """
        precision_numerator = collections.defaultdict(list)
        precision_denominator = collections.defaultdict(list)
        recall_numerator = collections.defaultdict(list)
        recall_denominator = collections.defaultdict(list)
        pred_dates = predicted_timeline.get_dates()
        ref_dates = reference_timelines.get_dates()
        all_dates = pred_dates.union(ref_dates)
        for date in all_dates:
            temp_groundtruth = reference_timelines[date]
            groundtruth = {}
            for name in temp_groundtruth:
                groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
            scores = self._get_rouge_counts(
                [sent.split() for sent in predicted_timeline[date]],
                groundtruth
            )
            for measure in self.measures:
                # Precision counts only predicted dates; recall counts only
                # dates present in some reference timeline.
                if date in pred_dates:
                    precision_numerator[measure].append(scores[measure]["prec_num"])
                    precision_denominator[measure].append(scores[measure]["prec_denom"])
                if date in ref_dates:
                    recall_numerator[measure].append(scores[measure]["rec_num"])
                    recall_denominator[measure].append(scores[measure]["rec_denom"])
        output_scores = {}
        for measure in self.measures:
            prec_denom_sum = sum(precision_denominator[measure])
            if prec_denom_sum == 0:
                prec = 0
            else:
                prec = sum(precision_numerator[measure]) / prec_denom_sum
            rec_denom_sum = sum(recall_denominator[measure])
            if rec_denom_sum == 0:
                rec = 0
            else:
                rec = sum(recall_numerator[measure]) / rec_denom_sum
            output_scores[measure] = {
                "precision": prec,
                "recall": rec,
                "f_score": util.get_f_score(prec, rec, beta=self.beta)
            }
        return output_scores
    def evaluate_align_date_costs(self, predicted_timeline, reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using an injective alignment
        that uses costs based on date differences.

        This variant first aligns dates in predicted and reference timelines based on costs induced by
        date distance. In then compares the summaries of the aligned dates using ROUGE and weights the
        score by date distance.

        In our EACL'17 paper we denoted this variant as 'align'.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
            to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
            e.g.
                {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
        """
        return self._evaluate_per_day_mapping_micro(
            predicted_timeline,
            reference_timelines,
            TimelineRougeEvaluator._get_date_costs,
            optimize.linear_sum_assignment
        )
    def evaluate_align_date_content_costs(self, predicted_timeline, reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using an injective alignment
        that uses costs based on date differences and content overlap.

        This variant first aligns dates in predicted and reference timelines based on costs induced by
        date distance and content overlap (computed by an approximation of ROUGE-1). It then compares the summaries
        of the aligned dates using ROUGE and weights the score by date distance.

        In our EACL'17 paper we denoted this variant as 'align+'.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
            to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
            e.g.
                {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
        """
        return self._evaluate_per_day_mapping_micro(
            predicted_timeline,
            reference_timelines,
            TimelineRougeEvaluator._get_date_content_costs,
            optimize.linear_sum_assignment
        )
    def evaluate_align_date_content_costs_many_to_one(
            self,
            predicted_timeline,
            reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using a many-to-one
        alignment that uses costs based on date differences and content overlap.

        This variant first (many-to-one-)aligns dates in predicted and reference timelines based on costs induced by
        date distance and content overlap (computed by an approximation of ROUGE-1). It then compares the summaries
        of the aligned dates using ROUGE and weights the score by date distance.

        In our EACL'17 paper we denoted this variant as 'align+ m:1'.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures`
            to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values,
            e.g.
                {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}
        """
        return self._evaluate_per_day_mapping_micro(
            predicted_timeline,
            reference_timelines,
            TimelineRougeEvaluator._get_date_content_costs,
            TimelineRougeEvaluator._assign_to_min_cost
        )
    def evaluate_all(self, predicted_timeline, reference_timelines):
        """ Evaluate a predicted timeline w.r.t. a set of reference timelines using the metrics 'concat',
        'agreement', 'align', 'align+' and 'align+ m:1'.

        Args:
            predicted_timeline (data.timelines.Timeline): A timeline.
            reference_timelines (data.timelines.GroundTruth): A ground truth of timelines.

        Returns:
            A collections.OrderedDict object, mapping a description of the metric of the metric to the
            corresponding dict(str, dict(str, str)) object describing precision/recall/f scores for each
            underlying ROUGE measure in `self.measures`.

            Metric          Description
            ------          -----------
            concat          concat
            agreement       agreement
            align           align_date_costs
            align+          align_date_content_costs
            align+ m:1      align_date_content_costs_many_to_one

            One example entry is
                {"concat": {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}}
        """
        return collections.OrderedDict([
            ("concat", self.evaluate_concat(predicted_timeline, reference_timelines)),
            ("agreement", self.evaluate_agreement(predicted_timeline, reference_timelines)),
            ("align_date_costs", self.evaluate_align_date_costs(predicted_timeline, reference_timelines)),
            ("align_date_content_costs", self.evaluate_align_date_content_costs(predicted_timeline, reference_timelines)),
            ("align_date_content_costs_many_to_one",
             self.evaluate_align_date_content_costs_many_to_one(predicted_timeline, reference_timelines)),
        ])
    def _evaluate_per_day_mapping_micro(
            self,
            predicted_timeline,
            reference_timelines,
            compute_costs,
            optimize_assignment):
        # Shared implementation for the 'align*' variants: align dates via
        # `optimize_assignment` over cost matrices from `compute_costs`,
        # then micro-average date-distance-weighted ROUGE counts.
        precision_numerator = collections.defaultdict(list)
        precision_denominator = collections.defaultdict(list)
        recall_numerator = collections.defaultdict(list)
        recall_denominator = collections.defaultdict(list)
        pred_dates = sorted(list(predicted_timeline.get_dates()))
        ref_dates = sorted(list(reference_timelines.get_dates()))
        # NOTE(review): leftover debug print statements below; the second
        # pair even prints rec_costs under the label "prec_costs".
        print("ref_dates", ref_dates)
        print("pred_dates", pred_dates)
        prec_costs = compute_costs(pred_dates, ref_dates, predicted_timeline,
                                   reference_timelines, axis=0)
        rec_costs = compute_costs(pred_dates, ref_dates, predicted_timeline,
                                  reference_timelines, axis=1)
        print("prec_costs", prec_costs)
        print("prec_costs", rec_costs)
        prec_row, prec_col = optimize_assignment(prec_costs)
        rec_row, rec_col = optimize_assignment(rec_costs)
        # precision
        for row, col in zip(prec_row, prec_col):
            pred_date = pred_dates[row]
            ref_date = ref_dates[col]
            temp_groundtruth = reference_timelines[ref_date]
            groundtruth = {}
            for name in temp_groundtruth:
                groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
            scores = self._get_rouge_counts(
                [sent.split() for sent in predicted_timeline[pred_date]],
                groundtruth
            )
            for measure in self.measures:
                # Weight the numerator by inverse date distance in days.
                precision_numerator[measure].append(
                    (1 / (abs(pred_date.toordinal() - ref_date.toordinal()) + 1)) * scores[measure]["prec_num"])
                precision_denominator[measure].append(scores[measure]["prec_denom"])
        # Unaligned predicted dates are scored against empty references.
        matched_prec = set(list(prec_row))
        for i, date in enumerate(pred_dates):
            if i not in matched_prec:
                pred_date = pred_dates[i]
                scores = self._get_rouge_counts(
                    [sent.split() for sent in predicted_timeline[pred_date]],
                    {str(i): [[""]] for i, _ in enumerate(reference_timelines.timelines)}
                )
                for measure in self.measures:
                    precision_numerator[measure].append(scores[measure]["prec_num"])
                    precision_denominator[measure].append(scores[measure]["prec_denom"])
        # recall
        for row, col in zip(rec_row, rec_col):
            # For the recall direction, rows index reference dates.
            pred_date = pred_dates[col]
            ref_date = ref_dates[row]
            temp_groundtruth = reference_timelines[ref_date]
            groundtruth = {}
            for name in temp_groundtruth:
                groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
            scores = self._get_rouge_counts(
                [sent.split() for sent in predicted_timeline[pred_date]],
                groundtruth
            )
            for measure in self.measures:
                recall_numerator[measure].append(
                    (1 / (abs(pred_date.toordinal() - ref_date.toordinal()) + 1)) * scores[measure]["rec_num"])
                recall_denominator[measure].append(scores[measure]["rec_denom"])
        # Unaligned reference dates are scored against an empty prediction.
        matched_rec = set(list(rec_row))
        for i, date in enumerate(ref_dates):
            if i not in matched_rec:
                ref_date = ref_dates[i]
                temp_groundtruth = reference_timelines[ref_date]
                groundtruth = {}
                for name in temp_groundtruth:
                    groundtruth[name] = [sent.split() for sent in temp_groundtruth[name]]
                scores = self._get_rouge_counts(
                    [[""]],
                    groundtruth
                )
                for measure in self.measures:
                    recall_numerator[measure].append(scores[measure]["rec_num"])
                    recall_denominator[measure].append(scores[measure]["rec_denom"])
        output_scores = {}
        for measure in self.measures:
            prec_denom_sum = sum(precision_denominator[measure])
            if prec_denom_sum == 0:
                prec = 0
            else:
                prec = sum(precision_numerator[measure]) / prec_denom_sum
            rec_denom_sum = sum(recall_denominator[measure])
            if rec_denom_sum == 0:
                rec = 0
            else:
                rec = sum(recall_numerator[measure]) / rec_denom_sum
            output_scores[measure] = {
                "precision": prec,
                "recall": rec,
                "f_score": util.get_f_score(prec, rec, beta=self.beta)
            }
        return output_scores
    @staticmethod
    def _get_date_costs(source_dates, target_dates, tl, ref_tls, axis=0):
        """Cost matrix based purely on date distance; 0 for identical dates.

        `tl` and `ref_tls` are unused; kept for interface compatibility with
        `_get_date_content_costs`. axis=0 puts source dates in rows, axis=1
        target dates.
        """
        costs = []
        if axis == 0:
            (a, b) = (source_dates, target_dates)
        elif axis == 1:
            (a, b) = (target_dates, source_dates)
        for s_date in a:
            to_add = []
            for t_date in b:
                to_add.append(1 - 1 / (abs(s_date.toordinal() - t_date.toordinal()) + 1))
            costs.append(to_add)
        return numpy.array(costs)
    @staticmethod
    def _get_date_content_costs(
            source_dates,
            target_dates,
            tl,
            ref_tls,
            axis=0):
        """Cost matrix combining date distance with (approximate ROUGE-1)
        content overlap of the daily summaries."""
        costs = []
        if axis == 0:
            (a, b) = (source_dates, target_dates)
        elif axis == 1:
            (a, b) = (target_dates, source_dates)
        for s_date in a:
            to_add = []
            for t_date in b:
                date_factor = 1 - 1 / (abs(s_date.toordinal() - t_date.toordinal()) + 1)
                # Content is always compared prediction-vs-references,
                # regardless of row/column orientation.
                date_pred = s_date
                date_ref = t_date
                if axis == 1:
                    date_pred = t_date
                    date_ref = s_date
                content_factor = 1 - util.compute_rouge_approximation(
                    tl[date_pred],
                    [ref_tls[date_ref][name] for name in ref_tls[date_ref]]
                )
                to_add.append(date_factor * content_factor)
            costs.append(to_add)
        return numpy.array(costs)
    @staticmethod
    def _assign_to_min_cost(cost_matrix):
        """Many-to-one assignment: each row is mapped to its cheapest column."""
        row_indices = []
        column_indices = []
        for i, row in enumerate(cost_matrix):
            row_indices.append(i)
            column_indices.append(row.argmin())
        return numpy.array(row_indices), numpy.array(column_indices)
    def _get_rouge_counts(self, pred, ref):
        """Regroup pyrouge-style counts ("*_h/p/m_count") into
        precision/recall numerators and denominators per measure."""
        scores = {}
        temp_scores = self.rouge.score_summary(pred, ref)
        for measure in self.measures:
            scores[measure] = {}
            scores[measure]["prec_num"] = temp_scores[measure + "_h_count"]
            scores[measure]["prec_denom"] = temp_scores[measure + "_p_count"]
            scores[measure]["rec_num"] = temp_scores[measure + "_h_count"]
            scores[measure]["rec_denom"] = temp_scores[measure + "_m_count"]
        return scores
class RougeReimplementation:
    """
    An approximate reimplementation of ROUGE-1 and ROUGE-2.
    It does not exactly match scores from the Perl script. It therefore
    should not be used for computing scores on development and test
    sets when preparing results for papers or for comparison to other
    systems. However, due to improved speed it is useful during development
    (scores also should not differ too much from the original
    implementation).
    Attributes:
        stem (bool): Whether to stem words before evaluation.
        ignore_stopwords (bool): Whether to ignore stopwords before
            evaluation.
        porter_stemmer (PorterStemmer): nltk's implementation of the
            Porter stemmer.
        stem_function (func): Utility function for performing stemming.
        stopwords (set(str)): Stopwords, set to the list used in
            ROUGE's Perl evaluation script.
    """
    def __init__(self, stem=True, ignore_stopwords=True):
        """
        Initializes ROUGE reimplementation.
        Params:
            stem (bool): Whether to stem words before evaluation. Defaults
                to True.
            ignore_stopwords (bool): Whether to ignore stopwords before
                evaluation. Defaults to True.
        """
        self.stem = stem
        self.ignore_stopwords = ignore_stopwords
        self.stopwords = set()
        self.porter_stemmer = nltk.stem.PorterStemmer()
        self.stem_function = self._identity
        if stem:
            self.stem_function = self._robust_porter_stemmer
        if ignore_stopwords:
            # dir_path is only needed on this branch, so it is computed here
            # (the original computed it unconditionally). os.path.join is
            # used instead of string concatenation for portability.
            dir_path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(dir_path, "smart_common_words.txt")) as my_file:
                self.stopwords = set(my_file.read().splitlines())
    def score_summary(self, summary, references):
        """
        Scores a summary with ROUGE-1 and ROUGE-2.
        Params:
            summary (list(list(str))): A list of tokenized sentences,
                representing a predicted summary.
            references dict(int, list(list(str))): A mapping of integers
                to lists of tokenized sentences, representing reference
                summaries.
        Returns:
            A mapping from strings to integers, with the
            following meaning (same representation as pyrouge):
                "rouge_1_h_count": ROUGE-1 recall/precision numerator,
                "rouge_1_p_count": ROUGE-1 precision denominator,
                "rouge_1_m_count": ROUGE-1 recall denominator.
            Analogous for ROUGE-2.
        """
        punctuation = [".", ",", ";", ":", "``", "''", "-", '"']
        to_ignore = self.stopwords.union(punctuation)
        # Lowercase, drop stopwords/punctuation and (optionally) stem, both
        # for the prediction and for every reference summary.
        pred_tokens_lowercased = [self.stem_function(k.lower()) for sent in summary for k in sent
                                  if k.lower() not in to_ignore]
        ref_tokens_lowercased = {}
        for i, ref_summary in references.items():
            ref_tokens_lowercased[i] = [self.stem_function(k.lower()) for sent in ref_summary for k
                                        in sent if k.lower() not in to_ignore]
        eval_scores = {}
        eval_scores.update(
            self._rouge_1(pred_tokens_lowercased, ref_tokens_lowercased))
        eval_scores.update(
            self._rouge_2(pred_tokens_lowercased, ref_tokens_lowercased))
        return eval_scores
    def _identity(self, x):
        """Return *x* unchanged; used when stemming is disabled."""
        return x
    def _robust_porter_stemmer(self, x):
        """Stem *x* with the Porter stemmer, returning *x* itself when the
        stemmer fails with an IndexError (e.g. on degenerate tokens)."""
        stem = x
        try:
            stem = self.porter_stemmer.stem(x)
        except IndexError:
            pass
        return stem
    def _score_counts(self, pred_counts, ref_counts):
        """Shared n-gram scoring used by both ROUGE-1 and ROUGE-2.

        Params:
            pred_counts (Counter): n-gram counts of the prediction.
            ref_counts (dict(int, Counter)): n-gram counts per reference.
        Returns:
            (match, prec_denom, recall_denom): the clipped hit count summed
            over all references, the precision denominator (#references *
            #predicted n-grams) and the recall denominator (total n-grams
            over all references).
        """
        match = 0
        for tok in pred_counts:
            # Clipped match: a predicted n-gram counts at most as often as
            # it occurs in each reference.
            match += sum(min(pred_counts[tok], ref_counts[x][tok])
                         for x in ref_counts)
        prec_denom = len(ref_counts) * sum(pred_counts.values())
        recall_denom = sum(sum(ref_counts[x].values()) for x in ref_counts)
        return match, prec_denom, recall_denom
    def _rouge_1(self, pred_tokens, ref_tokens):
        """Approximate ROUGE-1 (unigram) counts; see score_summary for the
        meaning of the returned keys."""
        pred_counts = collections.Counter(pred_tokens)
        ref_counts = {i: collections.Counter(tokens)
                      for i, tokens in ref_tokens.items()}
        match, prec_denom, recall_denom = self._score_counts(pred_counts,
                                                             ref_counts)
        return {
            "rouge_1_h_count": match,
            "rouge_1_p_count": prec_denom,
            "rouge_1_m_count": recall_denom,
        }
    def _rouge_2(self, pred_tokens, ref_tokens):
        """Approximate ROUGE-2 (bigram) counts; see score_summary for the
        meaning of the returned keys.

        BUG FIX (comment only): the original carried a copy-pasted
        "approximate ROUGE-1 score" comment here although this method
        computes ROUGE-2. The shared logic now lives in _score_counts.
        """
        pred_counts = collections.Counter(zip(pred_tokens, pred_tokens[1:]))
        ref_counts = {i: collections.Counter(zip(tokens, tokens[1:]))
                      for i, tokens in ref_tokens.items()}
        match, prec_denom, recall_denom = self._score_counts(pred_counts,
                                                             ref_counts)
        return {
            "rouge_2_h_count": match,
            "rouge_2_p_count": prec_denom,
            "rouge_2_m_count": recall_denom,
        }
| en | 0.717402 | # import pyrouge Evaluate timelines with respect to a set of reference timelines. This class implements several evaluation metrics based on ROUGE to compare predicted timelines with a set of reference timelines. The measures are described in Martschat and Markert (2017). References: <NAME> and <NAME> (2017). Improving ROUGE for Timeline Summarization. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics, volume 2: Short Papers, Valencia, Spain, 3-7 April 2017. Attributes: measures (set(str)): ROUGE measures to use when computing scores. rouge (pyrouge.Rouge155 or RougeReimplementation): Object to perform ROUGE computation. beta (float): Value controlling the recall/precision trade-off when computing F_beta scores. Defaults to 1. Initialize the evaluator. Args: measures (set(str)): ROUGE measures to use when computing scores. Defaults to `rouge_1`. rouge_computation (str): Whether to use the original ROUGE perl script ("original") or an approximate Python reimplementation ("reimpl"). Defaults to "reimpl". beta (float): Value controlling the recall/precision trade-off when computing F_beta scores. Defaults to 1. Evaluate a predicted timeline w.r.t. a set of reference timelines using the 'concat' ROUGE variant. This variant first concatenates all daily summaries of the respective timelines. The resulting documents are then evaluated using the ROUGE measure. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. Returns: A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures` to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values, e.g. {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}} Evaluate a predicted timeline w.r.t. a set of reference timelines using the 'agreement' ROUGE variant. 
This variant compares the daily summaries of a date if the date appears in both the predicted timeline and in one of the reference timelines. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. Returns: A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures` to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values, e.g. {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}} Evaluate a predicted timeline w.r.t. a set of reference timelines using an injective alignment that uses costs based on date differences. This variant first aligns dates in predicted and reference timelines based on costs induced by date distance. In then compares the summaries of the aligned dates using ROUGE and weights the score by date distance. In our EACL'17 paper we denoted this variant as 'align'. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. Returns: A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures` to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values, e.g. {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}} Evaluate a predicted timeline w.r.t. a set of reference timelines using an injective alignment that uses costs based on date differences and content overlap. This variant first aligns dates in predicted and reference timelines based on costs induced by date distance and content overlap (computed by an approximation of ROUGE-1). It then compares the summaries of the aligned dates using ROUGE and weights the score by date distance. In our EACL'17 paper we denoted this variant as 'align+'. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. 
Returns: A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures` to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values, e.g. {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}} Evaluate a predicted timeline w.r.t. a set of reference timelines using a many-to-one alignment that uses costs based on date differences and content overlap. This variant first (many-to-one-)aligns dates in predicted and reference timelines based on costs induced by date distance and content overlap (computed by an approximation of ROUGE-1). It then compares the summaries of the aligned dates using ROUGE and weights the score by date distance. In our EACL'17 paper we denoted this variant as 'align+ m:1'. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. Returns: A dict(str, dict(str, str)) object mapping each ROUGE measure in `self.measures` to a dict that maps 'precision', 'recall' and 'f_score' to the corresponding values, e.g. {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}} Evaluate a predicted timeline w.r.t. a set of reference timelines using the metrics 'concat', 'agreement', 'align', 'align+' and 'align+ m:1'. Args: predicted_timeline (data.timelines.Timeline): A timeline. reference_timelines (data.timelines.GroundTruth): A ground truth of timelines. Returns: A collections.OrderedDict object, mapping a description of the metric of the metric to the corresponding dict(str, dict(str, str)) object describing precision/recall/f scores for each underlying ROUGE measure in `self.measures`. 
Metric Description ------ ----------- concat concat agreement agreement align align_date_costs align+ align_date_content_costs align+ m:1 align_date_content_costs_many_to_one One example entry is {"concat": {"rouge_1": {"precision": 1.0, "recall": 1.0, "f_score": 1.0}}} # precision # recall An approximate reimplementation of ROUGE-1 and ROUGE-2. It does not exactly match scores from the Perl script. It therefore should not be used for computing scores on development and test sets when preparing results for papers or for comparison to other systems. However, due to improved speed it is useful during development (scores also should not differ too much from the original implementation). Attributes: stem (bool): Whether to stem words before evaluation. ignore_stopwords (bool): Whether to ignore stopwords before evaluation. porter_stemmer (PorterStemmer): nltk's implementation of the Porter stemmer. stem_function (func): Utility function for performing stemming. stopwords (set(str)): Stopwords, set to the list used in ROUGE's Perl evaluation script. Initializes ROUGE reimplementation. Params: stem (bool): Whether to stem words before evaluation. Defaults to True. ignore_stopwords (bool): Whether to ignore stopwords before evaluation. Defaults to True. Scores a summary with ROUGE-1 and ROUGE-2. Params: summary (list(list(str))): A list of tokenized sentences, representing a predicted summary. references dict(int, list(list(str))): A mapping of integers to lists of tokenized sentences, representing reference summaries. Returns: A mapping from strings to integers, with the following meaning (same representation as pyrouge): "rouge_1_h_count": ROUGE-1 recall/precision numerator, "rouge_1_p_count": ROUGE-1 precision denominator, "rouge_1_m_count": ROUGE-1 recall denominator. Analogous for ROUGE-2. # unigrams # approximate ROUGE-1 score # approximate ROUGE-1 score | 2.536837 | 3 |
setup.py | arxanchain/python-common | 1 | 6616921 | #!/usr/bin/env python
from setuptools import setup, find_packages
import io
# Run-time dependencies, one requirement per line.
with open('./requirements.txt') as reqs_txt:
    requirements = [line for line in reqs_txt]
setup(
    name='py-common',
    version='3.0',
    description="Python common SDKs for Arxanchain.",
    long_description=io.open('README.md', encoding='utf-8').read(),
    url='https://github.com/arxanchain/py-common/',
    download_url='https://github.com/arxanchain/py-common/',
    packages=find_packages(),
    platforms='any',
    install_requires=requirements,
    dependency_links=[
        "git+<EMAIL>:gabrielfalcao/HTTPretty.git#egg=httpretty"
    ],
    package_data={
        # BUG FIX: a comma was missing after the ".key" entry, so Python's
        # implicit string-literal concatenation silently merged the key path
        # and "cryption/utils/utils.so" into one nonexistent path, and the
        # .so file was never packaged.
        "cryption": ["cryption/ecc/certs/tls/tls.cert",
                     "cryption/ecc/certs/users/pWEzB4yMM1518346407/pWEzB4yMM1518346407.key",
                     "cryption/utils/utils.so",
                     "cryption/utils/utils.h"
                     ]
    },
    zip_safe=False,
    include_package_data=True,
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
import io
with open('./requirements.txt') as reqs_txt:
requirements = [line for line in reqs_txt]
setup(
name='py-common',
version='3.0',
description="Python common SDKs for Arxanchain.",
long_description=io.open('README.md', encoding='utf-8').read(),
url='https://github.com/arxanchain/py-common/',
download_url='https://github.com/arxanchain/py-common/',
packages=find_packages(),
platforms='any',
install_requires=requirements,
dependency_links=[
"git+<EMAIL>:gabrielfalcao/HTTPretty.git#egg=httpretty"
],
package_data={
"cryption": ["cryption/ecc/certs/tls/tls.cert",
"cryption/ecc/certs/users/pWEzB4yMM1518346407/pWEzB4yMM1518346407.key"
"cryption/utils/utils.so",
"cryption/utils/utils.h"
]
},
zip_safe=False,
include_package_data=True,
)
| en | 0.11852 | #!/usr/bin/env python #egg=httpretty" | 1.50733 | 2 |
img-gen/dinteg.py | slivingston/fmrbenchmark-website | 1 | 6616922 | """
Illustration of the scaling double-integrators domain.
SCL; 23 Sep 2014
"""
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# The system is composed of double integrators block-wise.
#
# Even indices are (abstract) position; odd indices are velocity for
# the position corresponding to the immediately preceding index.
def dinteg_ode(x, t):
    """Right-hand side of the three (position, velocity) pairs in *x*.

    Even indices of *x* are positions, odd indices the matching velocities
    (see the module comment). Each position's derivative is its velocity;
    each velocity's derivative is proportional to the negated position.
    *t* is unused (the system is autonomous) but required by odeint.
    """
    vel0, vel1, vel2 = x[1], x[3], x[5]
    return (vel0, -0.5 * x[0], vel1, -0.5 * x[2], vel2, -x[4])
def gen_obs(offset=None, side=1):
    """Build the (X, Y, Z) coordinate arrays of a square face for plot_surface.

    Params:
        offset: 3-sequence giving the lower corner of the face; defaults to
            the origin.
        side: edge length of the face.

    Returns:
        A tuple of three 2x5 numpy arrays (X, Y, Z) tracing the face: X and
        Z vary over the square (both rows identical), while Y holds the two
        constant levels offset[1]+side and offset[1].
    """
    if offset is None:
        offset = np.zeros(3)
    x_lo, y_lo, z_lo = offset[0], offset[1], offset[2]
    x_hi, z_hi = x_lo + side, z_lo + side
    xs = np.array([[x_lo, x_lo, x_hi, x_hi, x_lo]] * 2)
    ys = np.array([[y_lo + side] * 5, [y_lo] * 5])
    zs = np.array([[z_lo, z_hi, z_hi, z_lo, z_lo]] * 2)
    return (xs, ys, zs)
# Integrate the system and render the trajectory with two gray obstacle
# faces and a green goal face.
t = np.linspace(0, 10, 100)
x = odeint(dinteg_ode, [1,1, 2,0, 3,0], t)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# Plot the three position coordinates (columns 0, 2, 4) against each other.
ax.plot(x.T[0], x.T[2], x.T[4])
obstacle1 = gen_obs((-2, -1, 0))
obstacle2 = gen_obs((-1.5, -1.5, -1))
ax.plot_surface(*obstacle1, rstride=1, cstride=1, color="gray")
ax.plot_surface(*obstacle2, rstride=1, cstride=1, color="gray")
goal = gen_obs((1.5, 1.5, -3), side=0.4)
ax.plot_surface(*goal, rstride=1, cstride=1, color="green")
plt.axis("equal")
# BUG FIX: the original contained the bare no-op expression "-168, 8" here,
# almost certainly a mangled camera-angle call; restored as view_init
# (elev=-168, azim=8). TODO confirm the intended viewing angle.
ax.view_init(-168, 8)
plt.savefig("dinteg_illustration.svg")
| """
Illustration of the scaling double-integrators domain.
SCL; 23 Sep 2014
"""
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# The system is composed of double integrators block-wise.
#
# Even indices are (abstract) position; odd indices are velocity for
# the position corresponding to the immediately preceding index.
def dinteg_ode(x, t):
return (x[1], -0.5*x[0], x[3], -0.5*x[2], x[5], -x[4])
def gen_obs(offset=None, side=1):
if offset is None:
offset = np.zeros(3)
return (np.array([[offset[0], offset[0], offset[0]+side, offset[0]+side, offset[0]],
[offset[0], offset[0], offset[0]+side, offset[0]+side, offset[0]]]),
np.array([[offset[1]+side, offset[1]+side, offset[1]+side, offset[1]+side, offset[1]+side],
[offset[1], offset[1], offset[1], offset[1], offset[1]]]),
np.array([[offset[2], offset[2]+side, offset[2]+side, offset[2], offset[2]],
[offset[2], offset[2]+side, offset[2]+side, offset[2], offset[2]]]))
t = np.linspace(0, 10, 100)
x = odeint(dinteg_ode, [1,1, 2,0, 3,0], t)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot(x.T[0], x.T[2], x.T[4])
obstacle1 = gen_obs((-2, -1, 0))
obstacle2 = gen_obs((-1.5, -1.5, -1))
ax.plot_surface(*obstacle1, rstride=1, cstride=1, color="gray")
ax.plot_surface(*obstacle2, rstride=1, cstride=1, color="gray")
goal = gen_obs((1.5, 1.5, -3), side=0.4)
ax.plot_surface(*goal, rstride=1, cstride=1, color="green")
plt.axis("equal")
-168, 8
plt.savefig("dinteg_illustration.svg")
| en | 0.864055 | Illustration of the scaling double-integrators domain. SCL; 23 Sep 2014 # The system is composed of double integrators block-wise. # # Even indices are (abstract) position; odd indices are velocity for # the position corresponding to the immediately preceding index. | 3.117974 | 3 |
examples/load.py | oyamoh-brian/pyjadx | 1 | 6616923 | import pyjadx
import argparse
import threading
from multiprocessing import Process
from threading import Thread
import time
def load_apk(apk, i):
    """Load *apk* with a fresh Jadx instance (body of one worker thread).

    Params:
        apk (str): Path of the APK file to load.
        i (int): Worker index, printed only for progress tracing.
    """
    print(apk, i)
    jadx = pyjadx.Jadx()
    # BUG FIX: the original called jadx.load(args.apk), ignoring the `apk`
    # parameter and depending on the module-level `args` global (which only
    # exists when the file is run as a script).
    jadx.load(apk)
    #time.sleep(10)
def test_threads(apk):
    """Spawn three threads that each load *apk*, then wait for all of them.

    Each worker is started as soon as it is created, matching the original
    start-on-creation ordering; the join loop then blocks until every
    worker has finished.
    """
    workers = []
    for idx in range(3):
        worker = Thread(target=load_apk, args=[apk, idx])
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    # Parse the single positional argument (path to the APK) and run the
    # multi-threaded load test against it.
    parser = argparse.ArgumentParser(description="Load an APK with Pyjadx")
    parser.add_argument("apk", help="Target apk")
    args = parser.parse_args()
    test_threads(args.apk)
| import pyjadx
import argparse
import threading
from multiprocessing import Process
from threading import Thread
import time
def load_apk(apk, i):
print(apk, i)
jadx = pyjadx.Jadx()
jadx.load(args.apk)
#time.sleep(10)
def test_threads(apk):
th = []
for i in range(3):
process = Thread(target=load_apk, args=[apk, i])
process.start()
th.append(process)
for t in th:
t.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load an APK with Pyjadx")
parser.add_argument("apk", help="Target apk")
args = parser.parse_args()
test_threads(args.apk)
| en | 0.299324 | #time.sleep(10) | 2.624197 | 3 |
api/models.py | AtharvaTawde/task-tracker | 0 | 6616924 | from . import db
class Task(db.Model):
    """SQLAlchemy model for one tracked task.

    Note that ``date`` is stored as a free-form string (max 50 chars), not
    a DATE column, so any database-side ordering on it is lexicographic.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Task number separate from the primary key; its exact semantics are
    # defined by the callers, not visible here.
    number = db.Column(db.Integer)
    name = db.Column(db.String(50))
    description = db.Column(db.String(200))
    date = db.Column(db.String(50))
| from . import db
class Task(db.Model):
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
name = db.Column(db.String(50))
description = db.Column(db.String(200))
date = db.Column(db.String(50))
| none | 1 | 2.431414 | 2 | |
UTAx-CSE1309X/Q6P5.py | aliyyousuff/MOOC | 0 | 6616925 | __author__ = '<NAME>, <EMAIL>, fb.com/aliyyousuf'
# Write a function named calculate_expenses that receives a filename as argument. The file contains the information
# about a person's expenses on items. Your function should return a list of tuples sorted based on the name of the items.
# Each tuple consists of the name of the item and total expense of that item as shown below:
# milk,2.35
# bread , 1.95
# chips , 2.54
# milk , 2.38
# milk,2.31
# bread, 1.90
#Notice that each line of the file only includes an item and the purchase price of that item separated by a comma.
# There may be spaces before or after the item or the price. Then your function should read the file and return a list
# of tuples such as:
# [('bread', '$3.85'), ('chips', '$2.54'), ('milk', '$7.04')]
# Notes:
# Tuples are sorted based on the item names i.e. bread comes before chips which comes before milk.
# The total expenses are strings which start with a $ and they have two digits of accuracy after the decimal point.
# Hint: Use "${:.2f}" to properly create and format strings for the total expenses.
def calculate_expenses(file):
    """Sum the per-item expenses listed in *file*, sorted by item name.

    Each line of the file has the form ``item,price`` with optional spaces
    around either field; all lines for the same item are summed.

    Params:
        file (str): Path of the expenses file.

    Returns:
        list(tuple(str, str)): ``(item, "$<total>")`` pairs sorted by item
        name, totals formatted with two decimals, e.g.
        ``[('bread', '$3.85'), ('chips', '$2.54'), ('milk', '$7.04')]``.
    """
    # Fixes vs. the original: the file handle is now closed via a context
    # manager, and totals accumulate in a single pass over the lines instead
    # of re-scanning every line once per distinct item name (O(n^2)).
    totals = {}
    with open(file, 'r') as handle:
        for line in handle:
            line = line.rstrip()
            if not line:
                continue  # robustness: tolerate blank/trailing lines
            parts = [field.replace(' ', '') for field in line.split(',')]
            name, price = parts[0], parts[1]
            totals[name] = totals.get(name, 0.0) + float(price)
    # sorted() on the dict items sorts by item name, matching the original
    # OrderedDict(sorted(...)) behaviour.
    return [(name, '${0:1.2f}'.format(total))
            for name, total in sorted(totals.items())]
#print(calculate_expenses('a2.txt')) | __author__ = '<NAME>, <EMAIL>, fb.com/aliyyousuf'
# Write a function named calculate_expenses that receives a filename as argument. The file contains the information
# about a person's expenses on items. Your function should return a list of tuples sorted based on the name of the items.
# Each tuple consists of the name of the item and total expense of that item as shown below:
# milk,2.35
# bread , 1.95
# chips , 2.54
# milk , 2.38
# milk,2.31
# bread, 1.90
#Notice that each line of the file only includes an item and the purchase price of that item separated by a comma.
# There may be spaces before or after the item or the price. Then your function should read the file and return a list
# of tuples such as:
# [('bread', '$3.85'), ('chips', '$2.54'), ('milk', '$7.04')]
# Notes:
# Tuples are sorted based on the item names i.e. bread comes before chips which comes before milk.
# The total expenses are strings which start with a $ and they have two digits of accuracy after the decimal point.
# Hint: Use "${:.2f}" to properly create and format strings for the total expenses.
def calculate_expenses(file):
file = open(file,'r')
file = file.readlines()
LL = []
for line in file:
line = line.rstrip().split(',') # Remove newLine character and split by comma.
line = [x.replace(' ','') for x in line] # Remove extra space
LL += [line]
LN = []
for i in LL: # Collect all the items name with duplicates
LN += [i[0]]
D = {}
for name in LN:
summ = 0
if LN.count(name) > 1: # Check if items occur more than one
for item in LL:
if item[0] == name:
summ += float(item[1]) # Then sum all items cost
D[name] = summ
else:
for item in LL:
if item[0] == name:
D[name] = float(item[1])
from collections import OrderedDict
from operator import itemgetter
D = OrderedDict(sorted(D.items(), key=itemgetter(0))) # Sort by item names
LLL = []
for k, v in D.items():
LLL += [(k,'${0:1.2f}'.format(float(v)))] # follows question specification
return LLL
#print(calculate_expenses('a2.txt')) | en | 0.872596 | # Write a function named calculate_expenses that receives a filename as argument. The file contains the information # about a person's expenses on items. Your function should return a list of tuples sorted based on the name of the items. # Each tuple consists of the name of the item and total expense of that item as shown below: # milk,2.35 # bread , 1.95 # chips , 2.54 # milk , 2.38 # milk,2.31 # bread, 1.90 #Notice that each line of the file only includes an item and the purchase price of that item separated by a comma. # There may be spaces before or after the item or the price. Then your function should read the file and return a list # of tuples such as: # [('bread', '$3.85'), ('chips', '$2.54'), ('milk', '$7.04')] # Notes: # Tuples are sorted based on the item names i.e. bread comes before chips which comes before milk. # The total expenses are strings which start with a $ and they have two digits of accuracy after the decimal point. # Hint: Use "${:.2f}" to properly create and format strings for the total expenses. # Remove newLine character and split by comma. # Remove extra space # Collect all the items name with duplicates # Check if items occur more than one # Then sum all items cost # Sort by item names # follows question specification #print(calculate_expenses('a2.txt')) | 4.080663 | 4 |
tests/test_server.py | unicredit/charade | 29 | 6616926 | <reponame>unicredit/charade<gh_stars>10-100
from unittest import TestCase
from nose.tools import raises
from server import _resolve
from services import Service, MissingService
def mk_services(services):
    """Index *services* by task and name.

    Params:
        services: iterable of objects exposing ``task`` and ``name``
            attributes.

    Returns:
        dict mapping each task to a dict that maps service names to the
        service objects themselves.
    """
    result = {}
    for service in services:
        # setdefault replaces the original membership check, which also
        # used the non-idiomatic "if not task in result" spelling.
        result.setdefault(service.task, {})[service.name] = service
    return result
class TestTaskResolution(TestCase):
    """Unit tests for server._resolve, the task dependency resolver.

    The fixture registers services keyed by (task, name). The third
    Service argument lists hard dependencies; the fourth (used only for
    task5) lists optional dependencies, as exercised by
    test_optional_dependence below.
    """
    services = mk_services([
        Service('task1', 'name1', []),
        Service('task2', 'name2', []),
        Service('task3', 'name1', ['task1']),
        Service('task1', 'name2', []),
        Service('task4', 'name2', ['task3', 'task2']),
        Service('task5', 'name1', [], ['task1'])
    ])
    # Simple example of task resolution
    def test_simple(self):
        """A task with one dependency resolves dependency-first."""
        request = {
            'tasks': [
                {'task': 'task3', 'name': 'name1'}
            ]
        }
        response = {}
        resolution = _resolve(request, response, TestTaskResolution.services)
        expected = [('task1', 'name1'), ('task3', 'name1')]
        self.assertEqual(resolution, expected)
    # Recursive task resolution
    def test_recursive(self):
        """Transitive dependencies precede the tasks that need them."""
        request = {
            'tasks': [
                {'task': 'task4', 'name': 'name2'}
            ]
        }
        response = {}
        resolution = _resolve(request, response, TestTaskResolution.services)
        expected = [('task1', 'name1'), ('task3', 'name1'), ('task2', 'name2'), ('task4', 'name2')]
        self.assertEqual(resolution, expected)
    # Task resolution under the constraint that a specific
    # version of task1 was requested by the user
    def test_explicit(self):
        """An explicitly requested service version overrides the default
        that would otherwise be used to satisfy a dependency."""
        request = {
            'tasks': [
                {'task': 'task3', 'name': 'name1'},
                {'task': 'task1', 'name': 'name2'}
            ]
        }
        response = {}
        resolution = _resolve(request, response, TestTaskResolution.services)
        expected = [('task1', 'name2'), ('task3', 'name1')]
        self.assertEqual(resolution, expected)
    # Task resolution assuming the response is partially
    # provided by the user
    def test_partial_response(self):
        """A task whose result is already in the response is kept in the
        resolution with name None instead of being recomputed."""
        request = {
            'tasks': [
                {'task': 'task3', 'name': 'name1'},
                {'task': 'task2', 'name': 'name2'}
            ]
        }
        response = {
            'task1': []
        }
        resolution = _resolve(request, response, TestTaskResolution.services)
        expected = [('task1', None), ('task3', 'name1'), ('task2', 'name2')]
        self.assertEqual(resolution, expected)
    # Task resolution when one of the tasks has an optional dependence.
    # if the optional dependence is requested, it should be performed in advance
    def test_optional_dependence(self):
        """An optional dependency is scheduled before its dependent only
        when it was itself requested (request0); otherwise it is skipped
        entirely (request1)."""
        request0 = {
            'tasks': [
                {'task': 'task5', 'name': 'name1'},
                {'task': 'task1', 'name': 'name2'}
            ]
        }
        request1 = {
            'tasks': [
                {'task': 'task5', 'name': 'name1'}
            ]
        }
        response = {}
        resolution0 = _resolve(request0, response, TestTaskResolution.services)
        resolution1 = _resolve(request1, response, TestTaskResolution.services)
        expected0 = [('task1','name2'), ('task5','name1')]
        expected1 = [('task5','name1')]
        self.assertEqual((resolution0, resolution1), (expected0, expected1))
    # When services are missing, appropriate exceptions are raised
    @raises(MissingService)
    def test_missing_service(self):
        """Requesting an unregistered (task, name) raises MissingService."""
        request = {
            'tasks': [
                {'task': 'task3', 'name': 'name1'},
                {'task': 'task2', 'name': 'name2'},
                {'task': 'taskx', 'name': 'namex'}
            ]
        }
        response = {}
        resolution = _resolve(request, response, TestTaskResolution.services)
from nose.tools import raises
from server import _resolve
from services import Service, MissingService
def mk_services(services):
result = {}
for service in services:
task = service.task
name = service.name
if not task in result:
result[task] = {}
result[task][name] = service
return result
class TestTaskResolution(TestCase):
services = mk_services([
Service('task1', 'name1', []),
Service('task2', 'name2', []),
Service('task3', 'name1', ['task1']),
Service('task1', 'name2', []),
Service('task4', 'name2', ['task3', 'task2']),
Service('task5', 'name1', [], ['task1'])
])
# Simple example of task resolution
def test_simple(self):
request = {
'tasks': [
{'task': 'task3', 'name': 'name1'}
]
}
response = {}
resolution = _resolve(request, response, TestTaskResolution.services)
expected = [('task1', 'name1'), ('task3', 'name1')]
self.assertEqual(resolution, expected)
# Recursive task resolution
def test_recursive(self):
request = {
'tasks': [
{'task': 'task4', 'name': 'name2'}
]
}
response = {}
resolution = _resolve(request, response, TestTaskResolution.services)
expected = [('task1', 'name1'), ('task3', 'name1'), ('task2', 'name2'), ('task4', 'name2')]
self.assertEqual(resolution, expected)
# Task resolution under the constraint that a specific
# version of task1 was requested by the user
def test_explicit(self):
request = {
'tasks': [
{'task': 'task3', 'name': 'name1'},
{'task': 'task1', 'name': 'name2'}
]
}
response = {}
resolution = _resolve(request, response, TestTaskResolution.services)
expected = [('task1', 'name2'), ('task3', 'name1')]
self.assertEqual(resolution, expected)
# Task resolution assuming the response is partially
# provided by the user
def test_partial_response(self):
request = {
'tasks': [
{'task': 'task3', 'name': 'name1'},
{'task': 'task2', 'name': 'name2'}
]
}
response = {
'task1': []
}
resolution = _resolve(request, response, TestTaskResolution.services)
expected = [('task1', None), ('task3', 'name1'), ('task2', 'name2')]
self.assertEqual(resolution, expected)
# Task resolution when one of the tasks has an optional dependence.
# if the optional dependence is requested, it should be performed in advance
def test_optional_dependence(self):
request0 = {
'tasks': [
{'task': 'task5', 'name': 'name1'},
{'task': 'task1', 'name': 'name2'}
]
}
request1 = {
'tasks': [
{'task': 'task5', 'name': 'name1'}
]
}
response = {}
resolution0 = _resolve(request0, response, TestTaskResolution.services)
resolution1 = _resolve(request1, response, TestTaskResolution.services)
expected0 = [('task1','name2'), ('task5','name1')]
expected1 = [('task5','name1')]
self.assertEqual((resolution0, resolution1), (expected0, expected1))
# When services are missing, appropriate exceptions are raised
@raises(MissingService)
def test_missing_service(self):
request = {
'tasks': [
{'task': 'task3', 'name': 'name1'},
{'task': 'task2', 'name': 'name2'},
{'task': 'taskx', 'name': 'namex'}
]
}
response = {}
resolution = _resolve(request, response, TestTaskResolution.services) | en | 0.924197 | # Simple example of task resolution # Recursive task resolution # Task resolution under the constraint that a specific # version of task1 was requested by the user # Task resolution assuming the response is partially # provided by the user # Task resolution when one of the tasks has an optional dependence. # if the optional dependence is requested, it should be performed in advance # When services are missing, appropriate exceptions are raised | 2.861134 | 3 |
yolov3/configs_tmpl.py | escudero/TFYolo | 0 | 6616927 | <filename>yolov3/configs_tmpl.py
# YOLO options
YOLO_TYPE="yolov3" # yolov4 or yolov3
YOLO_FRAMEWORK="tf" # "tf" or "trt"
YOLO_V3_WEIGHTS="model_data/yolov3.weights"
YOLO_V4_WEIGHTS="model_data/yolov4.weights"
YOLO_V3_TINY_WEIGHTS="model_data/yolov3-tiny.weights"
YOLO_V4_TINY_WEIGHTS="model_data/yolov4-tiny.weights"
YOLO_CLASSES="model_data/coco.names"
YOLO_STRIDES=[8, 16, 32]  # downsampling factor per detection scale
YOLO_IOU_LOSS_THRESH=0.5
YOLO_ANCHOR_PER_SCALE=3
YOLO_MAX_BBOX_PER_SCALE=100
YOLO_INPUT_SIZE=416
YOLO_METHOD_ENSEMBLEBOXES="nms" # nms, soft_nms, nmw, wbf
# Train options
TRAIN_YOLO_TINY=False
TRAIN_SAVE_BEST_ONLY=True # saves only best model according validation loss (True recommended)
TRAIN_SAVE_CHECKPOINT=False # saves all best validated checkpoints in training process (may require a lot disk space) (False recommended)
TRAIN_ANNOT_PATH="mnist/mnist_train.txt"
TRAIN_LOGDIR="log"
TRAIN_CHECKPOINTS_FOLDER="checkpoints"
TRAIN_MODEL_NAME="model"
TRAIN_LOAD_IMAGES_TO_RAM=True # With True faster training, but need more RAM
TRAIN_BATCH_SIZE=4
TRAIN_DATA_AUG=True
TRAIN_TRANSFER=True
TRAIN_FROM_CHECKPOINT=False # or a checkpoint path, e.g. "checkpoints/yolov3_custom"
TRAIN_LR_INIT=1e-4
TRAIN_LR_END=1e-6
TRAIN_WARMUP_EPOCHS=2
TRAIN_EPOCHS=100
# TEST options
TEST_ANNOT_PATH="mnist/mnist_test.txt"
TEST_DATA_AUG=False
TEST_SCORE_THRESHOLD=0.3
TEST_IOU_THRESHOLD=0.45
# YOLOv3-TINY and YOLOv4-TINY WORKAROUND: pick the anchor set matching
# YOLO_TYPE, then override it (and the strides) for the tiny variants.
# Each triple is presumably a (width, height) anchor box pair per scale --
# confirm against the model-building code.
if YOLO_TYPE == "yolov4":
    YOLO_ANCHORS = [[[12, 16], [19, 36], [40, 28]],
                    [[36, 75], [76, 55], [72, 146]],
                    [[142,110], [192, 243], [459, 401]]]
if YOLO_TYPE == "yolov3":
    YOLO_ANCHORS = [[[10, 13], [16, 30], [33, 23]],
                    [[30, 61], [62, 45], [59, 119]],
                    [[116, 90], [156, 198], [373, 326]]]
if TRAIN_YOLO_TINY:
    YOLO_STRIDES = [16, 32, 64]
    # Tiny models use two real scales; the third entry is padded with
    # zero anchors to keep the three-scale structure.
    YOLO_ANCHORS = [[[10, 14], [23, 27], [37, 58]],
                    [[81, 82], [135, 169], [344, 319]],
                    [[0, 0], [0, 0], [0, 0]]]
| <filename>yolov3/configs_tmpl.py
# YOLO options
YOLO_TYPE="yolov3" # yolov4 or yolov3
YOLO_FRAMEWORK="tf" # "tf" or "trt"
YOLO_V3_WEIGHTS="model_data/yolov3.weights"
YOLO_V4_WEIGHTS="model_data/yolov4.weights"
YOLO_V3_TINY_WEIGHTS="model_data/yolov3-tiny.weights"
YOLO_V4_TINY_WEIGHTS="model_data/yolov4-tiny.weights"
YOLO_CLASSES="model_data/coco.names"
YOLO_STRIDES=[8, 16, 32]
YOLO_IOU_LOSS_THRESH=0.5
YOLO_ANCHOR_PER_SCALE=3
YOLO_MAX_BBOX_PER_SCALE=100
YOLO_INPUT_SIZE=416
YOLO_METHOD_ENSEMBLEBOXES="nms" # nms, soft_nms, nmw, wbf
# Train options
TRAIN_YOLO_TINY=False
TRAIN_SAVE_BEST_ONLY=True # saves only best model according validation loss (True recommended)
TRAIN_SAVE_CHECKPOINT=False # saves all best validated checkpoints in training process (may require a lot disk space) (False recommended)
TRAIN_ANNOT_PATH="mnist/mnist_train.txt"
TRAIN_LOGDIR="log"
TRAIN_CHECKPOINTS_FOLDER="checkpoints"
TRAIN_MODEL_NAME="model"
TRAIN_LOAD_IMAGES_TO_RAM=True # With True faster training, but need more RAM
TRAIN_BATCH_SIZE=4
TRAIN_DATA_AUG=True
TRAIN_TRANSFER=True
TRAIN_FROM_CHECKPOINT=False # "checkpoints/yolov3_custom"
TRAIN_LR_INIT=1e-4
TRAIN_LR_END=1e-6
TRAIN_WARMUP_EPOCHS=2
TRAIN_EPOCHS=100
# TEST options
TEST_ANNOT_PATH="mnist/mnist_test.txt"
TEST_DATA_AUG=False
TEST_SCORE_THRESHOLD=0.3
TEST_IOU_THRESHOLD=0.45
#YOLOv3-TINY and YOLOv4-TINY WORKAROUND
if YOLO_TYPE == "yolov4":
YOLO_ANCHORS = [[[12, 16], [19, 36], [40, 28]],
[[36, 75], [76, 55], [72, 146]],
[[142,110], [192, 243], [459, 401]]]
if YOLO_TYPE == "yolov3":
YOLO_ANCHORS = [[[10, 13], [16, 30], [33, 23]],
[[30, 61], [62, 45], [59, 119]],
[[116, 90], [156, 198], [373, 326]]]
if TRAIN_YOLO_TINY:
YOLO_STRIDES = [16, 32, 64]
YOLO_ANCHORS = [[[10, 14], [23, 27], [37, 58]],
[[81, 82], [135, 169], [344, 319]],
[[0, 0], [0, 0], [0, 0]]]
| en | 0.750757 | # YOLO options # yolov4 or yolov3 # "tf" or "trt" # nms, soft_nms, nmw, wbf # Train options # saves only best model according validation loss (True recommended) # saves all best validated checkpoints in training process (may require a lot disk space) (False recommended) # With True faster training, but need more RAM # "checkpoints/yolov3_custom" # TEST options #YOLOv3-TINY and YOLOv4-TINY WORKAROUND | 1.689182 | 2 |
intergalactic/__init__.py | rollinginsanity/IntergalacticProgenitor | 0 | 6616928 | <filename>intergalactic/__init__.py<gh_stars>0
#The main script that pulls together the other components of the What's My IP service.
from flask import Flask
app = Flask(__name__)
from intergalactic import routes
| <filename>intergalactic/__init__.py<gh_stars>0
#The main script that pulls together the other components of the What's My IP service.
from flask import Flask
app = Flask(__name__)
from intergalactic import routes
| en | 0.899392 | #The main script that pulls together the other components of the What's My IP service. | 1.75161 | 2 |
pipeline.py | upendra-k14/indic_tagger | 0 | 6616929 | <reponame>upendra-k14/indic_tagger
'''
Pipeline code for Indic Tagger
Example:
python pipeline.py -p train -o outputs -l tel -t chunk -m crf -i data/test/tel/test.utf.conll.chunk -e utf -f conll
-p, --pipeline_type - train, test, predict
-l, --language - te, hi, ta, pu, mr, be, ur, ka, ml
-t, --tag_type - pos, chunk
-m, --model_type - crf, hmm, cnn, lstm
-f, --data_format - ssf, tnt, text
-e, --encoding - utf8, wx (default: utf8)
-i, --input_file - path to the test data file
-o, --output_file - path to the output file
-s, --sent_split - split the sentences in the test data (default: True)
'''
from polyglot_tokenizer import Tokenizer
from tagger.src.algorithm.CRF import CRF
from sklearn.model_selection import train_test_split
from time import time
import numpy as np
import pickle
import logging
import argparse
import tagger.utils.writer as data_writer
import tagger.src.generate_features as generate_features
import tagger.src.data_reader as data_reader
import lstmcrf
from lstmcrf.utils import load_data_and_labels
from lstmcrf.wrapper import Sequence
import sys
import os.path as path
import os
sys.path.append(path.dirname(path.abspath(__file__)))
logger = logging.getLogger(__name__)
def get_args():
''' This function parses and return arguments passed in'''
parser = argparse.ArgumentParser(description='Scorer pipeline')
parser.add_argument("-p", '--pipeline_type', type=str, required=True,
help='Pipeline Type (train, test, predict)')
parser.add_argument("-l", "--language", dest="language", type=str, metavar='<str>', required=True,
help="Language of the dataset: te (telugu), hi (hindi), ta (tamil), ka (kannada), pu (pubjabi), mr (Marathi), be (Bengali), ur (Urdu), ml (Malayalam)")
parser.add_argument("-t", "--tag_type", dest="tag_type", type=str, metavar='<str>', required=True,
help="Tag type: pos, chunk, parse, NER")
parser.add_argument("-m", "--model_type", dest="model_type", type=str, metavar='<str>', required=True,
help="Model type (crf|hmm|cnn|lstm:) (default=crf)")
parser.add_argument("-e", "--encoding", dest="encoding", type=str, metavar='<str>', required=False,
help="Encoding of the data (utf8, wx)",
default="utf8")
parser.add_argument("-f", "--data_format", dest="data_format", type=str, metavar='<str>', required=True,
help="Data format (ssf, tnt, txt)")
parser.add_argument("-i", "--input_file", dest="test_data", type=str, metavar='<str>', required=False,
help="Test data path ex: data/test/te/test.txt")
parser.add_argument("-s", "--sent_split", dest="sent_split", type=str, metavar='<str>', required=False,
help="Sentence Split ex: True or False",
default=True)
parser.add_argument("-o", "--output_file", dest="output_path", type=str, metavar='<str>',
help="The path to the output file",
default=path.join(path.dirname(path.abspath(__file__)), "outputs", "output_file"))
return parser.parse_args()
def pipeline():
curr_dir = path.dirname(path.abspath(__file__))
args = get_args()
output_dir = path.join(path.dirname(path.abspath(__file__)), "outputs")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data_writer.set_logger(args.model_type, output_dir)
if True:
model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, args.tag_type, args.encoding)
if args.model_type == "lstm":
if args.tag_type == "pos":
model_path = "%s/models/%s/lstm/" % (curr_dir, args.language)
elif args.tag_type == "chunk":
model_path = "%s/models/%s/lstm/chunk/" % (
curr_dir, args.language)
elif args.tag_type == "ner":
model_path = "%s/models/%s/lstm/ner/" % (
curr_dir, args.language)
if args.tag_type != "parse":
if not os.path.exists(model_path):
os.makedirs(model_path)
if args.pipeline_type == 'train':
logger.info('Start Training#')
logger.info('Tagger model type: %s' % (args.model_type))
data_path = "%s/data/train/%s/train.%s.%s" % (
curr_dir, args.language, args.encoding, args.data_format)
if args.tag_type == "ner":
data_path = data_path+".ner"
data_sents = data_reader.load_data(
args.data_format, data_path, args.language)
no_words = sum(len(sent) for sent in data_sents)
logger.info("No. of words: %d" % (no_words))
logger.info("No. of sents: %d" % (len(data_sents)))
X_data = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in data_sents]
y_data = [generate_features.sent2labels(
s, args.tag_type) for s in data_sents]
X_train, X_test, y_train, y_test = train_test_split(
X_data, y_data, test_size=0.10, random_state=42)
print('Train data size:', len(X_train), len(y_train))
print('Test data size:', len(X_test), len(y_test))
print('Lang:', args.language)
print('Train data: ', data_path)
print('Model Path: ', model_path)
if args.model_type == "crf":
tagger = CRF(model_path)
tagger.train(X_train, y_train)
tagger.load_model()
tagger.test(X_test, y_test)
elif args.model_type == "lstm":
x_data, y_data1, y_data2 = load_data_and_labels(data_path)
if args.tag_type == "pos":
x_train, x_test, y_train1, y_test1 = train_test_split(
x_data, y_data1, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train1, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test1))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.tag_type == "chunk":
x_train, x_test, y_train2, y_test2 = train_test_split(
x_data, y_data2, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train2, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test2))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.tag_type == "ner":
x_train, x_test, y_train1, y_test1 = train_test_split(
x_data, y_data1, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train1, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test1))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.pipeline_type == "test":
if args.model_type == "crf":
test_data_path = "%s/%s" % (curr_dir, args.test_data)
test_sents = data_reader.load_data(
args.data_format, test_data_path, args.language, tokenize_text=False)
X_test = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in test_sents]
y_test = [generate_features.sent2labels(
s, args.tag_type) for s in test_sents]
tagger = CRF(model_path)
tagger.load_model()
tagger.test(X_test, y_test)
if args.pipeline_type == "predict":
test_data_path = "%s" % (args.test_data)
test_sents = data_reader.load_data(
args.data_format, test_data_path, args.language, tokenize_text=True, split_sent=args.sent_split)
if args.tag_type == "parse":
# Pos tagging
X_test = [generate_features.sent2features(
s, "pos", args.model_type) for s in test_sents]
tag_model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, "pos", args.encoding)
chunk_model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, "chunk", args.encoding)
if args.model_type == "crf":
tagger = CRF(tag_model_path)
tagger.load_model()
y_pos = tagger.predict(X_test)
test_sents_pos = generate_features.append_tags(
test_sents, "pos", y_pos)
X_test = [generate_features.sent2features(
s, "chunk", args.model_type) for s in test_sents_pos]
chunker = CRF(chunk_model_path)
chunker.load_model()
y_chunk = chunker.predict(X_test)
test_fname = path.basename(test_data_path)
output_file = "%s/%s.parse" % (output_dir, test_fname)
data_writer.write_anno_to_file(
output_file, test_sents_pos, y_chunk, "chunk")
logger.info("Output in: %s" % output_file)
data_writer.write_to_screen(output_file)
else:
X_test = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in test_sents]
if args.model_type == "crf":
tagger = CRF(model_path)
tagger.load_model()
y_pred = tagger.predict(X_test)
data_writer.write_anno_to_file(
args.output_path, test_sents, y_pred, args.tag_type)
data_writer.write_to_screen(args.output_path)
logger.info("Output in: %s" % args.output_path)
if args.model_type == "lstm":
model = Sequence().load(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
f = open(args.test_data, "r")
sent = f.read()
tok = Tokenizer(lang=args.language, split_sen=True)
tokenized_sents = tok.tokenize(sent)
for tokens in tokenized_sents:
for token in tokens:
sent = sent + " " + token
sent = sent.strip()
print(model.analyze(sent))
if __name__ == '__main__':
pipeline()
| '''
Pipeline code for Indic Tagger
Example:
python pipeline.py -p train -o outputs -l tel -t chunk -m crf -i data/test/tel/test.utf.conll.chunk -e utf -f conll
-p, --pipeline_type - train, test, predict
-l, --language - te, hi, ta, pu, mr, be, ur, ka, ml
-t, --tag_type - pos, chunk
-m, --model_type - crf, hmm, cnn, lstm
-f, --data_format - ssf, tnt, text
-e, --encoding - utf8, wx (default: utf8)
-i, --input_file - path to the test data file
-o, --output_file - path to the output file
-s, --sent_split - split the sentences in the test data (default: True)
'''
from polyglot_tokenizer import Tokenizer
from tagger.src.algorithm.CRF import CRF
from sklearn.model_selection import train_test_split
from time import time
import numpy as np
import pickle
import logging
import argparse
import tagger.utils.writer as data_writer
import tagger.src.generate_features as generate_features
import tagger.src.data_reader as data_reader
import lstmcrf
from lstmcrf.utils import load_data_and_labels
from lstmcrf.wrapper import Sequence
import sys
import os.path as path
import os
sys.path.append(path.dirname(path.abspath(__file__)))
logger = logging.getLogger(__name__)
def get_args():
''' This function parses and return arguments passed in'''
parser = argparse.ArgumentParser(description='Scorer pipeline')
parser.add_argument("-p", '--pipeline_type', type=str, required=True,
help='Pipeline Type (train, test, predict)')
parser.add_argument("-l", "--language", dest="language", type=str, metavar='<str>', required=True,
help="Language of the dataset: te (telugu), hi (hindi), ta (tamil), ka (kannada), pu (pubjabi), mr (Marathi), be (Bengali), ur (Urdu), ml (Malayalam)")
parser.add_argument("-t", "--tag_type", dest="tag_type", type=str, metavar='<str>', required=True,
help="Tag type: pos, chunk, parse, NER")
parser.add_argument("-m", "--model_type", dest="model_type", type=str, metavar='<str>', required=True,
help="Model type (crf|hmm|cnn|lstm:) (default=crf)")
parser.add_argument("-e", "--encoding", dest="encoding", type=str, metavar='<str>', required=False,
help="Encoding of the data (utf8, wx)",
default="utf8")
parser.add_argument("-f", "--data_format", dest="data_format", type=str, metavar='<str>', required=True,
help="Data format (ssf, tnt, txt)")
parser.add_argument("-i", "--input_file", dest="test_data", type=str, metavar='<str>', required=False,
help="Test data path ex: data/test/te/test.txt")
parser.add_argument("-s", "--sent_split", dest="sent_split", type=str, metavar='<str>', required=False,
help="Sentence Split ex: True or False",
default=True)
parser.add_argument("-o", "--output_file", dest="output_path", type=str, metavar='<str>',
help="The path to the output file",
default=path.join(path.dirname(path.abspath(__file__)), "outputs", "output_file"))
return parser.parse_args()
def pipeline():
curr_dir = path.dirname(path.abspath(__file__))
args = get_args()
output_dir = path.join(path.dirname(path.abspath(__file__)), "outputs")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data_writer.set_logger(args.model_type, output_dir)
if True:
model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, args.tag_type, args.encoding)
if args.model_type == "lstm":
if args.tag_type == "pos":
model_path = "%s/models/%s/lstm/" % (curr_dir, args.language)
elif args.tag_type == "chunk":
model_path = "%s/models/%s/lstm/chunk/" % (
curr_dir, args.language)
elif args.tag_type == "ner":
model_path = "%s/models/%s/lstm/ner/" % (
curr_dir, args.language)
if args.tag_type != "parse":
if not os.path.exists(model_path):
os.makedirs(model_path)
if args.pipeline_type == 'train':
logger.info('Start Training#')
logger.info('Tagger model type: %s' % (args.model_type))
data_path = "%s/data/train/%s/train.%s.%s" % (
curr_dir, args.language, args.encoding, args.data_format)
if args.tag_type == "ner":
data_path = data_path+".ner"
data_sents = data_reader.load_data(
args.data_format, data_path, args.language)
no_words = sum(len(sent) for sent in data_sents)
logger.info("No. of words: %d" % (no_words))
logger.info("No. of sents: %d" % (len(data_sents)))
X_data = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in data_sents]
y_data = [generate_features.sent2labels(
s, args.tag_type) for s in data_sents]
X_train, X_test, y_train, y_test = train_test_split(
X_data, y_data, test_size=0.10, random_state=42)
print('Train data size:', len(X_train), len(y_train))
print('Test data size:', len(X_test), len(y_test))
print('Lang:', args.language)
print('Train data: ', data_path)
print('Model Path: ', model_path)
if args.model_type == "crf":
tagger = CRF(model_path)
tagger.train(X_train, y_train)
tagger.load_model()
tagger.test(X_test, y_test)
elif args.model_type == "lstm":
x_data, y_data1, y_data2 = load_data_and_labels(data_path)
if args.tag_type == "pos":
x_train, x_test, y_train1, y_test1 = train_test_split(
x_data, y_data1, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train1, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test1))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.tag_type == "chunk":
x_train, x_test, y_train2, y_test2 = train_test_split(
x_data, y_data2, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train2, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test2))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.tag_type == "ner":
x_train, x_test, y_train1, y_test1 = train_test_split(
x_data, y_data1, test_size=0.10, random_state=42) # Split the data into train and test
model = Sequence() # Intialize BiLSTM model
# Train the model for 10 echos
model.fit(x_train, y_train1, epochs=10)
# Run the model on test data
print(model.score(x_test, y_test1))
model.save(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
if args.pipeline_type == "test":
if args.model_type == "crf":
test_data_path = "%s/%s" % (curr_dir, args.test_data)
test_sents = data_reader.load_data(
args.data_format, test_data_path, args.language, tokenize_text=False)
X_test = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in test_sents]
y_test = [generate_features.sent2labels(
s, args.tag_type) for s in test_sents]
tagger = CRF(model_path)
tagger.load_model()
tagger.test(X_test, y_test)
if args.pipeline_type == "predict":
test_data_path = "%s" % (args.test_data)
test_sents = data_reader.load_data(
args.data_format, test_data_path, args.language, tokenize_text=True, split_sent=args.sent_split)
if args.tag_type == "parse":
# Pos tagging
X_test = [generate_features.sent2features(
s, "pos", args.model_type) for s in test_sents]
tag_model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, "pos", args.encoding)
chunk_model_path = "%s/models/%s/%s.%s.%s.model" % (
curr_dir, args.language, args.model_type, "chunk", args.encoding)
if args.model_type == "crf":
tagger = CRF(tag_model_path)
tagger.load_model()
y_pos = tagger.predict(X_test)
test_sents_pos = generate_features.append_tags(
test_sents, "pos", y_pos)
X_test = [generate_features.sent2features(
s, "chunk", args.model_type) for s in test_sents_pos]
chunker = CRF(chunk_model_path)
chunker.load_model()
y_chunk = chunker.predict(X_test)
test_fname = path.basename(test_data_path)
output_file = "%s/%s.parse" % (output_dir, test_fname)
data_writer.write_anno_to_file(
output_file, test_sents_pos, y_chunk, "chunk")
logger.info("Output in: %s" % output_file)
data_writer.write_to_screen(output_file)
else:
X_test = [generate_features.sent2features(
s, args.tag_type, args.model_type) for s in test_sents]
if args.model_type == "crf":
tagger = CRF(model_path)
tagger.load_model()
y_pred = tagger.predict(X_test)
data_writer.write_anno_to_file(
args.output_path, test_sents, y_pred, args.tag_type)
data_writer.write_to_screen(args.output_path)
logger.info("Output in: %s" % args.output_path)
if args.model_type == "lstm":
model = Sequence().load(model_path+"/weights.h5", model_path +
"/params.json", model_path+"/preprocessor.json")
f = open(args.test_data, "r")
sent = f.read()
tok = Tokenizer(lang=args.language, split_sen=True)
tokenized_sents = tok.tokenize(sent)
for tokens in tokenized_sents:
for token in tokens:
sent = sent + " " + token
sent = sent.strip()
print(model.analyze(sent))
if __name__ == '__main__':
pipeline() | en | 0.492817 | Pipeline code for Indic Tagger Example: python pipeline.py -p train -o outputs -l tel -t chunk -m crf -i data/test/tel/test.utf.conll.chunk -e utf -f conll -p, --pipeline_type - train, test, predict -l, --language - te, hi, ta, pu, mr, be, ur, ka, ml -t, --tag_type - pos, chunk -m, --model_type - crf, hmm, cnn, lstm -f, --data_format - ssf, tnt, text -e, --encoding - utf8, wx (default: utf8) -i, --input_file - path to the test data file -o, --output_file - path to the output file -s, --sent_split - split the sentences in the test data (default: True) This function parses and return arguments passed in #') # Split the data into train and test # Intialize BiLSTM model # Train the model for 10 echos # Run the model on test data # Split the data into train and test # Intialize BiLSTM model # Train the model for 10 echos # Run the model on test data # Split the data into train and test # Intialize BiLSTM model # Train the model for 10 echos # Run the model on test data # Pos tagging | 2.906412 | 3 |
src/htmlformatter.py | gbenetz/pvcheck | 3 | 6616930 | """Formatter producing HTML data"""
import formatter
from jsonformatter import JSONFormatter
import i18n
# Shortcut for the i18n translation function applied to all user-visible strings.
_ = i18n.translate
_trans_dic = {"\n": "<br>", "–": "–", "—": "—", "&": "&", ">": ">", "<": "<"}
_trantab = str.maketrans(_trans_dic)
section_summary = {}
total_summary = {"ok": 0, "warning": 0, "error": 0}
class HTMLFormatter(JSONFormatter):
    """Formatter producing a standalone HTML report on stdout.

    Reuses JSONFormatter's collection of test results (``self._tests``)
    and, at end of session, renders a result table, a per-test detail
    section and a final summary table.
    """

    def end_session(self):
        """Emit the complete HTML report once all tests have been collected."""
        header = self._tests_table_header_builder()
        # Initialise one ok/warning/error counter per section; "TEST" is the
        # test-name column of the table, not a section.
        for element in header:
            if element != "TEST":
                section_summary[element] = {"ok": 0, "warning": 0, "error": 0}
        self.print_html_header()
        self.print_tests_table()
        self.print_tests_information()
        self.print_summary_table()
        print(" </body>")
        print('</html>')

    @staticmethod
    def print_html_header():
        """Print header and style."""
        # The whole <head>, including the embedded CSS, is emitted verbatim.
        print("""<!DOCTYPE html>
<html>
 <head>
 <meta charset="utf-8">
 <title>PvCheck Result</title>
 <style rel="stylesheet" type="text/css">
 html {
 font-family: sans-serif;
 }
 h1 {
 overflow: hidden;
 text-align: center;
 }
 h1:before,
 h1:after {
 background-color: #333;
 content: "";
 display: inline-block;
 height: 1px;
 position: relative;
 vertical-align: middle;
 width: 50%;
 }
 h1:before {
 right: 0.5em;
 margin-left: -50%;
 }
 h1:after {
 left: 0.5em;
 margin-right: -50%;
 }
 table {
 border-collapse: collapse;
 border: 2px solid rgb(200,200,200);
 letter-spacing: 1px;
 font-size: 0.8rem;
 }
 td, th {
 border: 1px solid rgb(190,190,190);
 padding: 10px 20px;
 }
 th {
 background-color: rgb(235,235,235);
 }
 td {
 text-align: center;
 }
 tr:nth-child(even) td {
 background-color: rgb(250,250,250);
 }
 tr:nth-child(odd) td {
 background-color: rgb(245,245,245);
 }
 caption {
 padding: 10px;
 }
 </style>
 </head>
 <body>""")
        # Page title plus a small navigation list linking to the two anchors
        # created further down ("#summary" and "#info").
        print(""" <h1>PvCheck</h1>
 <ul>
 <li><a href="#summary">{}</a></li>
 <li><a href="#info">info</a></li>
 </ul>""".format(_("summary")))

    def print_tests_table(self):
        """Print a table containing tests' results."""
        print(' <h2 align="center">{}</h2>'.format(_("Test Result")))
        print(' <table align="center">')
        print(' <tr>')
        tests_table_header = self._tests_table_header_builder()
        self._print_tests_table_header(tests_table_header)
        self._print_tests_table_rows(tests_table_header)
        print(" </table>")
        print(" <br>")

    def _tests_table_header_builder(self):
        """Build the results-table header.

        Returns a list starting with "TEST" (only when more than one test
        was run) followed by every section name seen in any test, in
        first-appearance order.
        """
        if len(self._tests) > 1:
            header = ["TEST"]
        else:
            header = []
        for test in self._tests:
            section_names = list(test["sections"].keys())
            for name in section_names:
                if name not in header:
                    header.append(name)
        return header

    @staticmethod
    def _print_tests_table_header(tests_table_header):
        """Emit one HTML-escaped <th> per header entry, then close the row."""
        for element in tests_table_header:
            print(" <th>{}</th>".format(element.translate(_trantab)))
        print(" </tr>")

    def _print_tests_table_rows(self, tests_table_header):
        """Emit one table row per test.

        The test-name column exists only when several tests were run, in
        which case the section statuses start at index 1 of each row.
        """
        if len(self._tests) > 1:
            print_test_name = True
            first_element_index = 1
        else:
            print_test_name = False
            first_element_index = 0
        for test in self._tests:
            row = self._test_table_row_builder(test, tests_table_header)
            self._print_test_table_row(print_test_name, first_element_index, row)

    def _test_table_row_builder(self, test, header):
        """Collect the per-section status strings of *test* in header order.

        Sections this test does not contain yield an empty cell.
        """
        if len(self._tests) > 1:
            row = [test["title"]]
        else:
            row = []
        for element in header:
            if element != "TEST":
                try:
                    row.append(test["sections"][element]["section status"])
                except KeyError:
                    row.append("")
        return row

    def _print_test_table_row(self, print_test_name, first_element_index, row):
        """Print one result row; the test name links to its "#<title>" detail
        anchor emitted by _print_test_name()."""
        print(" <tr>")
        if print_test_name:
            print(' <td><a href="#{}">{}</a></td>'.format(row[0].translate(_trantab),
                                                          row[0].translate(_trantab)))
        for element in row[first_element_index:]:
            self._print_section_status(element)
        print(" </tr>")

    @staticmethod
    def _print_section_status(section):
        """Print one colour-coded status cell: green ok, red error or
        execution failure, orange missing, black anything else."""
        if section == "ok":
            color = "green"
        elif section == "error":
            color = "red"
        elif section == "exec_error":
            # Shown to the user as a readable phrase instead of the raw tag.
            section = "execution failed"
            color = "red"
        elif section == "missing":
            color = "orange"
        else:
            color = "black"
        print(' <td><font color="{}">{}</font></td>'.format(color, section.translate(_trantab)))

    def print_tests_information(self):
        """Print a section for each test containing results' information."""
        print(' <h2><a name="info">Info</a></h2>')
        # A global error message replaces the whole detail section.
        # NOTE(review): the process exits with status 0 even though an error
        # was reported -- confirm this is intended.
        if self._tests[0]["error_message"] != "ok":
            self._print_error_message()
            exit(0)
        header = self._tests_table_header_builder()
        for test in self._tests:
            self._print_test_information(test, header)
        print(" <hr>")

    def _print_error_message(self):
        """Print the global error message and close the document early."""
        print(" <hr>")
        print(" <p><font color='red'>{}</font></p>".format(self._tests[0]["error_message"]))
        print(" <hr>")
        print(" </body>")
        print('</html>')

    def _print_test_information(self, test, sections):
        """Print one test's detail block: name (multi-test runs only),
        command line, stdin input, input file, and a status line per section."""
        print(" <hr>")
        if len(self._tests) > 1:
            self._print_test_name(test)
        self._print_command_line(test)
        if len(test["input_text"]) > 0:
            self._print_input_text(test)
        if test["input_file_name"] is not None:
            self._print_input_file_name(test)
        for section in sections:
            if section != "TEST":
                self._print_section_status_message(test, section)
        print(' </p>')

    @staticmethod
    def _print_test_name(test):
        """Print the test title together with its "#<title>" link anchor."""
        print(' <p><a name="{}"><b>TEST:</b> {}</a><br>'.format(test["title"].translate(_trantab),
                                                                test["title"].translate(_trantab)))

    @staticmethod
    def _print_command_line(test):
        """Print the command line the test program was invoked with."""
        command_line = ""
        for element in test["command_line"]:
            command_line += " " + element
        print(' <b>{}:</b> {}<br>'.format(_("COMMAND LINE"), command_line.translate(_trantab)))

    @staticmethod
    def _print_input_text(test):
        """Print the input that was fed to the program on stdin."""
        print(' <b>INPUT:</b> {}<br>'.format(test["input_text"].translate(_trantab)))

    @staticmethod
    def _print_input_file_name(test):
        """Print the input file name (a localized placeholder for the
        temporary-file marker) followed by the file content."""
        if test["input_file_name"] == "<temp.file>":
            input_file_name = _("TEMPORARY FILE")
        else:
            input_file_name = test["input_file_name"]
        print(' <b>{}:</b><br> {}<br>'.format(input_file_name.translate(_trantab),
                                              test["file_text"].translate(_trantab)))

    def _print_section_status_message(self, test, section):
        """Print the status line(s) of one section and update the
        module-level summary counters consumed by print_summary_table().

        NOTE(review): *sections* is the union over all tests, so
        ``test["sections"][section]`` raises KeyError when this test lacks
        the section -- confirm all tests share the same sections.
        """
        if test["sections"][section]["section status"] == "ok":
            section_summary[section]["ok"] += 1
            total_summary["ok"] += 1
            self._print_section_ok_message(section)
        elif test["sections"][section]["section status"] == "error":
            section_summary[section]["error"] += 1
            total_summary["error"] += 1
            # One message per mismatching output line.
            for wrong_line in test["sections"][section]['wrong_lines']:
                self._print_section_error_message(wrong_line, section)
        elif test["sections"][section]["section status"] == "exec_error":
            section_summary[section]["error"] += 1
            total_summary["error"] += 1
            self._print_section_exec_error_message(section)
        else:
            # Any remaining status (e.g. "missing") is counted as a warning.
            section_summary[section]["warning"] += 1
            total_summary["warning"] += 1
            self._print_section_warning_message(section)

    @staticmethod
    def _print_section_ok_message(section):
        """Print a green OK line for *section*."""
        msg = "OK"
        color = "green"
        print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))

    @staticmethod
    def _print_section_error_message(wrong_line, section):
        """Print one red error line for a (line index, got, expected) triple:
        extra line (no expected), missing line (no got), or wrong content."""
        if wrong_line[2] is None:
            # No expected text: the program produced an extra line.
            msg = _("unexpected line '%s'") % (wrong_line[0] + 1)
        elif wrong_line[1] is None:
            # No actual text: an expected line is missing from the output.
            msg = _("missing line (expected '%s')") % (wrong_line[2])
        else:
            out_string = formatter.handle_non_printable_chars(wrong_line[1])
            out_string = out_string.translate(_trantab)
            # NOTE(review): the expected text (wrong_line[2]) is not passed
            # through _trantab, unlike the actual output -- confirm.
            msg = _("line %d is wrong (expected '%s', got '%s')") % (wrong_line[0] + 1, wrong_line[2],
                                                                     out_string)
        color = "red"
        print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))

    @staticmethod
    def _print_section_exec_error_message(section):
        """Print a red "execution failed" line for *section*."""
        msg = _("execution failed")
        color = "red"
        print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))

    @staticmethod
    def _print_section_warning_message(section):
        """Print an orange "missing section" line for *section*."""
        msg = _("missing section")
        color = "orange"
        print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))

    def print_summary_table(self):
        """Print a test summary table."""
        self._print_summary_table_header()
        header = self._tests_table_header_builder()
        for section in header:
            if section != "TEST":
                self._print_section_summary_row(section)
        self._print_total_summary_row()
        print(" </table>")

    @staticmethod
    def _print_summary_table_header():
        """Open the summary table (with its "#summary" anchor) and print the
        colour-coded column headers."""
        print(""" <a name="summary"><h2 align="center">{}</h2>
 <table align="center">
 <tr>
 <th> </th>
 <th><font color= "green">{}</font></th>
 <th><font color = "orange">{}</font></th>
 <th><font color = "red">{}</font></th>
 </tr>""".format(_("Summary"), _("Successes"), _("Warnings"), _("Errors")))

    @staticmethod
    def _print_section_summary_row(section):
        """Print the ok/warning/error counters of one section."""
        print(" <tr>")
        print(' <td>{}</td>'.format(section))
        print(' <td>{}</td>'.format(section_summary[section]["ok"]))
        print(' <td>{}</td>'.format(section_summary[section]["warning"]))
        print(' <td>{}</td>'.format(section_summary[section]["error"]))
        print(" </tr>")

    @staticmethod
    def _print_total_summary_row():
        """Print the grand-total counters row."""
        print(" <tr>")
        print(' <td>{}</td>'.format(_("TOTAL")))
        print(' <td>{}</td>'.format(total_summary["ok"]))
        print(' <td>{}</td>'.format(total_summary["warning"]))
        print(' <td>{}</td>'.format(total_summary["error"]))
        print(" </tr>")
| """Formatter producing HTML data"""
import formatter
from jsonformatter import JSONFormatter
import i18n
_ = i18n.translate
_trans_dic = {"\n": "<br>", "–": "–", "—": "—", "&": "&", ">": ">", "<": "<"}
_trantab = str.maketrans(_trans_dic)
section_summary = {}
total_summary = {"ok": 0, "warning": 0, "error": 0}
class HTMLFormatter(JSONFormatter):
def end_session(self):
header = self._tests_table_header_builder()
for element in header:
if element != "TEST":
section_summary[element] = {"ok": 0, "warning": 0, "error": 0}
self.print_html_header()
self.print_tests_table()
self.print_tests_information()
self.print_summary_table()
print(" </body>")
print('</html>')
@staticmethod
def print_html_header():
"""Print header and style."""
print("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>PvCheck Result</title>
<style rel="stylesheet" type="text/css">
html {
font-family: sans-serif;
}
h1 {
overflow: hidden;
text-align: center;
}
h1:before,
h1:after {
background-color: #333;
content: "";
display: inline-block;
height: 1px;
position: relative;
vertical-align: middle;
width: 50%;
}
h1:before {
right: 0.5em;
margin-left: -50%;
}
h1:after {
left: 0.5em;
margin-right: -50%;
}
table {
border-collapse: collapse;
border: 2px solid rgb(200,200,200);
letter-spacing: 1px;
font-size: 0.8rem;
}
td, th {
border: 1px solid rgb(190,190,190);
padding: 10px 20px;
}
th {
background-color: rgb(235,235,235);
}
td {
text-align: center;
}
tr:nth-child(even) td {
background-color: rgb(250,250,250);
}
tr:nth-child(odd) td {
background-color: rgb(245,245,245);
}
caption {
padding: 10px;
}
</style>
</head>
<body>""")
print(""" <h1>PvCheck</h1>
<ul>
<li><a href="#summary">{}</a></li>
<li><a href="#info">info</a></li>
</ul>""".format(_("summary")))
def print_tests_table(self):
"""Print a table containing tests' results."""
print(' <h2 align="center">{}</h2>'.format(_("Test Result")))
print(' <table align="center">')
print(' <tr>')
tests_table_header = self._tests_table_header_builder()
self._print_tests_table_header(tests_table_header)
self._print_tests_table_rows(tests_table_header)
print(" </table>")
print(" <br>")
def _tests_table_header_builder(self):
if len(self._tests) > 1:
header = ["TEST"]
else:
header = []
for test in self._tests:
section_names = list(test["sections"].keys())
for name in section_names:
if name not in header:
header.append(name)
return header
@staticmethod
def _print_tests_table_header(tests_table_header):
for element in tests_table_header:
print(" <th>{}</th>".format(element.translate(_trantab)))
print(" </tr>")
def _print_tests_table_rows(self, tests_table_header):
if len(self._tests) > 1:
print_test_name = True
first_element_index = 1
else:
print_test_name = False
first_element_index = 0
for test in self._tests:
row = self._test_table_row_builder(test, tests_table_header)
self._print_test_table_row(print_test_name, first_element_index, row)
def _test_table_row_builder(self, test, header):
if len(self._tests) > 1:
row = [test["title"]]
else:
row = []
for element in header:
if element != "TEST":
try:
row.append(test["sections"][element]["section status"])
except KeyError:
row.append("")
return row
def _print_test_table_row(self, print_test_name, first_element_index, row):
print(" <tr>")
if print_test_name:
print(' <td><a href="#{}">{}</a></td>'.format(row[0].translate(_trantab),
row[0].translate(_trantab)))
for element in row[first_element_index:]:
self._print_section_status(element)
print(" </tr>")
@staticmethod
def _print_section_status(section):
if section == "ok":
color = "green"
elif section == "error":
color = "red"
elif section == "exec_error":
section = "execution failed"
color = "red"
elif section == "missing":
color = "orange"
else:
color = "black"
print(' <td><font color="{}">{}</font></td>'.format(color, section.translate(_trantab)))
def print_tests_information(self):
"""Print a section for each test containing results' information."""
print(' <h2><a name="info">Info</a></h2>')
if self._tests[0]["error_message"] != "ok":
self._print_error_message()
exit(0)
header = self._tests_table_header_builder()
for test in self._tests:
self._print_test_information(test, header)
print(" <hr>")
def _print_error_message(self):
print(" <hr>")
print(" <p><font color='red'>{}</font></p>".format(self._tests[0]["error_message"]))
print(" <hr>")
print(" </body>")
print('</html>')
def _print_test_information(self, test, sections):
print(" <hr>")
if len(self._tests) > 1:
self._print_test_name(test)
self._print_command_line(test)
if len(test["input_text"]) > 0:
self._print_input_text(test)
if test["input_file_name"] is not None:
self._print_input_file_name(test)
for section in sections:
if section != "TEST":
self._print_section_status_message(test, section)
print(' </p>')
@staticmethod
def _print_test_name(test):
print(' <p><a name="{}"><b>TEST:</b> {}</a><br>'.format(test["title"].translate(_trantab),
test["title"].translate(_trantab)))
@staticmethod
def _print_command_line(test):
command_line = ""
for element in test["command_line"]:
command_line += " " + element
print(' <b>{}:</b> {}<br>'.format(_("COMMAND LINE"), command_line.translate(_trantab)))
@staticmethod
def _print_input_text(test):
print(' <b>INPUT:</b> {}<br>'.format(test["input_text"].translate(_trantab)))
@staticmethod
def _print_input_file_name(test):
if test["input_file_name"] == "<temp.file>":
input_file_name = _("TEMPORARY FILE")
else:
input_file_name = test["input_file_name"]
print(' <b>{}:</b><br> {}<br>'.format(input_file_name.translate(_trantab),
test["file_text"].translate(_trantab)))
def _print_section_status_message(self, test, section):
if test["sections"][section]["section status"] == "ok":
section_summary[section]["ok"] += 1
total_summary["ok"] += 1
self._print_section_ok_message(section)
elif test["sections"][section]["section status"] == "error":
section_summary[section]["error"] += 1
total_summary["error"] += 1
for wrong_line in test["sections"][section]['wrong_lines']:
self._print_section_error_message(wrong_line, section)
elif test["sections"][section]["section status"] == "exec_error":
section_summary[section]["error"] += 1
total_summary["error"] += 1
self._print_section_exec_error_message(section)
else:
section_summary[section]["warning"] += 1
total_summary["warning"] += 1
self._print_section_warning_message(section)
@staticmethod
def _print_section_ok_message(section):
msg = "OK"
color = "green"
print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))
@staticmethod
def _print_section_error_message(wrong_line, section):
if wrong_line[2] is None:
msg = _("unexpected line '%s'") % (wrong_line[0] + 1)
elif wrong_line[1] is None:
msg = _("missing line (expected '%s')") % (wrong_line[2])
else:
out_string = formatter.handle_non_printable_chars(wrong_line[1])
out_string = out_string.translate(_trantab)
msg = _("line %d is wrong (expected '%s', got '%s')") % (wrong_line[0] + 1, wrong_line[2],
out_string)
color = "red"
print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))
@staticmethod
def _print_section_exec_error_message(section):
msg = _("execution failed")
color = "red"
print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))
@staticmethod
def _print_section_warning_message(section):
msg = _("missing section")
color = "orange"
print(' <b><font color="{}">{}: </b>{}</font><br>'.format(color, section, msg))
def print_summary_table(self):
"""Print a test summary table."""
self._print_summary_table_header()
header = self._tests_table_header_builder()
for section in header:
if section != "TEST":
self._print_section_summary_row(section)
self._print_total_summary_row()
print(" </table>")
    @staticmethod
    def _print_summary_table_header():
        """Open the summary <table> and emit its translated header row.

        The <table> element is closed later by print_summary_table().
        """
        # The multi-line template is kept verbatim: its leading whitespace
        # is part of the generated HTML's indentation.
        print("""    <a name="summary"><h2 align="center">{}</h2>
    <table align="center">
        <tr>
            <th> </th>
            <th><font color= "green">{}</font></th>
            <th><font color = "orange">{}</font></th>
            <th><font color = "red">{}</font></th>
        </tr>""".format(_("Summary"), _("Successes"), _("Warnings"), _("Errors")))
@staticmethod
def _print_section_summary_row(section):
print(" <tr>")
print(' <td>{}</td>'.format(section))
print(' <td>{}</td>'.format(section_summary[section]["ok"]))
print(' <td>{}</td>'.format(section_summary[section]["warning"]))
print(' <td>{}</td>'.format(section_summary[section]["error"]))
print(" </tr>")
@staticmethod
def _print_total_summary_row():
print(" <tr>")
print(' <td>{}</td>'.format(_("TOTAL")))
print(' <td>{}</td>'.format(total_summary["ok"]))
print(' <td>{}</td>'.format(total_summary["warning"]))
print(' <td>{}</td>'.format(total_summary["error"]))
print(" </tr>")
| en | 0.150592 | Formatter producing HTML data Print header and style. <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <title>PvCheck Result</title> <style rel="stylesheet" type="text/css"> html { font-family: sans-serif; } h1 { overflow: hidden; text-align: center; } h1:before, h1:after { background-color: #333; content: ""; display: inline-block; height: 1px; position: relative; vertical-align: middle; width: 50%; } h1:before { right: 0.5em; margin-left: -50%; } h1:after { left: 0.5em; margin-right: -50%; } table { border-collapse: collapse; border: 2px solid rgb(200,200,200); letter-spacing: 1px; font-size: 0.8rem; } td, th { border: 1px solid rgb(190,190,190); padding: 10px 20px; } th { background-color: rgb(235,235,235); } td { text-align: center; } tr:nth-child(even) td { background-color: rgb(250,250,250); } tr:nth-child(odd) td { background-color: rgb(245,245,245); } caption { padding: 10px; } </style> </head> <body> <h1>PvCheck</h1> <ul> <li><a href="#summary">{}</a></li> <li><a href="#info">info</a></li> </ul> Print a table containing tests' results. Print a section for each test containing results' information. Print a test summary table. <a name="summary"><h2 align="center">{}</h2> <table align="center"> <tr> <th> </th> <th><font color= "green">{}</font></th> <th><font color = "orange">{}</font></th> <th><font color = "red">{}</font></th> </tr> | 2.992496 | 3 |
backend/adventures/db.py | The-Fonz/come-along | 0 | 6616931 | <reponame>The-Fonz/come-along
import sys
import asyncio
import asyncpg
import unittest
import datetime
from os import environ
import jsonschema
from ..schemas import JSON_SCHEMA_LOCATION_GPS_POINT
from ..utils import db_test_case_factory, record_to_dict, records_to_dict, convert_to_datetime, getLogger, MicroserviceDb, friendlyhash
logger = getLogger('adventures.db')
SQL_CREATE_TABLE_ADVENTURES = '''
CREATE TABLE adventures
(
id SERIAL PRIMARY KEY,
name VARCHAR(255),
created TIMESTAMP,
-- User position/content will be visible after start, until stop
start TIMESTAMP,
-- Null if open-ended
stop TIMESTAMP,
description TEXT,
-- Friendly identifier for links
url_hash CHAR(8) UNIQUE,
-- Specific tracking stuff or similar
header_includes TEXT,
-- URL to image for showing on social media etc.
preview_img VARCHAR(255)
);
CREATE TABLE adventure_logos
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
name VARCHAR(255),
url VARCHAR(255),
imgsrc VARCHAR(255)
);
CREATE TYPE user_adventure_role AS ENUM ('athlete', 'content_creator');
CREATE TABLE adventures_users_link
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
-- Is owned by other microservice so no db constraint
user_id INTEGER,
role user_adventure_role
);
-- Fast joins
CREATE INDEX adventures_users_link_adventure_id_index ON adventures_users_link(adventure_id);
CREATE INDEX adventures_users_link_user_id_index ON adventures_users_link(user_id);
'''
class Db(MicroserviceDb):
async def create_tables(self):
return await self.pool.execute(SQL_CREATE_TABLE_ADVENTURES)
async def insert_adventure(self, adv):
created = datetime.datetime.utcnow()
url_hash = await friendlyhash()
id = await self.pool.fetchval('''
INSERT INTO adventures (id, name, created, start, stop, description, url_hash) VALUES (DEFAULT, $1, $2, $3, $4, $5, $6)
RETURNING id;
''', adv['name'], created, adv.get('start'), adv.get('stop'), adv.get('description', None), url_hash)
return id
async def get_adventures(self, limit=100):
"Get adventures sorted by creation datetime"
# Order descending by stop datetime first, with open-ended first,
# then order equal datetimes (all nulls) by start date
recs = await self.pool.fetch('''
SELECT * FROM adventures ORDER BY stop DESC NULLS FIRST, start DESC LIMIT $1;
''', limit)
return await records_to_dict(recs)
async def get_adventure_by_id(self, adv_id):
"Get adventure and logos"
rec = await self.pool.fetchrow('SELECT * FROM adventures WHERE id = $1;', adv_id)
out = await record_to_dict(rec)
recs_logos = await self.pool.fetch('SELECT * FROM adventure_logos WHERE adventure_id = $1;', adv_id);
out['logos'] = await records_to_dict(recs_logos)
print(out)
return out
async def get_adventure_by_hash(self, adv_hash):
# First look up id
adv_id = await self.pool.fetchval('SELECT id FROM adventures WHERE url_hash = $1;', adv_hash)
if not adv_id:
return None
# Now get by id, to make use of any joins that get_adventure_by_id does
return await self.get_adventure_by_id(adv_id)
async def insert_adventure_user_link(self, link):
id = await self.pool.fetchval('''
INSERT INTO adventures_users_link (adventure_id, user_id, role) VALUES ($1, $2, $3) RETURNING id;
''', link['adventure_id'], link['user_id'], link.get('role', 'athlete'))
return id
async def get_adventure_user_link(self, link_id):
rec = await self.pool.fetchrow('''SELECT * FROM adventures_users_link WHERE id = $1;''', link_id)
return await record_to_dict(rec)
async def get_adventure_user_links_by_user_id(self, user_id):
recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE user_id=$1;''', user_id)
if not recs:
return None
return await records_to_dict(recs)
async def get_adventure_links(self, adv_id):
recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE adventure_id = $1;''', adv_id)
if not recs:
return None
return await records_to_dict(recs)
class AdventuresDbTestCase(db_test_case_factory(Db)):
adv = {
'name': 'TestAdventure',
'description': 'Hi there!'
}
link = {
'adventure_id': -1,
'user_id': -100
}
def test_insert_and_retrieve(self):
adv_id = self.lru(self.db.insert_adventure(self.adv))
self.assertIsInstance(adv_id, int)
adv_db = self.lru(self.db.get_adventure_by_id(adv_id))
self.assertEqual(len(adv_db['url_hash']), 8)
self.assertDictContainsSubset(self.adv, adv_db)
# Can fail if someone else inserted adventure in the meantime, but very unlikely
adv_recent = self.lru(self.db.get_adventures())[0]
self.assertDictContainsSubset(self.adv, adv_recent)
def test_link_fail(self):
self.assertRaises(asyncpg.exceptions.ForeignKeyViolationError,
self.awrap(self.db.insert_adventure_user_link), self.link)
def test_link(self):
adv_id = self.lru(self.db.insert_adventure(self.adv))
link = self.link.copy()
link['adventure_id'] = adv_id
link_id = self.lru(self.db.insert_adventure_user_link(link))
self.assertIsInstance(link_id, int)
link_retrieved = self.lru(self.db.get_adventure_user_link(link_id))
# Test default
self.assertEqual(link_retrieved['role'], 'athlete')
# Test retrieving via adventure
links = self.lru(self.db.get_adventure_links(adv_id))
self.assertEqual(len(links), 1)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--create', action='store_true',
help="Create db tables and indexes")
parser.add_argument('--test', action='store_true',
help="Test db")
args = parser.parse_args()
if args.create:
l = asyncio.get_event_loop()
db = l.run_until_complete(Db.create())
l.run_until_complete(db.create_tables())
if args.test:
# Pass only system name, ignore other args
unittest.main(verbosity=1, argv=sys.argv[:1])
| import sys
import asyncio
import asyncpg
import unittest
import datetime
from os import environ
import jsonschema
from ..schemas import JSON_SCHEMA_LOCATION_GPS_POINT
from ..utils import db_test_case_factory, record_to_dict, records_to_dict, convert_to_datetime, getLogger, MicroserviceDb, friendlyhash
logger = getLogger('adventures.db')
SQL_CREATE_TABLE_ADVENTURES = '''
CREATE TABLE adventures
(
id SERIAL PRIMARY KEY,
name VARCHAR(255),
created TIMESTAMP,
-- User position/content will be visible after start, until stop
start TIMESTAMP,
-- Null if open-ended
stop TIMESTAMP,
description TEXT,
-- Friendly identifier for links
url_hash CHAR(8) UNIQUE,
-- Specific tracking stuff or similar
header_includes TEXT,
-- URL to image for showing on social media etc.
preview_img VARCHAR(255)
);
CREATE TABLE adventure_logos
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
name VARCHAR(255),
url VARCHAR(255),
imgsrc VARCHAR(255)
);
CREATE TYPE user_adventure_role AS ENUM ('athlete', 'content_creator');
CREATE TABLE adventures_users_link
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
-- Is owned by other microservice so no db constraint
user_id INTEGER,
role user_adventure_role
);
-- Fast joins
CREATE INDEX adventures_users_link_adventure_id_index ON adventures_users_link(adventure_id);
CREATE INDEX adventures_users_link_user_id_index ON adventures_users_link(user_id);
'''
class Db(MicroserviceDb):
async def create_tables(self):
return await self.pool.execute(SQL_CREATE_TABLE_ADVENTURES)
async def insert_adventure(self, adv):
created = datetime.datetime.utcnow()
url_hash = await friendlyhash()
id = await self.pool.fetchval('''
INSERT INTO adventures (id, name, created, start, stop, description, url_hash) VALUES (DEFAULT, $1, $2, $3, $4, $5, $6)
RETURNING id;
''', adv['name'], created, adv.get('start'), adv.get('stop'), adv.get('description', None), url_hash)
return id
async def get_adventures(self, limit=100):
"Get adventures sorted by creation datetime"
# Order descending by stop datetime first, with open-ended first,
# then order equal datetimes (all nulls) by start date
recs = await self.pool.fetch('''
SELECT * FROM adventures ORDER BY stop DESC NULLS FIRST, start DESC LIMIT $1;
''', limit)
return await records_to_dict(recs)
async def get_adventure_by_id(self, adv_id):
"Get adventure and logos"
rec = await self.pool.fetchrow('SELECT * FROM adventures WHERE id = $1;', adv_id)
out = await record_to_dict(rec)
recs_logos = await self.pool.fetch('SELECT * FROM adventure_logos WHERE adventure_id = $1;', adv_id);
out['logos'] = await records_to_dict(recs_logos)
print(out)
return out
async def get_adventure_by_hash(self, adv_hash):
# First look up id
adv_id = await self.pool.fetchval('SELECT id FROM adventures WHERE url_hash = $1;', adv_hash)
if not adv_id:
return None
# Now get by id, to make use of any joins that get_adventure_by_id does
return await self.get_adventure_by_id(adv_id)
async def insert_adventure_user_link(self, link):
id = await self.pool.fetchval('''
INSERT INTO adventures_users_link (adventure_id, user_id, role) VALUES ($1, $2, $3) RETURNING id;
''', link['adventure_id'], link['user_id'], link.get('role', 'athlete'))
return id
async def get_adventure_user_link(self, link_id):
rec = await self.pool.fetchrow('''SELECT * FROM adventures_users_link WHERE id = $1;''', link_id)
return await record_to_dict(rec)
async def get_adventure_user_links_by_user_id(self, user_id):
recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE user_id=$1;''', user_id)
if not recs:
return None
return await records_to_dict(recs)
async def get_adventure_links(self, adv_id):
recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE adventure_id = $1;''', adv_id)
if not recs:
return None
return await records_to_dict(recs)
class AdventuresDbTestCase(db_test_case_factory(Db)):
adv = {
'name': 'TestAdventure',
'description': 'Hi there!'
}
link = {
'adventure_id': -1,
'user_id': -100
}
def test_insert_and_retrieve(self):
adv_id = self.lru(self.db.insert_adventure(self.adv))
self.assertIsInstance(adv_id, int)
adv_db = self.lru(self.db.get_adventure_by_id(adv_id))
self.assertEqual(len(adv_db['url_hash']), 8)
self.assertDictContainsSubset(self.adv, adv_db)
# Can fail if someone else inserted adventure in the meantime, but very unlikely
adv_recent = self.lru(self.db.get_adventures())[0]
self.assertDictContainsSubset(self.adv, adv_recent)
def test_link_fail(self):
self.assertRaises(asyncpg.exceptions.ForeignKeyViolationError,
self.awrap(self.db.insert_adventure_user_link), self.link)
def test_link(self):
adv_id = self.lru(self.db.insert_adventure(self.adv))
link = self.link.copy()
link['adventure_id'] = adv_id
link_id = self.lru(self.db.insert_adventure_user_link(link))
self.assertIsInstance(link_id, int)
link_retrieved = self.lru(self.db.get_adventure_user_link(link_id))
# Test default
self.assertEqual(link_retrieved['role'], 'athlete')
# Test retrieving via adventure
links = self.lru(self.db.get_adventure_links(adv_id))
self.assertEqual(len(links), 1)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--create', action='store_true',
help="Create db tables and indexes")
parser.add_argument('--test', action='store_true',
help="Test db")
args = parser.parse_args()
if args.create:
l = asyncio.get_event_loop()
db = l.run_until_complete(Db.create())
l.run_until_complete(db.create_tables())
if args.test:
# Pass only system name, ignore other args
unittest.main(verbosity=1, argv=sys.argv[:1]) | en | 0.503574 | CREATE TABLE adventures ( id SERIAL PRIMARY KEY, name VARCHAR(255), created TIMESTAMP, -- User position/content will be visible after start, until stop start TIMESTAMP, -- Null if open-ended stop TIMESTAMP, description TEXT, -- Friendly identifier for links url_hash CHAR(8) UNIQUE, -- Specific tracking stuff or similar header_includes TEXT, -- URL to image for showing on social media etc. preview_img VARCHAR(255) ); CREATE TABLE adventure_logos ( id SERIAL PRIMARY KEY, adventure_id INTEGER REFERENCES adventures(id), name VARCHAR(255), url VARCHAR(255), imgsrc VARCHAR(255) ); CREATE TYPE user_adventure_role AS ENUM ('athlete', 'content_creator'); CREATE TABLE adventures_users_link ( id SERIAL PRIMARY KEY, adventure_id INTEGER REFERENCES adventures(id), -- Is owned by other microservice so no db constraint user_id INTEGER, role user_adventure_role ); -- Fast joins CREATE INDEX adventures_users_link_adventure_id_index ON adventures_users_link(adventure_id); CREATE INDEX adventures_users_link_user_id_index ON adventures_users_link(user_id); INSERT INTO adventures (id, name, created, start, stop, description, url_hash) VALUES (DEFAULT, $1, $2, $3, $4, $5, $6) RETURNING id; # Order descending by stop datetime first, with open-ended first, # then order equal datetimes (all nulls) by start date SELECT * FROM adventures ORDER BY stop DESC NULLS FIRST, start DESC LIMIT $1; # First look up id # Now get by id, to make use of any joins that get_adventure_by_id does INSERT INTO adventures_users_link (adventure_id, user_id, role) VALUES ($1, $2, $3) RETURNING id; SELECT * FROM adventures_users_link WHERE id = $1; SELECT * FROM adventures_users_link WHERE user_id=$1; SELECT * FROM adventures_users_link WHERE adventure_id = $1; # Can fail if someone else inserted adventure in the meantime, but very unlikely # Test default # Test retrieving via adventure # Pass only system name, ignore other args | 2.222032 | 2 |
tests/test_triggering_measures.py | geosharma/liquepy | 4 | 6616932 | import liquepy as lq
from tests.conftest import TEST_DATA_DIR
import numpy as np
import pytest
def test_calc_lsn():
cpt = lq.field.load_mpa_cpt_file(TEST_DATA_DIR + "standard_1.csv")
bi2014 = lq.trigger.run_bi2014(cpt, pga=0.25, m_w=7.5, gwl=cpt.gwl)
epsilon = lq.trigger.calc_volumetric_strain_zhang_2004(bi2014.factor_of_safety, bi2014.q_c1n_cs)
lsn_direct = lq.trigger.calc_lsn(epsilon * 100, cpt.depth)
assert np.isclose(lsn_direct, 36.0919293469645, rtol=0.01) # v0.5.5
def test_single_calc_lpi_increments():
depth = 6.61
fos = 0.45
lpi_inc = lq.trigger.triggering_measures.calc_lpi_increments(np.ones(2) * fos, np.array([depth, depth + 0.01]))
assert np.isclose(lpi_inc[1], 0.0368, rtol=0.01), lpi_inc[1] # unvalidated test value
@pytest.mark.parametrize(
'depth, fos, lpi', [
(0.98, 2.0, 0.0),
(2.18, 0.686, 0.0280),
(3.2, 0.59, 0.0344),
(6.61, 0.45, 0.0368),
(12.390, 1.413, 0.0)
]
)
def test_calc_lpi_increments(depth, fos, lpi):
lpi_inc = lq.trigger.triggering_measures.calc_lpi_increments(np.ones(2) * fos, np.array([depth, depth + 0.01]))
assert np.isclose(lpi_inc[1], lpi, rtol=0.01), lpi_inc[1] # unvalidated test value
if __name__ == '__main__':
test_calc_lpi_increments()
| import liquepy as lq
from tests.conftest import TEST_DATA_DIR
import numpy as np
import pytest
def test_calc_lsn():
cpt = lq.field.load_mpa_cpt_file(TEST_DATA_DIR + "standard_1.csv")
bi2014 = lq.trigger.run_bi2014(cpt, pga=0.25, m_w=7.5, gwl=cpt.gwl)
epsilon = lq.trigger.calc_volumetric_strain_zhang_2004(bi2014.factor_of_safety, bi2014.q_c1n_cs)
lsn_direct = lq.trigger.calc_lsn(epsilon * 100, cpt.depth)
assert np.isclose(lsn_direct, 36.0919293469645, rtol=0.01) # v0.5.5
def test_single_calc_lpi_increments():
depth = 6.61
fos = 0.45
lpi_inc = lq.trigger.triggering_measures.calc_lpi_increments(np.ones(2) * fos, np.array([depth, depth + 0.01]))
assert np.isclose(lpi_inc[1], 0.0368, rtol=0.01), lpi_inc[1] # unvalidated test value
@pytest.mark.parametrize(
'depth, fos, lpi', [
(0.98, 2.0, 0.0),
(2.18, 0.686, 0.0280),
(3.2, 0.59, 0.0344),
(6.61, 0.45, 0.0368),
(12.390, 1.413, 0.0)
]
)
def test_calc_lpi_increments(depth, fos, lpi):
lpi_inc = lq.trigger.triggering_measures.calc_lpi_increments(np.ones(2) * fos, np.array([depth, depth + 0.01]))
assert np.isclose(lpi_inc[1], lpi, rtol=0.01), lpi_inc[1] # unvalidated test value
if __name__ == '__main__':
test_calc_lpi_increments()
| en | 0.530298 | # v0.5.5 # unvalidated test value # unvalidated test value | 1.837218 | 2 |
util/zernike.py | computational-imaging/multishot-localization-microscopy | 0 | 6616933 | <filename>util/zernike.py<gh_stars>0
# Copyright (c) 2021, The Board of Trustees of the Leland Stanford Junior University
"""Functions for computing Zernike polynomials."""
from __future__ import annotations
import math
import torch
def noll2mn(j):
"""Convert Noll's index to (m,n)."""
n = int(math.ceil((-3 + math.sqrt(1 + 8 * j)) / 2))
jr = j - int(n * (n + 1) / 2)
p = n % 4
if p == 0 or p == 1:
m1 = jr
m2 = -(jr - 1)
if (n - m1) % 2 == 0:
m = m1
else:
m = m2
else:
m1 = jr - 1
m2 = -jr
if (n - m1) % 2 == 0:
m = m1
else:
m = m2
return n, m
def radial(n, m, r):
"""Compute a radial function of Zernike polynomial."""
assert m >= 0
output = torch.zeros_like(r).double()
for k in range(0, int((n - m) / 2) + 1):
output += (
(((-1) ** k) * math.factorial(n - k))
/ (
math.factorial(k)
* math.factorial(int((n + m) / 2 - k))
* math.factorial(int((n - m) / 2 - k))
)
* r ** (n - 2 * k)
)
return output
def zernike_nm(n, m, r, theta):
"""Compute zernike polynomials from m,n index."""
r = r.cpu()
theta = theta.cpu()
binary_mask = r <= 1.0
if m == 0:
zern = radial(n, 0, r)
else:
if m > 0:
zern = radial(n, m, r) * torch.cos(m * theta)
else:
m = abs(m)
zern = radial(n, m, r) * torch.sin(m * theta)
zern = zern * binary_mask
return zern.float()
def zernike_array(j, r, theta):
"""Compute zernike polynomials from Noll's index.
Args:
j: Noll's index
r: Normalized radial coordinates
theta: Angle in polar coordinates
Returns:
Zernike polynomial
"""
n, m = noll2mn(j)
return zernike_nm(n, m, r, theta)
| <filename>util/zernike.py<gh_stars>0
# Copyright (c) 2021, The Board of Trustees of the Leland Stanford Junior University
"""Functions for computing Zernike polynomials."""
from __future__ import annotations
import math
import torch
def noll2mn(j):
"""Convert Noll's index to (m,n)."""
n = int(math.ceil((-3 + math.sqrt(1 + 8 * j)) / 2))
jr = j - int(n * (n + 1) / 2)
p = n % 4
if p == 0 or p == 1:
m1 = jr
m2 = -(jr - 1)
if (n - m1) % 2 == 0:
m = m1
else:
m = m2
else:
m1 = jr - 1
m2 = -jr
if (n - m1) % 2 == 0:
m = m1
else:
m = m2
return n, m
def radial(n, m, r):
"""Compute a radial function of Zernike polynomial."""
assert m >= 0
output = torch.zeros_like(r).double()
for k in range(0, int((n - m) / 2) + 1):
output += (
(((-1) ** k) * math.factorial(n - k))
/ (
math.factorial(k)
* math.factorial(int((n + m) / 2 - k))
* math.factorial(int((n - m) / 2 - k))
)
* r ** (n - 2 * k)
)
return output
def zernike_nm(n, m, r, theta):
"""Compute zernike polynomials from m,n index."""
r = r.cpu()
theta = theta.cpu()
binary_mask = r <= 1.0
if m == 0:
zern = radial(n, 0, r)
else:
if m > 0:
zern = radial(n, m, r) * torch.cos(m * theta)
else:
m = abs(m)
zern = radial(n, m, r) * torch.sin(m * theta)
zern = zern * binary_mask
return zern.float()
def zernike_array(j, r, theta):
"""Compute zernike polynomials from Noll's index.
Args:
j: Noll's index
r: Normalized radial coordinates
theta: Angle in polar coordinates
Returns:
Zernike polynomial
"""
n, m = noll2mn(j)
return zernike_nm(n, m, r, theta)
| en | 0.646506 | # Copyright (c) 2021, The Board of Trustees of the Leland Stanford Junior University Functions for computing Zernike polynomials. Convert Noll's index to (m,n). Compute a radial function of Zernike polynomial. Compute zernike polynomials from m,n index. Compute zernike polynomials from Noll's index. Args: j: Noll's index r: Normalized radial coordinates theta: Angle in polar coordinates Returns: Zernike polynomial | 3.003092 | 3 |
practice/chatbot/chatbotmodel.py | ohahohah/basketball-chatbot | 4 | 6616934 | import telegram
from telegram.ext import Updater, CommandHandler
import json
with open('../../config.json', 'r') as f:
config = json.load(f) # config.json 이라는 파일의 내용을 가져온다.
class TelegramBot:
def __init__(self, name, token):
self.core = telegram.Bot(token)
self.updater = Updater(token, use_context=True)
self.id = config['TEST_ID']
self.name = name
def sendMessage(self, text):
self.core.sendMessage(chat_id=self.id, text=text)
def stop(self):
self.updater.start_polling()
self.updater.dispatcher.stop()
self.updater.job_queue.stop()
self.updater.stop()
class Botwkbl(TelegramBot):
def __init__(self):
secret_key = config['TELEGRAM_TOKEN'] # Telegram Token 값을 scret_key라는 변수에 담는다
self.token = secret_key
TelegramBot.__init__(self, 'wkbl', self.token)
self.updater.stop()
def add_handler(self, cmd, func):
self.updater.dispatcher.add_handler(CommandHandler(cmd, func))
def start(self):
self.sendMessage('안녕하세요. 무엇을 알고 싶으세요?'
'\n/ranking \n/winningrate \n/win \n/loose \n/gap \n/score \n/stop')
self.updater.start_polling()
self.updater.idle()
| import telegram
from telegram.ext import Updater, CommandHandler
import json
with open('../../config.json', 'r') as f:
config = json.load(f) # config.json 이라는 파일의 내용을 가져온다.
class TelegramBot:
def __init__(self, name, token):
self.core = telegram.Bot(token)
self.updater = Updater(token, use_context=True)
self.id = config['TEST_ID']
self.name = name
def sendMessage(self, text):
self.core.sendMessage(chat_id=self.id, text=text)
def stop(self):
self.updater.start_polling()
self.updater.dispatcher.stop()
self.updater.job_queue.stop()
self.updater.stop()
class Botwkbl(TelegramBot):
def __init__(self):
secret_key = config['TELEGRAM_TOKEN'] # Telegram Token 값을 scret_key라는 변수에 담는다
self.token = secret_key
TelegramBot.__init__(self, 'wkbl', self.token)
self.updater.stop()
def add_handler(self, cmd, func):
self.updater.dispatcher.add_handler(CommandHandler(cmd, func))
def start(self):
self.sendMessage('안녕하세요. 무엇을 알고 싶으세요?'
'\n/ranking \n/winningrate \n/win \n/loose \n/gap \n/score \n/stop')
self.updater.start_polling()
self.updater.idle()
| ko | 1.000045 | # config.json 이라는 파일의 내용을 가져온다. # Telegram Token 값을 scret_key라는 변수에 담는다 | 2.438641 | 2 |
install_Rdeps.py | euxhenh/cellar | 0 | 6616935 | <gh_stars>0
from rpy2.robjects.packages import importr
devtools = importr('devtools')
base = importr('base')
base.options(timeout=300) # cisTopic download takes some time
devtools.install_github("aertslab/cisTopic", upgrade=False)
devtools.install_github("CamaraLab/STvEA", force=True, upgrade=False)
| from rpy2.robjects.packages import importr
devtools = importr('devtools')
base = importr('base')
base.options(timeout=300) # cisTopic download takes some time
devtools.install_github("aertslab/cisTopic", upgrade=False)
devtools.install_github("CamaraLab/STvEA", force=True, upgrade=False) | en | 0.817762 | # cisTopic download takes some time | 1.57048 | 2 |
models/context_detector.py | metterian/peep-talk | 3 | 6616936 | from dataclasses import dataclass, field
from typing import List, ClassVar
import torch
import numpy as np
import random
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
class ContextDector:
'''Parent class for Context Similarity and Lingustic Acceptability for initialzing '''
flag: bool = False
def __init__(self, model_name) -> None:
self.score = []
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
class SituationSimilarity(ContextDector):
'''Get similarity between personality and history using fine-tuned model by MRPC dataset'''
def __init__(self, model_name = "textattack/xlnet-base-cased-MRPC") -> None:
super().__init__(model_name)
def predict(self, history: list, history_sentences: list):
sentence = history[-1] # recent human input
scores = []
for history_sentence in history_sentences:
paraphrase = self.tokenizer.encode_plus(sentence, history_sentence, return_tensors="pt").to(self.device)
paraphrase_classification_logits = self.model(**paraphrase)[0]
paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
scores.append(float(paraphrase_results[1])) # classes = ["not paraphrase", "is paraphrase"]
scores = np.array(scores)
score_max = np.max(scores)
self.score.append(score_max * 100)
return score_max * 100
class LinguisticAcceptability(ContextDector):
'''Get a score on how linguistically acceptable a user's input sentence using fine-tuned by CoLA dataset'''
def __init__(self, model_name = "textattack/xlnet-base-cased-CoLA") -> None:
super().__init__(model_name)
def predict(self, history):
sentence = history[-1]
# classes = ["wrong", "correct"]
paraphrase = self.tokenizer(sentence, return_tensors="pt").to(self.device)
paraphrase_classification_logits = self.model(**paraphrase)[0]
paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
self.score.append(float(paraphrase_results[1] * 100))
return float(paraphrase_results[1] * 100)
| from dataclasses import dataclass, field
from typing import List, ClassVar
import torch
import numpy as np
import random
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
class ContextDector:
'''Parent class for Context Similarity and Lingustic Acceptability for initialzing '''
flag: bool = False
def __init__(self, model_name) -> None:
self.score = []
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
class SituationSimilarity(ContextDector):
'''Get similarity between personality and history using fine-tuned model by MRPC dataset'''
def __init__(self, model_name = "textattack/xlnet-base-cased-MRPC") -> None:
super().__init__(model_name)
def predict(self, history: list, history_sentences: list):
sentence = history[-1] # recent human input
scores = []
for history_sentence in history_sentences:
paraphrase = self.tokenizer.encode_plus(sentence, history_sentence, return_tensors="pt").to(self.device)
paraphrase_classification_logits = self.model(**paraphrase)[0]
paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
scores.append(float(paraphrase_results[1])) # classes = ["not paraphrase", "is paraphrase"]
scores = np.array(scores)
score_max = np.max(scores)
self.score.append(score_max * 100)
return score_max * 100
class LinguisticAcceptability(ContextDector):
'''Get a score on how linguistically acceptable a user's input sentence using fine-tuned by CoLA dataset'''
def __init__(self, model_name = "textattack/xlnet-base-cased-CoLA") -> None:
super().__init__(model_name)
def predict(self, history):
sentence = history[-1]
# classes = ["wrong", "correct"]
paraphrase = self.tokenizer(sentence, return_tensors="pt").to(self.device)
paraphrase_classification_logits = self.model(**paraphrase)[0]
paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
self.score.append(float(paraphrase_results[1] * 100))
return float(paraphrase_results[1] * 100)
| en | 0.764553 | Parent class for Context Similarity and Lingustic Acceptability for initialzing Get similarity between personality and history using fine-tuned model by MRPC dataset # recent human input # classes = ["not paraphrase", "is paraphrase"] Get a score on how linguistically acceptable a user's input sentence using fine-tuned by CoLA dataset # classes = ["wrong", "correct"] | 2.626739 | 3 |
HLTrigger/Configuration/python/HLT_75e33/tasks/caloTowersRecTask_cfi.py | PKUfudawei/cmssw | 1 | 6616937 | <filename>HLTrigger/Configuration/python/HLT_75e33/tasks/caloTowersRecTask_cfi.py
import FWCore.ParameterSet.Config as cms
from ..modules.towerMaker_cfi import *
caloTowersRecTask = cms.Task(
towerMaker
)
| <filename>HLTrigger/Configuration/python/HLT_75e33/tasks/caloTowersRecTask_cfi.py
import FWCore.ParameterSet.Config as cms
from ..modules.towerMaker_cfi import *
caloTowersRecTask = cms.Task(
towerMaker
)
| none | 1 | 1.163807 | 1 | |
supersql/constants/datatype_parameters.py | rayattack/supersql | 2 | 6616938 |
DEFAULT = 'default'
PK = 'pk'
PRIMARY_KEY = 'primary_key'
REQUIRED = 'required'
|
DEFAULT = 'default'
PK = 'pk'
PRIMARY_KEY = 'primary_key'
REQUIRED = 'required'
| none | 1 | 1.166197 | 1 | |
python/Chapter2/add_labels.py | wboswall/academia | 0 | 6616939 | <reponame>wboswall/academia
import csv
fields = ['ItemID', 'Name', 'Description', 'Owner',
'Price', 'Condition', 'DateRegistered']
with open('tooldesc2.csv') as td_in:
rdr = csv.DictReader(td_in, fieldnames = fields)
items = [item for item in rdr]
with open('tooldesc3.csv', 'w', newline='') as td_out:
wrt = csv.DictWriter(td_out, fieldnames=fields)
wrt.writeheader()
wrt.writerows(items)
| import csv
fields = ['ItemID', 'Name', 'Description', 'Owner',
'Price', 'Condition', 'DateRegistered']
with open('tooldesc2.csv') as td_in:
rdr = csv.DictReader(td_in, fieldnames = fields)
items = [item for item in rdr]
with open('tooldesc3.csv', 'w', newline='') as td_out:
wrt = csv.DictWriter(td_out, fieldnames=fields)
wrt.writeheader()
wrt.writerows(items) | none | 1 | 2.968855 | 3 | |
day1.py | redashu/codes | 0 | 6616940 | #!/usr/bin/python3
print("Hello world")
x=4
y="ok google"
z=[4,77,x,y]
print(y)
print(z)
print(x,y)
| #!/usr/bin/python3
print("Hello world")
x=4
y="ok google"
z=[4,77,x,y]
print(y)
print(z)
print(x,y)
| fr | 0.386793 | #!/usr/bin/python3 | 2.965339 | 3 |
molecular/util/util.py | fbrundu/molecular | 0 | 6616941 | # -*- coding: utf-8 -*-
import contextlib
import numpy as np
from sklearn.metrics import make_scorer, roc_auc_score
from sklearn.preprocessing import LabelBinarizer
import sys
@contextlib.contextmanager
def nostdout():
savestdout = sys.stdout
class Devnull(object):
def write(self, _): pass
def flush(self): pass
sys.stdout = Devnull()
try:
yield
finally:
sys.stdout = savestdout
def mad(array):
''' Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation '''
return np.median(np.abs(array - np.median(array)))
def _mc_roc_auc(truth, pred, average='macro'):
''' Multiclass ROC AUC '''
lb = LabelBinarizer()
lb.fit(truth)
truth = lb.transform(truth)
pred = lb.transform(pred)
return roc_auc_score(truth, pred, average=average)
def mc_roc_auc():
return make_scorer(_mc_roc_auc)
| # -*- coding: utf-8 -*-
import contextlib
import numpy as np
from sklearn.metrics import make_scorer, roc_auc_score
from sklearn.preprocessing import LabelBinarizer
import sys
@contextlib.contextmanager
def nostdout():
savestdout = sys.stdout
class Devnull(object):
def write(self, _): pass
def flush(self): pass
sys.stdout = Devnull()
try:
yield
finally:
sys.stdout = savestdout
def mad(array):
''' Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation '''
return np.median(np.abs(array - np.median(array)))
def _mc_roc_auc(truth, pred, average='macro'):
''' Multiclass ROC AUC '''
lb = LabelBinarizer()
lb.fit(truth)
truth = lb.transform(truth)
pred = lb.transform(pred)
return roc_auc_score(truth, pred, average=average)
def mc_roc_auc():
return make_scorer(_mc_roc_auc)
| en | 0.612377 | # -*- coding: utf-8 -*- Median Absolute Deviation: a "Robust" version of standard deviation. Indices variabililty of the sample. https://en.wikipedia.org/wiki/Median_absolute_deviation Multiclass ROC AUC | 2.461851 | 2 |
Programming/python/codes/dfs.py | kwangjunechoi7/TIL | 0 | 6616942 | <reponame>kwangjunechoi7/TIL
graph = {
'A': ['B'],
'B': ['A', 'C', 'H'],
'C': ['B', 'D'],
'D': ['C', 'E', 'G'],
'E': ['D', 'F'],
'F': ['E'],
'G': ['D'],
'H': ['B', 'I', 'J', 'M'],
'I': ['H'],
'J': ['H', 'K'],
'K': ['J', 'L'],
'L': ['K'],
'M': ['H']
}
def bfs(graph, start_node):
visit = list()
queue = list()
queue.append(start_node)
while queue:
node = queue.pop(0)
if node not in visit:
visit.append(node)
queue.extend(graph[node])
return visit
print(bfs(graph, 'A'))
# def dfs(graph, v, visited):
# # current node process
# visited[v] = True
# print(v, end= ' ')
# # recursive connect other nodes
# for i in graph[v]:
# if not visited[i]:
# dfs(graph, i, visited)
| graph = {
'A': ['B'],
'B': ['A', 'C', 'H'],
'C': ['B', 'D'],
'D': ['C', 'E', 'G'],
'E': ['D', 'F'],
'F': ['E'],
'G': ['D'],
'H': ['B', 'I', 'J', 'M'],
'I': ['H'],
'J': ['H', 'K'],
'K': ['J', 'L'],
'L': ['K'],
'M': ['H']
}
def bfs(graph, start_node):
visit = list()
queue = list()
queue.append(start_node)
while queue:
node = queue.pop(0)
if node not in visit:
visit.append(node)
queue.extend(graph[node])
return visit
print(bfs(graph, 'A'))
# def dfs(graph, v, visited):
# # current node process
# visited[v] = True
# print(v, end= ' ')
# # recursive connect other nodes
# for i in graph[v]:
# if not visited[i]:
# dfs(graph, i, visited) | en | 0.725508 | # def dfs(graph, v, visited): # # current node process # visited[v] = True # print(v, end= ' ') # # recursive connect other nodes # for i in graph[v]: # if not visited[i]: # dfs(graph, i, visited) | 3.8916 | 4 |
desafio009.py | Darlingcris/Desafios-Python | 0 | 6616943 | <filename>desafio009.py<gh_stars>0
# Faça um programa que leia um numero inteiro qualquer e mostre na tela a sua tabuada.
x=int(input("Digite um numero inteiro para criar a sua tabuada: "))
n=0
while n<=10:
t =x*n
print("{} x {} = {} \n".format(x,n,t))
n=n+1
print("Fim do programa.")
| <filename>desafio009.py<gh_stars>0
# Faça um programa que leia um numero inteiro qualquer e mostre na tela a sua tabuada.
x=int(input("Digite um numero inteiro para criar a sua tabuada: "))
n=0
while n<=10:
t =x*n
print("{} x {} = {} \n".format(x,n,t))
n=n+1
print("Fim do programa.")
| pt | 0.98127 | # Faça um programa que leia um numero inteiro qualquer e mostre na tela a sua tabuada. | 3.969188 | 4 |
mge_graphql/types/common.py | mgesoftware/mge-graphql | 2 | 6616944 | import graphene
class Error(graphene.ObjectType):
field = graphene.String(
description=(
"Name of a field that caused the error. A value of `null` indicates that "
"the error isn't associated with a particular field."
),
required=False,
)
message = graphene.String(description="The error message.")
class Meta:
description = "Represents an error in the input of a mutation." | import graphene
class Error(graphene.ObjectType):
field = graphene.String(
description=(
"Name of a field that caused the error. A value of `null` indicates that "
"the error isn't associated with a particular field."
),
required=False,
)
message = graphene.String(description="The error message.")
class Meta:
description = "Represents an error in the input of a mutation." | none | 1 | 2.758987 | 3 | |
util/config/__init__.py | yangwenke2010/template_crawler | 4 | 6616945 | <reponame>yangwenke2010/template_crawler
# -*- coding:utf-8 -*-
# 配置文件基础模块
import configparser
class ConfigParser():
def __init__(self, config_file='config.cfg', section_name='config'):
self.config_file = config_file
self.section_name = section_name
self.config = configparser.ConfigParser()
self.config.read(self.config_file)
def add_section(self, section_name):
self.config.add_section(section_name)
self.use_section(section_name)
def use_section(self, section_name):
self.section_name = section_name
def set_kv(self, k, v):
self.config.set(self.section_name, k, str(v))
self.save
def read(self, k):
return self.config[self.section_name][k]
def read_all(self):
return {k:self.read(k) for k in self.config[self.section_name]}
@property
def save(self):
self.config.write(open(self.config_file, 'w'))
class ConfigReader(ConfigParser):
@staticmethod
def read_section_key(config_name, section_name, *k, **kwargs):
cp = ConfigParser(config_file=config_name, section_name=section_name)
if "all" in kwargs.keys() and kwargs["all"]:
return cp.read_all()
elif len(k) == 1:
return cp.read(k[0])
else:
return {ik:cp.read(ik) for ik in k} | # -*- coding:utf-8 -*-
# 配置文件基础模块
import configparser
class ConfigParser():
def __init__(self, config_file='config.cfg', section_name='config'):
self.config_file = config_file
self.section_name = section_name
self.config = configparser.ConfigParser()
self.config.read(self.config_file)
def add_section(self, section_name):
self.config.add_section(section_name)
self.use_section(section_name)
def use_section(self, section_name):
self.section_name = section_name
def set_kv(self, k, v):
self.config.set(self.section_name, k, str(v))
self.save
def read(self, k):
return self.config[self.section_name][k]
def read_all(self):
return {k:self.read(k) for k in self.config[self.section_name]}
@property
def save(self):
self.config.write(open(self.config_file, 'w'))
class ConfigReader(ConfigParser):
@staticmethod
def read_section_key(config_name, section_name, *k, **kwargs):
cp = ConfigParser(config_file=config_name, section_name=section_name)
if "all" in kwargs.keys() and kwargs["all"]:
return cp.read_all()
elif len(k) == 1:
return cp.read(k[0])
else:
return {ik:cp.read(ik) for ik in k} | en | 0.435824 | # -*- coding:utf-8 -*- # 配置文件基础模块 | 3.092903 | 3 |
room_reservations/apps.py | nahidsaikat/reservation | 0 | 6616946 | from django.apps import AppConfig
class RoomReservationsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'room_reservations'
| from django.apps import AppConfig
class RoomReservationsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'room_reservations'
| none | 1 | 1.351198 | 1 | |
oscar/test/newfactories.py | nosrevi/django-oscar | 2 | 6616947 | <filename>oscar/test/newfactories.py
# coding=utf-8
"""
Factories using factory boy.
Using a silly module name as I don't want to mix the old and new
implementations of factories, but I do want to allow importing both from the
same place.
In 2020, when all tests use the new factory-boy factories, we can rename this
module to factories.py and drop the old ones.
"""
import datetime
from decimal import Decimal as D
from django.utils.timezone import now
import factory
from oscar.core.loading import get_model, get_class
from oscar.core.compat import get_user_model
__all__ = ["UserFactory", "CountryFactory", "UserAddressFactory",
"BasketFactory", "VoucherFactory", "ProductFactory",
"StockRecordFactory", "ProductAttributeFactory",
"ProductAttributeValueFactory", "AttributeOptionGroupFactory",
"AttributeOptionFactory", "PartnerFactory",
"ProductCategoryFactory", "CategoryFactory", "RangeFactory",
"ProductClassFactory"]
Selector = get_class('partner.strategy', 'Selector')
class UserFactory(factory.DjangoModelFactory):
username = factory.Sequence(lambda n: 'the_j_meister nummer %d' % n)
email = factory.Sequence(lambda n: '<EMAIL>' % n)
first_name = 'joseph'
last_name = 'winterbottom'
password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
is_active = True
is_superuser = False
is_staff = False
class Meta:
model = get_user_model()
class CountryFactory(factory.DjangoModelFactory):
iso_3166_1_a2 = 'GB'
name = "UNITED KINGDOM"
class Meta:
model = get_model('address', 'Country')
class UserAddressFactory(factory.DjangoModelFactory):
title = "Dr"
first_name = "Barry"
last_name = 'Barrington'
line1 = "1 King Road"
line4 = "London"
postcode = "SW1 9RE"
country = factory.SubFactory(CountryFactory)
user = factory.SubFactory(UserFactory)
class Meta:
model = get_model('address', 'UserAddress')
class BasketFactory(factory.DjangoModelFactory):
@factory.post_generation
def set_strategy(self, create, extracted, **kwargs):
# Load default strategy (without a user/request)
self.strategy = Selector().strategy()
class Meta:
model = get_model('basket', 'Basket')
class VoucherFactory(factory.DjangoModelFactory):
name = "My voucher"
code = "MYVOUCHER"
start_datetime = now() - datetime.timedelta(days=1)
end_datetime = now() - datetime.timedelta(days=10)
class Meta:
model = get_model('voucher', 'Voucher')
class PartnerFactory(factory.DjangoModelFactory):
name = "Gardners"
class Meta:
model = get_model('partner', 'Partner')
class StockRecordFactory(factory.DjangoModelFactory):
partner = factory.SubFactory(PartnerFactory)
partner_sku = factory.Sequence(lambda n: 'unit%d' % n)
price_currency = "GBP"
price_excl_tax = D('9.99')
num_in_stock = 100
class Meta:
model = get_model('partner', 'StockRecord')
class ProductClassFactory(factory.DjangoModelFactory):
name = "Books"
requires_shipping = True
track_stock = True
class Meta:
model = get_model('catalogue', 'ProductClass')
class CategoryFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Category %d' % n)
# Very naive handling of treebeard node fields. Works though!
depth = 0
path = factory.Sequence(lambda n: '%04d' % n)
class Meta:
model = get_model('catalogue', 'Category')
class ProductCategoryFactory(factory.DjangoModelFactory):
category = factory.SubFactory(CategoryFactory)
class Meta:
model = get_model('catalogue', 'ProductCategory')
class ProductFactory(factory.DjangoModelFactory):
class Meta:
model = get_model('catalogue', 'Product')
structure = Meta.model.STANDALONE
upc = factory.Sequence(lambda n: '978080213020%d' % n)
title = "A confederacy of dunces"
product_class = factory.SubFactory(ProductClassFactory)
stockrecords = factory.RelatedFactory(StockRecordFactory, 'product')
categories = factory.RelatedFactory(ProductCategoryFactory, 'product')
class ProductAttributeFactory(factory.DjangoModelFactory):
code = name = 'weight'
product_class = factory.SubFactory(ProductClassFactory)
type = "float"
class Meta:
model = get_model('catalogue', 'ProductAttribute')
class AttributeOptionGroupFactory(factory.DjangoModelFactory):
name = u'Grüppchen'
class Meta:
model = get_model('catalogue', 'AttributeOptionGroup')
class AttributeOptionFactory(factory.DjangoModelFactory):
# Ideally we'd get_or_create a AttributeOptionGroup here, but I'm not
# aware of how to not create a unique option group for each call of the
# factory
option = factory.Sequence(lambda n: 'Option %d' % n)
class Meta:
model = get_model('catalogue', 'AttributeOption')
class ProductAttributeValueFactory(factory.DjangoModelFactory):
attribute = factory.SubFactory(ProductAttributeFactory)
product = factory.SubFactory(ProductFactory)
class Meta:
model = get_model('catalogue', 'ProductAttributeValue')
class RangeFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Range %d' % n)
slug = factory.Sequence(lambda n: 'range-%d' % n)
class Meta:
model = get_model('offer', 'Range')
| <filename>oscar/test/newfactories.py
# coding=utf-8
"""
Factories using factory boy.
Using a silly module name as I don't want to mix the old and new
implementations of factories, but I do want to allow importing both from the
same place.
In 2020, when all tests use the new factory-boy factories, we can rename this
module to factories.py and drop the old ones.
"""
import datetime
from decimal import Decimal as D
from django.utils.timezone import now
import factory
from oscar.core.loading import get_model, get_class
from oscar.core.compat import get_user_model
__all__ = ["UserFactory", "CountryFactory", "UserAddressFactory",
"BasketFactory", "VoucherFactory", "ProductFactory",
"StockRecordFactory", "ProductAttributeFactory",
"ProductAttributeValueFactory", "AttributeOptionGroupFactory",
"AttributeOptionFactory", "PartnerFactory",
"ProductCategoryFactory", "CategoryFactory", "RangeFactory",
"ProductClassFactory"]
Selector = get_class('partner.strategy', 'Selector')
class UserFactory(factory.DjangoModelFactory):
username = factory.Sequence(lambda n: 'the_j_meister nummer %d' % n)
email = factory.Sequence(lambda n: '<EMAIL>' % n)
first_name = 'joseph'
last_name = 'winterbottom'
password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
is_active = True
is_superuser = False
is_staff = False
class Meta:
model = get_user_model()
class CountryFactory(factory.DjangoModelFactory):
iso_3166_1_a2 = 'GB'
name = "UNITED KINGDOM"
class Meta:
model = get_model('address', 'Country')
class UserAddressFactory(factory.DjangoModelFactory):
title = "Dr"
first_name = "Barry"
last_name = 'Barrington'
line1 = "1 King Road"
line4 = "London"
postcode = "SW1 9RE"
country = factory.SubFactory(CountryFactory)
user = factory.SubFactory(UserFactory)
class Meta:
model = get_model('address', 'UserAddress')
class BasketFactory(factory.DjangoModelFactory):
@factory.post_generation
def set_strategy(self, create, extracted, **kwargs):
# Load default strategy (without a user/request)
self.strategy = Selector().strategy()
class Meta:
model = get_model('basket', 'Basket')
class VoucherFactory(factory.DjangoModelFactory):
name = "My voucher"
code = "MYVOUCHER"
start_datetime = now() - datetime.timedelta(days=1)
end_datetime = now() - datetime.timedelta(days=10)
class Meta:
model = get_model('voucher', 'Voucher')
class PartnerFactory(factory.DjangoModelFactory):
name = "Gardners"
class Meta:
model = get_model('partner', 'Partner')
class StockRecordFactory(factory.DjangoModelFactory):
partner = factory.SubFactory(PartnerFactory)
partner_sku = factory.Sequence(lambda n: 'unit%d' % n)
price_currency = "GBP"
price_excl_tax = D('9.99')
num_in_stock = 100
class Meta:
model = get_model('partner', 'StockRecord')
class ProductClassFactory(factory.DjangoModelFactory):
name = "Books"
requires_shipping = True
track_stock = True
class Meta:
model = get_model('catalogue', 'ProductClass')
class CategoryFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Category %d' % n)
# Very naive handling of treebeard node fields. Works though!
depth = 0
path = factory.Sequence(lambda n: '%04d' % n)
class Meta:
model = get_model('catalogue', 'Category')
class ProductCategoryFactory(factory.DjangoModelFactory):
category = factory.SubFactory(CategoryFactory)
class Meta:
model = get_model('catalogue', 'ProductCategory')
class ProductFactory(factory.DjangoModelFactory):
class Meta:
model = get_model('catalogue', 'Product')
structure = Meta.model.STANDALONE
upc = factory.Sequence(lambda n: '978080213020%d' % n)
title = "A confederacy of dunces"
product_class = factory.SubFactory(ProductClassFactory)
stockrecords = factory.RelatedFactory(StockRecordFactory, 'product')
categories = factory.RelatedFactory(ProductCategoryFactory, 'product')
class ProductAttributeFactory(factory.DjangoModelFactory):
code = name = 'weight'
product_class = factory.SubFactory(ProductClassFactory)
type = "float"
class Meta:
model = get_model('catalogue', 'ProductAttribute')
class AttributeOptionGroupFactory(factory.DjangoModelFactory):
name = u'Grüppchen'
class Meta:
model = get_model('catalogue', 'AttributeOptionGroup')
class AttributeOptionFactory(factory.DjangoModelFactory):
# Ideally we'd get_or_create a AttributeOptionGroup here, but I'm not
# aware of how to not create a unique option group for each call of the
# factory
option = factory.Sequence(lambda n: 'Option %d' % n)
class Meta:
model = get_model('catalogue', 'AttributeOption')
class ProductAttributeValueFactory(factory.DjangoModelFactory):
attribute = factory.SubFactory(ProductAttributeFactory)
product = factory.SubFactory(ProductFactory)
class Meta:
model = get_model('catalogue', 'ProductAttributeValue')
class RangeFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Range %d' % n)
slug = factory.Sequence(lambda n: 'range-%d' % n)
class Meta:
model = get_model('offer', 'Range')
| en | 0.858368 | # coding=utf-8 Factories using factory boy. Using a silly module name as I don't want to mix the old and new implementations of factories, but I do want to allow importing both from the same place. In 2020, when all tests use the new factory-boy factories, we can rename this module to factories.py and drop the old ones. # Load default strategy (without a user/request) # Very naive handling of treebeard node fields. Works though! # Ideally we'd get_or_create a AttributeOptionGroup here, but I'm not # aware of how to not create a unique option group for each call of the # factory | 2.220349 | 2 |
src/models/brute_cluster.py | zeou1/maggot_models | 0 | 6616948 | """
Created on Thu Mar 14 14:19:55 2019
These functions allow for "brute clustering," inspired by R's mclust.
Clustering is performed first by hierarchical agglomeration, then fitting a
Gaussian Mixture via Expectation Maximization (EM). There are several ways to
perform both agglomeration and EM, so these functions perform the (specified)
combinations of methods, then evaluate each according to BIC.
@author: <NAME>
"""
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import multivariate_normal
def calcBIC(x, wts, means, variances, k):
    """
    Score a Gaussian mixture on data via the Bayesian Information Criterion.

    Inputs:
        x - nxd array of data points
        wts - list of mixture weights (same length as means and variances)
        means - list of d numpy arrays of mixture means
        variances - list of dxd covariance matrices
        k - number of free parameters in the model

    Outputs:
        bic - BIC value where higher is better; -inf if any component
              covariance is singular
    """
    n_samples = x.shape[0]
    density = 0
    for weight, mean, covariance in zip(wts, means, variances):
        mean = np.squeeze(mean)
        covariance = np.squeeze(covariance)
        try:
            component = multivariate_normal(mean, covariance)
        except np.linalg.LinAlgError:
            # Singular covariance matrix: the mixture cannot be scored.
            return -np.inf
        density += weight * component.pdf(x)
    log_likelihood = np.sum(np.log(density))
    return 2 * log_likelihood - np.log(n_samples) * k
def processBIC(data, wts, mus, covs, m):
    """
    Calculates BIC from input that is formatted either as the sklearn
    GaussianMixture components or from data that was saved to a csv in R.

    Inputs
        data - nxd numpy array of data
        wts - k numpy array of mixture weights
        mus - kxd numpy array of means
        covs - covariances; layout depends on `m`:
               kxdxd for the mclust models (read back from a csv written
               by R), or the sklearn GaussianMixture ``covariances_``
               attribute for the sklearn covariance types
        m - model string: one of the mclust names ('VII', 'EEE', 'VVV',
            'VVI') or the sklearn covariance types ('spherical', 'tied',
            'full', 'diag'); implies the format of `covs`

    Outputs
        BIC - bic value as calculated by calcBIC

    Raises
        ValueError - if `m` is not a recognized model string (previously an
                     unknown `m` fell through and crashed with a NameError)
    """
    d = data.shape[1]
    k = len(wts)

    # Free parameters per family: weight (1) + mean (d) + covariance terms.
    # mclust names and sklearn types pair up: VII~spherical, EEE~tied,
    # VVV~full, VVI~diag.
    if m in ("VII", "spherical"):
        params = k * (1 + d + 1)
    elif m in ("EEE", "tied"):
        params = k * (1 + d) + d * (d + 1) / 2
    elif m in ("VVV", "full"):
        params = k * (1 + d + d * (d + 1) / 2)
    elif m in ("VVI", "diag"):
        params = k * (1 + d + d)
    else:
        raise ValueError("Unknown model type: %r" % (m,))

    # Normalize covariances to a list of one entry per component.
    if m in ("VII", "EEE", "VVV", "VVI", "full"):
        # kxdxd stack -> list of k (1, d, d) slices (calcBIC squeezes them)
        covs = np.split(covs, covs.shape[0])
    elif m == "spherical":
        # k scalar variances -> list of k isotropic dxd matrices
        covs = [v * np.identity(d) for v in covs]
    elif m == "tied":
        # single shared dxd matrix, replicated for every component
        covs = [covs for _ in range(k)]
    elif m == "diag":
        # kxd diagonal entries -> list of k diagonal dxd matrices
        covs = [np.diag(covs[i, :]) for i in np.arange(k)]

    params = params - 1  # because the weights must add to 1
    wts = np.split(wts, wts.shape[0])
    means = np.split(mus, mus.shape[0])
    return calcBIC(data, wts, means, covs, params)
# Matplotlib color names used to draw clusters in plots; cluster i is drawn
# with colors[i], so at most len(colors) clusters can be rendered at once.
colors = [
    "red", "green", "blue", "orange", "purple", "yellow",
    "black", "brown", "lightsalmon", "greenyellow", "cornflowerblue", "tan",
    "violet", "gold", "slategray", "peru", "indianred", "darkolivegreen",
    "navy", "darkgoldenrod", "deeppink", "darkkhaki", "silver", "saddlebrown",
]
def agglomerate(data, aff, link, k):
    """
    Hierarchical Agglomeration

    inputs:
        data - nxd numpy array
        aff - affinity technique, an element of ['euclidean','manhattan','cosine']
        link - linkage technique, an element of ['ward','complete','average','single']
        k - number of clusters

    outputs:
        one_hot - nxk numpy array with a single one in each row indicating
                  cluster membership

    exceptions:
        ward linkage can only be used with euclidean/l2 affinity so if ward is
        specified with a different affinity then there is an Exception
    """
    if link == "ward" and aff != "euclidean":
        raise Exception("Ward linkage is only valid with Euclidean affinity")
    labels = (
        AgglomerativeClustering(n_clusters=k, affinity=aff, linkage=link)
        .fit(data)
        .labels_
    )
    # Expand the integer labels into a one-hot membership matrix.
    return np.eye(k)[labels]
def initialize_params(data, one_hot, cov):
    """
    Derive GMM initialization parameters from a hard cluster assignment.

    sklearn's GaussianMixture does not allow initialization from class
    membership, but it does allow initialization from mixture parameters, so
    here we calculate the mixture parameters implied by the membership.

    input:
        data - nxd numpy array
        one_hot - nxk numpy array with a single one in each row indicating
                  cluster membership
        cov - covariance type, element of ['full','tied','diag','spherical']

    output:
        weights - k array of mixing weights
        means - kxd array of means of mixture components
        precisions - precision matrices in the layout GaussianMixture expects
                     for this covariance type (a single matrix for 'tied',
                     per-component entries otherwise)
    """
    n_samples = data.shape[0]
    weights, means, covariances = _estimate_gaussian_parameters(
        data, one_hot, 1e-06, cov
    )
    weights = weights / n_samples
    chol = _compute_precision_cholesky(covariances, cov)
    if cov == "tied":
        # One shared covariance -> one precision matrix.
        precisions = np.dot(chol, chol.T)
    elif cov == "diag":
        # NOTE(review): this passes the Cholesky factors (1/std) through as
        # the precisions, matching the original code — confirm against the
        # sklearn version in use whether precisions_init expects 1/var here.
        precisions = chol
    else:
        # 'full' gives one matrix per component; 'spherical' gives scalars,
        # for which np.dot reduces to squaring.
        precisions = [np.dot(c, c.T) for c in chol]
    return weights, means, precisions
def cluster(data, aff, link, cov, k, c_true=None):
    """
    Cluster according to specified method

    input:
        data - nxd numpy matrix of data
        aff - affinity, element of ['euclidean','manhattan','cosine'] or 'none'
              for EM from scratch
        link - linkage, element of ['ward','complete','average','single'], or
               'none' for EM from scratch
        cov - covariance, element of ['full','tied','diag','spherical']
        k - # of clusters
        c_true - optional n array of true cluster membership

    output:
        c_hat - n array of clustering results
        means - kxd array of means of mixture components
        bic - Bayes Information Criterion for this clustering
        ari - Adjusted Rand Index comparing the clustering result to the true
              clustering, or None when c_true is not given
        reg - regularization parameter that was used in the clustering results
              (0 or 1e-6)
        n_params - number of free parameters in the fitted model
    """
    iter_num = 100

    def _fit_gmm(reg, init):
        # Run a single EM fit; returns the fitted model, hard labels, and BIC.
        kwargs = dict(
            n_components=k,
            covariance_type=cov,
            reg_covar=reg,
            max_iter=iter_num,
            verbose=0,
            verbose_interval=1,
        )
        if init is not None:
            weights, means, precisions = init
            kwargs.update(
                weights_init=weights,
                means_init=means,
                precisions_init=precisions,
            )
        gmm = GaussianMixture(**kwargs)
        c_hat = gmm.fit_predict(data)
        bic = processBIC(data, gmm.weights_, gmm.means_, gmm.covariances_, cov)
        return gmm, c_hat, bic

    if aff == "none" or link == "none":
        # Plain EM with sklearn's default initialization.
        init = None
    else:
        # Seed EM with the parameters implied by hierarchical agglomeration.
        one_hot = agglomerate(data, aff, link, k)
        init = initialize_params(data, one_hot, cov)

    try:
        # First attempt: no covariance regularization.
        reg = 0
        gmm, c_hat, bic = _fit_gmm(reg, init)
        if any(sum(c_hat == i) <= 1 for i in range(k)) or bic == -np.inf:
            # Degenerate solution: a numerical error during EM or while
            # calculating BIC, or a component with at most one member.
            # Trigger the regularized retry below.
            raise ValueError
    except Exception:
        # Retry with regularization. (Narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate.)
        reg = 1e-6
        gmm, c_hat, bic = _fit_gmm(reg, init)

    ari = adjusted_rand_score(c_true, c_hat) if c_true is not None else None
    return c_hat, gmm.means_, bic, ari, reg, gmm._n_parameters()
def brute_cluster(
    x,
    ks,
    affinities=None,
    linkages=None,
    covariance_types=None,
    c_true=None,
    plot=False,
    savefigs=None,
    verbose=0,
    metric=None,
):
    """
    Cluster with every combination of options and (optionally) plot results.

    inputs:
        x - nxd array of data
        ks - list of cluster numbers to try
        affinities - list of affinity modes, each an element of
                     ['none','euclidean','manhattan','cosine']; defaults to all
        linkages - list of linkage modes, each an element of
                   ['none','ward','complete','average','single']; defaults to all
        covariance_types - list of covariance modes, each an element of
                           ['full','tied','diag','spherical']; defaults to all
        c_true - n array of true clustering, or None if unknown
        plot - whether to draw the summary figures
        savefigs - None indicates that figures should not be saved; a string
                   value is the filename prefix used when saving the figures
        verbose - if 0, no output; if 1, print the clustering options
                  currently being used
        metric - optional callable metric(c_hat, n_params) -> score; when
                 given, the clustering maximizing the score is returned
                 instead of the best-BIC one
    outputs:
        best_c_hat, best_n_params - labels and free-parameter count of the
        best clustering found (best by `metric` if provided, else by BIC)
    """
    if affinities is None:
        affinities = ["none", "euclidean", "manhattan", "cosine"]
    if linkages is None:
        linkages = ["none", "ward", "complete", "average", "single"]
    if covariance_types is None:
        covariance_types = ["full", "tied", "diag", "spherical"]
    cov_dict = {"full": 0, "tied": 1, "diag": 2, "spherical": 3}
    aff_dict = {"none": 0, "euclidean": 0, "manhattan": 1, "cosine": 2}
    link_dict = {"none": 0, "ward": 1, "complete": 2, "average": 3, "single": 4}
    # 11 agglomeration combos: 4 with l2 affinity, 3 with l1, 3 with cos, and no agglom
    # 4 EM options: full, tied, diag, spherical
    bics = np.zeros([44, len(ks)]) - np.inf
    aris = np.zeros([44, len(ks)]) - np.inf
    best_ari = float("-inf")
    best_bic = float("-inf")
    best_n_params = np.inf
    if metric is not None:
        best_metric = float("-inf")
        best_bic_metric = float("-inf")
        best_combo_metric = []
        best_c_hat_metric = []
        best_k_metric = 0
        best_means_metric = []
        best_reg_metric = 0
        best_n_params_metric = 0
    for i, k in enumerate(ks):
        for af in affinities:
            for li in linkages:
                # some combinations don't work, skip these
                if li == "ward" and af != "euclidean":
                    continue
                if (li == "none" and af != "none") or (af == "none" and li != "none"):
                    continue
                for cov in covariance_types:
                    if verbose == 1:
                        print(f"K={k}, Affinity={af}, Linkage={li}, Covariance={cov}")
                    # each (cov, aff, link) combination owns one row of the grid
                    row = 11 * cov_dict[cov] + 3 * aff_dict[af] + link_dict[li]
                    c_hat, means, bic, ari, reg, n_params = cluster(
                        x, af, li, cov, k, c_true
                    )
                    bics[row, i] = bic
                    # BUGFIX: cluster() returns ari=None when c_true is None;
                    # assigning None into a float ndarray raises TypeError,
                    # so store -inf in that case instead
                    aris[row, i] = ari if ari is not None else -np.inf
                    if c_true is not None and ari > best_ari:
                        best_ari = ari
                        best_combo_ari = [af, li, cov]
                        best_c_hat_ari = c_hat
                        best_k_ari = k
                    if bic > best_bic:
                        best_bic = bic
                        best_combo_bic = [af, li, cov]
                        best_c_hat_bic = c_hat
                        best_k_bic = k
                        best_means_bic = means
                        reg_bic = reg
                        best_n_params = n_params
                    if metric is not None:
                        score = metric(c_hat, n_params)
                        if score > best_metric:
                            best_bic_metric = bic
                            best_combo_metric = [af, li, cov]
                            best_c_hat_metric = c_hat
                            best_k_metric = k
                            best_means_metric = means
                            best_reg_metric = reg
                            best_n_params_metric = n_params
                            best_metric = score
    # True plot**********************************
    if plot and c_true is not None:
        plt.figure(figsize=(8, 8))
        ptcolors = [colors[i] for i in c_true.astype(int)]
        plt.scatter(x[:, 0], x[:, 1], c=ptcolors)
        plt.title("True labels")
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_true.png")
    # Plot with best BIC*********************************
    if plot:
        plt.figure(figsize=(8, 8))
        # ptcolors = [colors[i] for i in best_c_hat_bic]
        plt.scatter(x[:, 0], x[:, 1], c=best_c_hat_bic)
        # mncolors = [colors[i] for i in np.arange(best_k_bic)]
        mncolors = [i for i in np.arange(best_k_bic)]
        plt.scatter(best_means_bic[:, 0], best_means_bic[:, 1], c=mncolors, marker="x")
        plt.title(
            "py(agg-gmm) BIC %3.0f from " % best_bic
            + str(best_combo_bic)
            + " k="
            + str(best_k_bic)
            + " reg="
            + str(reg_bic)
        )  # + "iter=" + str(best_iter_bic))
        plt.legend()
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bestbic.png")
    titles = ["full", "tied", "diag", "spherical"]
    if plot and c_true is not None:
        # Plot with best ARI************************************
        plt.figure(figsize=(8, 8))
        ptcolors = [colors[i] for i in best_c_hat_ari]
        plt.scatter(x[:, 0], x[:, 1], c=ptcolors)
        plt.title(
            "py(agg-gmm) ARI %3.3f from " % best_ari
            + str(best_combo_ari)
            + " k="
            + str(best_k_ari)
        )  # + "iter=" + str(best_iter_ari))
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bestari.png")
        # ARI vs BIC********************************
        plt.figure(figsize=(8, 8))
        for row in np.arange(4):
            xs = bics[row * 11 : (row + 1) * 11, :]
            ys = aris[row * 11 : (row + 1) * 11, :]
            # only plot combinations that actually ran (grid is seeded with -inf)
            idxs = (xs != -np.inf) * (ys != -np.inf)
            plt.scatter(xs[idxs], ys[idxs], label=titles[row])
        idxs = (bics != -np.inf) * (aris != -np.inf)
        slope, _, r_value, _, p_value = stats.linregress(bics[idxs], aris[idxs])
        plt.xlabel("BIC")
        plt.ylabel("ARI")
        plt.legend(loc="lower right")
        plt.title(
            "Pyclust's ARI vs BIC for Drosophila Data with Correlation r^2=%2.2f"
            % (r_value ** 2)
        )
        plt.show()
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bicari.png")
    if plot:
        # plot of all BICS*******************************
        labels = {
            0: "none",
            1: "l2/ward",
            2: "l2/complete",
            3: "l2/average",
            4: "l2/single",
            5: "l1/complete",
            6: "l1/average",
            7: "l1/single",
            8: "cos/complete",
            9: "cos/average",
            10: "cos/single",
        }
        f, ((ax0, ax1), (ax2, ax3)) = plt.subplots(
            2, 2, sharey="row", sharex="col", figsize=(10, 10)
        )
        # one subplot per covariance type; rows of `bics` are grouped in 11s
        for row in np.arange(bics.shape[0]):
            if all(bics[row, :] == -np.inf):
                continue
            if row <= 10:
                ax0.plot(np.arange(1, len(ks) + 1), bics[row, :])
            elif row <= 21:
                ax1.plot(
                    np.arange(1, len(ks) + 1), bics[row, :], label=labels[row % 11]
                )
            elif row <= 32:
                ax2.plot(np.arange(1, len(ks) + 1), bics[row, :])
            elif row <= 43:
                ax3.plot(np.arange(1, len(ks) + 1), bics[row, :])
        ax0.set_title(titles[0])
        ax0.set(ylabel="bic")
        ax1.set_title(titles[1])
        ax1.legend(loc="lower right")
        ax2.set_title(titles[2])
        ax2.set(xlabel="k")
        ax2.set(ylabel="bic")
        ax3.set_title(titles[3])
        ax3.set(xlabel="k")
        plt.show()
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bicplot.png")
    if metric is not None:
        return best_c_hat_metric, best_n_params_metric
    else:
        return best_c_hat_bic, best_n_params
| """
Created on Thu Mar 14 14:19:55 2019
These functions allow for "brute clustering," inspired by R's mclust.
Clustering is performed first by hierarchical agglomeration, then fitting a
Gaussian Mixture via Expectation Maximization (EM). There are several ways to
perform both agglomeration and EM so these functions performs the (specified)
combinations of methods then evaluates each according to BIC.
@author: <NAME>
"""
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import multivariate_normal
def calcBIC(x, wts, means, variances, k):
    """
    Compute the BIC of a Gaussian mixture on a dataset.

    Inputs:
        x - nxd datapoints
        wts - list of mixture weights (same length as means and variances)
        means - list of d numpy arrays of mixture means
        variances - list of dxd covariance matrices
        k - number of free parameters in the model
    Outputs:
        bic - BIC value where higher is better, or -inf if any component
              covariance is singular
    """
    n_samples = x.shape[0]
    density = 0
    for weight, mean, cov in zip(wts, means, variances):
        try:
            component = multivariate_normal(np.squeeze(mean), np.squeeze(cov))
        except np.linalg.LinAlgError:
            # singular covariance: the mixture likelihood is undefined
            return -np.inf
        density = density + weight * component.pdf(x)
    log_likelihood = np.log(density).sum()
    return 2 * log_likelihood - k * np.log(n_samples)
def processBIC(data, wts, mus, covs, m):
    """
    Compute BIC from mixture parameters given either in sklearn's
    GaussianMixture format or as read back from a csv written by R's mclust.

    Inputs
        data - nxd numpy array of data
        wts - k numpy array of mixture weights
        mus - kxd numpy array of means
        covs - kxdxd for the R model strings; for the sklearn strings the
               shape depends on the covariance type (see GaussianMixture)
        m - model string; 'VII'/'EEE'/'VVV'/'VVI' mean the parameters came
            from an R/mclust csv, while 'spherical'/'tied'/'diag'/'full'
            mean sklearn's GaussianMixture format
    Outputs
        BIC - bic value as computed by calcBIC
    """
    d = data.shape[1]
    k = len(wts)
    # free-parameter counts for each covariance structure
    n_full = k * (1 + d + d * (d + 1) / 2)
    n_tied = k * (1 + d) + d * (d + 1) / 2
    n_diag = k * (1 + d + d)
    n_spherical = k * (1 + d + 1)
    if m in ("VII", "EEE", "VVV", "VVI"):
        # mclust model strings: covs was written to csv in R as a stacked
        # kxdxd array, so every model just splits it into k matrices
        params = {"VII": n_spherical, "EEE": n_tied,
                  "VVV": n_full, "VVI": n_diag}[m]
        covs = np.split(covs, covs.shape[0])
    elif m == "spherical":
        params = n_spherical
        covs = [v * np.identity(d) for v in covs]
    elif m == "tied":
        # one shared covariance matrix, replicated for each component
        params = n_tied
        covs = [covs] * k
    elif m == "full":
        params = n_full
        covs = np.split(covs, covs.shape[0])
    elif m == "diag":
        params = n_diag
        covs = [np.diag(covs[i, :]) for i in np.arange(k)]
    params = params - 1  # the weights are constrained to sum to 1
    wts = np.split(wts, wts.shape[0])
    means = np.split(mus, mus.shape[0])
    return calcBIC(data, wts, means, covs, params)
# Fixed palette used to color cluster assignments in the plots below;
# supports up to 24 distinct clusters.
colors = [
    "red",
    "green",
    "blue",
    "orange",
    "purple",
    "yellow",
    "black",
    "brown",
    "lightsalmon",
    "greenyellow",
    "cornflowerblue",
    "tan",
    "violet",
    "gold",
    "slategray",
    "peru",
    "indianred",
    "darkolivegreen",
    "navy",
    "darkgoldenrod",
    "deeppink",
    "darkkhaki",
    "silver",
    "saddlebrown",
]
def agglomerate(data, aff, link, k):
    """
    Hierarchical agglomeration of the data into k clusters.

    inputs:
        data - nxd numpy array
        aff - affinity technique, an element of ['euclidean','manhattan','cosine']
        link - linkage technique, an element of ['ward','complete','average','single']
        k - number of clusters
    outputs:
        one_hot - nxk numpy array with a single one in each row indicating
                  cluster membership
    raises:
        ValueError - ward linkage can only be used with euclidean/l2
                     affinity, so any other combination with ward is
                     rejected up front
    """
    n = data.shape[0]
    if link == "ward" and aff != "euclidean":
        # fail early with a clear message instead of letting sklearn error
        # out later (was `raise Exception`; ValueError is more precise and
        # is still caught by the existing broad handlers in cluster())
        raise ValueError("Ward linkage is only valid with Euclidean affinity")
    # NOTE(review): sklearn renamed `affinity` to `metric` in newer releases
    # and removed `affinity` in 1.4 -- confirm the pinned sklearn version.
    agglom = AgglomerativeClustering(n_clusters=k, affinity=aff, linkage=link).fit(data)
    one_hot = np.zeros([n, k])
    one_hot[np.arange(n), agglom.labels_] = 1
    return one_hot
def initialize_params(data, one_hot, cov):
    """
    sklearn's GaussianMixture does not allow initialization from class
    membership, but it does allow initialization from mixture parameters, so
    here we calculate the mixture parameters implied by the class membership.

    input:
        data - nxd numpy array
        one_hot - nxk numpy array with a single one in each row indicating
                  cluster membership
        cov - covariance mode, element of ['full','tied','diag','spherical']
    output:
        weights - k array of mixing weights
        means - kxd array of means of mixture components
        precisions - precision matrices; format depends on the EM clustering
                     option (e.g. 'full' mode needs a list of matrices, one
                     for each mixture component, but 'tied' mode only needs a
                     single matrix, since all precisions are constrained to
                     be equal)
    """
    n = data.shape[0]
    # NOTE(review): _estimate_gaussian_parameters/_compute_precision_cholesky
    # are private sklearn APIs (imported at the top of this file); their
    # module path moved in sklearn >= 0.22 -- confirm the pinned version.
    weights, means, covariances = _estimate_gaussian_parameters(
        data, one_hot, 1e-06, cov
    )
    # normalize by the number of samples so the weights form a distribution
    # (assumes the first return value is per-component totals -- confirm)
    weights /= n
    precisions_cholesky_ = _compute_precision_cholesky(covariances, cov)
    if cov == "tied":
        # single shared precision matrix, reconstructed from its Cholesky factor
        c = precisions_cholesky_
        precisions = np.dot(c, c.T)
    elif cov == "diag":
        # diagonal mode: pass the per-component values through unchanged
        precisions = precisions_cholesky_
    else:
        # one precision matrix per component
        precisions = [np.dot(c, c.T) for c in precisions_cholesky_]
    return weights, means, precisions
def cluster(data, aff, link, cov, k, c_true=None):
    """
    Cluster according to the specified method: either EM from scratch (when
    aff or link is 'none'), or hierarchical agglomeration followed by EM
    initialized from the agglomeration result.

    input:
        data - nxd numpy matrix of data
        aff - affinity, element of ['euclidean','manhattan','cosine'], or
              'none' for EM from scratch
        link - linkage, element of ['ward','complete','average','single'],
               or 'none' for EM from scratch
        cov - covariance, element of ['full','tied','diag','spherical']
        k - number of clusters
        c_true - n array of true cluster membership, or None if unknown
    output:
        c_hat - n array of clustering results
        means - kxd array of means of mixture components
        bic - Bayes Information Criterion for this clustering
        ari - Adjusted Rand Index comparing the clustering result to the
              true clustering, or None when c_true is None
        reg - regularization parameter that was used (0 or 1e-6)
        n_params - number of free parameters of the fitted mixture
    """
    iter_num = 100
    if aff == "none" or link == "none":
        try:  # no regularization
            reg = 0
            gmm = GaussianMixture(
                n_components=k,
                covariance_type=cov,
                reg_covar=reg,
                max_iter=iter_num,
                verbose=0,
                verbose_interval=1,
            )
            c_hat = gmm.fit_predict(data)
            bic = processBIC(data, gmm.weights_, gmm.means_, gmm.covariances_, cov)
            # treat a component with <=1 member or a -inf BIC as a failure
            if any([sum(c_hat == i) <= 1 for i in range(k)]) or bic == -np.inf:
                raise ValueError
        # if there was a numerical error during EM, or while calculating BIC,
        # or if the clustering found a class with only one element
        # (narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate)
        except Exception:  # regularize
            reg = 1e-6
            gmm = GaussianMixture(
                n_components=k,
                covariance_type=cov,
                reg_covar=reg,
                max_iter=iter_num,
                verbose=0,
                verbose_interval=1,
            )
            c_hat = gmm.fit_predict(data)
            bic = processBIC(data, gmm.weights_, gmm.means_, gmm.covariances_, cov)
    else:
        # agglomerate first, then run EM initialized from that partition
        one_hot = agglomerate(data, aff, link, k)
        weights, means, precisions = initialize_params(data, one_hot, cov)
        try:
            reg = 0
            gmm = GaussianMixture(
                n_components=k,
                covariance_type=cov,
                weights_init=weights,
                means_init=means,
                precisions_init=precisions,
                max_iter=iter_num,
                reg_covar=reg,
                verbose=0,
                verbose_interval=1,
            )
            c_hat = gmm.fit_predict(data)
            bic = processBIC(data, gmm.weights_, gmm.means_, gmm.covariances_, cov)
            if any([sum(c_hat == i) <= 1 for i in range(k)]) or bic == -np.inf:
                raise ValueError
        # if there was a numerical error, or if initial clustering produced a
        # mixture component with only one element
        # (narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate)
        except Exception:
            reg = 1e-6
            gmm = GaussianMixture(
                n_components=k,
                covariance_type=cov,
                weights_init=weights,
                means_init=means,
                precisions_init=precisions,
                max_iter=iter_num,
                reg_covar=reg,
                verbose=0,
                verbose_interval=1,
            )
            c_hat = gmm.fit_predict(data)
            bic = processBIC(data, gmm.weights_, gmm.means_, gmm.covariances_, cov)
    if c_true is not None:
        ari = adjusted_rand_score(c_true, c_hat)
    else:
        ari = None
    means = gmm.means_
    # NOTE(review): _n_parameters() is a private sklearn API -- confirm it
    # exists in the pinned sklearn version.
    return c_hat, means, bic, ari, reg, gmm._n_parameters()
def brute_cluster(
    x,
    ks,
    affinities=None,
    linkages=None,
    covariance_types=None,
    c_true=None,
    plot=False,
    savefigs=None,
    verbose=0,
    metric=None,
):
    """
    Cluster with every combination of options and (optionally) plot results.

    inputs:
        x - nxd array of data
        ks - list of cluster numbers to try
        affinities - list of affinity modes, each an element of
                     ['none','euclidean','manhattan','cosine']; defaults to all
        linkages - list of linkage modes, each an element of
                   ['none','ward','complete','average','single']; defaults to all
        covariance_types - list of covariance modes, each an element of
                           ['full','tied','diag','spherical']; defaults to all
        c_true - n array of true clustering, or None if unknown
        plot - whether to draw the summary figures
        savefigs - None indicates that figures should not be saved; a string
                   value is the filename prefix used when saving the figures
        verbose - if 0, no output; if 1, print the clustering options
                  currently being used
        metric - optional callable metric(c_hat, n_params) -> score; when
                 given, the clustering maximizing the score is returned
                 instead of the best-BIC one
    outputs:
        best_c_hat, best_n_params - labels and free-parameter count of the
        best clustering found (best by `metric` if provided, else by BIC)
    """
    if affinities is None:
        affinities = ["none", "euclidean", "manhattan", "cosine"]
    if linkages is None:
        linkages = ["none", "ward", "complete", "average", "single"]
    if covariance_types is None:
        covariance_types = ["full", "tied", "diag", "spherical"]
    cov_dict = {"full": 0, "tied": 1, "diag": 2, "spherical": 3}
    aff_dict = {"none": 0, "euclidean": 0, "manhattan": 1, "cosine": 2}
    link_dict = {"none": 0, "ward": 1, "complete": 2, "average": 3, "single": 4}
    # 11 agglomeration combos: 4 with l2 affinity, 3 with l1, 3 with cos, and no agglom
    # 4 EM options: full, tied, diag, spherical
    bics = np.zeros([44, len(ks)]) - np.inf
    aris = np.zeros([44, len(ks)]) - np.inf
    best_ari = float("-inf")
    best_bic = float("-inf")
    best_n_params = np.inf
    if metric is not None:
        best_metric = float("-inf")
        best_bic_metric = float("-inf")
        best_combo_metric = []
        best_c_hat_metric = []
        best_k_metric = 0
        best_means_metric = []
        best_reg_metric = 0
        best_n_params_metric = 0
    for i, k in enumerate(ks):
        for af in affinities:
            for li in linkages:
                # some combinations don't work, skip these
                if li == "ward" and af != "euclidean":
                    continue
                if (li == "none" and af != "none") or (af == "none" and li != "none"):
                    continue
                for cov in covariance_types:
                    if verbose == 1:
                        print(f"K={k}, Affinity={af}, Linkage={li}, Covariance={cov}")
                    # each (cov, aff, link) combination owns one row of the grid
                    row = 11 * cov_dict[cov] + 3 * aff_dict[af] + link_dict[li]
                    c_hat, means, bic, ari, reg, n_params = cluster(
                        x, af, li, cov, k, c_true
                    )
                    bics[row, i] = bic
                    # BUGFIX: cluster() returns ari=None when c_true is None;
                    # assigning None into a float ndarray raises TypeError,
                    # so store -inf in that case instead
                    aris[row, i] = ari if ari is not None else -np.inf
                    if c_true is not None and ari > best_ari:
                        best_ari = ari
                        best_combo_ari = [af, li, cov]
                        best_c_hat_ari = c_hat
                        best_k_ari = k
                    if bic > best_bic:
                        best_bic = bic
                        best_combo_bic = [af, li, cov]
                        best_c_hat_bic = c_hat
                        best_k_bic = k
                        best_means_bic = means
                        reg_bic = reg
                        best_n_params = n_params
                    if metric is not None:
                        score = metric(c_hat, n_params)
                        if score > best_metric:
                            best_bic_metric = bic
                            best_combo_metric = [af, li, cov]
                            best_c_hat_metric = c_hat
                            best_k_metric = k
                            best_means_metric = means
                            best_reg_metric = reg
                            best_n_params_metric = n_params
                            best_metric = score
    # True plot**********************************
    if plot and c_true is not None:
        plt.figure(figsize=(8, 8))
        ptcolors = [colors[i] for i in c_true.astype(int)]
        plt.scatter(x[:, 0], x[:, 1], c=ptcolors)
        plt.title("True labels")
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_true.png")
    # Plot with best BIC*********************************
    if plot:
        plt.figure(figsize=(8, 8))
        # ptcolors = [colors[i] for i in best_c_hat_bic]
        plt.scatter(x[:, 0], x[:, 1], c=best_c_hat_bic)
        # mncolors = [colors[i] for i in np.arange(best_k_bic)]
        mncolors = [i for i in np.arange(best_k_bic)]
        plt.scatter(best_means_bic[:, 0], best_means_bic[:, 1], c=mncolors, marker="x")
        plt.title(
            "py(agg-gmm) BIC %3.0f from " % best_bic
            + str(best_combo_bic)
            + " k="
            + str(best_k_bic)
            + " reg="
            + str(reg_bic)
        )  # + "iter=" + str(best_iter_bic))
        plt.legend()
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bestbic.png")
    titles = ["full", "tied", "diag", "spherical"]
    if plot and c_true is not None:
        # Plot with best ARI************************************
        plt.figure(figsize=(8, 8))
        ptcolors = [colors[i] for i in best_c_hat_ari]
        plt.scatter(x[:, 0], x[:, 1], c=ptcolors)
        plt.title(
            "py(agg-gmm) ARI %3.3f from " % best_ari
            + str(best_combo_ari)
            + " k="
            + str(best_k_ari)
        )  # + "iter=" + str(best_iter_ari))
        plt.xlabel("First feature")
        plt.ylabel("Second feature")
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bestari.png")
        # ARI vs BIC********************************
        plt.figure(figsize=(8, 8))
        for row in np.arange(4):
            xs = bics[row * 11 : (row + 1) * 11, :]
            ys = aris[row * 11 : (row + 1) * 11, :]
            # only plot combinations that actually ran (grid is seeded with -inf)
            idxs = (xs != -np.inf) * (ys != -np.inf)
            plt.scatter(xs[idxs], ys[idxs], label=titles[row])
        idxs = (bics != -np.inf) * (aris != -np.inf)
        slope, _, r_value, _, p_value = stats.linregress(bics[idxs], aris[idxs])
        plt.xlabel("BIC")
        plt.ylabel("ARI")
        plt.legend(loc="lower right")
        plt.title(
            "Pyclust's ARI vs BIC for Drosophila Data with Correlation r^2=%2.2f"
            % (r_value ** 2)
        )
        plt.show()
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bicari.png")
    if plot:
        # plot of all BICS*******************************
        labels = {
            0: "none",
            1: "l2/ward",
            2: "l2/complete",
            3: "l2/average",
            4: "l2/single",
            5: "l1/complete",
            6: "l1/average",
            7: "l1/single",
            8: "cos/complete",
            9: "cos/average",
            10: "cos/single",
        }
        f, ((ax0, ax1), (ax2, ax3)) = plt.subplots(
            2, 2, sharey="row", sharex="col", figsize=(10, 10)
        )
        # one subplot per covariance type; rows of `bics` are grouped in 11s
        for row in np.arange(bics.shape[0]):
            if all(bics[row, :] == -np.inf):
                continue
            if row <= 10:
                ax0.plot(np.arange(1, len(ks) + 1), bics[row, :])
            elif row <= 21:
                ax1.plot(
                    np.arange(1, len(ks) + 1), bics[row, :], label=labels[row % 11]
                )
            elif row <= 32:
                ax2.plot(np.arange(1, len(ks) + 1), bics[row, :])
            elif row <= 43:
                ax3.plot(np.arange(1, len(ks) + 1), bics[row, :])
        ax0.set_title(titles[0])
        ax0.set(ylabel="bic")
        ax1.set_title(titles[1])
        ax1.legend(loc="lower right")
        ax2.set_title(titles[2])
        ax2.set(xlabel="k")
        ax2.set(ylabel="bic")
        ax3.set_title(titles[3])
        ax3.set(xlabel="k")
        plt.show()
        if savefigs is not None:
            plt.savefig(savefigs + "_python_bicplot.png")
    if metric is not None:
        return best_c_hat_metric, best_n_params_metric
    else:
        return best_c_hat_bic, best_n_params
| en | 0.788373 | Created on Thu Mar 14 14:19:55 2019 These functions allow for "brute clustering," inspired by R's mclust. Clustering is performed first by hierarchical agglomeration, then fitting a Gaussian Mixture via Expectation Maximization (EM). There are several ways to perform both agglomeration and EM so these functions performs the (specified) combinations of methods then evaluates each according to BIC. @author: <NAME> Calculates likelihood of a set of data from a GMM, then calculates BIC Inputs: x - nxd datapoints wts - list of mixture weights (same length as means and variances) means - list of d numpy arrays of mixture means variances - list of dxd covariance matrices k - number of parameters Outputs: bic - BIC where higher is better Calculates BIC from input that is formatted either as the sklearn GaussianMixture components or from data that was saved to a csv in R Inputs data - nxd numpy array of data wts - k numpy array of mixture weights mus - kxd numpy array of means covs - kxdxd in the case of r and in python, the shape depends on the model type (see GaussianMixture class) m - a string that specifies the model, implies that format of the other inputs (e.g. 
'VII' implies that the parameters were read from a csv that was written by R) Outputs BIC - bic value as calculated by the function above # These options indicate mclust model types, so the format of covs is how # it was written to a csv in R # These options indicate GaussianMixture types, so the format of covs is # sklearrn.mixture.GaussianMixture.covariances_ # because the weights must add to 1 Hierarchical Agglomeration inputs: data - nxd numpy array aff - affinity technique, an element of ['euclidean','manhattan','cosine'] link - linkage technique, an element of ['ward','complete','average','single'] k - number of clusters outputs: one_hot - nxk numpy array with a single one in each row indicating cluster membership exceptions: ward linkage can only be used with euclidean/l2 affinity so if ward is specified with a different linkage then there is an Exception sklearn's Gaussian Mixture does not allow initialization from class membership but it does allow from initialization of mixture parameters, so here we calculate the mixture parameters according to class membership input: data - nxd numpy array one_hot - nxd numpy array with a single one in each row indicating cluster membership k - number of clusters output: weights - k array of mixing weights means - kxd array of means of mixture components precisions - precision matrices, format depends on the EM clustering option (eg 'full' mode needs a list of matrices, one for each mixture component,but 'tied' mode only needs a single matrix, since all precisions are constrained to be equal) Cluster according to specified method input: data - nxk numpy matrix of data c_true - n array of true cluster membership aff - affinity, element of ['euclidean','manhattan','cosine'] or none for EM from scratch link - linkage, element of ['ward','complete','average','single'], or none for EM from scratch cov - covariance, element of ['full','tied','diag','spherical'] k - # of clusters output: c_hat - n array of clustering results 
means - kxd array of means of mixture components bic - Bayes Information Criterion for this clustering ari - Adjusted Rand Index to comparing clustering result to true clustering reg - regularization parameter that was used in the clustering results (0 or 1e-6) # no regularization # if there was a numerical error during EM,or while calculating BIC, # or if the clustering found a class with only one element # regularize # if there was a numerical error, or if initial clustering produced a # mixture component with only one element Cluster all combinations of options and plot results inputs: x - nxd array of data c_true - n array of true clustering affinites - list of affinity modes, each must be an element of ['none,'euclidean','manhattan','cosine'] linkages - list of linkage modes, each must be an element of ['none','ward','complete','average','single'] covariance_types - list of covariance modes, each must be an element of ['full','tied','diag','spherical'] ks - list of cluster numbers savefigs - None indicates that figures should not be saved, a string value indicates the name that should be used when saving the figures verbose - if 0, no output, if 1, output the current clustering options being used outputs: bics,aris - 44xlength(ks) array of bic and ari values for each clustering result # 11 agglomeration combos: 4 with l2 affinity, 3 with l1, 3 with cos, and no agglom # 4 EM options: full, tied, diag, spherical # some combinations don't work, skip these # True plot********************************** # Plot with best BIC********************************* # ptcolors = [colors[i] for i in best_c_hat_bic] # mncolors = [colors[i] for i in np.arange(best_k_bic)] # + "iter=" + str(best_iter_bic)) # Plot with best ARI************************************ # + "iter=" + str(best_iter_ari)) # ARI vs BIC******************************** # plot of all BICS******************************* | 3.192939 | 3 |
tests/AppInstanceClientTest.py | rkhleics/oktasdk-python | 1 | 6616949 | <filename>tests/AppInstanceClientTest.py
from okta.AppInstanceClient import AppInstanceClient
from okta.framework.OktaError import OktaError
import unittest
import os
from okta.models.app.AppInstance import AppInstance
class SessionsClientTest(unittest.TestCase):
    """Integration tests for AppInstanceClient against a live Okta org.

    Requires OKTA_TEST_URL, OKTA_TEST_KEY, OKTA_TEST_ADMIN_NAME and
    OKTA_TEST_ADMIN_PASSWORD to be set in the environment.
    """

    def setUp(self):
        self.client = AppInstanceClient(os.environ.get('OKTA_TEST_URL'), os.environ.get('OKTA_TEST_KEY'))
        self.username = os.environ.get('OKTA_TEST_ADMIN_NAME')
        # was "<PASSWORD>.get(...)" -- a redaction artifact that is not valid
        # Python; restored to match the pattern of the surrounding lines
        self.password = os.environ.get('OKTA_TEST_ADMIN_PASSWORD')

    def test_create_app(self):
        # creating a bookmark app should come back with a server-assigned id
        app = AppInstance.build_bookmark("https://www.google.com")
        app = self.client.create_app_instance(app)
        self.assertIsNotNone(app.id)

    def test_delete_app(self):
        # an app must be deactivated before it can be deleted; fetching it
        # after deletion should raise OktaError
        app = AppInstance.build_bookmark("https://www.google.com")
        app = self.client.create_app_instance(app)
        self.client.deactivate_app_instance(app.id)
        self.client.delete_app_instance(app.id)
        self.assertRaises(OktaError, self.client.get_app_instance, app.id)
from okta.AppInstanceClient import AppInstanceClient
from okta.framework.OktaError import OktaError
import unittest
import os
from okta.models.app.AppInstance import AppInstance
class SessionsClientTest(unittest.TestCase):
def setUp(self):
self.client = AppInstanceClient(os.environ.get('OKTA_TEST_URL'), os.environ.get('OKTA_TEST_KEY'))
self.username = os.environ.get('OKTA_TEST_ADMIN_NAME')
self.password = <PASSWORD>.get('OKTA_TEST_ADMIN_PASSWORD')
def test_create_app(self):
app = AppInstance.build_bookmark("https://www.google.com")
app = self.client.create_app_instance(app)
self.assertIsNotNone(app.id)
def test_delete_app(self):
app = AppInstance.build_bookmark("https://www.google.com")
app = self.client.create_app_instance(app)
self.client.deactivate_app_instance(app.id)
self.client.delete_app_instance(app.id)
self.assertRaises(OktaError, self.client.get_app_instance, app.id) | none | 1 | 2.353669 | 2 | |
nmt_adaptation/change_tag.py | Sohyo/NMT-Adaptation | 0 | 6616950 | <reponame>Sohyo/NMT-Adaptation
import os
from nmt_adaptation.util import arr2txt
import argparse
def parse_arguments(argv=None):
    """Parse command-line options for exchanging/removing/adding tags.

    argv - optional list of argument strings; defaults to sys.argv[1:]
           (accepting a list makes the function testable without patching
           sys.argv, and is backward compatible with zero-argument calls)
    Returns a (data_name, new_data_name, new_tag) tuple.
    """
    parser = argparse.ArgumentParser(description='Exchange, remove, add tag.')
    parser.add_argument('--data_name', type=str, default='EMEA',
                        help='The name of the datasets that we want to change the tag.')
    parser.add_argument('--new_data_name', type=str, default='1',
                        help='A new name of the file. This name will be combined with exited file and create new file name.')
    parser.add_argument('--new_tag', type=str, default='PT',
                        help='The new tag')
    args = parser.parse_args(argv)
    return args.data_name, args.new_data_name, args.new_tag
def change_tag(root_dir, file_name, replacement_tag, new_file_name):
    """
    Rewrite the boundary tags of every tagged sentence with a new tag.

    :param root_dir: The directory where the tagged phrases are
    :param file_name: The name of the tagged phrases file
    :param replacement_tag: The new tag to place at both ends of each sentence
    :param new_file_name: The name of the file the result is written to
    :return: None; writes the retagged data file
    """
    def _retag(line):
        # the tag occupies the first and last whitespace-delimited token
        tokens = line.split()
        tokens[0] = f"{replacement_tag}"
        tokens[-1] = f"{replacement_tag}"
        return ' '.join(tokens)

    with open(os.path.join(root_dir, file_name)) as source:
        retagged = [_retag(line) for line in source]
    arr2txt(arr=retagged, file_name=os.path.join(root_dir, new_file_name))
def remove_tags(root_dir, file_name):
    """Strip the leading and trailing tag token from every sentence and write
    the result to ``raw_<file_name>`` in the same directory."""
    stripped = []
    with open(os.path.join(root_dir, file_name)) as source:
        for line in source:
            # drop the first token (front tag), then the last token (end tag)
            without_front = line.split(' ', 1)[1]
            stripped.append(without_front.rsplit(' ', 1)[0])
    arr2txt(arr=stripped, file_name=os.path.join(root_dir, f'raw_{file_name}'))
def get_raw_dataset():
    """Placeholder: recover the untagged dataset. Not implemented yet."""
    # TODO
    pass
def add_tag():
    """Placeholder: add tags to a raw dataset. Not implemented yet."""
    # TODO
    pass
def main():
    """Entry point: retag the boundary tags of the training phrase files.

    Reads CLI options (data name, new data name, new tag) and rewrites the
    front/end tags of the German and English phrase files found under
    ../data/edited_phrases/. The REMOVE/ADD sections below are kept as
    commented examples of the script's other operations.
    """
    # TODO : maybe class can work better for this?
    root_dir = "../data/edited_phrases/"
    data_name, new_data_name, new_tag = parse_arguments()
    langs = [".de", ".en"]
    # EXCHANGE TAG
    for lang in langs:
        change_tag(root_dir=root_dir, file_name=f"{data_name}_tagged_train_phrase_4_0.5{lang}",
                   replacement_tag=new_tag, new_file_name=f"{data_name}_{new_data_name}_tag_train_phrase_4_0.5{lang}")
    # REMOVE tag
    # for lang in langs:
    #     remove_tags(root_dir=root_dir, file_name=f"{data_name}_tagged_train_phrase_4_0.5{lang}")
    # ADD TAG
if __name__ == '__main__':
main()
| import os
from nmt_adaptation.util import arr2txt
import argparse
def parse_arguments(argv=None):
    """Parse command-line options for exchanging/removing/adding tags.

    argv - optional list of argument strings; defaults to sys.argv[1:]
           (accepting a list makes the function testable without patching
           sys.argv, and is backward compatible with zero-argument calls)
    Returns a (data_name, new_data_name, new_tag) tuple.
    """
    parser = argparse.ArgumentParser(description='Exchange, remove, add tag.')
    parser.add_argument('--data_name', type=str, default='EMEA',
                        help='The name of the datasets that we want to change the tag.')
    parser.add_argument('--new_data_name', type=str, default='1',
                        help='A new name of the file. This name will be combined with exited file and create new file name.')
    parser.add_argument('--new_tag', type=str, default='PT',
                        help='The new tag')
    args = parser.parse_args(argv)
    return args.data_name, args.new_data_name, args.new_tag
def change_tag(root_dir, file_name, replacement_tag, new_file_name):
    """
    Rewrite the boundary tags of every tagged sentence with a new tag.

    :param root_dir: The directory where the tagged phrases are
    :param file_name: The name of the tagged phrases file
    :param replacement_tag: The new tag to place at both ends of each sentence
    :param new_file_name: The name of the file the result is written to
    :return: None; writes the retagged data file
    """
    def _retag(line):
        # the tag occupies the first and last whitespace-delimited token
        tokens = line.split()
        tokens[0] = f"{replacement_tag}"
        tokens[-1] = f"{replacement_tag}"
        return ' '.join(tokens)

    with open(os.path.join(root_dir, file_name)) as source:
        retagged = [_retag(line) for line in source]
    arr2txt(arr=retagged, file_name=os.path.join(root_dir, new_file_name))
def remove_tags(root_dir, file_name):
    """Strip the leading and trailing tag token from every sentence and write
    the result to ``raw_<file_name>`` in the same directory."""
    stripped = []
    with open(os.path.join(root_dir, file_name)) as source:
        for line in source:
            # drop the first token (front tag), then the last token (end tag)
            without_front = line.split(' ', 1)[1]
            stripped.append(without_front.rsplit(' ', 1)[0])
    arr2txt(arr=stripped, file_name=os.path.join(root_dir, f'raw_{file_name}'))
def get_raw_dataset():
    """Placeholder: recover the untagged dataset. Not implemented yet."""
    # TODO
    pass
def add_tag():
    """Placeholder: add tags to a raw dataset. Not implemented yet."""
    # TODO
    pass
def main():
    """Entry point: retag the boundary tags of the training phrase files.

    Reads CLI options (data name, new data name, new tag) and rewrites the
    front/end tags of the German and English phrase files found under
    ../data/edited_phrases/. The REMOVE/ADD sections below are kept as
    commented examples of the script's other operations.
    """
    # TODO : maybe class can work better for this?
    root_dir = "../data/edited_phrases/"
    data_name, new_data_name, new_tag = parse_arguments()
    langs = [".de", ".en"]
    # EXCHANGE TAG
    for lang in langs:
        change_tag(root_dir=root_dir, file_name=f"{data_name}_tagged_train_phrase_4_0.5{lang}",
                   replacement_tag=new_tag, new_file_name=f"{data_name}_{new_data_name}_tag_train_phrase_4_0.5{lang}")
    # REMOVE tag
    # for lang in langs:
    #     remove_tags(root_dir=root_dir, file_name=f"{data_name}_tagged_train_phrase_4_0.5{lang}")
    # ADD TAG
if __name__ == '__main__':
main() | en | 0.840629 | :param root_dir: The directory where the tagged phrases are :param file_name: The name of the tagged phrases file :param replacement_tag: The new tag wanted to change :param new_file_name: The new name of the file :return: Write the new data file # replace the tags located front and end of the sentences with the new tag! # sentence[-1] = "PT" # This looks quite strange but it is basically split the very first and very last words from the sentences. # And then only take the middle string which is a sentence. # TODO # TODO # TODO : maybe class can work better for this? # EXCHANGE TAG # REMOVE tag # for lang in langs: # remove_tags(root_dir=root_dir, file_name=f"{data_name}_tagged_train_phrase_4_0.5{lang}") # ADD TAG | 3.518696 | 4 |