index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
59,875 | WilfredLemus/antivirus | refs/heads/master | /antivirus/apps/home/admin.py | from django.contrib import admin
from .models import Categoria, antivirus
# Register the home app's models so they are manageable in the Django admin.
admin.site.register(Categoria)
admin.site.register(antivirus)
| {"/antivirus/apps/home/admin.py": ["/antivirus/apps/home/models.py"]} |
59,876 | WilfredLemus/antivirus | refs/heads/master | /antivirus/apps/home/urls.py | from django.conf.urls import include, url
from django.contrib import admin
from .views import IndexView, TestView
urlpatterns = [
url(r'^$', IndexView.as_view(), name='index'),
url(r'^test/$', TestView.as_view(), name='test'),
# url(r'^preguntar/$', QuestionCreateView.as_view(), name='create_question'),
] | {"/antivirus/apps/home/admin.py": ["/antivirus/apps/home/models.py"]} |
59,877 | WilfredLemus/antivirus | refs/heads/master | /antivirus/apps/home/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: creates the ``antivirus`` and ``Categoria`` models and
    links each antivirus record to a category."""

    # First migration of the app, so there is nothing to depend on yet.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='antivirus',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=100)),
                # slug is generated elsewhere (editable=False hides it from forms)
                ('slug', models.SlugField(editable=False)),
                ('descripcion', models.CharField(max_length=150)),
                ('imagen', models.ImageField(upload_to=b'events')),
                # price with 2 decimal places, up to 999.99
                ('precio', models.DecimalField(default=0.0, max_digits=5, decimal_places=2)),
                ('SistemaOperativo', models.CharField(max_length=60)),
                ('arquitectura', models.CharField(max_length=60)),
                ('ram', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Categoria',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=50)),
                ('slug', models.SlugField(editable=False)),
            ],
        ),
        # Added after both models exist so the FK target is already defined.
        # NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
        migrations.AddField(
            model_name='antivirus',
            name='categoria',
            field=models.ForeignKey(to='home.Categoria'),
        ),
    ]
| {"/antivirus/apps/home/admin.py": ["/antivirus/apps/home/models.py"]} |
59,881 | royw/fullmonty | refs/heads/master | /fullmonty/remote_shell.py | # coding=utf-8
"""
Remote shell with a context over ssh with support for pexpect.
Usage
-----
.. code-block:: python
with RemoteShell(user=user, password=password, host=host, verbose=True) as remote:
remote.run("ls ~")
remote.put(local_file, remote_dir)
remote.get(remote_file)
"""
import json
import os
import stat
import sys
import re
from time import sleep
from getpass import getpass, getuser
import pexpect
import paramiko
from paramiko import SSHClient
from fullmonty.touch import touch
from fullmonty.simple_logger import debug
try:
from pexpect.pxssh import pxssh
except ImportError:
try:
from pexpect import pxssh
except ImportError:
# noinspection PyUnresolvedReferences
import pxssh
# from pexpect.pxssh import ExceptionPxssh
from scp import SCPClient
try:
# noinspection PyUnresolvedReferences
from ordereddict import OrderedDict
except ImportError:
# noinspection PyUnresolvedReferences
from collections import OrderedDict
from .ashell import AShell, CR, MOVEMENT
__docformat__ = 'restructuredtext en'
__all__ = ('RemoteShell',)
class RemoteShell(AShell):
    """
    Provides run interface over an ssh connection.

    Login is attempted first with ssh keys; on any failure it falls back to
    password login, prompting interactively when no password is available.
    Credentials may be cached in the JSON file ``~/.remote_shell_rc``.

    :param host: remote host name or address (required, non-empty)
    :type host: str
    :param user: remote login name; resolved from the creds file or the
        current local user when None
    :type user: str|None
    :param password: login password; resolved from the creds file or
        prompted for when None
    :type password: str|None
    :param logfile: optional file for session logging
    :type logfile: file|None
    :param verbose: echo commands and their output when asserted
    :type verbose: bool
    :param password_callback: called with the password that was actually used
        for login (lets callers observe/cache it)
    :type password_callback: callable|None
    """

    def __init__(self, host, user=None, password=None, logfile=None, verbose=False, password_callback=None):
        super(RemoteShell, self).__init__(is_remote=True, verbose=verbose)
        # JSON credentials cache written by saveUserPasswordToCredsFile
        self.creds_file = os.path.expanduser('~/.remote_shell_rc')
        if host is None or not host:
            raise AttributeError("You must provide a non-empty string for 'host'")
        if user is None:
            user = self.getUser(host)
        if user is None or not user:
            raise AttributeError("You must provide a non-empty string for 'user'")
        if password is None:
            password = self.getPassword(host, user)
        self.user = user
        self.password = password
        self.address = host
        self.port = 22
        # First try key-based (passwordless) login.
        # NOTE(review): bare except also swallows KeyboardInterrupt — confirm intent.
        # noinspection PyBroadException
        try:
            # noinspection PyCallingNonCallable
            self.ssh = pxssh(timeout=1200)
            self.ssh.login(host, user)
        except:
            # Key login failed: fall back to password login, prompting if needed.
            if not password:
                password = getpass('password for {user}@{host}: '.format(user=user, host=host))
            # let the caller observe the password that was actually used
            if password_callback is not None and callable(password_callback):
                password_callback(password)
            # noinspection PyCallingNonCallable
            self.ssh = pxssh(timeout=1200)
            self.ssh.login(host, user, password)
        self.accept_defaults = False
        self.logfile = logfile
        self.prefix = None
        self.postfix = None

    def env(self):
        """
        Return the remote environment as a dict, parsed from the output of
        running ``env`` on the remote host.  Best effort: returns whatever
        was parsed so far (possibly {}) if anything goes wrong.
        """
        environ = {}
        # noinspection PyBroadException
        try:
            for line in self.run('env').split("\n"):
                # each line is expected to look like NAME=value
                match = re.match(r'([^=]+)=(.*)', line)
                if match:
                    environ[match.group(1).strip()] = match.group(2).strip()
        except:
            pass
        return environ

    def _report(self, output, out_stream, verbose):
        """Append the session's pending before/after text to output, echoing str values."""
        def _out_string(value):
            if value:
                if isinstance(value, str):
                    self.display(value, out_stream=out_stream, verbose=verbose)
                # NOTE(review): non-str truthy values (e.g. bytes or pexpect
                # sentinel matches) are appended too — confirm callers that
                # ''.join(output) never receive them.
                output.append(value)
        _out_string(self.ssh.before)
        _out_string(self.ssh.after)

    # noinspection PyUnusedLocal
    def run_pattern_response(self, cmd_args, out_stream=sys.stdout, verbose=True,
                             prefix=None, postfix=None,
                             pattern_response=None, accept_defaults=False,
                             timeout=1200):
        """
        Run the external command and interact with it using the pattern_response dictionary

        :param timeout: maximum seconds to wait for completion
            (NOTE(review): accepted but not passed to ssh.expect — confirm intent)
        :param accept_defaults: when asserted, auto-answer this user's sudo
            password prompt and accept default "[...]" prompts
        :param cmd_args: command line arguments
        :param out_stream: stream verbose messages are written to
        :param verbose: output messages if asserted
        :param prefix: command line arguments prepended to the given cmd_args
        :param postfix: command line arguments appended to the given cmd_args
        :param pattern_response: dictionary whose key is a regular expression pattern that when matched
            results in the value being sent to the running process. If the value is None, then no response is sent.
        :return: the collected output split into lines
        :rtype: list[str]
        """
        # work on a copy so the caller's dict is never mutated
        pattern_response_dict = OrderedDict(pattern_response or {})
        if accept_defaults:
            # auto-answer this user's sudo password prompt
            sudo_pattern = 'password for {user}: '.format(user=self.user)
            sudo_response = "{password}\r".format(password=self.password)
            pattern_response_dict[sudo_pattern] = sudo_response
            # accept default prompts, don't match "[sudo] "
            pattern_response_dict[r'\[\S+\](?<!\[sudo\])(?!\S)'] = CR
        # always swallow cursor-movement noise and expect timeouts
        pattern_response_dict[MOVEMENT] = None
        pattern_response_dict[pexpect.TIMEOUT] = None
        patterns = list(pattern_response_dict.keys())
        patterns.append(self.ssh.PROMPT)
        args = self.expand_args(cmd_args, prefix=prefix, postfix=postfix)
        command_line = ' '.join(args)
        # info("pattern_response_dict => %s" % repr(pattern_response_dict))
        # self.display("{line}\n".format(line=command_line), out_stream=out_stream, verbose=verbose)
        output = []
        self.ssh.prompt(timeout=0.1)  # clear out any pending prompts
        self._report(output, out_stream=out_stream, verbose=verbose)
        self.ssh.sendline(command_line)
        while True:
            try:
                index = self.ssh.expect(patterns)
                if index == patterns.index(pexpect.TIMEOUT):
                    print("ssh.expect TIMEOUT")
                else:
                    self._report(output, out_stream=out_stream, verbose=verbose)
                    if index == patterns.index(self.ssh.PROMPT):
                        # the shell prompt is back: the command is finished
                        break
                    key = patterns[index]
                    response = pattern_response_dict[key]
                    if response:
                        # brief pause so the child is ready for input
                        sleep(0.1)
                        self.ssh.sendline(response)
            except pexpect.EOF:
                self._report(output, out_stream=out_stream, verbose=verbose)
                break
        self.ssh.prompt(timeout=0.1)
        self._report(output, out_stream=out_stream, verbose=verbose)
        return ''.join(output).split("\n")

    # noinspection PyUnusedLocal,PyShadowingNames
    def run(self, cmd_args, out_stream=sys.stdout, env=None, verbose=True,
            prefix=None, postfix=None, accept_defaults=False, pattern_response=None, timeout=120,
            timeout_interval=.001, debug=False):
        """
        Runs the command and returns the output, writing each the output to out_stream if verbose is True.

        :param cmd_args: list of command arguments or str command line
        :type cmd_args: list or str
        :param out_stream: the output stream
        :type out_stream: file
        :param env: the environment variables for the command to use.
            (NOTE(review): accepted but unused in this implementation — confirm)
        :type env: dict
        :param verbose: if verbose, then echo the command and it's output to stdout.
        :type verbose: bool
        :param prefix: list of command arguments to prepend to the command line
        :type prefix: list[str]
        :param postfix: list of command arguments to append to the command line
        :type postfix: list[str]
        :param accept_defaults: accept responses to default regexes.
        :type accept_defaults: bool
        :param pattern_response: dictionary whose key is a regular expression pattern that when matched
            results in the value being sent to the running process. If the value is None, then no response is sent.
        :type pattern_response: dict[str, str]
        :param timeout: the maximum time to give the process to complete
        :type timeout: int
        :param timeout_interval: the time to sleep between process output polling
        :type timeout_interval: int
        :param debug: emit debugging info
        :type debug: bool
        :returns: the output of the command
        :rtype: str
        """
        if isinstance(cmd_args, str):
            # a str command line is passed through as one pre-joined argument
            # cmd_args = pexpect.split_command_line(cmd_args)
            cmd_args = [cmd_args]
        # anything interactive is delegated to run_pattern_response
        if pattern_response or accept_defaults or self.accept_defaults:
            return self.run_pattern_response(cmd_args, out_stream=out_stream, verbose=verbose,
                                             prefix=prefix, postfix=postfix,
                                             pattern_response=pattern_response,
                                             accept_defaults=accept_defaults or self.accept_defaults,
                                             timeout=timeout)
        args = self.expand_args(cmd_args, prefix=prefix, postfix=postfix)
        command_line = ' '.join(args)
        self.display("{line}\n".format(line=command_line), out_stream=out_stream, verbose=verbose)
        self.ssh.prompt(timeout=.1)  # clear out any pending prompts
        self.ssh.sendline(command_line)
        self.ssh.prompt(timeout=timeout)
        buf = [self.ssh.before]
        if self.ssh.after:
            buf.append(str(self.ssh.after))
        return ''.join(buf)

    def put(self, files, remote_path=None, out_stream=sys.stdout, verbose=False):
        """
        Copy a file from the local system to the remote system.

        Opens a fresh paramiko SSHClient/SCPClient connection for the copy.

        :param files: local file (or tree; recursive copy) to send
        :param remote_path: destination path; defaults to the source path
        :param out_stream: stream verbose messages are written to
        :param verbose: output messages if asserted
        :return: the scp output (may be the empty string)
        :rtype: str
        """
        if remote_path is None:
            remote_path = files
        self.display("scp '{src}' '{dest}'".format(src=files, dest=remote_path),
                     out_stream=out_stream, verbose=verbose)
        ssh = SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(self.address, self.port, self.user, self.password)
        scp = SCPClient(ssh.get_transport())
        # scp = SCPClient(self.ssh.get_transport())
        output = scp.put(files, remote_path, recursive=True) or ''
        self.display("\n" + output, out_stream=out_stream, verbose=verbose)
        return output

    def get(self, remote_path, local_path=None, out_stream=sys.stdout, verbose=False):
        """
        Copy a file from the remote system to the local system.

        Lists remote_path with ``/bin/ls -1`` over the shell session, then
        fetches each listed name via a fresh paramiko SFTP connection.

        :param remote_path: remote file or glob to fetch
        :param local_path: local destination; defaults to remote_path
        :param out_stream: stream verbose messages are written to
        :param verbose: output messages if asserted
        :return: repr of the list of remote names that were fetched
        :rtype: str
        """
        if local_path is None:
            local_path = remote_path
        self.display("scp '{src}' '{dest}'\n".format(src=remote_path, dest=local_path),
                     out_stream=out_stream, verbose=verbose)
        # drop the echoed command line ([1:]) and the shell prompt sentinel
        names = [name.strip() for name in self.run(['/bin/ls', '-1', '--color=never', remote_path]).split('\r\n')[1:] if
                 name.strip() != '[PEXPECT]$']
        self.display("names: {names}".format(names=repr(names)))
        ssh = SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(self.address, self.port, self.user, self.password)
        # scp = SFTPClient.from_transport(ssh.get_transport())
        # output = scp.get(remote_path, local_path, recursive=True)
        ftp = ssh.open_sftp()
        for name in names:
            self.display(name + '\n')
            ftp.get(name, local_path)
        output = repr(names)
        self.display(output, out_stream=out_stream, verbose=verbose)
        return output

    def _system(self, command_line):
        """Send command_line, wait for the shell prompt, and return the raw output."""
        self.ssh.sendline(command_line)
        self.ssh.prompt()
        buf = [self.ssh.before]
        if self.ssh.after:
            buf.append(str(self.ssh.after))
        return ''.join(buf)

    def logout(self):
        """
        Close the ssh session.
        """
        if self.ssh:
            self.ssh.logout()
            self.ssh = None

    def getUserFromCredsFile(self, host):
        """
        Look up the login name for host in the creds file.

        Returns None when the host has no 'user' entry.  Unlike
        getPasswordFromCredsFile, open/parse errors propagate to the caller.
        """
        # noinspection PyArgumentEqualDefault
        with open(self.creds_file, 'r') as creds_file:
            creds_dict = json.loads(creds_file.read())
            if host in creds_dict:
                if 'user' in creds_dict[host]:
                    # NOTE(review): expects {'user': {'name': ...}} but
                    # saveUserPasswordToCredsFile stores 'user' as a plain
                    # string — these two appear inconsistent; confirm.
                    return creds_dict[host]['user']['name']

    def getPasswordFromCredsFile(self, host, user):
        """
        Look up the password for user@host in the creds file.

        :return: the saved password, or None if unavailable for any reason
        """
        # noinspection PyArgumentEqualDefault
        try:
            # noinspection PyArgumentEqualDefault
            with open(self.creds_file, 'r') as creds_file:
                creds_dict = json.loads(creds_file.read())
                if host in creds_dict:
                    if 'user' in creds_dict[host]:
                        if creds_dict[host]['user'] == user:
                            return creds_dict[host]['password']
        except Exception as ex:
            debug(str(ex))
        return None

    def getUser(self, host):
        """Resolve the login name: creds file first, then the current local user."""
        user = self.getUserFromCredsFile(host)
        if user is None or not user:
            user = getuser()
        return user

    def getPassword(self, host, user):
        """Resolve the password: creds file first, then prompt (and cache the answer)."""
        password = self.getPasswordFromCredsFile(host, user)
        if password is None or not password:
            password = getpass('password for {user}@{host}: '.format(user=user, host=host))
            self.saveUserPasswordToCredsFile(host, user, password)
        return password

    def saveUserPasswordToCredsFile(self, host, user, password):
        """
        Persist user/password for host into the creds file (JSON), best
        effort: merge with any existing contents, create the file if needed,
        and restore its permission bits afterwards.

        WARNING: the password is stored in plain text.
        """
        creds_dict = {}
        # merge into whatever is already saved; ignore a missing/corrupt file
        # noinspection PyBroadException
        try:
            # noinspection PyArgumentEqualDefault
            with open(self.creds_file, 'r') as creds_file:
                creds_dict = json.loads(creds_file.read())
        except:
            pass
        if host not in creds_dict.keys():
            creds_dict[host] = {}
        creds_dict[host]['user'] = user
        creds_dict[host]['password'] = password
        # default to owner read/write if we can't stat an existing file
        mode = stat.S_IWUSR | stat.S_IRUSR
        # noinspection PyBroadException
        try:
            mode = os.stat(self.creds_file).st_mode
        except:
            # file doesn't exist yet; create it so chmod/open below can work
            # noinspection PyBroadException
            try:
                touch(self.creds_file)
            except:
                pass
        # noinspection PyBroadException
        try:
            # temporarily ensure the file is writable, write, then restore mode
            os.chmod(self.creds_file, mode | stat.S_IWUSR | stat.S_IWRITE)
            with open(self.creds_file, 'w') as out_file:
                json.dump(creds_dict, out_file)
            os.chmod(self.creds_file, mode)
        except:
            pass
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,882 | royw/fullmonty | refs/heads/master | /fullmonty/tmp_dir.py | # coding=utf-8
"""
Context manager for using a temporary directory.
Usage::
with TmpDir() as dir:
print("temp directory: {dir}".format(dir=dir))
"""
import shutil
import tempfile
class TmpDir(object):
    """Context manager that creates a temporary directory on entry and
    removes it (with all of its contents) on exit.

    The ``with`` target is bound to the directory's path (a str).
    """

    def __init__(self):
        # Path of the directory; populated by __enter__.
        self.__path = None

    def __enter__(self):
        """Create the temporary directory and return its path."""
        path = tempfile.mkdtemp()
        self.__path = path
        return path

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Delete the directory tree; body exceptions are not suppressed."""
        shutil.rmtree(self.__path)
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,883 | royw/fullmonty | refs/heads/master | /tests/test_comparable_mixin.py | # coding=utf-8
"""
Describe Me!
"""
from fullmonty.comparable_mixin import ComparableMixin
__docformat__ = 'restructuredtext en'
__author__ = 'wrighroy'
# noinspection PyDocstring
class BaseClass(object):
    """Inert base class mixed with ComparableMixin in the tests below to
    verify the mixin cooperates in multiple inheritance."""
    pass
# noinspection PyDocstring
def test_int_compare():
    """Exercise all six rich comparisons using integer comparison keys."""
    IntComparable = type('Aclass', (BaseClass, ComparableMixin),
                         {'_cmpkey': lambda self: self.data})
    left = IntComparable()
    right = IntComparable()
    left.data, right.data = 1, 1
    assert left == right
    assert left <= right
    right.data = 2
    assert left != right
    assert left < right
    assert left <= right
    left.data = 2
    assert left == right
    assert left >= right
    left.data = 3
    assert left > right
    assert left >= right
# noinspection PyDocstring
def test_str_compare():
    """Exercise all six rich comparisons using string comparison keys."""
    StrComparable = type('Aclass', (BaseClass, ComparableMixin),
                         {'_cmpkey': lambda self: self.data})
    left = StrComparable()
    right = StrComparable()
    left.data, right.data = '1', '1'
    assert left == right
    assert left <= right
    right.data = '2'
    assert left != right
    assert left < right
    assert left <= right
    left.data = '2'
    assert left == right
    assert left >= right
    left.data = '3'
    assert left > right
    assert left >= right
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,884 | royw/fullmonty | refs/heads/master | /tests/test_method_missing_hook.py | # coding=utf-8
"""
Unit tests for method_missing mixin.
We create two classes, Foo, and Bar. Foo is our control, we apply the mixin but do not override the default
method missing behavior. While Bar is our class with new behavior where we override both instance and class
method_missing methods.
"""
import collections
from fullmonty.method_missing_hook import MethodMissingHook
__docformat__ = 'restructuredtext en'
__author__ = 'wrighroy'
# noinspection PyMethodMayBeStatic
class Foo(MethodMissingHook):
    """
    Foo class does not implement method_missing method and is our test control to make sure we don't break anything.
    """
    # Class constant: verifies class attribute access is unaffected by the hook.
    CHARLEY = 'Brown'

    def __init__(self):
        # Instance attribute: verifies normal attribute access is unaffected.
        self.davidson = 'Harley'

    def live_to_ride(self):
        # Regular method: resolved normally, never routed through method_missing.
        return 'Ride to Live'
# noinspection PyMethodMayBeStatic
class Bar(MethodMissingHook):
    """Bar class overrides method_missing and class_method_missing methods."""
    # Class constant: verifies class attribute access is unaffected by the hook.
    WILLIEG = 'Davidson'

    def __init__(self):
        # Instance attribute: verifies normal attribute access is unaffected.
        self.hog = 'wild'

    def live_to_ride(self):
        # Regular method: resolved normally, never routed through method_missing.
        return 'Ride to Live'

    # override default method_missing method
    def method_missing(self, name, *argv, **kwargs):
        # Special-case a few known names; everything else is echoed back as a
        # "name(args)" string via method_as_string.
        if name in ['FLSTC', 'Heritage', 'Softtail']:
            return "FLSTC Heritage Softtail"
        return Bar.method_as_string(name, *argv, **kwargs)

    # override default class_method_missing method
    @classmethod
    def class_method_missing(cls, name, *argv, **kwargs):
        return cls.method_as_string(name, *argv, **kwargs)
# noinspection PyMethodMayBeStatic
class TestClass(object):
    """Tests for the MethodMissingHook mixin using Foo (control) and Bar (overriding)."""

    def test_attribute_accessor(self):
        """make sure didn't break attribute accessors"""
        control = Foo()
        control.alpha = 1
        assert control.alpha == 1
        assert control.davidson == 'Harley'
        assert control.live_to_ride() == 'Ride to Live'
        subject = Bar()
        assert subject.hog == 'wild'
        assert subject.live_to_ride() == 'Ride to Live'

    def test_class_accessor(self):
        """make sure didn't break constants"""
        assert Foo.CHARLEY == 'Brown'
        assert Bar.WILLIEG == 'Davidson'

    def test_method_missing(self):
        """didn't break default missing method behavior"""
        control = Foo()
        try:
            control.alpha()
        except AttributeError as err:
            assert True, 'raised expected AttributeError %s' % str(err)
        else:
            assert False, 'did not raise expected AttributeError'

    def test_handing_method_missing(self):
        """use Bar's method_missing with no args to the missing method"""
        subject = Bar()
        try:
            returned = subject.beta()
        except AttributeError as err:
            assert False, 'was not found %s' % str(err)
        else:
            assert returned == 'beta()', 'did not return method name (%s)' % returned

    def test_handing_method_missing_with_args(self):
        """use Bar's method_missing with just args to the missing method"""
        subject = Bar()
        try:
            returned = subject.beta(1, 2, 3)
        except AttributeError as err:
            assert False, 'was not found %s' % str(err)
        else:
            assert returned == 'beta(1, 2, 3)', 'did not return method name (%s)' % returned

    def test_handing_method_missing_with_kwargs(self):
        """use Bar's method_missing with just kwargs to the missing method"""
        subject = Bar()
        try:
            returned = subject.beta(a=4, b=5, c=6)
        except AttributeError as err:
            assert False, 'was not found %s' % str(err)
        else:
            assert returned == 'beta(a=4, b=5, c=6)', 'did not return method name (%s)' % returned

    def test_handing_method_missing_with_both(self):
        """use Bar's method_missing with both args and kwargs to the missing method"""
        subject = Bar()
        try:
            returned = subject.beta(1, 2, 3, a=4, b=5, c=6)
        except AttributeError as err:
            assert False, 'was not found %s' % str(err)
        else:
            assert returned == 'beta(1, 2, 3, a=4, b=5, c=6)', 'did not return method name (%s)' % returned

    def test_allow_specific_missing_methods(self):
        """names special-cased in Bar.method_missing return the fixed string; others echo the call"""
        subject = Bar()
        for model in ('FLSTC', 'Heritage', 'Softtail'):
            assert getattr(subject, model)() == 'FLSTC Heritage Softtail'
        assert subject.WLA() == 'WLA()'

    def test_handling_missing_class_method(self):
        """Foo keeps the default raising class hook; Bar's override echoes the call"""
        try:
            Foo.alpha()
        except AttributeError as err:
            assert True, 'raised expected AttributeError %s' % str(err)
        else:
            assert False, 'did not raise expected AttributeError'
        try:
            returned = Bar.beta(1, 2, 3, a=4, b=5, c=6)
        except AttributeError as err:
            assert False, 'was not found %s' % str(err)
        else:
            assert returned == 'beta(1, 2, 3, a=4, b=5, c=6)', 'did not return method name (%s)' % returned
class Mash(MethodMissingHook):
    """
    A small mapping class built on MethodMissingHook.

    dictionary keys must be hashable
    """

    def __init__(self, initial_dict=None):
        """
        :param initial_dict: optional initial contents; copied so the
            caller's dict is never shared or mutated
        :type initial_dict: dict|None
        """
        if initial_dict is None:
            initial_dict = {}
        # copy() guards against the caller mutating our backing store
        self._attributes = initial_dict.copy()

    def __getitem__(self, item):
        """
        Called to implement evaluation of self[item].

        For mapping types, if key is missing (not in the container),
        KeyError should be raised.

        :param item: dictionary index
        :type item: object
        :return: the value referenced by the item index
        :rtype: object
        :raises: KeyError
        """
        if item not in self._attributes:
            raise KeyError("{item} not in this mash.".format(item=str(item)))
        return self._attributes[item]

    def __setitem__(self, key, value):
        """
        Store value under key; the key must be hashable.

        :raises: TypeError if the key is not hashable
        """
        # BUG FIX: the Hashable ABC lives in collections.abc (Python 3.3+);
        # the collections.Hashable alias was removed in Python 3.10.
        if not isinstance(key, collections.abc.Hashable):
            raise TypeError("The key ({key}) is not hashable".format(key=str(key)))
        self._attributes[key] = value
        return self

    def __delitem__(self, key):
        """
        Delete the entry for key; the key must be hashable.

        :raises: TypeError if the key is not hashable
        """
        if not isinstance(key, collections.abc.Hashable):
            raise TypeError("The key ({key}) is not hashable".format(key=str(key)))
        del self._attributes[key]
        return self

    def attributes(self):
        """
        Return attribute dictionary.

        :return: the attributes
        :rtype: dict
        """
        return self._attributes
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,885 | royw/fullmonty | refs/heads/master | /fullmonty/method_missing_hook.py | # coding=utf-8
"""
One part of metaprogramming is being able to handle the case when an object doesn't respond to a message. It could be
an incompatible object, a dynamically handled method (e.g., for a DSL), a typo, etc.
Python calls an object's __getattr__(name) method when it does not find an attribute that matches the name. __getattr__
returns a method reference if it finds the attribute, raises AttributeError otherwise.
This module provides a mixin that adds two methods, method_missing for handling missing instance methods, and
class_method_missing for handling missing class methods. The intent is for your class to simply override these
methods.
"""
from six import add_metaclass
__docformat__ = 'restructuredtext en'
__author__ = 'wrighroy'
# noinspection PyDocstring
class MethodMissingClassHook(type):
    """
    Metaclass that hooks unresolved *class* attribute lookups.

    When normal lookup on a class using this metaclass fails, Python calls
    this __getattr__, which returns a callable forwarding the call (name and
    arguments) to the class's ``class_method_missing`` hook.
    """

    def __getattr__(cls, item):
        """
        Hook in the class method_missing method.

        :param item: attribute name that normal class lookup failed to resolve
        :type item: str
        :return: a callable delegating to ``cls.class_method_missing``
        """
        # BUG FIX: removed a stray debug print that spammed stdout on every
        # unresolved class attribute lookup.
        def delegator(*argv, **kwargs):
            """called when the class dispatcher can not find the given attribute name"""
            return cls.class_method_missing(item, *argv, **kwargs)
        return delegator
# noinspection PyDocstring
@add_metaclass(MethodMissingHook := None) if False else add_metaclass(MethodMissingClassHook)
class MethodMissingHook(object):
    """
    Mixin that routes unresolved method calls to overridable hooks.

    Override method_missing to handle missing instance methods and
    class_method_missing to handle missing class methods; the defaults raise
    AttributeError carrying a readable rendering of the attempted call.
    """

    def __getattr__(self, item):
        """
        Hook in the instance method_missing method.

        Only called when normal attribute lookup fails; returns a callable
        that forwards the call (with its arguments) to method_missing.
        """
        def delegator(*argv, **kwargs):
            """called when the instance dispatcher can not find the given attribute name"""
            return self.method_missing(item, *argv, **kwargs)
        return delegator

    @classmethod
    def class_method_missing(cls, name, *argv, **kwargs):
        """
        Called when normal class dispatching fails to resolve the called method.

        The default behavior is to raise AttributeError. To change this behavior, override this method.

        :param name: method name called
        :type name: str
        :param argv: positional arguments passed on the method call
        :type argv: list
        :param kwargs: keyword arguments passed on the method call
        :type kwargs: dict
        :return: None
        :raises: AttributeError
        """
        # BUG FIX: method_as_string is defined on this class, not on the
        # MethodMissingClassHook metaclass; the old
        # MethodMissingClassHook.method_as_string lookup itself raised an
        # AttributeError with a misleading message.  Also removed a stray
        # debug print.
        raise AttributeError(cls.method_as_string(name, *argv, **kwargs))

    # noinspection PyMethodMayBeStatic
    def method_missing(self, name, *argv, **kwargs):
        """
        Called when normal instance dispatching fails to resolve the called method.

        The default behavior is to raise AttributeError. To change this behavior, override this method.

        :param name: method name called
        :type name: str
        :param argv: positional arguments passed on the method call
        :type argv: list
        :param kwargs: keyword arguments passed on the method call
        :type kwargs: dict
        :return: None
        :raises: AttributeError
        """
        # BUG FIX: see class_method_missing — use the method defined on this
        # class rather than a nonexistent metaclass attribute.
        raise AttributeError(self.method_as_string(name, *argv, **kwargs))

    @staticmethod
    def method_as_string(name, *argv, **kwargs):
        """
        Format the method name and arguments into a human readable string.

        :param name: method name
        :type name: str
        :param argv: positional arguments passed on the method call
        :type argv: list
        :param kwargs: keyword arguments passed on the method call
        :type kwargs: dict
        :return: the formatted string
        :rtype: str
        """
        args = []
        if argv:
            args.append(', '.join(str(arg) for arg in argv))
        if kwargs:
            # sorted for a deterministic rendering of keyword arguments
            for k in sorted(kwargs.keys()):
                args.append('{key}={value}'.format(key=str(k), value=str(kwargs[k])))
        return "{name}({args})".format(name=name, args=', '.join(args))
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,886 | royw/fullmonty | refs/heads/master | /fullmonty/local_shell.py | # coding=utf-8
"""
Run external scripts and programs on the local system.
Local *run* and *system* commands support prefix and postfix. Prefix is a string to prepend to the
command given to the methods. Postfix is a string appended to the command. For example:
.. code-block:: python
with LocalShell(prefix="MY_ENV=$HOME/my_stuff ") as local:
local.run("my_executable my_arg")
would execute: "MY_ENV=$HOME/my_stuff my_executable my_arg"
"""
import signal
import os
import fcntl
import sys
import pexpect
from time import sleep
if sys.version_info[0] >= 3:
# noinspection PyShadowingBuiltins
unicode = str
try:
# use subprocess32 as it is the backport of the Python3.2 rewrite of subprocess
# from: https://stackoverflow.com/questions/21194380/is-subprocess-popen-not-thread-safe
#
# A substantial revision to subprocess was made in Python 3.2 which addresses various race conditions
# (amongst other things, the fork & exec code is in a C module, rather than doing some reasonably
# involved Python code in the critical part between fork and exec), and is available backported to
# recent Python 2.x releases in the subprocess32 module. Note the following from the PyPI page:
# "On POSIX systems it is guaranteed to be reliable when used in threaded applications."
# noinspection PyPackageRequirements,PyUnresolvedReferences
import subprocess32
# HACK
subprocess = subprocess32
except ImportError:
import subprocess
try:
# noinspection PyUnresolvedReferences
from ordereddict import OrderedDict
except ImportError:
# noinspection PyUnresolvedReferences
from collections import OrderedDict
from .ashell import AShell, MOVEMENT, CR
from .graceful_interrupt_handler import GracefulInterruptHandler
__docformat__ = 'restructuredtext en'
__all__ = ('LocalShell', 'run', 'system', 'script')
required_packages = [
'pexpect',
]
class LocalShell(AShell):
"""
Provides run interface on local system.
"""
def __init__(self, logfile=None, verbose=False, prefix=None, postfix=None):
    """
    :param logfile: optional file for session logging
    :param verbose: echo commands and their output when asserted
    :type verbose: bool
    :param prefix: command line arguments prepended to every command
    :param postfix: command line arguments appended to every command
    """
    super(LocalShell, self).__init__(is_remote=False, verbose=verbose)
    self.logfile = logfile
    self.prefix = prefix
    self.postfix = postfix
# noinspection PyMethodMayBeStatic
def env(self):
    """Return local environment dictionary (the live os.environ mapping;
    mutations affect this process)."""
    return os.environ
def run_pattern_response(self, cmd_args, out_stream=sys.stdout, verbose=True, debug=False,
                         prefix=None, postfix=None, pattern_response=None):
    """
    Run the external command and interact with it using the pattern_response dictionary

    :param cmd_args: command line arguments
    :param out_stream: stream verbose messages are written to
    :param verbose: output messages if asserted
    :param prefix: command line arguments prepended to the given cmd_args
    :param postfix: command line arguments appended to the given cmd_args
    :param pattern_response: dictionary whose key is a regular expression pattern that when matched
        results in the value being sent to the running process. If the value is None, then no response is sent.
    :param debug: enable debug messages
    :return: the collected output split into lines
    :rtype: list[str]
    """
    self.display("run_pattern_response(%s)\n\n" % cmd_args, out_stream=out_stream, verbose=debug)
    if pattern_response is None:
        # default interactions: accept "[...]" prompts (but not "[sudo] "),
        # ignore cursor-movement noise, and hit return on timeouts
        pattern_response = OrderedDict()
        pattern_response[r'\[\S+\](?<!\[sudo\]) '] = CR  # accept default prompts, don't match "[sudo] "
        pattern_response[MOVEMENT] = None
        pattern_response[pexpect.TIMEOUT] = CR
    patterns = list(pattern_response.keys())
    args = self.expand_args(cmd_args, prefix=prefix, postfix=postfix)
    command_line = ' '.join(args)
    output = []
    try:
        child = pexpect.spawn(command_line)
        while True:
            try:
                index = child.expect(patterns, timeout=120)
                # capture (and optionally echo) everything seen so far
                self.display(str(child.before), out_stream=out_stream, verbose=verbose)
                output.append(str(child.before))
                if child.after:
                    self.display(str(child.after), out_stream=out_stream, verbose=verbose)
                    output.append(str(child.after))
                key = patterns[index]
                response = pattern_response[key]
                if response:
                    child.sendline(response)
            except pexpect.EOF:
                # process exited: stop interacting
                break
    except pexpect.ExceptionPexpect as ex:
        self.display(str(ex) + '\n', out_stream=out_stream, verbose=verbose)
        raise ex
    return ''.join(output).split("\n")
def run(self, cmd_args, out_stream=sys.stdout, env=None, verbose=False,
        prefix=None, postfix=None, accept_defaults=False, pattern_response=None,
        timeout=0, timeout_interval=1, debug=False, raise_on_interrupt=False,
        use_signals=True):
    """
    Runs the command and returns the output, writing each line of the output
    to out_stream if verbose is True.

    :param cmd_args: list of command arguments or str command line
    :type cmd_args: list or str
    :param out_stream: the output stream
    :type out_stream: file
    :param env: the environment variables for the command to use.
    :type env: dict
    :param verbose: if verbose, then echo the command and it's output to stdout.
    :type verbose: bool
    :param prefix: list of command arguments to prepend to the command line
    :type prefix: list[str]
    :param postfix: list of command arguments to append to the command line
    :type postfix: list[str]
    :param accept_defaults: accept responses to default regexes.
    :type accept_defaults: bool
    :param pattern_response: dictionary whose key is a regular expression pattern that when matched
        results in the value being sent to the running process. If the value is None, then no response is sent.
    :type pattern_response: dict[str, str]
    :param timeout: the maximum time to give the process to complete
    :type timeout: int
    :param timeout_interval: the time to sleep between process output polling
    :type timeout_interval: int
    :param debug: emit debugging info
    :type debug: bool
    :param raise_on_interrupt: on keyboard interrupt, raise the KeyboardInterrupt exception
    :type raise_on_interrupt: bool
    :param use_signals: Use signals to handle ^C outside of process. Warning, if threaded then set to False.
    :type use_signals: bool
    :returns: the output of the command
    :rtype: str
    """
    # Resolve the "string-like" types for this interpreter.  The previous
    # implementation referenced the name ``unicode`` inside the
    # ``except NameError`` handler, which itself raises NameError on
    # Python 3; resolve each name defensively instead.
    try:
        # noinspection PyUnboundLocalVariable,PyShadowingBuiltins
        string_types = basestring  # Python 2: matches both str and unicode
    except NameError:
        string_types = str  # Python 3
    if isinstance(cmd_args, string_types):
        # accept a whole command line and split it into arguments
        cmd_args = pexpect.split_command_line(cmd_args)
    self.display("run(%s, %s)\n\n" % (cmd_args, env), out_stream=out_stream, verbose=debug)
    if pattern_response:
        # interactive run scripted by the caller's pattern->response table
        return self.run_pattern_response(cmd_args, out_stream=out_stream, verbose=verbose,
                                         prefix=prefix, postfix=postfix, debug=debug,
                                         pattern_response=pattern_response)
    if accept_defaults:
        # interactive run using the built-in default pattern->response table
        return self.run_pattern_response(cmd_args, out_stream=out_stream, verbose=verbose,
                                         prefix=prefix, postfix=postfix, debug=debug)
    lines = []
    for line in self.run_generator(cmd_args, out_stream=out_stream, env=env, verbose=verbose,
                                   prefix=prefix, postfix=postfix,
                                   timeout=timeout, timeout_interval=timeout_interval,
                                   debug=debug, raise_on_interrupt=raise_on_interrupt,
                                   use_signals=use_signals):
        lines.append(line)
    return ''.join(lines)
def run_generator(self, cmd_args, out_stream=sys.stdout, env=None, verbose=True,
                  prefix=None, postfix=None, timeout=0, timeout_interval=1, debug=False,
                  raise_on_interrupt=False, use_signals=True):
    """
    Run the command, yielding each line of its output as it is produced.

    Each line is also echoed to *out_stream* when *verbose* is set.

    :param cmd_args: list of command arguments
    :type cmd_args: list
    :param out_stream: stream that progress and output messages are written to
    :param env: the environment variables for the command to use.
    :type env: dict
    :param verbose: if verbose, then echo the command and it's output to stdout.
    :type verbose: bool
    :param prefix: list of command arguments to prepend to the command line
    :type prefix: list
    :param postfix: list of command arguments to append to the command line
    :param timeout: max time in seconds for command to run
    :type timeout: int
    :param timeout_interval: sleep period in seconds between output polling
    :type timeout_interval: int
    :param debug: debug log messages
    :type debug: bool
    :param raise_on_interrupt: on keyboard interrupt, raise the KeyboardInterrupt exception
    :type raise_on_interrupt: bool
    :param use_signals: Use signals to handle ^C outside of process. Warning, if threaded then set to False.
    :type use_signals: bool
    """
    self.display("run_generator(%s, %s)\n\n" % (cmd_args, env), out_stream=out_stream, verbose=debug)
    expanded_args = self.expand_args(cmd_args, prefix=prefix, postfix=postfix)
    cmdline = ' '.join(expanded_args)
    self.display("{line}\n\n".format(line=cmdline), out_stream=out_stream, verbose=verbose)
    process_output = self.run_process(expanded_args, env=env, out_stream=out_stream, verbose=debug,
                                      timeout=timeout, timeout_interval=timeout_interval,
                                      raise_on_interrupt=raise_on_interrupt,
                                      use_signals=use_signals)
    for output_line in process_output:
        self.display(output_line, out_stream=out_stream, verbose=verbose)
        yield output_line
def run_process(self, cmd_args, env=None, out_stream=sys.stdout, verbose=True,
                timeout=0, timeout_interval=1, raise_on_interrupt=False,
                use_signals=True):
    """
    Run the process, yielding each output line from the process as it arrives.

    :param out_stream: stream verbose messages are written to
    :param cmd_args: command line components
    :type cmd_args: list
    :param env: environment variables merged on top of os.environ
    :type env: dict
    :param verbose: outputs the method call if True
    :type verbose: bool
    :param timeout: max time in seconds for command to run (0 disables the timeout)
    :type timeout: int
    :param timeout_interval: sleep period in seconds between output polling
    :type timeout_interval: int
    :param raise_on_interrupt: on keyboard interrupt, raise the KeyboardInterrupt exception
    :type raise_on_interrupt: bool
    :param use_signals: Use signals to handle ^C outside of process. Warning, if threaded then set to False.
    :type use_signals: bool
    """
    self.display("run_process(%s, %s)\n\n" % (cmd_args, env), out_stream=out_stream, verbose=verbose)
    # build the child's environment: the parent's environment overridden by env
    sub_env = os.environ.copy()
    if env:
        for key, value in env.items():
            sub_env[key] = value
    timeout_seconds = timeout
    interrupt_handler = None
    try:
        if use_signals:
            # capture ^C in the parent so we can kill the child gracefully
            interrupt_handler = GracefulInterruptHandler()
            interrupt_handler.capture()

        def preexec_function():
            """Ignore the SIGINT signal by setting the handler to the standard signal handler SIG_IGN."""
            if use_signals:
                signal.signal(signal.SIGINT, signal.SIG_IGN)

        # stderr is folded into stdout so the caller sees a single stream
        process = subprocess.Popen(cmd_args,
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                   env=sub_env, preexec_fn=preexec_function)
        while process.poll() is None:  # returns None while subprocess is running
            if interrupt_handler is not None and interrupt_handler.interrupted:
                # the parent received ^C: stop the child
                process.kill()
            # drain whatever output is currently available without blocking
            while True:
                line = self._non_block_read(process.stdout)
                if not line:
                    break
                yield line
            if timeout:
                if timeout_seconds > 0:
                    sleep(timeout_interval)
                    timeout_seconds -= timeout_interval
                else:
                    # time budget exhausted: terminate the child
                    process.kill()
            # NOTE(review): when timeout == 0 this polling loop never sleeps,
            # so it busy-waits while the child runs -- confirm this is intended.
        # pick up any output emitted between the last poll and process exit
        line = self._non_block_read(process.stdout)
        if line:
            yield line
        if interrupt_handler is not None and interrupt_handler.interrupted and raise_on_interrupt:
            raise KeyboardInterrupt()
    finally:
        if interrupt_handler is not None:
            # always restore the previous signal handlers
            interrupt_handler.release()
# noinspection PyMethodMayBeStatic
def _non_block_read(self, output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
# noinspection PyBroadException
try:
return output.read().decode()
except:
return ''
def _system(self, command_line):
return os.popen(command_line).read()
# Convenience module-level shortcuts; each is bound to its own throwaway
# LocalShell instance so callers can `from ... import run` and use it directly.
run = LocalShell().run
system = LocalShell().system
script = LocalShell().script
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,887 | royw/fullmonty | refs/heads/master | /tests/test_tmp_dir.py | # coding=utf-8
import os
from fullmonty.tmp_dir import TmpDir
def test_tmp_dir():
    """TmpDir yields an existing directory inside the context and removes it on exit."""
    saved_path = None
    # renamed the context variable: the original shadowed the builtin `dir`
    with TmpDir() as tmp_path:
        saved_path = tmp_path
        assert os.path.isdir(tmp_path)
    assert not os.path.isdir(saved_path)
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,888 | royw/fullmonty | refs/heads/master | /fullmonty/__init__.py | # coding=utf-8
"""\
FullMonty
=========
FullMonty is a collection of various library modules that are not big enough to justify individual packaging.
For example, I like using a context with cd:
.. code-block:: python
with cd(path):
# current working directory is path
Another is the list_helper module where compress_list, unique_list, and is_sequence reside. Now the implementation
is just one liners but I find the intent of the code easier to comprehend with this:
.. code-block:: python
new_list = compress_list(old_list)
versus:
.. code-block:: python
new_list = [item for item in old_list if item]
Etymology
---------
The full monty is a British slang phrase of uncertain origin. It is generally used to mean "everything which is
necessary, appropriate, or possible; ‘the works’".
-- http://en.wikipedia.org/wiki/Full_monty_%28phrase%29
Installation
------------
To install from PyPI:
.. code-block:: bash
➤ pip install fullmonty
"""
__docformat__ = 'restructuredtext en'
__version__ = '0.1.23'
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,889 | royw/fullmonty | refs/heads/master | /fullmonty/timeit.py | # coding=utf-8
"""
timeit is a primitive profiling decorator
Usage:
.. code-block:: python
@timeit
def foobar():
#...
"""
import tempfile
import time
# Global switch: timing is a no-op until this is set True by the application.
timeit_enabled = False
# Shared log file for all timings; delete=False so it survives interpreter exit.
timeit_file = tempfile.NamedTemporaryFile(prefix="new-pcap-checkin", suffix="timeit", delete=False)


def timeit(func):
    """
    Function timing decorator.

    When ``timeit_enabled`` is True, each call's elapsed wall-clock time is
    appended to ``timeit_file``; otherwise the function runs undecorated.

    :param func: function being decorated
    :return: the wrapped function
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        if not timeit_enabled:
            return func(*args, **kwargs)
        ts = time.time()
        result = func(*args, **kwargs)
        te = time.time()
        # NamedTemporaryFile defaults to binary mode ('w+b'), so the message
        # must be encoded -- writing str here raised TypeError on Python 3.
        timeit_file.file.write(('func:%r took: %2.4f sec\n' % (func.__name__, te - ts)).encode('utf-8'))
        return result

    return wrapper
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,890 | royw/fullmonty | refs/heads/master | /fullmonty/capture_sys_output.py | # coding=utf-8
"""
A context manager for capturing stdout and stderr.
from: https://stackoverflow.com/questions/18651705/argparse-unit-tests-suppress-the-help-message
By: Martijn Pieters https://stackoverflow.com/users/100297/martijn-pieters
"""
from contextlib import contextmanager
from cStringIO import StringIO
import sys
@contextmanager
def capture_sys_output():
    """
    Temporarily redirect ``sys.stdout``/``sys.stderr`` into StringIO buffers.

    Usage::

        with capture_sys_output() as (stdout, stderr):
            arg_parser.parse_known_args(['-h'])
        self.assertEqual(stderr.getvalue(), '')
        help_message = stdout.getvalue()
    """
    buffer_out, buffer_err = StringIO(), StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = buffer_out, buffer_err
    try:
        yield buffer_out, buffer_err
    finally:
        # always restore the real streams, even if the body raised
        sys.stdout, sys.stderr = saved_out, saved_err
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,891 | royw/fullmonty | refs/heads/master | /fullmonty/unionfs.py | # coding=utf-8
"""
FUSE Union File System support
"""
import os
import subprocess
import shutil
from contextlib import contextmanager
from .mkdir_p import mkdir_p
from .simple_logger import info
@contextmanager
def unionfs(source_dirs=None, mount_dir=None, verbose=False):
    """
    Enable using unionfs using the *with* function.

    Usage::

        with unionfs(source_dirs=None, mount_dir=None) as unionfilesystem:
            unionfilesystem.foo(bar)

    :param source_dirs: directories that form union. Topmost directory first in list.
    :type source_dirs: list[str]
    :param mount_dir: path to the union directory.
    :type mount_dir: str
    :param verbose: emit progress messages
    :type verbose: bool

    NOTE(review): the yield produces None (the ``as unionfilesystem`` in the
    usage example receives None), and when unionfs-fuse is NOT available the
    generator never yields, so entering the ``with`` block would raise
    RuntimeError ("generator didn't stop"/"didn't yield") -- confirm intended.
    """
    if unionfs_available(verbose=verbose):
        try:
            if verbose:
                info("mkdir_p({dir})".format(dir=mount_dir))
            # create the mount point, then mount the union of source_dirs on it
            mkdir_p(mount_dir)
            cmd = 'unionfs-fuse {source_dirs} {mount_dir}'.format(source_dirs=':'.join(source_dirs),
                                                                  mount_dir=mount_dir)
            if verbose:
                info(cmd)
            subprocess.call(cmd, shell=True)
            yield
        finally:
            # best-effort teardown: unmount and remove the mount point;
            # errors are deliberately swallowed so cleanup never masks the
            # body's own exception
            # noinspection PyBroadException
            try:
                if os.path.isdir(mount_dir):
                    cmd = 'fusermount -u {mount}'.format(mount=mount_dir)
                    if verbose:
                        info(cmd)
                    subprocess.call(cmd, shell=True)
                    if verbose:
                        info('rmtree({dir})'.format(dir=mount_dir))
                    shutil.rmtree(mount_dir)
            except:
                pass
def unionfs_available(verbose=False):
    """
    Check whether unionfs-fuse is installed (i.e. on the PATH).

    :param verbose: emit progress messages
    :type verbose: bool
    :return: True if the unionfs-fuse executable was found
    :rtype: bool
    """
    cmd = 'which unionfs-fuse'
    if verbose:
        info(cmd)
    try:
        # check_output returns bytes on Python 3; decode before using the path
        path = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
    except (subprocess.CalledProcessError, OSError):
        # `which` exits non-zero when the executable is not found; that used
        # to propagate as CalledProcessError instead of meaning "not available"
        return False
    if verbose:
        info(path)
    return bool(path) and os.path.isfile(path)
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,892 | royw/fullmonty | refs/heads/master | /tests/test_mash.py | # coding=utf-8
"""
Test Mash
"""
import unittest
from argparse import Namespace
from fullmonty.mash import Mash
from fullmonty.simple_logger import info
__docformat__ = 'restructuredtext en'
__author__ = 'wrighroy'
# noinspection PyMethodMayBeStatic
class TestMash(unittest.TestCase):
    """
    Test the Mash class (dict with attribute-style access).
    """

    def test_basic_dict(self):
        """
        basic functionality via dict accessors
        """
        a = Mash()
        assert len(a) == 0
        a['foo'] = 'bar'
        assert len(a) == 1
        assert a['foo'] == 'bar'
        assert a.foo == 'bar'
        a['foo'] = 'fig'
        assert len(a) == 1
        assert a['foo'] == 'fig'
        assert a.foo == 'fig'
        del a['foo']
        assert len(a) == 0

    def test_basic_namespace(self):
        """
        basic functionality via attribute accessors
        """
        a = Mash()
        assert len(a) == 0
        a.foo = 'bar'
        assert len(a) == 1
        assert a['foo'] == 'bar'
        assert a.foo == 'bar'
        a.foo = 'fig'
        assert len(a) == 1
        assert a['foo'] == 'fig'
        assert a.foo == 'fig'
        del a.foo
        assert len(a) == 0

    def test_identitiy(self):
        """
        test identity (dict access and attribute access see the same value)
        """
        a = Mash()
        a.foo = 'bar'
        assert a['foo'] == a.foo
        assert a.foo == a['foo']

    def test_keys(self):
        """
        test dict.keys
        """
        a = Mash()
        a.foo = 1
        a.bar = 2
        a.fig = 3
        assert len(a) == 3
        assert len(a.keys()) == 3
        assert set(a.keys()) == {'foo', 'bar', 'fig'}

    def test_copy_constructor(self):
        """
        test copy constructor
        """
        a = Mash()
        a.foo = 1
        a.bar = 2
        a.fig = 3
        b = Mash(a)
        assert len(b) == 3
        assert len(b.keys()) == 3
        assert set(b.keys()) == {'foo', 'bar', 'fig'}

    def test_delattr(self):
        """
        test delattr
        """
        a = Mash()
        a.foo = 1
        a.bar = 2
        a.fig = 3
        assert set(a.keys()) == {'foo', 'bar', 'fig'}
        delattr(a, 'bar')
        assert set(a.keys()) == {'foo', 'fig'}

    def test_invalid_attribute_name_keys(self):
        """
        These keys are accessible only via dict accessors. You can not use attribute accessors.
        :return:
        :rtype:
        """
        a = Mash()
        a[1] = 'foo'
        a['a*b'] = 'bar'
        a['c.d'] = 'fig'
        assert a[1] == 'foo'
        assert a['a*b'] == 'bar'
        assert a['c.d'] == 'fig'

    def test_constructors(self):
        """Mash can be built from a dict and from an argparse Namespace."""
        data = {'e': 5, 'f': 6}
        namespace = Namespace(**data)
        assert Mash(data)
        assert Mash(namespace)
        self.assertDictEqual(Mash(data), data)
        self.assertDictEqual(Mash(namespace), data)

    def test_eq(self):
        """Equality against dicts, Namespaces, and other Mashes."""
        data = {'a': 1, 'b': 2}
        namespace = Namespace(**data)
        mash = Mash(namespace)
        mash2 = Mash(data)
        # info("data: " + repr(data))
        # info("mash: " + repr(mash))
        # info("mash2: " + repr(mash2))
        self.assertDictEqual(mash, data, "mash.__dict__ == data")
        self.assertEqual(mash, mash2, "mash.__eq__(mash2)")
        self.assertDictEqual(mash, namespace.__dict__, "mash.__dict__ == namespace.__dict__")

    def test_comparisons(self):
        """Equality should be symmetric across Mash/dict/Namespace operand orders."""
        data = {'c': 3, 'd': 4}
        namespace = Namespace(**data)
        # mash(data) vs data
        self.assertDictEqual(data, Mash(data))
        self.assertDictEqual(Mash(data), data)
        # mash(namespace) vs namespace
        self.assertDictEqual(Mash(namespace), namespace.__dict__)
        self.assertDictEqual(namespace.__dict__, Mash(namespace))
        # mash(namespace) vs data
        self.assertDictEqual(Mash(namespace), data)
        self.assertDictEqual(data, Mash(namespace))
        # mash(data) vs namespace
        self.assertDictEqual(Mash(data), namespace.__dict__)
        self.assertDictEqual(namespace.__dict__, Mash(data))

    def test_vars(self):
        """Keys that are not valid identifiers still round-trip through Namespace(**mash)."""
        a = Mash()
        # a[1] = 'foo'
        a['a*b'] = 'bar'
        a['c.d'] = 'fig'
        b = Namespace(**a)
        assert getattr(a, 'a*b', None)
        assert getattr(b, 'a*b', None)
        assert getattr(a, 'c.d', None)
        assert getattr(b, 'c.d', None)
        assert b
        # assert b[1] == 'foo'
        assert vars(b)['a*b'] == 'bar'
        assert vars(b)['c.d'] == 'fig'
        # assert vars(b) == a
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,893 | royw/fullmonty | refs/heads/master | /fullmonty/certs.py | # coding=utf-8
"""
Certificate helpers
"""
from fullmonty.simple_logger import error
__docformat__ = 'restructuredtext en'
def self_signed_certs_allowed():
    """
    Allow HTTPS connections to servers with self-signed certificates by
    silencing urllib3 warnings and, where the interpreter supports it,
    installing ssl's unverified context as the default HTTPS context.

    NOTE(review): this disables certificate verification process-wide --
    only call it where man-in-the-middle exposure is acceptable.
    """
    try:
        import requests
        requests.packages.urllib3.disable_warnings()
        import ssl
        try:
            _create_unverified_https_context = ssl._create_unverified_context
        except AttributeError:
            # Legacy Python that doesn't verify HTTPS certificates by default
            pass
        else:
            # Handle target environment that doesn't support HTTPS verification
            ssl._create_default_https_context = _create_unverified_https_context
    except ImportError as ex:
        # requests is optional; just log when it is missing
        error(str(ex))
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,894 | royw/fullmonty | refs/heads/master | /tests/test_backup.py | # coding=utf-8
"""
Tests the backup functionality used when rendering templates.
"""
import os
from fullmonty.backup import backup_filename, next_backup_filename
def test_backup_filename():
    """Test getting the backup file name for a given file name"""
    cases = (
        ('foo', 'foo~'),
        ('foo~', 'foo~'),
        ('foo1~', 'foo1~'),
    )
    for name, expected in cases:
        assert backup_filename(name) == expected
def test_next_backup_filename():
    """Test generating the next backup file name"""
    cases = (
        (['a', 'b'], 'foo~'),
        (['a', 'b', 'foo'], 'foo~'),
        (['a', 'b', 'foo~'], 'foo1~'),
        (['a', 'b', 'foo', 'foo~'], 'foo1~'),
        (['a', 'b', 'foo~', 'foo1~'], 'foo2~'),
        (['a', 'b', 'foo~', 'foo1~', 'foo3~'], 'foo4~'),
        (['a', 'b', 'foo', 'foo1~', 'foo3~'], 'foo~'),
    )
    for existing_files, expected in cases:
        assert next_backup_filename('foo', existing_files) == expected
def backup_files(dest_dir):
    """
    List the backup files (names ending in '~') in *dest_dir*.

    :param dest_dir: the directory where the backup files should be located
    :returns: the backup file names found in dest_dir (a list, not a generator)
    """
    return [file_ for file_ in os.listdir(dest_dir) if file_.endswith('~')]
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,895 | royw/fullmonty | refs/heads/master | /tests/test_localshell.py | # coding=utf-8
"""
test LocalShell
"""
import Queue
import multiprocessing
from threading import Thread
from multiprocessing import Process
from fullmonty.local_shell import LocalShell
CMD_LINE = 'pwd'
def test_local_shell():
    """LocalShell.run returns non-empty output for a simple command."""
    with LocalShell() as shell:
        output = shell.run(CMD_LINE)
        assert output
def test_local_shell_no_signal():
    """LocalShell.run also works with signal handling disabled."""
    with LocalShell() as shell:
        assert shell.run(CMD_LINE, use_signals=False)
def test_local_shell_multitheaded():  # NOTE(review): name typo -- "multithreaded"
    """ test using threads (LocalShell must work with use_signals=False) """
    # NOTE(review): the 'Queue' module is Python 2 only ('queue' on Python 3)
    N_ATTEMPTS = 100
    q = Queue.Queue()

    def get_pwd(q):
        """Worker: run CMD_LINE in its own LocalShell and enqueue the result."""
        with LocalShell() as local:
            result_ = local.run(CMD_LINE, use_signals=False)
            q.put(result_)

    threads = []
    for i in range(0, N_ATTEMPTS):
        thread = Thread(target=get_pwd, args=(q,))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    # every worker must have produced a non-empty result
    count = 0
    while not q.empty():
        count += 1
        results = q.get()
        for result in results:
            assert result
    assert count == N_ATTEMPTS
def test_local_shell_multiprocess():
    """ test using processes (LocalShell must work with use_signals=False) """
    N_ATTEMPTS = 100
    q = multiprocessing.Queue()

    def get_pwd(q):
        """Worker: run CMD_LINE in its own LocalShell and enqueue the result."""
        with LocalShell() as local:
            result_ = local.run(CMD_LINE, use_signals=False)
            q.put(result_)

    processes = []
    for i in range(0, N_ATTEMPTS):
        process = Process(target=get_pwd, args=(q,))
        processes.append(process)
        process.start()
    for process in processes:
        process.join()
    # every worker process must have produced a non-empty result
    count = 0
    while not q.empty():
        count += 1
        results = q.get()
        for result in results:
            assert result
    assert count == N_ATTEMPTS
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,896 | royw/fullmonty | refs/heads/master | /fullmonty/make_bytes.py | # coding=utf-8
"""
Handle string to bytes conversion for python 2 & 3.
From: http://python3porting.com/problems.html#bytes-strings-and-unicode
Usage
-----
::
from fullmonty.make_bytes import b
with open(file_name, 'w') as file_:
file_.write(b(str_variable))
"""
import sys
# Use version_info (a tuple) instead of the original lexical comparison of
# sys.version, which is fragile (e.g. would misorder a future "10.x").
if sys.version_info[0] < 3:
    # noinspection PyDocstring
    def b(x):
        """Return *x* unchanged: Python 2 str is already a byte string."""
        return x
else:
    import codecs

    # noinspection PyDocstring
    def b(x):
        """Encode the text string *x* to latin-1 bytes."""
        return codecs.latin_1_encode(x)[0]
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,897 | royw/fullmonty | refs/heads/master | /tests/test_make_bytes.py | # coding=utf-8
"""
unit tests for make_bytes
"""
from io import BytesIO
from fullmonty.make_bytes import b
__docformat__ = 'restructuredtext en'
__author__ = 'wrighroy'
def test_str_write():
    """
    For python2: BytesIO.write(str)
    For python3: BytesIO.write(bytes)
    Use b(data) to convert a string to either str or bytes depending on python version
    """
    stream = BytesIO()
    payload = 'foobar'
    stream.write(b(payload))
    assert stream.getvalue().decode('utf-8') == payload
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,898 | royw/fullmonty | refs/heads/master | /fullmonty/find_directory.py | # coding=utf-8
"""
Locate a directory by environment variable or path or contents.
Locating rules are:
* If given an environment variable name and the environment variable exists and the value is the path to a directory,
then return the directory's path.
* If given a path string ("path:path:...") is given, scan the path string for the first existing directory and
return it.
* If a search string, comma separated file names, is given, search the PATH environment variable value for the
search file(s), if one exists, return the directory it is in.
* If an error_message string is given, raise a FindError exception with error_message as the message value.
* Raise a FindError exception with a generic message.
"""
import os
__docformat__ = 'restructuredtext en'
# noinspection PyDocstring
class FindError(Exception):
    """Raised when find_directory() cannot locate a suitable directory."""
    pass
def find_directory(env=None, path=None, search=None, error_message=None):
    """
    Find a directory using, in order: an environment variable, a
    colon-separated path string, or a search of $PATH for directories
    containing all of the given file names.

    :param env: name of an environment variable whose value is a directory path
    :param path: colon-separated candidate directory paths ("path:path:...")
    :param search: comma-separated file names that must all exist in the directory
    :param error_message: custom message for the FindError raised on failure
    :return: the first directory satisfying one of the rules
    :rtype: str
    :raise FindError: when no directory can be found
    """
    if env is not None:
        if env in os.environ and os.path.isdir(os.environ[env]):
            return os.environ[env]
    if path is not None:
        for directory in path.split(':'):
            if os.path.isdir(os.path.expanduser(directory)):
                return directory
    if search is not None:
        wanted_files = search.split(',')
        for directory in os.environ['PATH'].split(':'):
            expanded = os.path.expanduser(directory)
            if os.path.isdir(expanded):
                # bug fix: the original tested os.path.isfile(file_) against
                # the current working directory instead of joining the file
                # name with the candidate PATH directory
                if all(os.path.isfile(os.path.join(expanded, file_)) for file_ in wanted_files):
                    return directory
    if error_message is not None:
        raise FindError(error_message)
    raise FindError("Cannot find directory given:\n env={env}\n path={path}\n search={search}".format(
        env=env, path=path, search=search
    ))
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,899 | royw/fullmonty | refs/heads/master | /tests/test_list_helper.py | # coding=utf-8
"""
test the list helper functions
"""
import collections
from fullmonty.list_helper import compress_list, unique_list, is_sequence, flatten
def test_compress_list():
    """
    compress_list removes empty or None elements from a list
    """
    cases = (
        # incompressible lists
        ([], []),
        ([1], [1]),
        ([1, 2], [1, 2]),
        # None elements are removed
        ([None], []),
        ([None, 1], [1]),
        ([None, 1, 2], [1, 2]),
        ([1, None, 2], [1, 2]),
        ([1, 2, None], [1, 2]),
        # empty strings are removed
        ([''], []),
        (['', 1], [1]),
        (['', 1, 2], [1, 2]),
        ([1, '', 2], [1, 2]),
        ([1, 2, ''], [1, 2]),
        # empty lists are removed
        ([[]], []),
        ([[], 1], [1]),
        ([[], 1, 2], [1, 2]),
        ([1, [], 2], [1, 2]),
        ([1, 2, []], [1, 2]),
    )
    for given, expected in cases:
        assert compress_list(given) == expected
def test_unique_list():
    """removes duplicate entries in the list, keeping first-seen order"""
    cases = (
        ([], []),
        ([1], [1]),
        ([1, 2], [1, 2]),
        ([1, 1], [1]),
        ([1, 1, 1], [1]),
        ([1, 1, 2], [1, 2]),
        ([1, 2, 1], [1, 2]),
        ([2, 1, 1], [2, 1]),
    )
    for given, expected in cases:
        assert unique_list(given) == expected
def test_unique_str_list():
    """removes duplicate string entries in the list, keeping first-seen order"""
    cases = (
        ([""], [""]),
        (["1"], ["1"]),
        (["1", "2"], ["1", "2"]),
        (["1", "1"], ["1"]),
        (["1", "1", "1"], ["1"]),
        (["1", "1", "2"], ["1", "2"]),
        (["1", "2", "1"], ["1", "2"]),
        (["2", "1", "1"], ["2", "1"]),
    )
    for given, expected in cases:
        assert unique_list(given) == expected
def test_is_sequence():
    """does the given object behave like a list but is not a string?"""
    # non-list-like objects
    for non_sequence in (None, 1, '', 'foo'):
        assert not is_sequence(non_sequence)
    # list-like objects: lists and tuples of various sizes
    for sequence in ([], [1], [1, 2], (), (1,), (1, 2)):
        assert is_sequence(sequence)
def test_flatten():
    """flatten handles arbitrarily nested lists of int/str/unicode/bytes."""
    # collections.Iterable was removed in Python 3.10; use collections.abc
    # with a fallback for Python 2
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable
    nested_samples = (
        [1], [1, 2, 3], [1, [2, 3]],
        ["1"], ["1", "2", "3"], ["1", ["2", "3"]],
        [u"1"], [u"1", u"2", u"3"], [u"1", [u"2", u"3"]],
        [b"1"], [b"1", b"2", b"3"], [b"1", [b"2", b"3"]],
    )
    for sample in nested_samples:
        assert isinstance(flatten(sample), Iterable)
    # strings/bytes are atoms: they are not flattened into characters
    assert list(flatten([[b'user']])) == [b'user']
    assert list(flatten([[u'pass']])) == [u'pass']
    assert list(flatten([b'lab'])) == [b'lab']
    assert list(flatten([u'lab'])) == [u'lab']
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,900 | royw/fullmonty | refs/heads/master | /fullmonty/mash.py | # coding=utf-8
"""
A Mash behaves like both a dictionary and a NameSpace. This is a very simple implementation,
attribute accessors are just added onto a dictionary. So a['foo'] == a.foo
Keys must be hashable. If a key is not a valid attribute name, then it can only be accessed
using dict accessors.
For example:
..code-block:: python
a = Mash()
a['foo'] = 'bar'
assert a['foo'] == 'bar'
assert a.foo == 'bar'
a.foo = 'fig'
assert a['foo'] == 'fig'
assert a.foo == 'fig'
"""
import collections
from argparse import Namespace
__docformat__ = 'restructuredtext en'
class Mash(dict):
    """
    dictionary keys must be hashable.
    Inherits dict behavior so we just extend by adding attribute accessing.

    NOTE(review): collections.Mapping / collections.Hashable moved to
    collections.abc in Python 3.3 and were removed from the collections
    namespace in Python 3.10, so this class requires Python < 3.10 (or
    Python 2) as written -- TODO confirm the target interpreter.
    """

    def __init__(self, obj=None):
        # Copy-construct from a mapping, a Namespace, or another Mash.
        # For any other object type no branch matches and the dict is
        # left empty (dict.__init__ is never called).
        if obj is None:
            super(Mash, self).__init__(dict())
        elif isinstance(obj, collections.Mapping):
            super(Mash, self).__init__(obj)
        elif isinstance(obj, dict):
            # unreachable in practice: a dict is also a collections.Mapping
            super(Mash, self).__init__(obj)
        elif isinstance(obj, Namespace):
            super(Mash, self).__init__(vars(obj))
        elif isinstance(obj, Mash):
            # unreachable in practice: Mash is a dict (hence a Mapping)
            super(Mash, self).__init__(vars(obj))

    def __getattr__(self, key):
        """
        Support self.key read access

        :param key: the mash dictionary key
        :type key: collections.Hashable
        :return: the value or None
        :rtype: object|None
        :raises: TypeError
        """
        if not isinstance(key, collections.Hashable):
            raise TypeError("The key ({key}) is not hashable".format(key=str(key)))
        if key not in self:
            # NOTE(review): reading a missing attribute creates the key with
            # a None value as a side effect -- confirm callers expect this.
            self[key] = None
        # noinspection PyTypeChecker
        return self[key]

    def __setattr__(self, key, value):
        """
        Support self.key write access

        :param key: the mash dictionary key
        :type key: collections.Hashable
        :param value: the value to associate with the key
        :type value: object
        :return: this instance
        :rtype: Mash
        :raises: TypeError
        """
        if not isinstance(key, collections.Hashable):
            raise TypeError("The key ({key}) is not hashable".format(key=str(key)))
        # store into the dict itself, not the instance __dict__
        self[key] = value
        return self

    def __delattr__(self, key):
        """
        Support deleting: del self.key

        :param key: the mash dictionary key
        :type key: collections.Hashable
        :return: this instance
        :rtype: Mash
        :raises: TypeError
        """
        if not isinstance(key, collections.Hashable):
            raise TypeError("The key ({key}) is not hashable".format(key=str(key)))
        # noinspection PyTypeChecker
        del self[key]
        return self

    def __eq__(self, other):
        # NOTE(review): vars(self) is the instance __dict__, not the dict
        # contents.  Because __setattr__ above stores into the dict itself,
        # the instance __dict__ is normally empty -- verify these
        # comparisons actually do what the callers expect.
        # print('{src}.__eq__({other})'.format(src=repr(self), other=repr(other)))
        if other is None:
            # print("if other is None:")
            return vars(self) == {}
        elif isinstance(other, Mash):
            # print("elif isinstance(other, Mash):")
            return vars(self) == vars(other)
        elif isinstance(other, collections.Mapping):
            # print("elif isinstance(other, collections.Mapping):")
            return vars(self) == other
        elif isinstance(other, dict):
            # print("elif isinstance(other, dict):")
            return vars(self) == other
        elif isinstance(other, Namespace):
            # print("elif isinstance(other, Namespace):")
            return vars(self) == vars(other)
        else:
            # print("else:")
            return vars(self) == vars(other)
# Monkey-patch: give argparse.Namespace the same equality semantics as Mash,
# so Namespace instances compare against Mash/dict/Namespace values uniformly.
Namespace.__eq__ = Mash.__eq__
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,901 | royw/fullmonty | refs/heads/master | /tests/test_remote_shell.py | # coding=utf-8
"""
Test the remote shell
"""
from fullmonty.local_shell import LocalShell
from fullmonty.remote_shell import RemoteShell
def test_remote_run():
    """
    Verify that listing a directory through RemoteShell (against a host, by
    default localhost) matches the local listing of the same directory.

    SECURITY FIX: credentials were previously hard-coded in source; they are
    now taken from the environment (REMOTE_SHELL_USER / REMOTE_SHELL_PASSWORD /
    REMOTE_SHELL_HOST) and the test is skipped when no password is provided.
    """
    import os
    import unittest

    user = os.environ.get('REMOTE_SHELL_USER', os.environ.get('USER', ''))
    password = os.environ.get('REMOTE_SHELL_PASSWORD')
    host = os.environ.get('REMOTE_SHELL_HOST', 'localhost')
    if not password:
        raise unittest.SkipTest('REMOTE_SHELL_PASSWORD not set; skipping remote shell test')

    with LocalShell() as local:
        work_dir = local.run("pwd")  # 'dir' renamed: it shadowed the builtin
        # FIX: both listings use -1 (one name per line); the original compared
        # 'ls -l' (long format) to 'ls -1', which could never be equal.
        local_ls = local.run("/bin/ls -1")
        with RemoteShell(user=user, password=password, host=host) as remote:
            remote_ls = remote.run("/bin/ls -1 {dir}".format(dir=work_dir))
            assert local_ls == remote_ls
| {"/tests/test_method_missing_hook.py": ["/fullmonty/method_missing_hook.py"], "/tests/test_tmp_dir.py": ["/fullmonty/tmp_dir.py"], "/tests/test_mash.py": ["/fullmonty/mash.py"], "/tests/test_localshell.py": ["/fullmonty/local_shell.py"], "/tests/test_make_bytes.py": ["/fullmonty/make_bytes.py"], "/tests/test_remote_shell.py": ["/fullmonty/local_shell.py", "/fullmonty/remote_shell.py"]} |
59,936 | paulosalem/time-blender | refs/heads/master | /tests/test_random_events.py | import numpy as np
from tests.common import AbstractTest
from time_blender.core import Generator, ConstantEvent
from time_blender.deterministic_events import WalkEvent
from time_blender.random_events import NormalEvent, UniformEvent, PoissonEvent, TopResistance, BottomResistance
class AbstractRandomEventTest(AbstractTest):
    # Common base for the random-event tests; currently just defers to
    # AbstractTest.setUp (fixed date range plus seeded numpy/stdlib RNGs).

    def setUp(self):
        super().setUp()
class TestUniformEvent(AbstractRandomEventTest):
    """Tests for UniformEvent: direct generation and learning its bounds back
    from observed data."""

    def setUp(self):
        super().setUp()
        # Symmetric bounds, so the sample mean of the generated series should
        # be close to 0.
        self.param_1 = -20
        self.param_2 = 20
        self.original_event = UniformEvent(low=self.param_1, high=self.param_2)
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        data = self.generator.generate(self.original_event)
        values = data.values
        mean = np.mean(values)
        # Allow absolute tolerance 1.0 for sampling noise around the true mean 0.
        self.assertClose(mean, 0, abs_tol=1.0)
        self.common_model_test(self.original_event)

    def test_generalize_from_observations(self):
        # Fit a fresh event (with priors away from the truth) to data generated
        # by the original event, then check the refitted series' mean is near 0.
        fresh_data = self.generate_learn_generate(self.generator,
                                                  original_event=self.original_event,
                                                  fresh_event=UniformEvent(low=NormalEvent(-25, 10),
                                                                           high=NormalEvent(2, 5)),
                                                  max_optimization_evals=300,
                                                  start_date=self.start_date, end_date=self.end_date)
        print(fresh_data)
        self.assertClose(np.mean(fresh_data), 0.0, rel_tol=0.1, abs_tol=1.0)

    # TODO test_generalize_from_observations_2 using indexes
    def test_generalize_from_observations_2(self):
        data = self.generator.generate(events=self.original_event)

        fresh_event = UniformEvent(low=NormalEvent(-25, 10),
                                   high=NormalEvent(2, 5))

        # data contains Pandas Series
        fresh_event.generalize_from_observations(data, n_simulations=3, max_optimization_evals=300,
                                                 error_strategy='best_trace')
        fresh_data = Generator(start_date=self.start_date, end_date=self.end_date).generate(fresh_event)
        print(data)
        print(fresh_data)

        # TODO test index intersections when observations are Pandas' Series
        self.assertTrue(False)  # TODO: deliberate failure — unfinished placeholder test
class TestNormalEvent(AbstractRandomEventTest):
    """Tests for NormalEvent: direct generation and learning the mean back
    from observed data."""

    def setUp(self):
        super().setUp()
        self.param_1 = 14
        self.original_event = NormalEvent(self.param_1, 1.0)
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        # The sample mean of a Normal(14, 1) series should be close to 14.
        series = self.generator.generate(self.original_event)
        sample_mean = np.mean(series.values)
        print(sample_mean)
        self.assertClose(sample_mean, self.param_1, rel_tol=0.1)
        self.common_model_test(self.original_event)

    def test_generalize_from_observations_1(self):
        # Start the fresh event from a wrong mean and let learning recover it.
        refit_series = self.generate_learn_generate(self.generator,
                                                    original_event=self.original_event,
                                                    fresh_event=NormalEvent(10.0, 2.0),
                                                    start_date=self.start_date, end_date=self.end_date,
                                                    max_optimization_evals=300,
                                                    error_strategy='all_traces')
        self.assertClose(np.mean(refit_series), self.param_1, rel_tol=0.1)

    def test_generalize_from_observations_2(self):
        # Same recovery, but with a wide uniform prior on the mean.
        refit_series = self.generate_learn_generate(self.generator,
                                                    original_event=self.original_event,
                                                    fresh_event=NormalEvent(UniformEvent(low=-100.0, high=100.0), 2.0),
                                                    n_simulations=3, max_optimization_evals=500,
                                                    start_date=self.start_date, end_date=self.end_date)
        self.assertClose(np.mean(refit_series), self.param_1, rel_tol=0.1)
class TestPoissonEvent(AbstractRandomEventTest):
    """Tests for PoissonEvent: direct generation and learning the rate back
    from observed data."""

    def setUp(self):
        super().setUp()
        self.param_1 = 7
        self.original_event = PoissonEvent(self.param_1)
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        # The sample mean of a Poisson(7) series should be close to the rate.
        series = self.generator.generate(self.original_event)
        sample_mean = np.mean(series.values)
        print(sample_mean)
        self.assertClose(sample_mean, self.param_1)
        self.common_model_test(self.original_event)

    def test_generalize_from_observations(self):
        # Recover the rate starting from a uniform prior over [1, 10].
        refit_series = self.generate_learn_generate(self.generator,
                                                    original_event=self.original_event,
                                                    fresh_event=PoissonEvent(UniformEvent(low=1.0,
                                                                                          high=10.0)),
                                                    start_date=self.start_date, end_date=self.end_date)
        self.assertClose(np.mean(refit_series), self.param_1)
class TestTopResistance(AbstractRandomEventTest):
    """Smoke test: TopResistance wrapped around an upward-drifting walk."""

    def test_execute(self):
        # Upward-drifting random walk pushed back inside the [50, 55] band.
        drifting_walk = WalkEvent(NormalEvent(1, 2))
        strength = NormalEvent(0.5, 0.1)
        model = TopResistance(drifting_walk,
                              resistance_value_begin=50,
                              resistance_value_end=55,
                              resistance_probability=0.5,
                              resistance_strength_event=strength)
        self.common_model_test(model)
class TestBottomResistance(AbstractRandomEventTest):
    """Smoke tests: BottomResistance on a falling walk, plus a strongly
    activated TopResistance variant."""

    def test_execute(self):
        # Downward-drifting random walk resisted inside the [-30, -20] band.
        drifting_walk = WalkEvent(NormalEvent(-1, 2))
        strength = NormalEvent(0.5, 0.1)
        model = BottomResistance(drifting_walk,
                                 resistance_value_begin=-20,
                                 resistance_value_end=-30,
                                 resistance_probability=0.5,
                                 resistance_strength_event=strength)
        self.common_model_test(model)

    def test_execute_2(self):
        # NOTE: builds a TopResistance (high activation probability) over noise.
        strength = NormalEvent(0.5, 0.1)
        noise = NormalEvent(19, 5)
        model = TopResistance(noise,
                              resistance_value_begin=20,
                              resistance_value_end=50,
                              resistance_probability=0.9,
                              resistance_strength_event=strength)
        generated = self.common_model_test(model)
        print(generated)
59,937 | paulosalem/time-blender | refs/heads/master | /time_blender/models.py | # Standard models
import numpy as np
import pandas as pd
from time_blender.coordination_events import PastEvent, CumulativeEvent, ParentValueEvent, TemporarySwitch, Choice, \
SeasonalEvent
from time_blender.core import LambdaEvent, ConstantEvent, wrapped_constant_param, Event, Invariant
from time_blender.deterministic_events import WaveEvent, ClockEvent, WalkEvent, IdentityEvent, ClipEvent
from time_blender.random_events import NormalEvent, wrap_in_resistance, BernoulliEvent, PoissonEvent
from clize import Parameter
from time_blender.util import shift_weekend_and_holidays
from time_blender.cli import cli_model, a_list
class SimpleModels:
    """Ready-made simple composite models."""

    @staticmethod
    @cli_model
    def cycle(base: float = 10.0, period: float = 72, growth_rate: float = 2):
        """A wave whose period wanders randomly and whose amplitude grows with
        time, superimposed on a noisy, growing capacity trend."""
        wandering_period = WalkEvent(NormalEvent(0, 1), initial_pos=period, capture_parent_value=False)
        growing_amplitude = ClockEvent() * ConstantEvent(base) * NormalEvent(3, 1)
        wave = WaveEvent(wandering_period, growing_amplitude)
        capacity = ClockEvent() * ConstantEvent(growth_rate * base) * NormalEvent(3, 0.1)
        return wave + capacity
class ClassicModels:
    """Classic linear time-series models (AR, MA, ARMA, ARIMA) assembled from events."""

    @staticmethod
    @cli_model
    def ar(p: int, *, constant: float = 0, error_mean: float = 0, error_std: float = 1,
           coefs_low: float = -1, coefs_high: float = 1,
           coefs: a_list = None, error_event: Parameter.IGNORE = None, capture_parent_value=True):
        """
        Creates a new AR (autoregressive) model. The model's coefficients can either be generated
        automatically by providing coefs_low and coefs_high parameters, or be explicitly defined by
        providing a list in the coefs parameter.

        :param p: The order of the AR model (how far back should it look).
        :param constant: The model's constant term (a number or an Event).
        :param error_mean: The mean of the normal error component.
        :param error_std: The standard deviation of the normal error component.
        :param coefs_low: If specified, defines the lower bound of the coefficients to be generated.
                          If left None, then the coefs parameter must be specified.
        :param coefs_high: If specified, defines the upper bound of the coefficients to be generated.
                           If left None, then the coefs parameter must be specified.
        :param coefs: A list or dict with numeric keys of the coefficients to be employed. Must have
                      size p. The i-th element (if list) or key (if dict) corresponds to the i-th
                      coefficient. If this is specified, coefs_low and coefs_high are ignored.
        :param error_event: An error event. If specified, it is used instead of error_mean and error_std.
        :param capture_parent_value: Whether the parent value should be used as the new current value
                                     to which the event's execution is added. This is useful to embed
                                     the present event into larger contexts and accumulate on top of
                                     their feedback.
        :return: An AR model.
        :raises ValueError: if neither explicit coefficients nor bounds, or no error source, is given.
        """
        # check coefficients
        if (coefs is None) and (coefs_low is None) and (coefs_high is None):
            raise ValueError("Either coefs or coefs_low, coefs_high must be specified.")

        # check error sources
        # BUG FIX: this previously read `(coefs is error_mean)`, an identity
        # comparison between two unrelated parameters; the intent is to require
        # at least one error specification.
        if (error_mean is None) and (error_std is None) and (error_event is None):
            raise ValueError("Some error must be specified.")

        # Start with the model's constant term.
        if error_event is not None:
            if not isinstance(constant, Event):
                x = ConstantEvent(constant, parallel_events=[error_event])
            else:
                x = constant.parallel_to(error_event)
        else:
            if not isinstance(constant, Event):
                x = ConstantEvent(constant)
            else:
                x = constant

        past = []

        # Add the autoregressive terms.
        for i in range(0, p):
            if coefs is not None:
                alpha = coefs[i]
            else:
                # coefficient drawn once, at model-construction time
                alpha = np.random.uniform(coefs_low, coefs_high)

            pe = PastEvent(i + 1, allow_learning=False)
            past.append(pe)

            if error_event is not None:
                # lag the shared error series by i steps
                error = PastEvent(i, allow_learning=False)
                error.refers_to(error_event)
            else:
                error = NormalEvent(error_mean, error_std)

            x = x + pe * ConstantEvent(alpha) + error

        # connect past events to the series to which they refer
        for pe in past:
            if capture_parent_value:
                pe.refers_to(ParentValueEvent(x))
            else:
                pe.refers_to(x)

        return x

    @staticmethod
    @cli_model
    def ma(q, *, series_mean: float = 0, error_mean: float = 0, error_std: float = 1,
           coefs_low: float = -1, coefs_high: float = 1, coefs: a_list = None,
           error_event: Parameter.IGNORE = None):
        """
        Creates a new MA (Moving Average) model. The model's coefficients can either be generated
        automatically by providing coefs_low and coefs_high parameters (default), or be explicitly
        defined by providing a list in the coefs parameter.

        :param q: The order of the MA model (how far back should it look).
        :param series_mean: The mean of the series.
        :param error_mean: The mean of the normal error of each past random shock.
        :param error_std: The standard deviation of the normal error of each past random shock.
        :param coefs_low: If specified, defines the lower bound of the coefficients to be generated.
                          If left None, then the coefs parameter must be specified.
        :param coefs_high: If specified, defines the upper bound of the coefficients to be generated.
                           If left None, then the coefs parameter must be specified.
        :param coefs: A list or dict with numeric keys of the coefficients to be employed. Must have
                      size q. The i-th element (if list) or key (if dict) corresponds to the i-th
                      coefficient. If this is specified, coefs_low and coefs_high are ignored.
        :param error_event: An error event. If specified, it is used instead of error_mean and error_std.
        :return: The MA model.
        :raises ValueError: if neither explicit coefficients nor bounds are given.
        """
        # check coefficients
        if (coefs is None) and (coefs_low is None) and (coefs_high is None):
            raise ValueError("Either coefs or coefs_low, coefs_high must be specified.")

        # error shocks (a single shared series, lagged below)
        if error_event is None:
            error_event = NormalEvent(error_mean, error_std)

        # Put the mean term first
        x = ConstantEvent(series_mean, parallel_events=[error_event])

        past = []

        # Add model terms
        for i in range(0, q):
            if coefs is not None:
                alpha = coefs[i]
            else:
                alpha = np.random.uniform(coefs_low, coefs_high)

            p = PastEvent(i + 1, allow_learning=False)
            past.append(p)
            x = x + p * ConstantEvent(alpha)

        # connect past events to the shared error series
        for p in past:
            p.refers_to(error_event)

        return x

    @staticmethod
    @cli_model
    def arma(p, q, constant: float = 0, error_mean: float = 0, error_std: float = 1,
             ar_coefs_low: float = -1, ar_coefs_high: float = 1, ar_coefs: a_list = None,
             ma_coefs_low: float = -1, ma_coefs_high: float = 1, ma_coefs: a_list = None,
             capture_parent_value=True):
        """
        Creates an ARMA model. This differs slightly from simply summing AR and MA models, because
        here a common normal error series is also provided.

        :param p: Order of the AR part.
        :param q: Order of the MA part.
        :param constant: The model's constant term.
        :param error_mean: Mean of the shared normal error series.
        :param error_std: Standard deviation of the shared normal error series.
        :param ar_coefs_low: Lower bound for generated AR coefficients.
        :param ar_coefs_high: Upper bound for generated AR coefficients.
        :param ar_coefs: Explicit AR coefficients (overrides the bounds).
        :param ma_coefs_low: Lower bound for generated MA coefficients.
        :param ma_coefs_high: Upper bound for generated MA coefficients.
        :param ma_coefs: Explicit MA coefficients (overrides the bounds).
        :param capture_parent_value: Passed through to the AR part.
        :return: The ARMA model.
        """
        # common error series, shared by the AR and MA parts
        error_event = NormalEvent(error_mean, error_std)

        m1 = ClassicModels.ar(p, constant=constant, coefs_low=ar_coefs_low, coefs_high=ar_coefs_high, coefs=ar_coefs,
                              error_event=error_event, capture_parent_value=capture_parent_value)
        m2 = ClassicModels.ma(q, series_mean=0.0, coefs_low=ma_coefs_low, coefs_high=ma_coefs_high, coefs=ma_coefs,
                              error_event=error_event)

        return m1 + m2

    @staticmethod
    @cli_model
    def arima(p, q, constant: float = 0, error_mean: float = 0, error_std: float = 1,
              ar_coefs_low: float = -1, ar_coefs_high: float = 1, ar_coefs: a_list = None,
              ma_coefs_low: float = -1, ma_coefs_high: float = 1, ma_coefs: a_list = None,
              capture_parent_value=True):
        """
        Creates an ARIMA model. This adds the integration not found in ARMA. That is to say, values
        are accumulated over time.

        :param p: Order of the AR part.
        :param q: Order of the MA part.
        :param constant: The model's constant term.
        :param error_mean: Mean of the shared normal error series.
        :param error_std: Standard deviation of the shared normal error series.
        :param ar_coefs_low: Lower bound for generated AR coefficients.
        :param ar_coefs_high: Upper bound for generated AR coefficients.
        :param ar_coefs: Explicit AR coefficients (overrides the bounds).
        :param ma_coefs_low: Lower bound for generated MA coefficients.
        :param ma_coefs_high: Upper bound for generated MA coefficients.
        :param ma_coefs: Explicit MA coefficients (overrides the bounds).
        :param capture_parent_value: Passed through to the AR part.
        :return: The ARIMA model.
        """
        # BUG FIX: q was previously hard-coded to 1 in the arma() call below,
        # silently ignoring the caller's q argument.
        return CumulativeEvent(
            ClassicModels.arma(p=p, q=q, constant=constant, error_mean=error_mean, error_std=error_std,
                               ar_coefs_low=ar_coefs_low, ar_coefs_high=ar_coefs_high, ar_coefs=ar_coefs,
                               ma_coefs_low=ma_coefs_low, ma_coefs_high=ma_coefs_high, ma_coefs=ma_coefs,
                               capture_parent_value=capture_parent_value))
class BankingModels:
    # TODO put newer version here

    @staticmethod
    @cli_model
    def salary_earner(salary_value=20000, payment_day: int = 1, *,
                      regular_expense_mean=50, regular_expense_std=10,
                      large_expense_mean=1000, large_expense_std=100,
                      emergency_probability=0.1, emergencies_mean=500, emergencies_std=100):
        """
        Cashflow of a monthly salary earner: the salary is credited on payday
        and the balance is debited daily by regular expenses, occasional large
        expenses, emergencies, investments and a cyclic residual, with
        resistance levels bounding the drift.

        :param salary_value: monthly salary credited on payday.
        :param payment_day: day of the month on which the salary is credited.
        :param regular_expense_mean: mean of the daily regular expense.
        :param regular_expense_std: std. deviation of the daily regular expense.
        :param large_expense_mean: mean of each Poisson-driven large expense.
        :param large_expense_std: std. deviation of each large expense.
        :param emergency_probability: daily probability of an emergency expense.
        :param emergencies_mean: mean of an emergency expense.
        :param emergencies_std: std. deviation of an emergency expense.
        :return: the salary earner cashflow model.
        """
        ##########################################
        # Job parameters
        ##########################################
        salary = ConstantEvent(salary_value, name='salary',
                               require_lower_bound=0.0,
                               learning_normal_std=5000)
        payday = ConstantEvent(payment_day, name='payday', allow_learning=False)

        ########################################
        # Daily life
        ########################################
        # salary is credited only on payday
        typical_daily_cashflow = SeasonalEvent(salary, day=payday, fill_with_previous=False)

        # regular expenses (clipped at zero: expenses never become income)
        typical_daily_cashflow -= ClipEvent(NormalEvent(regular_expense_mean, regular_expense_std),
                                            min_value=ConstantEvent(0.0, allow_learning=False))

        # large expenses
        # BUG FIX: this term previously reused regular_expense_mean/std, which
        # left the large_expense_* parameters entirely unused.
        typical_daily_cashflow -= ClipEvent(PoissonEvent(1) * NormalEvent(large_expense_mean, large_expense_std),
                                            min_value=ConstantEvent(0.0, allow_learning=False))

        # emergencies
        typical_daily_cashflow -= ClipEvent(BernoulliEvent(emergency_probability) * NormalEvent(emergencies_mean,
                                                                                                emergencies_std),
                                            min_value=ConstantEvent(0.0, allow_learning=False))

        # investments: none, 1x, 2x or 4x a base amount, chosen once
        investment_base_mean = salary_value * 0.025
        investment_base_std = salary_value * 0.0012
        typical_daily_cashflow -= ClipEvent(Choice([ConstantEvent(0),
                                                    NormalEvent(investment_base_mean, investment_base_std),
                                                    NormalEvent(2 * investment_base_mean, 2 * investment_base_std),
                                                    NormalEvent(4 * investment_base_mean, 4 * investment_base_std)],
                                                   fix_choice=True),
                                            min_value=ConstantEvent(0.0, allow_learning=False))

        # cyclic residual
        typical_daily_cashflow -= WaveEvent(180, salary_value / 100)

        # occasionally the typical flow is temporarily replaced by no flow at all
        non_payday_process = TemporarySwitch(typical_daily_cashflow,
                                             ConstantEvent(0.0, allow_learning=False),
                                             switch_duration=ConstantEvent(10, learning_normal_std=2))

        def aux_daily_cashflow(t, i, memory, sub_events):
            # ensures that on payday there is no temporary switch
            if t.day == sub_events['payday'].execute(t):
                flow = sub_events['typical_daily_cashflow'].execute(t)
            else:
                flow = sub_events['non_payday_process'].execute(t)

            # accumulate the daily flow on top of the parent's current value
            return sub_events['parent_value'].execute(t) + flow

        parent_value = ParentValueEvent(default=0)

        x = LambdaEvent(aux_daily_cashflow, sub_events={'payday': payday,
                                                        'typical_daily_cashflow': typical_daily_cashflow,
                                                        'non_payday_process': non_payday_process,
                                                        'parent_value': parent_value})

        # keep the balance roughly within [-salary, 3*salary]
        x = wrap_in_resistance(x,
                               top_resistance_levels=[3 * salary_value],
                               bottom_resistance_levels=[-salary_value],
                               top_resistance_strength_event=ClipEvent(NormalEvent(0.1, 0.05), min_value=0.0),
                               bottom_resistance_strength_event=ClipEvent(NormalEvent(2, 1), min_value=0.0),
                               tolerance=salary_value,
                               top_resistance_probability=0.99,
                               bottom_resistance_probability=0.99)

        ##########################
        # Invariants
        ##########################
        invariant_1 = Invariant(lambda t, events: events['salary'].execute(t) >= 10000,
                                events={'salary': salary})
        x.add_invariant(invariant_1)

        salary_earner_model = x
        # salary_earner_model = Replicated(x,
        #                                  duration_per_replication=NormalEvent(mean=360, std=90, allow_learning=False),
        #                                  max_replication=3)

        return salary_earner_model

    @staticmethod
    @cli_model
    def salary_earner_simple(salary_value=5000, payment_day: int = 1, *, expense_mean=100.0, expense_sd=300.0):
        """
        A simplified salary earner: one salary credit per month (shifted off
        weekends and holidays, backward) and a clipped normal daily expense.

        :param salary_value: monthly salary credited on payday.
        :param payment_day: nominal day of the month of the salary credit.
        :param expense_mean: mean of the daily expense.
        :param expense_sd: std. deviation of the daily expense.
        :return: the cashflow model.
        """
        # ensure we are working with a ConstantEvent
        salary = wrapped_constant_param(prefix='banking', name='salary', value=salary_value, require_lower_bound=0.0)

        # Daily expense model
        daily_normal_expense = ClipEvent(NormalEvent(expense_mean, expense_sd), min_value=0.0)

        def aux(t, i, memory, sub_events):
            t_next_month = t + pd.DateOffset(months=1)
            # effective paydays fall on working days (shifted backward)
            actual_payment_day_cur = shift_weekend_and_holidays(pd.Timestamp(t.year, t.month,
                                                                             sub_events['payment_day']),
                                                                direction='backward')
            actual_payment_day_next = shift_weekend_and_holidays(pd.Timestamp(t_next_month.year, t_next_month.month,
                                                                              sub_events['payment_day']),
                                                                 direction='backward')
            if t.date() == actual_payment_day_cur:
                # current month
                memory['money'] = memory.get('money', 0.0) + sub_events['salary'].execute(t)
            elif t.date() == actual_payment_day_next:
                # advance for the next month, if applicable
                memory['money'] = memory.get('money', 0.0) + sub_events['salary'].execute(t)
            else:
                memory['money'] = memory.get('money', 0.0) - sub_events['daily_normal_expense'].execute(t)

            return memory['money']

        # The final model
        model = LambdaEvent(aux, sub_events={'salary': salary,
                                             'daily_normal_expense': daily_normal_expense,
                                             'payment_day': payment_day})
        return model
class EconomicModels:
    """Economics-inspired models."""

    @staticmethod
    @cli_model
    def kondratiev_business_cycle(base: float = 0.0, growth_mean: float = 1, growth_sd: float = 2,
                                  wave_period: float = 12, wave_amplitude: float = 0.05):
        """
        A naive interpretation of so-called "Kondratiev waves" business cycle theory:
        a random-walk growth trend modulated by a periodic wave.

        :param base: The initial economic condition.
        :param growth_mean: The mean of economic growth.
        :param growth_sd: The standard deviation of economic growth.
        :param wave_period: The period of the wave that modifies present conditions.
        :param wave_amplitude: The amplitude of the wave that modifies present conditions.
        :return: the business cycle model.
        """
        wave = WaveEvent(wave_period, wave_amplitude)
        growth_trend = WalkEvent(NormalEvent(growth_mean, growth_sd), initial_pos=base)
        modulation = ConstantEvent(1) + wave
        return growth_trend * modulation
class EcologyModels:
    """Ecology-inspired models."""

    @staticmethod
    @cli_model
    def predator_prey(n_predators=10, n_preys=40,
                      alpha=1.1, beta=0.02, delta=0.02, gamma=0.008):
        """
        Discrete version of the Lotka–Volterra equations for predator-prey model. The equations are:

            preys(t + 1)     = alpha*preys(t) - beta*preys*predators(t)
            predators(t + 1) = delta*preys*predators(t) - gamma*predators(t)

        :param n_predators: Initial number of predators.
        :param n_preys: Initial number of preys.
        :param alpha: Prey reproduction factor.
        :param beta: Effective killing factor for predators.
        :param delta: Effective multiplication factor for predators based on prey consumption.
        :param gamma: Natural death rate for predators.
        :return: A predators and a preys model (in that order).
        """
        # preys = alpha*preys - beta*preys*predators
        # predators = delta*preys*predators - gamma*predators

        # Lagged (previous time step) views of each population, seeded with the
        # initial population sizes until real values exist.
        past_preys = PastEvent(1, undefined_value=n_preys, name='Past Preys', allow_learning=False)
        past_predators = PastEvent(1, undefined_value=n_predators, name='Past Predators', allow_learning=False)

        # Populations are clipped at zero: they can die out but not go negative.
        preys = ClipEvent((ConstantEvent(alpha) * past_preys - ConstantEvent(beta) * past_preys * past_predators),
                          min_value=0.0,
                          name="Preys")
        predators = ClipEvent((ConstantEvent(delta) * past_preys * past_predators - ConstantEvent(gamma) * past_predators),
                              min_value=0.0,
                              name="Predators")

        # Wire the mutual feedback: the two series advance in parallel, and each
        # lagged view points back at its own series.  NOTE(review): this wiring
        # appears order-sensitive (parallel_to before refers_to) — preserve the
        # order when refactoring.
        preys.parallel_to(predators)
        predators.parallel_to(preys)

        past_preys.refers_to(preys)
        past_predators.refers_to(predators)

        return predators, preys
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,938 | paulosalem/time-blender | refs/heads/master | /tests/test_models.py | import unittest
import math
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error
from time_blender.core import ConstantEvent, Generator
from time_blender.random_events import NormalEvent
# Show full DataFrames in test output (disable pandas row truncation).
pd.set_option('display.max_rows', None)
from tests.common import AbstractTest
from time_blender.models import ClassicModels, BankingModels, SimpleModels, EconomicModels, EcologyModels
class ModelsTest(AbstractTest):
    """Smoke and recovery tests for the pre-built models.  The models are
    stochastic, so most tests only check the generated series' length (via
    common_model_test) or coarse statistics; results depend on the RNG seeds
    set in AbstractTest.setUp."""

    def setUp(self):
        super().setUp()
        # Shorter range than the base class default, to keep generation fast.
        self.start_date = '2016-01-01'
        self.end_date = '2016-03-01'

    def test_ar(self):
        self.common_model_test(ClassicModels.ar(3, random_seed=42))
        self.common_model_test(ClassicModels.ar(3, coefs=[0, 1, 2]))

    def test_ma(self):
        self.common_model_test(ClassicModels.ma(3))

    def test_arma(self):
        self.common_model_test(ClassicModels.arma(p=3, q=3))

    def test_salary_earner(self):
        # Generate the balance series, then diff it to recover daily cashflows.
        s = self.common_model_test(BankingModels.salary_earner(salary_value=20000, payment_day=1)).iloc[:, 0]
        s = s.diff()
        s.iloc[0] = 20000.0  # we must add an extra salary because the diff operation removed the first one
        n_months = len(s.resample('MS').sum())
        n_salaries = len(s[s >= 15000])  # this assumes that daily expense in payment day is not very large
        print(n_months, n_salaries)

        # the number of salaries paid must be equal to the number of months considered
        self.assertEqual(n_months, n_salaries)

    def test_salary_earner_simple(self):
        # Same idea as test_salary_earner, but the simple model credits exactly
        # the salary on payday, so the diff equals the salary there.
        s = self.common_model_test(BankingModels.salary_earner_simple(salary_value=5000, payment_day=1)).iloc[:, 0]
        s = s.diff()
        s.iloc[0] = 5000.0  # we must add an extra salary because the diff operation removed the first one
        n_months = len(s.resample('MS').sum())
        n_salaries = len(s[s == 5000])
        print(n_months, n_salaries)

        # the number of salaries paid must be equal to the number of months considered
        self.assertEqual(n_months, n_salaries)

    def test_cycle(self):
        self.common_model_test(SimpleModels.cycle())

    def test_kondratiev_business_cycle(self):
        self.common_model_test(EconomicModels.kondratiev_business_cycle())

    def test_predator_prey(self):
        predators_model, preys_model = EcologyModels.predator_prey(n_predators=100, n_preys=100,
                                                                   alpha=1.01, beta=0.002, delta=2.2, gamma=0.002)
        self.common_model_test(predators_model)

    def test_generalize_from_observations_1(self):
        # Fit an ARIMA starting from a wrong constant to data from an "oracle"
        # ARIMA, then compare the regenerated series' mean to the oracle's.
        oracle = ClassicModels.arima(2, 0, constant=ConstantEvent(5))
        oracle_data = self.common_model_test(oracle)

        model_under_test = ClassicModels.arima(2, 0, constant=ConstantEvent(1))

        generator = Generator(start_date=self.start_date, end_date=self.end_date, freq='D')
        fresh_data = self.generate_learn_generate(generator,
                                                  original_event=oracle,
                                                  fresh_event=model_under_test,
                                                  start_date=self.start_date, end_date=self.end_date,
                                                  n_simulations=1, max_optimization_evals=1000,
                                                  error_strategy='best_trace')

        print(np.mean(fresh_data), np.mean(oracle_data))
        print(mean_absolute_error(fresh_data, oracle_data))
        self.assertClose(np.mean(fresh_data), np.mean(oracle_data), rel_tol=0.2)
59,939 | paulosalem/time-blender | refs/heads/master | /tests/common.py | import unittest
import pandas as pd
import numpy as np
import math
import random
from time_blender.core import Generator
class AbstractTest(unittest.TestCase):
    """Shared fixture and helpers for time-blender tests: a fixed daily date
    range, seeded RNGs, and generate/learn helpers."""

    def setUp(self):
        self.start_date = '2016-01-01'
        self.end_date = '2018-10-01'

        # set random seeds for consistent results
        np.random.seed(3)
        random.seed(3)

    def common_model_test(self, model, n=1, print_data=False):
        """Generate daily data for *model* and check there is one row per
        calendar day in [start_date, end_date]; returns the generated data."""
        data = Generator(start_date=self.start_date, end_date=self.end_date, freq='D').generate([model], n=n)
        expected_index = pd.date_range(self.start_date, self.end_date, freq='D')

        # with multiple simulations, a list of frames is returned
        df = data[0] if n > 1 else data
        self.assertEqual(len(df), len(expected_index))

        if print_data:
            print(df)

        return data

    def assertClose(self, a, b, rel_tol=0.1, abs_tol=0.0, verbose=True):
        """Assert that *a* and *b* are numerically close (math.isclose)."""
        if verbose:
            print(f"Is {a} close to {b} up to {rel_tol} relative tolerance and {abs_tol} absolute tolerance?")
        self.assertTrue(math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol))

    def generate_learn_generate(self, generator, original_event, fresh_event,
                                start_date, end_date,
                                n_simulations=3, max_optimization_evals=300,
                                upper_bound=None, lower_bound=None,
                                error_strategy='best_trace'):
        """Generate data from *original_event*, fit *fresh_event* to it, and
        return the values regenerated from the fitted event."""
        observed = generator.generate(events=original_event)
        observed_values = observed.values

        fresh_event.generalize_from_observations([observed_values], n_simulations=n_simulations,
                                                 max_optimization_evals=max_optimization_evals,
                                                 upper_bound=upper_bound, lower_bound=lower_bound,
                                                 error_strategy=error_strategy,
                                                 verbose=True)

        return Generator(start_date=start_date, end_date=end_date).generate(fresh_event).values
59,940 | paulosalem/time-blender | refs/heads/master | /time_blender/util.py | import random
import pandas as pd
import numpy as np
# A counter to be used when creating fresh names
import time_blender
def fresh_id():
    """Return the next unique integer id (0, 1, 2, ... across calls)."""
    next_value = fresh_id.counter + 1
    fresh_id.counter = next_value
    return next_value


# Module-level counter backing fresh_id(); starts at -1 so the first id is 0.
fresh_id.counter = -1
def set_random_seed(seed):
    """Seed all random sources used by the library.

    A ``None`` seed is a no-op, leaving the generators in their current state.
    """
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    # Add any other source of randomness here
def shift_weekend_and_holidays(day, direction='forward', holidays=None):
    """Move *day* off weekends and holidays.

    Walks one day at a time until landing on a date that is neither a Saturday,
    a Sunday, nor listed in *holidays*.

    :param day: A pandas Timestamp (or compatible) date.
    :param direction: 'forward' moves to later dates; any other value moves to
                      earlier dates (matching the original recursive behavior).
    :param holidays: Optional collection of dates to treat as non-business days.
    :return: The first acceptable date at, after, or before *day*.
    """
    # Fix: avoid the mutable default argument ([]) shared across calls.
    if holidays is None:
        holidays = []

    step = 1 if direction == 'forward' else -1

    # Iterative form of the original recursion; avoids hitting the recursion
    # limit on long stretches of holidays while producing identical results.
    while (day.weekday() == 5) or (day.weekday() == 6) or (day in holidays):
        day = day + pd.DateOffset(days=step)
    return day
def is_sequence(x):
    """Return True when *x* is a list, pandas Series or numpy array."""
    sequence_types = (list, pd.Series, np.ndarray)
    return isinstance(x, sequence_types)
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
import logging
import math
import random
import copy
import numpy as np
from numpy.random import random_integers
import pandas as pd
import functools
import matplotlib
import matplotlib.pyplot as plt
from hyperopt import STATUS_OK, STATUS_FAIL
from scipy import signal
from scipy.stats import ks_2samp
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
import hyperopt
from joblib import Parallel, delayed
from time_blender.config import LEARNING_CONFIG
from time_blender.util import fresh_id, is_sequence
###############################################################################
# Auxiliary functions.
###############################################################################
def wrapped_constant_param(prefix, name, value, **kwargs):
    """
    Wraps the specified value as a ConstantEvent if it is numeric and not already an event.
    Sequences are wrapped element-wise, producing a list of wrapped values.

    :param prefix: A prefix for the name of the event.
    :param name: The name of the event.
    :param value: The value to be wrapped.
    :param kwargs: Additional arguments to pass to ConstantEvent's constructor.
    :return: The wrapped value (or the value unchanged if it needs no wrapping).
    """
    # default result
    res = value

    if not isinstance(value, Event):
        # Fix: bool must be tested BEFORE int/float. In Python, bool is a
        # subclass of int, so the original int/float test captured booleans
        # first and the bool branch was unreachable dead code.
        if isinstance(value, bool):
            res = ConstantEvent(int(value), name=f'{prefix}_{name}', **kwargs)
        elif isinstance(value, (int, float)):
            res = ConstantEvent(value, name=f'{prefix}_{name}', **kwargs)
        elif is_sequence(value):
            res = [wrapped_constant_param(prefix, f'{name}_{i}', v, **kwargs) for i, v in enumerate(value)]

    return res
###############################################################################
# Auxiliary structures
###############################################################################
class Invariant:
    """A named predicate over a set of events.

    Bundles a function ``f(t, named_events) -> bool`` with the events it
    inspects, so it can be evaluated at any time step via :meth:`check`.
    """

    def __init__(self, invariant_func, events, description=""):
        """
        :param invariant_func: Callable taking ``(t, named_events_dict)`` and returning a bool.
        :param events: Either a dict mapping names to events, or a list of events
                       (whose ``name`` attributes must be unique).
        :param description: Human-readable description reported on violation.
        """
        self._invariant_func = invariant_func
        self.description = description

        if isinstance(events, dict):
            self._named_events = events
            return

        # Build the name -> event mapping, rejecting duplicate names.
        named = {}
        for event in events:
            assert event.name not in named, f"Event names cannot be duplicated, but we found " \
                                            f"two named '{event.name}'"
            named[event.name] = event
        self._named_events = named

    def check(self, t) -> (bool, str):
        """Evaluate the invariant at time *t*; returns ``(holds, description)``."""
        holds = self._invariant_func(t, self._named_events)
        return holds, self.description
###############################################################################
# Core event classes
###############################################################################
class Event:
    """Base class for all time-series events.

    An event produces one value per time step via :meth:`execute`. Events form
    a causal graph: attributes (or list attributes) that are themselves Events
    are treated as causal parameters and are reset/learned recursively.
    Generated values are cached per time step, and re-entrancy locks guard
    against infinite recursion in cyclic event graphs.
    """

    # TODO remove push_down, since it must always be True?
    def __init__(self, name=None, parallel_events=None, push_down=True, allow_learning=True, invariants=None):
        """
        :param name: Optional event name; a unique one is generated if None.
        :param parallel_events: Event(s) to execute alongside this one at each step.
        :param push_down: Whether executed values are propagated to causal children.
        :param allow_learning: Whether this event's parameters participate in learning.
        :param invariants: Optional list of Invariant objects checked at each execution.
        """
        self.name = self._default_name_if_none(name)
        self._indexed_generated_values = {}  # time to value
        self._generated_values = []  # sequence of values
        self._last_pos = -1
        self._allow_learning = allow_learning
        self._invariants = invariants

        # determine which attributes are causal events
        self._init_causal_parameters()

        # set parallel events
        self.parallel_events = None
        if parallel_events is not None:
            self.parallel_to(parallel_events)
        else:
            self.parallel_events = None

        self.push_down = push_down
        self.parent_value = None

        # to avoid infinite recursions, locks are available for some methods
        self._execution_locked = False
        self._push_down_locked = False

        # scaling parameters, used to rescale the generated values if needed
        self._scaling_max = None
        self._scaling_min = None

    def _init_causal_parameters(self):
        """Collects every Event found in this object's attributes (directly or
        inside list attributes) as a causal parameter."""
        self._causal_parameters = []
        for k, v in self.__dict__.items():
            # events might be given as attributes
            if isinstance(v, Event):
                self._causal_parameters.append(v)

            # events might be stored in lists
            elif isinstance(v, list) and k != '_causal_parameters':  # the key verification avoids an infinite loop
                for element in v:
                    if isinstance(element, Event):
                        self._causal_parameters.append(element)

    def _wrapped_param(self, prefix, name, value, **kwargs):
        """Convenience wrapper around the module-level wrapped_constant_param."""
        return wrapped_constant_param(prefix=prefix, name=name, value=value, **kwargs)

    def _causal_parameters_closure(self, only_learnable=True):
        """Returns the transitive closure of causal parameters, optionally
        restricted to events whose learning is allowed. May contain duplicates
        if the causal graph is not a tree."""
        closure = []
        for event in self._causal_parameters:
            # should the event be added?
            if only_learnable and (not event._allow_learning):
                add = False
            else:
                add = True

            # if so, add it
            if add:
                closure.append(event)
                closure = closure + event._causal_parameters_closure(only_learnable=only_learnable)

        return closure

    def execute(self, t):
        """
        Executes the event and generates an output for the present moment.

        :param t: The time in which the event takes place.
        :return: The scalar value of the event in the specified moment, or None
                 if the event is already executing (re-entrant call).
        """

        ######################
        # Auxiliary functions
        ######################
        def aux_check_invariants(t):
            invariants_hold, failed_invariant_description = self._check_invariants(t)
            if not invariants_hold:
                raise AssertionError(f"Invariant violated: {failed_invariant_description}")

        ###################
        # Main execute
        ###################
        if not self._execution_locked:
            self._execution_locked = True

            # check whether invariants hold
            aux_check_invariants(t)

            # update parallel events
            if self.parallel_events is not None:
                for e in self.parallel_events:
                    e.execute(t)

            # process this event (cached: each time step is computed only once)
            if t not in self._indexed_generated_values:
                self._last_pos += 1
                v = self._execute(t, self._last_pos)

                # save the value for future reference and to avoid recomputing
                self._indexed_generated_values[t] = v
                self._generated_values.append(v)

            res = self._indexed_generated_values[t]

            # Result might be used by underlying events as well. So it must be pushed down.
            self._push_down(t, res)

            self._execution_locked = False
            return res
        else:
            return None

    def _execute(self, t, i):
        """Computes the event's value at time *t* (position *i* in the series).
        Must be implemented by concrete subclasses."""
        raise NotImplementedError("Must be implemented by concrete subclasses.")

    def add_invariant(self, invariant: Invariant):
        """Registers an invariant to be checked at every execution. Returns self."""
        if self._invariants is None:
            self._invariants = []
        self._invariants.append(invariant)
        return self

    def add_invariants(self, invariants):
        """Registers several invariants at once."""
        for invariant in invariants:
            self.add_invariant(invariant)

    def _check_invariants(self, t):
        """Checks all registered invariants at time *t*.

        :return: (True, "") if all hold; (False, description) for the first failure.
        """
        if self._invariants is not None:
            for invariant in self._invariants:
                holds, description = invariant.check(t)
                if not holds:
                    return False, description  # at least one invariant failed

            return True, ""  # no invariant failed, so they all hold
        else:
            return True, ""  # nothing to possibly fail

    def _push_down(self, t, parent_value):
        """
        Given a value executed at the specified moment by a parent event, pushes it down to its children
        events. That is to say, provides a downward path for executed values, opposite to the the regular
        information flow from children to parent.

        :param t: The execution moment.
        :param parent_value: The value executed by a parent.
        :return: None
        """
        # to avoid infinite loops in situations of mutual dependency, reentrancy cannot be allowed.
        if not self._push_down_locked:
            self._push_down_locked = True

            # by default, saves the parent value, which can later be used by any other object as necessary
            self.parent_value = parent_value

            # also allows custom operations
            self._capture_push_down_value(t, parent_value)

            if self.push_down:
                for e in self._causal_parameters:
                    e._push_down(t, parent_value)

            self._push_down_locked = False

    def _capture_push_down_value(self, t, parent_value):
        """
        Receives a value executed by a parent at the specified time and, if needed, stores this value locally.
        By default, actually nothing is stored, concrete subclasses must overload this method to do so.
        For example, events that are stateful and supposed to track some current value might store values
        produced by parents.

        :param t:
        :param parent_value:
        :return:
        """
        pass

    def _value_or_execute_if_event(self, var_name, x, t):
        """Returns x.execute(t) when *x* is an Event, otherwise returns *x* unchanged."""
        if isinstance(x, Event):
            return x.execute(t)
        else:  # it is a scalar value
            return x

    def parallel_to(self, events):
        """Registers event(s) to be executed in parallel with this one. Returns self."""
        if self.parallel_events is None:
            self.parallel_events = []

        if isinstance(events, list):
            for pe in events:
                if isinstance(pe, Event):
                    self.parallel_events.append(pe)
        elif isinstance(events, Event):
            self.parallel_events.append(events)
        else:
            raise ValueError("Either a list of events or an event must be specified.")

        return self

    def value_at_pos(self, i):
        """Returns the i-th generated value (by generation order)."""
        return self._generated_values[i]

    def value_at(self, t):
        """Returns the value generated at time *t* (KeyError if not yet generated)."""
        return self._indexed_generated_values[t]

    def _default_name_if_none(self, name=None):
        """Returns *name*, or a fresh unique name derived from the class name."""
        if name is None:
            return f"{type(self).__name__}_{str(fresh_id())}"
        else:
            return name

    def reset(self):
        """
        Cleans all caches in order to allow the reuse of the event in a new generation process.
        :return: None
        """
        self._indexed_generated_values = {}
        self._generated_values = []
        self._last_pos = -1

        # clear causes too, if the object is not locked
        if not self._execution_locked:
            self._execution_locked = True
            for e in self._causal_parameters:
                e.reset()
            self._execution_locked = False

        self.parent_value = None

    def is_root_cause(self):
        """
        Checks whether this event depend on any other or not (i.e., it is a root cause).
        :return: True if no dependecies exist; False otherwise.
        """
        return len(self._causal_parameters) == 0

    def __str__(self):
        return self.name

    # Arithmetic operators combine events into closure-free LambdaEvents
    # (the operands travel via sub_events, never via the lambda's closure).
    def __add__(self, other):
        return LambdaEvent(lambda t, i, mem, sub_events: sub_events['a'].execute(t) + sub_events['b'].execute(t),
                           sub_events={'a': self, 'b': other})

    def __sub__(self, other):
        return LambdaEvent(lambda t, i, mem, sub_events: sub_events['a'].execute(t) - sub_events['b'].execute(t),
                           sub_events={'a': self, 'b': other})

    def __mul__(self, other):
        return LambdaEvent(lambda t, i, mem, sub_events: sub_events['a'].execute(t) * sub_events['b'].execute(t),
                           sub_events={'a': self, 'b': other})

    def __truediv__(self, other):
        return LambdaEvent(lambda t, i, mem, sub_events: sub_events['a'].execute(t) / sub_events['b'].execute(t),
                           sub_events={'a': self, 'b': other})

    def _push_constants_down(self, scalar_values):
        """
        Recursively attributes the constants named in the specified dictionary to the appropriate events.

        :param scalar_values: A dict with the named values.
        :return:
        """
        # to avoid infinite loops in situations of mutual dependency, reentrancy cannot be allowed.
        if not self._push_down_locked:
            self._push_down_locked = True
            try:
                for event in self._causal_parameters:
                    if isinstance(event, ConstantEvent):
                        name = event.name
                        if name in scalar_values:
                            value = scalar_values[name]

                            # check whether the value is within the required bounds, and if not enforce it.
                            if event.require_lower_bound is not None and value < event.require_lower_bound:
                                event.constant = event.require_lower_bound
                            elif event.require_upper_bound is not None and value > event.require_upper_bound:
                                event.constant = event.require_upper_bound
                            else:
                                event.constant = value
            except KeyError as err:
                logging.debug(err)

            for event in self._causal_parameters:
                event._push_constants_down(scalar_values)

            self._push_down_locked = False

    def generalize_from_observations(self, observed_traces,
                                     n_simulations=20, max_optimization_evals=300,
                                     upper_bound=None, lower_bound=None,
                                     error_strategy='best_trace',
                                     error_metric='mae',
                                     generator=None,
                                     sample_proportion=1.0,
                                     verbose=False):
        """
        Given various observations, learn the model parameters that best fit them.

        :param observed_traces: A sequence of sequences of observations. E.g., [[1, 0, 1, 0, 1, 1], [1, 1, 0], ...];
                                or a sequence of Series indexed by timestamps.
        :param n_simulations: How many simulations per observed trace are to be performed when calculating the error.
        :param max_optimization_evals: How many passes to perform on the optimization procedure.
        :param lower_bound: If not otherwise given, this will be the lower bound of parameters being optimized.
        :param upper_bound: If not otherwise given, this will be the upper bound of parameters being optimized.
        :param error_strategy: The calculation strategy to use for the error function.
                                 'best_trace' indicates that the error will consider the best trace only, ignoring
                                 the others;
                                 'all_traces' indicates that all traces will be considered equally.
        :param error_metric: How to measure the difference between two traces.
        :param generator: Specify the generator to consider when learning from data. If the observed traces
                          also include timestamps, this will force the learning mechanisms to align the temporal
                          indexes (date and time) for each point generated.
        :param sample_proportion: A float from 0.0 to 1.0 indicating how much of each trace should be used. This
                                  is useful to speed-up learning when traces are too large.
        :param verbose: Whether to print auxiliary information.
        :return:
        """
        # TODO factor the learning mechanism out, so that different mechanisms can be experimented with.

        # Computes the chosen error metric, repairing inf/nan values on failure.
        def error(y_true, y_pred):
            if error_metric == 'mse':
                err_func = mean_squared_error
            elif error_metric == 'mae':
                err_func = mean_absolute_error
            elif error_metric == 'ks':
                err_func = lambda y_pred, y_true: ks_2samp(y_pred, y_true)[0]
            elif error_metric == 'cross-correlation':
                def aux_cross_corr(y_pred, y_true):
                    corr = signal.correlate(y_pred, y_true, mode='same')
                    return -sum(corr)
                err_func = aux_cross_corr
            else:
                raise ValueError(f"Invalid metric function: {error_metric}.")

            try:
                #print(f'Predicted vs True: {y_pred}, {y_true}')
                return err_func(y_pred=y_pred, y_true=y_true)
                # return mean_absolute_error(y_pred=y_pred, y_true=y_true)
            except ValueError:
                # replace inf/nan by a large default value and retry once
                #print(f'Predicted vs True (under error): {y_pred}, {y_true}')
                y_pred[np.isinf(y_pred)] = LEARNING_CONFIG['large_default_value']
                y_pred[np.isnan(y_pred)] = LEARNING_CONFIG['large_default_value']
                y_true[np.isinf(y_true)] = LEARNING_CONFIG['large_default_value']
                y_true[np.isnan(y_true)] = LEARNING_CONFIG['large_default_value']
                return err_func(y_pred=y_pred, y_true=y_true)

        # objective function for black box optimization
        def aux_objective_function(args):
            try:
                # Change the constant parameters
                self._push_constants_down(args)

                #
                # Consider each observed trace in order to calculate the error. We'll do this in parallel.
                #

                # The function to run in parallel over each trace. It returns the error over that trace w.r.t. current
                # simulation parameters.
                def aux_trace_error(trace):
                    nonlocal generator

                    #
                    # calculate dates and temporal lengths
                    #
                    n_obs = len(trace)
                    if isinstance(trace, pd.Series):
                        trace_idx = trace.index
                        start_date = trace_idx[0]
                        end_date = trace_idx[-1]
                    else:
                        trace_idx = None
                        start_date = pd.Timestamp.today()
                        end_date = start_date + pd.offsets.Day(n_obs)

                    if generator is None:
                        generator = Generator(start_date=start_date, end_date=end_date)

                    #
                    # run simulation a few times
                    #
                    sim_outputs = []
                    trace_segments = []
                    for i in range(0, n_simulations):
                        # If requested, use only part of the available data.
                        if sample_proportion < 1.0:
                            sim_index = pd.date_range(generator.start_date, generator.end_date, freq=generator.freq)
                            sim_length = math.ceil(len(sim_index)*sample_proportion)
                            sim_start_pos = random.randint(0, len(sim_index)-1)
                            sim_index = sim_index[sim_start_pos:sim_start_pos + sim_length]
                            sim_start_date = sim_index[0]
                            sim_end_date = sim_index[-1]
                        else:
                            sim_start_date = generator.start_date
                            sim_end_date = generator.end_date

                        res = generator.generate(self, new_start_date=sim_start_date, new_end_date=sim_end_date)

                        # is the generated index included in the observed trace index?
                        if trace_idx is not None:
                            # ensure the simulated index is within the observed trace one ...
                            # NOTE(review): isin() returns an array; `not` on a multi-element
                            # array raises ValueError, which is caught below as STATUS_FAIL —
                            # confirm whether `not res.index.isin(trace_idx).all()` was intended.
                            if not res.index.isin(trace_idx):
                                raise ValueError("When the observed trace contains a temporal index, the simulated"
                                                 " index must be included therein.")

                            # ... and then only keep the intersection points for comparison. This will
                            # align the _dates_ of the values when available.
                            trace_segment = trace[trace.index.isin(res.index)]
                            trace_segments.append(trace_segment.values)
                        else:
                            trace_segment = trace[:len(res)]
                            trace_segments.append(trace_segment)

                        sim_outputs.append(res.values[:len(trace_segment), :])

                    #
                    # calculate error in relation to observations
                    #
                    sim_outputs_flattened = functools.reduce(lambda x, y: np.concatenate([x, y],
                                                                                         axis=None),
                                                             sim_outputs)

                    trace_copies_flattened = functools.reduce(lambda x, y: np.concatenate([x, y],
                                                                                          axis=None),
                                                              trace_segments)

                    return error(trace_copies_flattened,
                                 sim_outputs_flattened)

                #errors = [aux_trace_error(trace) for trace in rescaled_traces]
                errors = Parallel(n_jobs=-2)(delayed(aux_trace_error)(trace) for trace in observed_traces)

                # decide how to compute the final error. Focus on specific traces or consider all of them?
                if error_strategy == 'best_trace':
                    err = min(errors)  # selects the error w.r.t. the best trace
                    min_error_trace_pos = np.argmin(errors)
                elif error_strategy == 'all_traces':
                    err = np.mean(errors)
                    min_error_trace_pos = None
                else:
                    raise ValueError(f"Invalid error_strategy: {error_strategy}.")

                return {'loss': err, 'status': STATUS_OK, 'min_error_trace_pos': min_error_trace_pos}

            except AssertionError as ae:
                # invariant/bound violations make this parameter set infeasible
                return {'status': STATUS_FAIL}
            except ValueError as ve:
                return {'status': STATUS_FAIL}

        params = self._causal_parameters_closure(only_learnable=True)
        # for p in params:
        #     print(p)

        #
        # define parameter search space
        #
        space = {}
        for param in params:
            if isinstance(param, ConstantEvent):
                # acquire upper bound
                if param.require_upper_bound is not None:
                    ub = param.require_upper_bound
                    # print(f"Upper bound constraint found: {ub}")
                else:
                    ub = upper_bound

                # acquire lower bound
                if param.require_lower_bound is not None:
                    lb = param.require_lower_bound
                    # print(f"Lower bound constraint found: {lb}")
                else:
                    lb = lower_bound

                # the actual random variables
                if (lb is not None) and (ub is not None) and \
                        (param.learning_normal_mean is None) and (param.learning_normal_std is None):
                    # if we have both bounds and nothing else, let's use them
                    space[param.name] = hyperopt.hp.uniform(param.name, lb, ub)
                else:
                    normal_mean = \
                        param.learning_normal_mean if param.learning_normal_mean is not None else param.constant
                    normal_std = \
                        abs(param.learning_normal_std if param.learning_normal_std is not None else normal_mean)
                    space[param.name] = hyperopt.hp.normal(param.name, mu=param.constant, sigma=max(normal_std, 1))
                    # TODO somehow also enforce upper and lower bounds here using some hyperopt mechanism

        if verbose:
            print(f"Considering {len(space)} variables.")

        # optimize
        trials = hyperopt.Trials()
        best = hyperopt.fmin(aux_objective_function, space, algo=hyperopt.tpe.suggest,
                             max_evals=max_optimization_evals, trials=trials)
        # hyperopt.tpe.suggest
        # hyperopt.anneal.suggest
        # hyperopt.atpe.suggest
        # hyperopt.rand.suggest

        if verbose:
            print(best)
            print(f'Best trial = {trials.best_trial}')

        # NOTE(review): best_min_error_trace_pos is computed but never used or returned.
        best_min_error_trace_pos = trials.best_trial['result']['min_error_trace_pos']

        # propagate the learned parameters down
        self._push_constants_down(best)

    def clone(self):
        """
        Produces a copy of the present object, ensuring that elements that must be unique or shared are indeed so.

        :return: A clone of the object.
        """
        # custom implementation of __deepcopy__ ensure that names are actually unique, among other details.
        return copy.deepcopy(self)

    def _fix_copy(self, c):
        """Post-processes a copy: gives it a fresh unique name and shares (not
        copies) the parallel-events list."""
        # adjust elements that must be unique or have references preserved
        c.name = f"{self.name}_clone-{str(fresh_id())}"
        c.parallel_events = self.parallel_events

        return c

    def __copy__(self):
        # adapted from
        # https://stackoverflow.com/questions/1500718/how-to-override-the-copy-deepcopy-operations-for-a-python-object
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        self._fix_copy(result)
        return result

    def __deepcopy__(self, memo):
        # adapted from
        # https://stackoverflow.com/questions/1500718/how-to-override-the-copy-deepcopy-operations-for-a-python-object
        if id(self) not in memo:
            cls = self.__class__
            result = cls.__new__(cls)
            # register the new object in memo BEFORE copying attributes, so
            # circular references resolve to it instead of recursing forever
            memo[id(self)] = result
            for k, v in self.__dict__.items():
                setattr(result, k, copy.deepcopy(v, memo))
            self._fix_copy(result)
        else:
            result = memo[id(self)]

        return result
###############################################################################
# More core events
###############################################################################
class ConstantEvent(Event):
    """An event that always produces the same (possibly event-valued) constant.

    Optional bounds are checked on construction and on every execution (the
    learning mechanism may push new constants down). The ``learning_normal_*``
    parameters shape the search prior used by
    ``Event.generalize_from_observations``.
    """

    def __init__(self, constant=0.0,
                 require_lower_bound=None,
                 require_upper_bound=None,
                 learning_normal_mean=None,
                 learning_normal_std=None,
                 name=None, parallel_events=None, push_down=True, allow_learning=True):
        """
        :param constant: The value to produce; may itself be an Event.
        :param require_lower_bound: If given, constant must be >= this value.
        :param require_upper_bound: If given, constant must be <= this value.
        :param learning_normal_mean: Mean of the normal prior used during learning.
        :param learning_normal_std: Std of the normal prior used during learning.
        """
        super().__init__(name, parallel_events, push_down, allow_learning)
        self.constant = constant
        self.require_lower_bound = require_lower_bound
        self.require_upper_bound = require_upper_bound
        self.learning_normal_mean = learning_normal_mean
        self.learning_normal_std = learning_normal_std

        self._check_constraints()

    def _execute(self, t, i):
        # re-check: learning may have pushed a new constant down since __init__
        self._check_constraints()
        return self._value_or_execute_if_event('constant', self.constant, t)

    def _check_constraints(self):
        """Raises AssertionError when the constant violates a declared bound."""
        if (self.require_lower_bound is not None) and (self.constant < self.require_lower_bound):
            # Fix: the original message claimed the constant "must be positive",
            # which is wrong for any lower bound other than zero; report the
            # actual bound, mirroring the upper-bound message below.
            raise AssertionError(f"Constraint violation: constant must be greater than or equal to the lower bound "
                                 f"{self.require_lower_bound}, but was {self.constant}.")

        if (self.require_upper_bound is not None) and (self.constant > self.require_upper_bound):
            raise AssertionError(f"Constraint violation: constant must be less than or equal to the upper bound "
                                 f"{self.require_upper_bound}, but was {self.constant}.")
class LambdaEvent(Event):
    """An event whose value is computed by a user-supplied function.

    The function is called as ``func(t, i, memory, sub_events)`` where *t* is
    the current timestamp, *i* the position in the series, *memory* a dict the
    function may use to keep state between calls, and *sub_events* the dict of
    child events. The function must be closure-free so that deep copies of the
    event remain valid (copied sub-events are re-bound via ``sub_events``,
    which a closure would bypass).
    """

    def __init__(self, func, sub_events=None, name=None, parallel_events=None, push_down=True, allow_learning=True):
        """
        :param func: A closure-free callable ``(t, i, memory, sub_events) -> value``.
        :param sub_events: Optional dict mapping names to child events.
        :raises ValueError: If *func* has a non-empty closure.
        """
        super().__init__(name, parallel_events, push_down, allow_learning)
        self.func = func
        # Fix: the original default was a shared mutable dict ({}); use None as
        # the default so every instance gets its own dictionary.
        self.sub_events = {} if sub_events is None else sub_events
        self.memory = {}
        self._make_sub_events_causal(self.sub_events)

        if self.func.__closure__ is not None:
            raise ValueError(f"The specified function cannot depend on free variables. "
                             f"Its closure must be empty, but was {self.func.__closure__} instead!")

    def _make_sub_events_causal(self, sub_events):
        """Registers each sub-event as a causal parameter of this event."""
        if sub_events is not None:
            for key, se in sub_events.items():
                if isinstance(se, Event):
                    self._causal_parameters.append(se)

    def _execute(self, t, i):
        return self.func(t, i, self.memory, self.sub_events)

    def reset(self):
        # the user function's scratch state must also be cleared
        self.memory = {}
        super().reset()

    def __copy__(self):
        result = LambdaEvent(func=self.func, sub_events=self.sub_events,
                             name=self.name, parallel_events=self.parallel_events, push_down=self.push_down,
                             allow_learning=self._allow_learning)
        self._fix_copy(result)
        return result

    def __deepcopy__(self, memo):
        if id(self) not in memo:
            result = LambdaEvent(func=self.func, sub_events={},
                                 name=self.name, parallel_events=self.parallel_events, push_down=self.push_down,
                                 allow_learning=self._allow_learning)

            # to handle circular references, we need to first update the memo dict with the new reference
            # for the present object before deepcopying its attributes.
            memo[id(self)] = result
            sub_events_deepcopy = copy.deepcopy(self.sub_events, memo)

            # update deepcopied attributes
            result.sub_events = sub_events_deepcopy
            result._init_causal_parameters()
            result._make_sub_events_causal(sub_events_deepcopy)

            self._fix_copy(result)
        else:
            result = memo[id(self)]

        return result
###############################################################################
# Generating mechanisms and other related conveniences
###############################################################################
class Generator:
    """Drives events over a date range to produce pandas DataFrames."""

    def __init__(self, start_date, end_date, between_time=None, freq='D', filter_func=lambda df: df):
        """
        Creates a new time series generator using the specified events. The names of the events will be used
        later to name the columns of generated DataFrames.

        :param start_date: The first date of the series to generate.
        :param end_date: The last date of the series to generate.
        :param between_time: A pair which, when specified, defines start and end times for every day generated.
        :param freq: The frequency to be used, in terms of Pandas frequency strings.
        :param filter_func: A filter function to apply to the generated data before returning.
        """
        self.between_time = between_time
        self.freq = freq
        self.filter_func = filter_func
        self.start_date = start_date
        self.end_date = end_date

    def generate(self, events, n=1, new_start_date=None, new_end_date=None):
        """
        Generates time series from the model assigned to the present generator.

        :param events: Either a list of events or a dict of events. In the former case, the name of each
               event is retrieved from the event itself. In the latter case, the user can specify new names.
        :param n: The number of series to generate.
        :param new_start_date: If specified, overrides the class' start_date for this particular generation.
        :param new_end_date: If specified, overrides the class' end_date for this particular generation.
        :return: A list of generated DataFrames when n > 1, or a single DataFrame when n == 1.
        """
        #
        # Setup events to use
        #
        self._set_events(events)

        # calculate proper start date
        if new_start_date is not None:
            start = new_start_date
        else:
            start = self.start_date

        # calculate proper end date
        if new_end_date is not None:
            end = new_end_date
        else:
            end = self.end_date

        #
        # Generate data from the given events.
        #
        generated_data = []
        for i in range(0, n):
            values = {}
            dates = pd.date_range(start, end, freq=self.freq)

            for name in self.named_events:
                self.named_events[name].reset()  # clears the cache

            # outer loop over time so parallel/coordinated events stay in sync
            for t in dates:
                for name in self.named_events:
                    if name not in values:
                        values[name] = []
                    values[name].append(self.named_events[name].execute(t))

            df = pd.DataFrame(values, index=dates)
            if self.between_time is not None:
                df = df.between_time(self.between_time[0], self.between_time[1])

            df = self.filter_func(df)
            generated_data.append(df)

        if len(generated_data) > 1:
            return generated_data
        else:
            return generated_data[0]

    def _set_events(self, events):
        """Normalizes *events* (dict, list or single Event) into the
        self.named_events name -> event mapping."""
        if isinstance(events, dict):
            self.named_events = events
        else:
            self.named_events = {}

            # make iterable
            if not isinstance(events, list):
                events = [events]

            for i, event in enumerate(events):
                if event.name is not None:
                    self.named_events[event.name] = event
                else:
                    self.named_events['Event ' + str(i)] = event
def generate(model, start_date, end_date, n=1, freq='D', filter_func=lambda df: df):
    """
    A convenience method to generate time series from the specified model using the default generator.

    :param model: The model from which the data is to be generated.
    :param start_date: The first date of the series to generate.
    :param end_date: The last date of the series to generate.
    :param n: The number of series to generate.
    :param freq: The frequency to be used, in terms of Pandas frequency strings.
    :param filter_func: A filter function to apply to the generated data before returning.
    :return: A list of generated time series.
    """
    default_generator = Generator(start_date=start_date,
                                  end_date=end_date,
                                  freq=freq,
                                  filter_func=filter_func)
    return default_generator.generate(model, n=n)
def generate_and_plot(model, start_date, end_date, n=1, freq='D', return_data=False, filter_func=lambda df: df,
                      grid=True):
    """
    A convenience method to generate time series from the specified model using the default generator, and also plot
    them.

    :param model: The model from which the data is to be generated.
    :param start_date: The first date of the series to generate.
    :param end_date: The last date of the series to generate.
    :param n: The number of series to generate.
    :param freq: The frequency to be used, in terms of Pandas frequency strings.
    :param return_data: Whether to return the generated data or not. The default is False, useful when
                        usin Jupyter notebooks only to show the charts, without further data processing.
    :param filter_func: A filter function to apply to the generated data before returning.
    :param grid: Whether to draw a grid on the plots.
    :return: A list of generated time series (only when return_data is True).
    """
    data = generate(model, start_date, end_date, n=n, freq=freq, filter_func=filter_func)

    # plot
    def aux_plot(df):
        df.plot(grid=grid)

    # generate() returns a single DataFrame when n == 1 and a list otherwise
    for i in range(0, n):
        aux_plot(data[i]) if n > 1 else aux_plot(data)

    plt.show()

    if return_data:
        return data
def save_to_csv(data, common_name, **kwargs):
    """Save each DataFrame in *data* to a CSV file named ``<common_name>_<i>``.

    :param data: An iterable of pandas DataFrames.
    :param common_name: The common file name prefix (index is appended).
    :param kwargs: Extra keyword arguments forwarded to DataFrame.to_csv.
    """
    for i, df in enumerate(data):
        # Fix: the original concatenated str + int (TypeError); format instead.
        name = f"{common_name}_{i}"
        # Fix: kwargs must be unpacked, not passed as a positional dict
        # (the original passed the dict as to_csv's second positional argument).
        df.to_csv(name, **kwargs)
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
import copy
import numpy as np
from tests.common import AbstractTest
from time_blender.coordination_events import Piecewise, Replicated, PastEvent
from time_blender.core import Generator, ConstantEvent, LambdaEvent
from time_blender.deterministic_events import WalkEvent
from time_blender.models import BankingModels, ClassicModels
from time_blender.random_events import NormalEvent, UniformEvent, PoissonEvent, TopResistance, BottomResistance
class TestEvent(AbstractTest):
    """Tests for core event behavior: cloning, constant generation and
    arithmetic composition of events."""

    def setUp(self):
        super().setUp()

    def test_clone(self):
        """Clones must be structurally identical but carry fresh, unique names
        (both top-level and for nested causal parameters), so that learned
        parameters do not collide between copies."""
        #
        # Let's test the copy strategy using a piecewise model.
        #
        banking_model_1 = BankingModels.salary_earner(salary=ConstantEvent(5000.0,
                                                                           require_lower_bound=0,
                                                                           require_upper_bound=30000),
                                                      expense_mean=ConstantEvent(100.0,
                                                                                 require_lower_bound=0,
                                                                                 require_upper_bound=1000),
                                                      expense_sd=ConstantEvent(100.0,
                                                                               require_lower_bound=0,
                                                                               require_upper_bound=30000))

        banking_model_2 = banking_model_1.clone()
        banking_model_3 = banking_model_1.clone()

        # top level names must be unique
        self.assertNotEqual(banking_model_1.name, banking_model_2.name)
        self.assertNotEqual(banking_model_1.name, banking_model_3.name)
        self.assertNotEqual(banking_model_2.name, banking_model_3.name)

        # nested names must also be unique
        self.assertNotEqual(banking_model_1._causal_parameters[0].name,
                            banking_model_2._causal_parameters[0].name)

        # classes must be equal, though
        self.assertEqual(banking_model_1._causal_parameters[0].__class__,
                         banking_model_2._causal_parameters[0].__class__)

        t_separator_1 = NormalEvent(ConstantEvent(60.0,
                                                  require_lower_bound=0,
                                                  require_upper_bound=100),
                                    ConstantEvent(20.0,
                                                  require_lower_bound=0,
                                                  require_upper_bound=100))
        t_separator_2 = t_separator_1.clone()

        # top level names must be unique
        self.assertNotEqual(t_separator_1.name, t_separator_2.name)

        # a piecewise model built from the clones must still generate cleanly
        pw = Piecewise([banking_model_1, banking_model_2, banking_model_3],
                       t_separators=[t_separator_1, t_separator_2])

        res = self.common_model_test(pw)

    def test_clone_2(self):
        """Replicated clones of a LambdaEvent must produce independent series."""
        base_event = NormalEvent() + PoissonEvent()

        def aux(t, i, memory, sub_events):
            # doubles the base event's output
            res = 2 * sub_events['base'].execute(t)
            return res
        print(aux.__closure__)

        base_model = LambdaEvent(aux, sub_events={'base': base_event})
        model = Replicated(base_model, NormalEvent(mean=10, std=5), max_replication=2)

        data = self.common_model_test(model, n=2)
        # at least some of the last values must differ across the two series
        self.assertTrue((data[0].iloc[-10:-1].values != data[1].iloc[-10:-1].values).any())

    def test_clone_3(self):
        """Cloning a model containing a self-referential PastEvent must not break it."""
        pe = PastEvent(1)
        event = ConstantEvent(1) + pe
        pe.refers_to(event)  # creates a cycle: the past event refers to its parent

        self.common_model_test(event)

        # cloning must not break anything
        cloned_event = event.clone()
        self.common_model_test(cloned_event)
        self.assertNotEqual(event._causal_parameters[0].name, cloned_event._causal_parameters[0].name)

    def test_constant_generation(self):
        """A constant event must generate identical series on every run."""
        constant_event_1 = ConstantEvent(10)
        data_1 = self.common_model_test(constant_event_1, n=2)

        # series generated from a constant must have the same values
        self.assertTrue((data_1[0].iloc[-10:-1].values == data_1[1].iloc[-10:-1].values).all())

    def test_lambda_composition_generation(self):
        """Arithmetic compositions of a random event must remain stochastic."""
        #
        # Various composition strategies
        #
        events = [NormalEvent(0, 1),
                  NormalEvent(0, 1)*ConstantEvent(1000),
                  NormalEvent(0, 1)+ConstantEvent(1000),
                  NormalEvent(0, 1)-ConstantEvent(1000),
                  NormalEvent(0, 1)/ConstantEvent(1000)]

        data_sets = [self.common_model_test(e, n=2) for e in events]

        # check each composition behavior
        for data in data_sets:
            # different generated data series must have different values
            #print(data[0].iloc[-10:-1].values, data[1].iloc[-10:-1].values)
            self.assertFalse((data[0].iloc[-10:-1].values == data[1].iloc[-10:-1].values).all())
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,943 | paulosalem/time-blender | refs/heads/master | /tests/test_util.py | import unittest
import pandas as pd
from time_blender.util import shift_weekend_and_holidays
class TestUtil(unittest.TestCase):
    """Tests for time_blender.util helpers."""

    def test_shift_weekend_and_holidays(self):
        """shift_weekend_and_holidays must move weekend dates to the nearest
        business day in the requested direction and leave weekdays untouched."""

        def aux_check(a, b, direction='forward'):
            # No holidays supplied: only the weekend rule is exercised.
            shifted_day = shift_weekend_and_holidays(a, direction=direction, holidays=[])
            # assertEqual, not the deprecated assertEquals alias (removed in Python 3.12)
            self.assertEqual(shifted_day, b)

        # 2018-12-22 (Sat) and 23 (Sun) shift forward to Monday the 24th; Monday stays.
        aux_check(pd.Timestamp(2018, 12, 22), pd.Timestamp(2018, 12, 24))
        aux_check(pd.Timestamp(2018, 12, 23), pd.Timestamp(2018, 12, 24))
        aux_check(pd.Timestamp(2018, 12, 24), pd.Timestamp(2018, 12, 24))

        # Backward shifts land on the preceding Friday the 21st.
        aux_check(pd.Timestamp(2018, 12, 22), pd.Timestamp(2018, 12, 21), direction='backward')
        aux_check(pd.Timestamp(2018, 12, 23), pd.Timestamp(2018, 12, 21), direction='backward')
59,944 | paulosalem/time-blender | refs/heads/master | /time_blender/cli.py | ########################################################################################################################
# Command-Line Interface utils
########################################################################################################################
import pandas as pd
from sigtools.wrappers import decorator, wrapper_decorator
from clize import run, parser, Parameter
import ast
import matplotlib.pyplot as plt
import time_blender
from time_blender.util import set_random_seed
@parser.value_converter
def a_list(x):
    """CLI value converter: pass real lists through unchanged, otherwise parse
    the string as a Python literal (e.g. "[1, 2, 3]")."""
    return x if isinstance(x, list) else ast.literal_eval(x)
# Module-level CLI state. These act as defaults; cli_model derives local
# values from its arguments on each invocation.
using_cli = False  # presumably flipped to True by CLI entry points -- TODO confirm with callers
cli_end_date = pd.Timestamp.now()  # default last generated date: "now"
cli_begin_date = cli_end_date - pd.DateOffset(months=12)  # default window: the past 12 months
cli_output_file = None  # no output file by default (e.g. 'out.csv'); results returned as CSV text
cli_freq = 'D'  # default generation frequency: daily
cli_plot = False  # plotting disabled by default
@wrapper_decorator
def cli_model(model_func, *args, n: int=1, begin=None, end=None, freq='D', output_file=None, plot=False,
              random_seed=None, **kwargs: Parameter.IGNORE):
    """
    Wraps a model-building function so it can be driven from the command line.

    When the CLI is active (``using_cli``), the event returned by ``model_func``
    is generated ``n`` times over the requested date range, and the series are
    either saved to files or returned as CSV text. Otherwise this is a
    transparent pass-through to ``model_func``.

    :param model_func: The function that builds and returns the event/model.
    :param args: Positional arguments forwarded to ``model_func``.
    :param n: How many series are to be generated with the specified parameters.
    :param begin: The first date to be generated. Format: YYYY-MM-DD
    :param end: The last date to be generated. Format: YYYY-MM-DD
    :param freq: The frequency of the generation. Refer to Pandas documentation for the proper string values.
    :param output_file: A prefix to be used as the base of files in which to save the series. If this is omitted,
                        the series is just returned in the console.
    :param plot: Whether the series should be plotted. If an output file is defined, the base name is also used
                 to save an image.
    :param random_seed: Optional seed for reproducible generation.
    :param kwargs: Keyword arguments forwarded to ``model_func``.
    :return: CSV text of the generated series when no output file is given, otherwise None.
    """
    if using_cli:
        set_random_seed(random_seed)
        event = model_func(*args, **kwargs)

        # Resolve the generation window; default is the 12 months ending now.
        if end is None:
            cli_end_date = pd.Timestamp.now()
        else:
            cli_end_date = pd.to_datetime(end)

        if begin is None:
            cli_begin_date = cli_end_date - pd.DateOffset(months=12)
        else:
            cli_begin_date = pd.to_datetime(begin)

        cli_freq = freq
        cli_output_file = output_file

        g = time_blender.core.Generator([event])
        # Fixed: previously printed the end date twice instead of begin/end.
        print(cli_begin_date, cli_end_date)
        data = g.generate(cli_begin_date, cli_end_date, n=n, freq=cli_freq)

        results = None
        for i, d in enumerate(data):
            if cli_output_file is not None:
                # First series keeps the plain prefix; later ones are numbered.
                if i == 0:
                    name = f'{cli_output_file}'
                else:
                    name = f'{i}_{cli_output_file}'
                d.to_csv(name + '.csv')

                # Save a plot image alongside the CSV.
                plt.ioff()
                plt.clf()
                d.plot()
                plt.savefig(name + '.png')
            else:
                if results is None:
                    results = ""
                results += d.to_csv()  # without a path, returns a string with the CSV

                if plot:
                    d.plot()

        # Show all plots at once. Fixed: only open a window when something
        # was actually plotted (previously plt.show() ran even with plot=False).
        if cli_output_file is None and plot:
            plt.show()

        return results
    else:
        return model_func(*args, **kwargs)
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,945 | paulosalem/time-blender | refs/heads/master | /time_blender/coordination_events.py | from time_blender.core import Event
import time_blender.config as config
import random
import numpy as np
from numpy.random import choice
# Filters, connectors, etc.
from time_blender.random_events import BernoulliEvent
from time_blender.util import is_sequence
class OnceEvent(Event):
    """
    Executes and memoizes the result of an event, so that it is not recomputed later.
    """

    def __init__(self, event, name=None, parallel_events=None, push_down=True, allow_learning=True):
        self.event = event
        self.value = None  # cached result; None means "not executed yet"
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Compute on first use only; afterwards keep returning the cached value.
        cached = self.value
        if cached is None:
            cached = self.event.execute(t)
            self.value = cached
        return cached

    def reset(self):
        # Drop the memoized value so the next run recomputes it.
        self.value = None
        super().reset()
class ParentValueEvent(Event):
    """
    Exposes a parent value. If an explicit event is given, that event is
    executed and its parent value is reported; otherwise this event's own
    parent value is used, falling back to ``default`` on the first execution
    (when no parent value has been pushed down yet).
    """

    def __init__(self, event=None, default=0.0, name=None, parallel_events=None, push_down=True, allow_learning=False):
        # Event whose parent_value is observed; None means "observe my own parent".
        self.event = event
        # Fallback used before any parent value exists; wrapped as a parameter.
        self.default = self._wrapped_param(name, 'default', default)
        self.value = None  # last observed value
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        if self.event is not None:
            # Run the observed event first so its parent_value is up to date.
            self.event.execute(t)
            self.value = self.event.parent_value
        else:
            # if self.event is None, it means this event's parent value is desired.
            if self.parent_value is not None:
                self.value = self.parent_value
            else:
                # if it is the first execution, there's no parent_value yet.
                self.value = self.default.execute(t)

        return self.value

    def reset(self):
        self.value = None
        super().reset()
class CumulativeEvent(Event):
    """
    Executes and accumulates the result of an underlying event over time, so
    that the result is the cumulative sum of that event.
    """

    def __init__(self, event, name=None, parallel_events=None, push_down=True, allow_learning=True,
                 capture_parent_value=True):
        """
        :param event: The event whose outputs are summed over time.
        :param name:
        :param parallel_events:
        :param push_down:
        :param allow_learning:
        :param capture_parent_value: Whether the parent value should be used as the new current value to which
                                     the event's execution is added. This is useful to embed the present event
                                     into larger contexts and accumulate on top of their feedback.
        """
        self.event = event
        self.value = None  # running sum; None until the first execution
        self.capture_parent_value = capture_parent_value
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        increment = self.event.execute(t)
        # First sample starts the series; later samples add to the running sum.
        if self.value is None:
            self.value = increment
        else:
            self.value = self.value + increment
        return self.value

    def _capture_push_down_value(self, t, parent_value):
        # Optionally restart accumulation from the value fed back by the parent.
        if self.capture_parent_value:
            self.value = parent_value

    def reset(self):
        self.value = None
        super().reset()
class PastEvent(Event):
    """
    Refers to a past event considering a certain delay. Typically, we don't want to make the delay parameter
    learnable, so it is recommended to set allow_learning=False in the constructor.
    """

    def __init__(self, delay, undefined_value=0.0, refers_to=None, name=None, parallel_events=None, push_down=True,
                 allow_learning=False):
        # How many steps back to look; wrapped so it may itself be an event.
        self.delay = self._wrapped_param(name, 'delay', delay)
        # Value reported while the requested position lies before the series start.
        self.undefined_value = self._wrapped_param(name, 'undefined_value', undefined_value)
        self.event = refers_to  # target event; may be attached later via refers_to()
        super().__init__(name, parallel_events, push_down, allow_learning)

    def refers_to(self, event):
        """Attach the target event after construction (enables self-referential
        cycles, where the target contains this event). Returns self for chaining."""
        self.event = event
        self._init_causal_parameters()  # re-scan parameters now that the target exists
        return self

    def _execute(self, t, i):
        if self.event is None:
            raise ValueError("The event to which the present event refers to has not been defined yet.")
        else:
            # Position in the target's history we want to read.
            pos = int(i - self.delay.execute(t))
            if pos >= 0:
                try:
                    v = self.event.value_at_pos(pos)
                except IndexError:
                    # The target has not produced that position yet: force one
                    # execution and retry.
                    self.event.execute(t)
                    v = self.event.value_at_pos(pos)
                except Exception:
                    raise ValueError(f'Invalid position for past event: {pos}.')

                return v
            else:
                # Before the start of the series: report the fallback value.
                # NOTE(review): reads .constant directly, which assumes
                # undefined_value was wrapped as a constant parameter -- confirm.
                return self.undefined_value.constant
class ReferenceEvent(PastEvent):
    """A PastEvent with zero delay: mirrors the current value of another event."""

    def __init__(self, undefined_value=0.0, refers_to=None, name=None, parallel_events=None, push_down=True,
                 allow_learning=False):
        # A reference is simply a "past" event whose delay is always zero.
        super().__init__(delay=0,
                         undefined_value=undefined_value,
                         refers_to=refers_to,
                         name=name,
                         parallel_events=parallel_events,
                         push_down=push_down,
                         allow_learning=allow_learning)
class SeasonalEvent(Event):
    """
    Gates an underlying event by calendar components: new values are produced
    only when the current timestamp matches every specified constraint (year,
    month, day, hour, minute, second, microsecond, weekday/weekend). Outside
    the season, the last accepted value or the default is reported instead.
    """

    def __init__(self, event, default=0, fill_with_previous=True,
                 year:int=None, month:int=None, day:int=None,
                 hour:int=None, minute:int=None, second:int=None, microsecond:int=None,
                 is_weekday=None, is_weekend:bool=None,
                 name=None, parallel_events=None, push_down=True,
                 allow_learning=True):
        name = self._default_name_if_none(name)
        self.event = event
        self.default = self._wrapped_param(name, 'default', default)
        # Whether off-season steps repeat the last in-season value (True)
        # rather than falling back to the default (False).
        self.fill_with_previous = fill_with_previous

        # Each calendar constraint is optional; None means "unconstrained".
        self.year = self._wrapped_param(name, 'year', year,
                                        require_lower_bound=0.0)
        self.month = self._wrapped_param(name, 'month', month,
                                         require_lower_bound=0.0)
        self.day = self._wrapped_param(name, 'day', day,
                                       require_lower_bound=0.0)
        self.hour = self._wrapped_param(name, 'hour', hour,
                                        require_lower_bound=0.0)
        self.minute = self._wrapped_param(name, 'minute', minute,
                                          require_lower_bound=0.0)
        self.second = self._wrapped_param(name, 'second', second,
                                          require_lower_bound=0.0)
        self.microsecond = self._wrapped_param(name, 'microsecond', microsecond,
                                               require_lower_bound=0.0)

        # Weekday flags become lists of acceptable pandas weekday numbers
        # (Monday=0 ... Sunday=6).
        if is_weekday is not None and is_weekday:
            self.weekday = self._wrapped_param(name, 'weekday', [0,1,2,3,4])
        else:
            self.weekday = None

        if is_weekend is not None and is_weekend:
            self.weekend = self._wrapped_param(name, 'weekend', [5, 6])
        else:
            self.weekend = None

        # what was the last value generated by the underlying event that was not blocked?
        self._last_accepted_value = None

        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # aux_match threads the constraint checks in continuation style: each
        # call checks one constraint and, if satisfied, returns the result of
        # the remaining checks (cont); any failure short-circuits to False.
        def aux_match(a, b, cont):
            if a is not None:
                if is_sequence(a):
                    # OR semantics: any list element has the desired value?
                    for x in a:
                        if x.execute(t) == b:
                            return cont
                    # if we got here, no list element matched the desired one
                    return False
                elif a.execute(t) == b:
                    return cont
                else:
                    return False
            else:
                # if no constraint was specified, just go on
                return cont

        # All specified constraints must hold (AND semantics across fields).
        is_season = \
            aux_match(self.year, t.year,
                      aux_match(self.month, t.month,
                                aux_match(self.day, t.day,
                                          aux_match(self.hour, t.hour,
                                                    aux_match(self.minute, t.minute,
                                                              aux_match(self.second, t.second,
                                                                        aux_match(self.microsecond, t.microsecond,
                                                                                  aux_match(self.weekday, t.weekday(),
                                                                                            aux_match(self.weekend, t.weekday(),
                                                                                                      True)))))))))

        if is_season:
            # In season: produce a fresh value and remember it.
            res = self.event.execute(t)
            self._last_accepted_value = res
        else:
            # Off season: repeat the last accepted value or use the default.
            if self.fill_with_previous and self._last_accepted_value is not None:
                res = self._last_accepted_value
            elif self.fill_with_previous and (self._last_accepted_value is None) and (self.default is not None):
                res = self.default.execute(t)
            elif (not self.fill_with_previous) and (self.default is not None):
                res = self.default.execute(t)
            else:
                raise ValueError("Either a base value or a previous value for the underlying event must be available,"
                                 "but both were None.")

        return res
class Choice(Event):
    """Randomly selects one of several events to execute at each step."""

    def __init__(self, events, fix_choice=False, name=None, parallel_events=None, push_down=True, allow_learning=True):
        """
        :param events: When given a list of events, a uniform distribution will be assumed. When given a dict,
                       keys are events and values are their probabilities according to a categorical distribution.
                       The choice can be taken either once or at all executions.
        :param fix_choice: Whether the choice, once made, should be permanent.
        :param name:
        :raises TypeError: If ``events`` is neither a list nor a dict.
        """
        self._fix_choice = fix_choice
        self._fixed_choice = None  # index of the permanent choice, if any

        if isinstance(events, dict):
            # Categorical distribution: keys are events, values are probabilities.
            self.events = list(events.keys())
            self.probs = list(events.values())
            self.events_probs = events
        elif isinstance(events, list):
            # Uniform distribution over the given events.
            self.events = events
            uni_prob = 1.0/len(events)
            self.events_probs = {e: uni_prob for e in events}
            self.probs = list(self.events_probs.values())
        else:
            # Fix: previously an unsupported type fell through silently, leaving
            # self.events/self.probs unset and failing later with an obscure
            # AttributeError. Fail fast instead.
            raise TypeError("events must be a list of events or a dict mapping events to probabilities.")

        name = self._default_name_if_none(name)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def sample_from_definition(self, t):
        raise NotImplementedError("Choice cannot be sampled directly from.")

    def sample_from_learned_distribution(self):
        raise NotImplementedError("Choice cannot be sampled directly from.")

    def choose(self, t):
        """Pick one of the underlying events, honoring a fixed choice if set."""
        # choose
        if self._fixed_choice is not None:
            idx = self._fixed_choice
        else:
            idx = choice(range(0, len(self.events)), p=self.probs)

        # fix choice if necessary
        if self._fix_choice and self._fixed_choice is None:
            self._fixed_choice = idx

        return self.events[idx]

    def _execute(self, t, i):
        return self.choose(t).execute(t)

    def reset(self):
        super().reset()
        self._fixed_choice = None
class Piecewise(Event):
    """
    Runs several events in sequence over time: the current event is active
    until the corresponding separator step is reached, after which the next
    event takes over.
    """

    def __init__(self, events: list, t_separators:list=None,
                 name=None, parallel_events=None, push_down=True,
                 allow_learning=True):
        """
        :param events: The events to run, in order.
        :param t_separators: Values/events giving the step positions at which
                             the next event becomes active. If omitted, only
                             the first event is ever used.
        """
        self.events = events
        # Fix: t_separators=None previously crashed _execute with len(None);
        # normalize to an empty list.
        self.t_separators = t_separators if t_separators is not None else []
        self._cur_separator_pos = 0
        self._cur_separator = None
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Advance to the next piece once the current separator step is reached.
        if self._cur_separator_pos < len(self.t_separators):
            self._cur_separator = self.t_separators[self._cur_separator_pos]
            if i >= self._value_or_execute_if_event(f'cur_separator_pos_{self._cur_separator_pos}', self._cur_separator,
                                                    t):
                self._cur_separator_pos += 1

        return self.events[self._cur_separator_pos].execute(t)

    def reset(self):
        super().reset()
        self._cur_separator_pos = 0
        self._cur_separator = None
# TODO class RandomPiecewise(Event):
#
# def __init__(self, events: list, t_separators:list=None, spacing_mean:float=None, spacing_sd:float=None, mode: str='fixed',
# name=None, parallel_events=None, random_seed=None):
class TemporarySwitch(Event):
    """
    Temporarily switches results from a main event to an alternative one. Once a switch is made, it remains in force
    for a determined number of steps.
    """

    def __init__(self, main_event, alternative_event, switch_duration=1, switch_probability=0.5,
                 name=None, parallel_events=None, push_down=True,
                 allow_learning=True):
        # Both events and the switching parameters are wrapped so they may be
        # constants or full events.
        self.main_event = self._wrapped_param(name, 'main_event', main_event)
        self.alternative_event = self._wrapped_param(name, 'alternative_event', alternative_event)
        self.switch_probability = self._wrapped_param(name, 'switch_probability', switch_probability,
                                                      require_lower_bound=0.0,
                                                      require_upper_bound=1.0)
        self.switch_duration = self._wrapped_param(name, 'switch_duration', switch_duration,
                                                   require_lower_bound=0.0)

        self.is_switched = False  # always begin with main_event
        self.unswitch_step = None  # step index at which the switch expires

        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        if not self.is_switched:
            # Draw a Bernoulli trial each step to decide whether to switch.
            switch_probability_event = BernoulliEvent(self.switch_probability.execute(t))
            if switch_probability_event.execute(t) == 1:
                self.is_switched = True
                self.unswitch_step = i + self.switch_duration.execute(t)

            return self.main_event.execute(t)
        else:  # is switched
            # NOTE(review): on the very step the switch expires, the alternative
            # event is still returned once before reverting -- confirm intended.
            if i >= self.unswitch_step:
                self.is_switched = False
                self.unswitch_step = None

            return self.alternative_event.execute(t)

    def reset(self):
        super().reset()
        self.is_switched = False
        self.unswitch_step = None
class Replicated(Event):
    """
    Continuously runs a clone of the underlying event for a certain specified duration, re-cloning the event
    and restarting the process at the end of each period.
    """

    def __init__(self, event, duration_per_replication, max_replication=3,
                 name=None, parallel_events=None, push_down=True, allow_learning=True):
        """
        :param event: The event to replicate.
        :param duration_per_replication: How many steps each replica runs before the next clone takes over.
        :param max_replication: Number of clones prepared up-front; after the last one, it keeps running.
        """
        # WARNING: events must be pre-instantiated before use because, when learning is performed,
        # all relevant variables must already exist, otherwise the optimizer will not have them
        # when the causal parameters closure is calculated in the beginning of the optimization.
        self.events = [event.clone() for i in range(0, max_replication)]

        self.duration_per_replication = self._wrapped_param(name, 'duration_per_replication', duration_per_replication,
                                                            require_lower_bound=1.0)
        self.max_replication = max_replication

        self.current_event_pos = 0
        self.replicate_at_step = None

        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Should we replicate?
        if self.replicate_at_step is None:
            self.current_event_pos = 0
            self.replicate_at_step = self.duration_per_replication.execute(t)
        elif self.replicate_at_step == i:
            # Fix: clamp to the last valid index (max_replication - 1). The
            # previous bound of max_replication overran self.events (which has
            # exactly max_replication elements) by one, raising IndexError.
            self.current_event_pos = min(self.max_replication - 1, self.current_event_pos + 1)
            self.replicate_at_step = self.replicate_at_step + self.duration_per_replication.execute(t)

        return self.events[self.current_event_pos].execute(t)

    def reset(self):
        super().reset()
        self.current_event_pos = 0
        self.replicate_at_step = None
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,946 | paulosalem/time-blender | refs/heads/master | /tests/test_composition.py | import unittest
import pandas as pd
from time_blender.coordination_events import OnceEvent, Choice
from time_blender.core import Generator, LambdaEvent, ConstantEvent
from time_blender.deterministic_events import WalkEvent, WaveEvent
from time_blender.random_events import NormalEvent, UniformEvent
class TestDeterministicEvents(unittest.TestCase):
    """Composition tests: events combined by arithmetic, lambdas and random
    choices must generate series covering the whole requested date range."""

    def setUp(self):
        # Fixed three-month daily window shared by all tests.
        self.begin_date = pd.Timestamp(2018, 1, 1)
        self.end_date = pd.Timestamp(2018, 3, 30)
        self.days = (self.end_date - self.begin_date).days + 1

    def test_composition_1(self):
        """A walk driven by normal increments yields one value per day."""
        e1 = NormalEvent(0.0, 2)
        rw = WalkEvent(e1, initial_pos=0.0)

        g = Generator(start_date=self.begin_date, end_date=self.end_date)
        data = g.generate({'rw': rw})
        print(data)
        self.assertEqual(len(data), self.days)

    def test_composition_2(self):
        """Adding a noise event to a wave event still covers the full range."""
        norm = NormalEvent(0, 1)
        we = WaveEvent(10, 3)
        compos = norm + we

        data = Generator(start_date=self.begin_date, end_date=self.end_date).generate([compos])
        self.assertEqual(len(data), self.days)

    def test_composition_3(self):
        """A LambdaEvent that switches sub-models at a random step covers the full range."""
        const = ConstantEvent(4)
        norm = NormalEvent(0, 1)
        we = WaveEvent(30, 3)
        # switch point is drawn once and then memoized
        t_change = OnceEvent(UniformEvent(0, 90))

        compos1 = we + norm
        compos2 = const + norm

        def aux(t, i, memory, sub_events):
            # before the switch point use compos1, afterwards compos2
            if i <= sub_events['t_change'].execute(t):
                return sub_events['compos1'].execute(t)
            else:
                return sub_events['compos2'].execute(t)

        e = LambdaEvent(aux, sub_events={'t_change': t_change, 'compos1': compos1, 'compos2': compos2})

        data = Generator(start_date=self.begin_date, end_date=self.end_date).generate([e])
        df = data
        self.assertEqual(len(df), self.days)

    def test_composition_4(self):
        """A random Choice between two events covers the full range."""
        const = ConstantEvent(4)
        we = WaveEvent(30, 3)

        compos1 = we
        compos2 = const

        chc = Choice([compos1, compos2])
        data = Generator(start_date=self.begin_date, end_date=self.end_date).generate([chc])
        self.assertEqual(len(data), self.days)
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,947 | paulosalem/time-blender | refs/heads/master | /time_blender/deterministic_events.py | from time_blender.core import Event, ConstantEvent
import numpy as np
class IdentityEvent(Event):
    """Pass-through event: returns exactly what its underlying event produces."""

    def __init__(self, event, name=None, parallel_events=None, push_down=True, allow_learning=True):
        self.event = event
        name = self._default_name_if_none(name)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Delegate to the wrapped event without any transformation.
        return self.event.execute(t)
class ClipEvent(Event):
    """
    Clips the output of an underlying event to optional minimum/maximum bounds.
    """

    def __init__(self, event, max_value=None, min_value=None, name=None, parallel_events=None, push_down=True,
                 allow_learning=True):
        name = self._default_name_if_none(name)
        self.event = event
        # Bounds are optional; wrapped so they may be constants or events.
        self.max_value = self._wrapped_param(name, 'max_value', max_value)
        self.min_value = self._wrapped_param(name, 'min_value', min_value)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        v = self.event.execute(t)
        # NOTE(review): when clipping, .constant is read from the wrapped bound,
        # which assumes the bound is a constant parameter -- confirm.
        if self.max_value is not None and v > self._value_or_execute_if_event('max_value', self.max_value, t):
            v = self.max_value.constant
        elif self.min_value is not None and v < self._value_or_execute_if_event('min_value', self.min_value, t):
            v = self.min_value.constant

        return v
class ClockEvent(Event):
    """Emits the passage of time itself: either the step index or the timestamp."""

    def __init__(self, as_ticks=True, name=None, parallel_events=None, push_down=True, allow_learning=True):
        self.as_ticks = as_ticks
        name = self._default_name_if_none(name)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Integer tick count when as_ticks, otherwise the raw timestamp.
        return i if self.as_ticks else t
class WaveEvent(Event):
    """
    A sinusoidal event: advances its phase by 2*pi/period at each execution and
    returns amplitude * sin(phase).
    """

    def __init__(self, period, amplitude, pos=0.0, name=None, parallel_events=None, push_down=True,
                 allow_learning=True):
        """
        :param period: Number of steps per full cycle.
        :param amplitude: Peak value of the wave.
        :param pos: Initial phase (radians).
        """
        name = self._default_name_if_none(name)
        self.period = self._wrapped_param(name, 'period', period)
        self.amplitude = self._wrapped_param(name, 'amplitude', amplitude)
        self.pos = self._wrapped_param(name, 'pos', pos)
        self.ini_pos = pos  # remembered so reset() can restore the phase

        # Fix: __init__ used to call super().__init__ twice -- once before the
        # parameters existed and a second time without forwarding allow_learning.
        # A single, fully-forwarded call is correct.
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Advance the phase and keep it within [0, 2*pi).
        step = 2 * np.pi / self._value_or_execute_if_event('period', self.period, t)
        self.pos.constant += step
        self.pos.constant = self.pos.constant % (2*np.pi)

        return self._value_or_execute_if_event('amplitude', self.amplitude, t) * np.sin(self.pos.constant)

    def reset(self):
        self.pos.constant = self.ini_pos
        super().reset()
class WalkEvent(Event):
    """A walk: each tick advances the current position by `step`, which may
    itself be an event (e.g. a random step makes this a random walk).

    If capture_parent_value is True, a value pushed down from a parent event
    resynchronizes the walk's position.
    """

    def __init__(self, step, initial_pos=None, name=None, parallel_events=None, push_down=True, allow_learning=True,
                 capture_parent_value=True):
        # Consistency fix: default the name before wrapping parameters, as every
        # other Event subclass in this package does.
        name = self._default_name_if_none(name)
        self.step = self._wrapped_param(name, 'step', step)
        if initial_pos is not None:
            self.pos = self._wrapped_param(name, 'pos', initial_pos)
            if not isinstance(self.pos, ConstantEvent):
                raise ValueError("The initial position must be a constant.")
            self.ini_pos = self.pos.clone()  # pristine copy for reset()
        else:
            self.pos = ConstantEvent(0)
            self.ini_pos = None
        self.capture_parent_value = capture_parent_value
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        # Advance the walk by this tick's step value.
        self.pos.constant = self.pos.constant + self._value_or_execute_if_event('step', self.step, t)
        return self.pos.constant

    def _capture_push_down_value(self, t, parent_value):
        # Optionally adopt the parent's value as the new current position.
        if self.capture_parent_value:
            self.pos.constant = parent_value
        # else, ignore parent value

    def reset(self):
        # Restore the initial position (fresh clone so later mutation cannot
        # corrupt the stored original).
        if self.ini_pos is not None:
            self.pos = self.ini_pos.clone()
        else:
            self.pos = ConstantEvent(0)
        super().reset()
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,948 | paulosalem/time-blender | refs/heads/master | /tests/test_coordination_events.py | import unittest
import pandas as pd
import numpy as np
import math
from tests.common import AbstractTest
from time_blender.coordination_events import Piecewise, Choice, SeasonalEvent, OnceEvent, PastEvent, \
ConcludingValueEvent
from time_blender.core import Generator, ConstantEvent, LambdaEvent
from time_blender.deterministic_events import WalkEvent
from time_blender.random_events import NormalEvent, UniformEvent, PoissonEvent
class TestOnceEvent(AbstractTest):
    """OnceEvent must sample its inner event a single time and repeat that value."""

    def test(self):
        once = OnceEvent(NormalEvent(0, 1))
        data = self.common_model_test(once, print_data=True)

        # All values are the same, because the first value was recorded by Once
        # and just repeated later; diff() of a constant series is all zeros.
        # (Fix: assertEquals is a deprecated alias, removed in Python 3.12.)
        self.assertEqual(set(list(data.diff().dropna().values[:, 0])), {0.0})
class TestPastEvent(AbstractTest):
    """PastEvent must reproduce the referenced walk's values 3 steps behind."""

    def test(self):
        # NOTE(review): WalkEvent(0, 10) passes step=0, initial_pos=10 positionally,
        # while the comment below assumes a step of 10 -- confirm the argument order.
        walk = WalkEvent(0, 10)
        past_walk = PastEvent(3, refers_to=walk, parallel_events=[walk])
        data_past_walk = self.common_model_test(past_walk)
        print(walk._generated_values)

        walk.reset()
        data_walk = self.common_model_test(walk)

        # The present is greater than the past.
        self.assertTrue((data_walk.iloc[:, 0] > data_past_walk.iloc[:, 0]).all())

        # The difference is 30, since the walk steps 10 and the past event is 3 steps behind.
        # (Fix: assertEquals is a deprecated alias, removed in Python 3.12.)
        self.assertEqual(set((data_walk.iloc[10:, 0] - data_past_walk.iloc[10:, 0]).values), {30.0})
class TestConcludingValueEvent(AbstractTest):
    """Smoke test: a ConcludingValueEvent composed with a constant generates without error."""

    def test(self):
        composed = ConcludingValueEvent() + ConstantEvent(10)
        self.common_model_test(composed, n=1, print_data=True)
class TestSeasonalEvent(AbstractTest):
    """Exercises SeasonalEvent under several seasonal masks."""

    def test(self):
        # Active only during September 2018, falling back to base=0 otherwise.
        monthly_data = self.common_model_test(SeasonalEvent(NormalEvent(0, 1), base=0, year=2018, month=9,
                                                            day=None, hour=None, minute=None, second=None,
                                                            name=None, parallel_events=None), print_data=False)

        # Active on weekdays only, with an explicit base value.
        weekday_data = self.common_model_test(SeasonalEvent(NormalEvent(0, 1), base=0, year=None, month=None,
                                                            day=None, hour=None, minute=None, second=None,
                                                            is_weekday=True,
                                                            name=None, parallel_events=None), print_data=False)

        # Active on weekdays only, without a base value.
        baseless_data = self.common_model_test(SeasonalEvent(NormalEvent(0, 1), base=None, year=None, month=None,
                                                             day=None, hour=None, minute=None, second=None,
                                                             is_weekday=True,
                                                             name=None, parallel_events=None), print_data=False)

        print(weekday_data)
        print(baseless_data)
class TestChoice(AbstractTest):
    """Smoke test: Choice over two normal distributions generates without error."""

    def test(self):
        alternatives = [NormalEvent(0, 1), NormalEvent(0, 4)]
        self.common_model_test(Choice(alternatives))
class TestPiecewise(AbstractTest):
    """Smoke test: Piecewise switches between two regimes at a random separator."""

    def test(self):
        regimes = [NormalEvent(100, 10), NormalEvent(0, 1)]
        separators = [NormalEvent(30, 20)]
        self.common_model_test(Piecewise(regimes, t_separators=separators), n=10)
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,949 | paulosalem/time-blender | refs/heads/master | /setup.py | from setuptools import setup, find_packages
# Packaging metadata for the time-blender distribution (PyPI name 'time-blender').
setup(name='time-blender',
      version='0.3.0',
      description='A compositional time series generator.',
      url='https://github.com/paulosalem/time-blender',
      author='Paulo Salem',
      author_email='paulosalem@paulosalem.com',
      license='MIT',
      packages=find_packages(),  # auto-discover every package under the project root
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent"],
      include_package_data=True,  # ship files listed in MANIFEST.in alongside the code
      install_requires=[
          'pandas', 'numpy', 'clize', 'hyperopt', 'sigtools', 'matplotlib', 'scikit-learn'
      ],
      scripts=['bin/time_blender'])  # installs the command-line entry point
59,950 | paulosalem/time-blender | refs/heads/master | /time_blender/config.py |
# Probabilistic programming configurations
PP_CONFIG = {
    'max_time_step': 1000000  # upper bound on time steps -- TODO confirm exact use at call sites
}

# Learning (parameter-fitting) configurations
LEARNING_CONFIG = {
    'large_default_value': 1000000000  # presumably a stand-in for "unbounded" -- verify against learners
}

## TODO other general configurations (e.g., default upper and lower bounds, etc.)
59,951 | paulosalem/time-blender | refs/heads/master | /time_blender/random_events.py | import numpy as np
from time_blender.core import Event
from scipy.stats import norm, poisson, uniform, bernoulli
from time_blender.deterministic_events import ClipEvent
class UniformEvent(Event):
    """Samples uniformly from [low, high); both bounds may themselves be events."""

    def __init__(self, low=-1.0, high=1.0, name=None, parallel_events=None, push_down=True, allow_learning=True):
        name = self._default_name_if_none(name)
        self.low = self._wrapped_param(name, 'low', low)
        self.high = self._wrapped_param(name, 'high', high)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        lower = self._value_or_execute_if_event('low', self.low, t)
        upper = self._value_or_execute_if_event('high', self.high, t)
        # scipy parameterizes uniform by loc/scale; clamp the scale so a
        # degenerate interval (upper < lower) cannot make rvs fail.
        return uniform.rvs(loc=lower, scale=max(0, upper - lower))
class BernoulliEvent(Event):
    """Draws 0/1 with success probability p; p may itself be an event bounded to [0, 1]."""

    def __init__(self, p=0.5, name=None, parallel_events=None, push_down=True, allow_learning=True):
        name = self._default_name_if_none(name)
        self.p = self._wrapped_param(name, 'p', p, require_lower_bound=0.0, require_upper_bound=1.0)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        success_prob = self._value_or_execute_if_event('p', self.p, t)
        return bernoulli.rvs(p=success_prob)
class NormalEvent(Event):
    """Samples from a normal distribution; mean and std may themselves be events."""

    def __init__(self, mean=0.0, std=1.0, name=None, parallel_events=None, push_down=True, allow_learning=True):
        name = self._default_name_if_none(name)
        self.mean = self._wrapped_param(name, 'mean', mean)
        self.std = self._wrapped_param(name, 'std', std, require_lower_bound=0.0)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        mu = self._value_or_execute_if_event('mean', self.mean, t)
        sigma = self._value_or_execute_if_event('std', self.std, t)
        return norm.rvs(loc=mu, scale=sigma)
class PoissonEvent(Event):
    """Samples from a Poisson distribution; the rate (lamb) may itself be an event."""

    def __init__(self, lamb=1, name=None, parallel_events=None, push_down=True, allow_learning=True):
        name = self._default_name_if_none(name)
        self.lamb = self._wrapped_param(name, 'lamb', lamb, require_lower_bound=0.0)
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        rate = self._value_or_execute_if_event('lamb', self.lamb, t)
        return poisson.rvs(mu=rate)
class Resistance(Event):
    """Wraps an event and probabilistically "resists" values entering a band,
    mimicking support/resistance levels.

    With direction='top', values in [begin, end) are pushed downward with the
    given probability; with direction='bottom', values in (end, begin] are
    pushed upward. The push magnitude is abs(value) * resistance_strength.
    """

    def __init__(self, event, resistance_value_begin, resistance_value_end,
                 resistance_probability, resistance_strength_event, direction, name=None, parallel_events=None,
                 push_down=True, allow_learning=True):
        # Consistency fix: default the name before wrapping parameters, as the
        # other Event subclasses in this module do.
        name = self._default_name_if_none(name)
        self.event = event
        self.resistance_value_begin = self._wrapped_param(name, 'resistance_value_begin', resistance_value_begin)
        self.resistance_value_end = self._wrapped_param(name, 'resistance_value_end', resistance_value_end)
        self.resistance_probability = self._wrapped_param(name, 'resistance_probability',
                                                          resistance_probability,
                                                          require_lower_bound=0.0, require_upper_bound=1.0)
        self.resistance_strength_event = self._wrapped_param(name, 'resistance_strength_event',
                                                             resistance_strength_event,
                                                             require_lower_bound=0.0, require_upper_bound=1.0)
        self.direction = direction
        super().__init__(name, parallel_events, push_down, allow_learning)

    def _execute(self, t, i):
        value = self.event.execute(t)

        # Decide whether to resist. Two independent draws are made regardless of
        # direction, preserving the original RNG consumption pattern.
        rand_top = uniform.rvs()     # [0, 1]
        rand_bottom = uniform.rvs()  # [0, 1]
        if self.direction == 'top' and \
           self.resistance_value_begin.constant <= value < self.resistance_value_end.constant and \
           self.resistance_probability.constant > rand_top:
            resist = True
        elif self.direction == 'bottom' and \
             self.resistance_value_end.constant < value <= self.resistance_value_begin.constant and \
             self.resistance_probability.constant > rand_bottom:
            resist = True
        else:
            resist = False

        # apply resistance, if pertinent
        if resist:
            resistance_factor = self.resistance_strength_event.execute(t)
            assert 0.0 <= resistance_factor, f"Resistance factor must be >= 0.0, but was {resistance_factor}."
            if self.direction == 'top':
                # pushes downward
                return value - abs(value) * resistance_factor
            else:
                # pushes upward
                return value + abs(value) * resistance_factor
        else:
            # no resistance applied
            return value
class TopResistance(Resistance):
    """Resistance applied from above: values rising into the band get pushed down."""

    def __init__(self, event, resistance_value_begin, resistance_value_end, resistance_probability,
                 resistance_strength_event, name=None, parallel_events=None, push_down=True, allow_learning=True):
        super().__init__(event=event, direction='top',
                         resistance_value_begin=resistance_value_begin,
                         resistance_value_end=resistance_value_end,
                         resistance_probability=resistance_probability,
                         resistance_strength_event=resistance_strength_event,
                         name=name, parallel_events=parallel_events,
                         push_down=push_down, allow_learning=allow_learning)
class BottomResistance(Resistance):
    """Resistance applied from below: values falling into the band get pushed up."""

    def __init__(self, event, resistance_value_begin, resistance_value_end, resistance_probability,
                 resistance_strength_event, name=None, parallel_events=None, push_down=True, allow_learning=True):
        super().__init__(event=event, direction='bottom',
                         resistance_value_begin=resistance_value_begin,
                         resistance_value_end=resistance_value_end,
                         resistance_probability=resistance_probability,
                         resistance_strength_event=resistance_strength_event,
                         name=name, parallel_events=parallel_events,
                         push_down=push_down, allow_learning=allow_learning)
def wrap_in_resistance(event, top_resistance_levels=(), bottom_resistance_levels=(),
                       top_resistance_strength_event=ClipEvent(NormalEvent(0.02, 0.01), min_value=0.0),
                       bottom_resistance_strength_event=ClipEvent(NormalEvent(0.02, 0.01), min_value=0.0),
                       tolerance=5, top_resistance_probability=0.5, bottom_resistance_probability=0.5):
    """Recursively wraps `event` in one Top/BottomResistance layer per level.

    Top levels are consumed first, then bottom levels; each level gets a band
    of width `tolerance` ending (top) or starting (bottom) at the level value.

    Fix: the level-list defaults were mutable (`[]`); immutable tuples avoid the
    shared-mutable-default pitfall while supporting the same len/index/slice use.
    NOTE(review): the default strength events are constructed once at import time
    and shared by every call -- presumably intentional; confirm if per-call
    independence is desired.
    """
    if len(top_resistance_levels) > 0:
        level = top_resistance_levels[0]
        res = TopResistance(event,
                            resistance_value_begin=level - tolerance,
                            resistance_value_end=level,
                            resistance_probability=top_resistance_probability,
                            resistance_strength_event=top_resistance_strength_event)

        # recursive step: consume the remaining top levels
        res = wrap_in_resistance(res,
                                 top_resistance_levels=top_resistance_levels[1:],
                                 bottom_resistance_levels=bottom_resistance_levels,
                                 top_resistance_strength_event=top_resistance_strength_event,
                                 bottom_resistance_strength_event=bottom_resistance_strength_event,
                                 tolerance=tolerance,
                                 top_resistance_probability=top_resistance_probability,
                                 bottom_resistance_probability=bottom_resistance_probability)

    elif len(bottom_resistance_levels) > 0:
        level = bottom_resistance_levels[0]
        res = BottomResistance(event,
                               resistance_value_begin=level + tolerance,
                               resistance_value_end=level,
                               resistance_probability=bottom_resistance_probability,
                               resistance_strength_event=bottom_resistance_strength_event)

        # recursive step: consume the remaining bottom levels
        res = wrap_in_resistance(res,
                                 top_resistance_levels=top_resistance_levels,
                                 bottom_resistance_levels=bottom_resistance_levels[1:],
                                 top_resistance_strength_event=top_resistance_strength_event,
                                 bottom_resistance_strength_event=bottom_resistance_strength_event,
                                 tolerance=tolerance,
                                 top_resistance_probability=top_resistance_probability,
                                 bottom_resistance_probability=bottom_resistance_probability)
    else:
        # recursion base: no levels left, return the (possibly wrapped) event
        res = event

    return res
# TODO bubble
# TODO mean-reverting models
| {"/tests/test_random_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/models.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py", "/time_blender/util.py", "/time_blender/cli.py"], "/tests/test_models.py": ["/time_blender/core.py", "/time_blender/random_events.py", "/tests/common.py", "/time_blender/models.py"], "/tests/common.py": ["/time_blender/core.py"], "/time_blender/core.py": ["/time_blender/config.py", "/time_blender/util.py"], "/tests/test_core.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/models.py", "/time_blender/random_events.py"], "/tests/test_util.py": ["/time_blender/util.py"], "/time_blender/cli.py": ["/time_blender/util.py"], "/time_blender/coordination_events.py": ["/time_blender/core.py", "/time_blender/config.py", "/time_blender/random_events.py", "/time_blender/util.py"], "/tests/test_composition.py": ["/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/deterministic_events.py": ["/time_blender/core.py"], "/tests/test_coordination_events.py": ["/tests/common.py", "/time_blender/coordination_events.py", "/time_blender/core.py", "/time_blender/deterministic_events.py", "/time_blender/random_events.py"], "/time_blender/random_events.py": ["/time_blender/core.py", "/time_blender/deterministic_events.py"], "/tests/test_deterministic_events.py": ["/tests/common.py", "/time_blender/core.py", "/time_blender/deterministic_events.py"]} |
59,952 | paulosalem/time-blender | refs/heads/master | /tests/test_deterministic_events.py | import unittest
import pandas as pd
import numpy as np
from tests.common import AbstractTest
from time_blender.core import Generator, ConstantEvent
from time_blender.deterministic_events import ClockEvent, WaveEvent, WalkEvent
class TestClockEvent(AbstractTest):
    """The clock (in tick mode) must produce a strictly increasing integer sequence."""

    def setUp(self):
        super().setUp()
        self.event = ClockEvent()
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        data = self.generator.generate(self.event)
        values = data.values
        # Values must increase by exactly 1 each tick.
        # (Fix: assertEquals is a deprecated alias, removed in Python 3.12.)
        for i in range(1, len(values)):
            self.assertEqual(values[i], values[i - 1] + 1)
class TestWaveEvent(AbstractTest):
    """A sine wave of amplitude 100 must center on zero and reach near both extremes."""

    def setUp(self):
        super().setUp()
        self.event = WaveEvent(30, 100)  # period of 30 ticks, amplitude 100
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        data = self.generator.generate(self.event)
        values = data.values
        print(np.mean(values))
        self.assertClose(np.mean(values), 0.0, abs_tol=2.0)  # centers on zero
        self.assertGreater(len([v for v in values if v > 90]), 0)   # goes up near +amplitude
        # Fix: the "goes down" check compared against +90, which a zero-centered
        # wave satisfies trivially; it must look for values near -amplitude.
        self.assertGreater(len([v for v in values if v < -90]), 0)  # goes down near -amplitude
class TestConstantEvent(AbstractTest):
    """A constant event must emit the same value at every tick."""

    def setUp(self):
        super().setUp()
        self.event = ConstantEvent(30)
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        data = self.generator.generate(self.event)
        values = data.values
        # (Fix: assertEquals is a deprecated alias, removed in Python 3.12.)
        for v in values:
            self.assertEqual(v, 30)
class TestWalkEvent(AbstractTest):
    """Smoke test for WalkEvent: step of 10 starting from position 0."""

    def setUp(self):
        super().setUp()
        self.event = WalkEvent(10, initial_pos=0)
        self.generator = Generator(start_date=self.start_date, end_date=self.end_date)

    def test_execute(self):
        # NOTE(review): as visible here the test only prints the generated values
        # and asserts nothing (possibly truncated) -- consider asserting the
        # walk's constant increment of 10 per tick.
        data = self.generator.generate(self.event)
        values = data.values
        print(values)
59,953 | ugneokmanaite/FileHandling_ClassDemo | refs/heads/master | /requests.py | from text_file import TextFileHandling
# Demo driver for TextFileHandling. The target file is configured in exactly
# one place, so swapping files only ever requires a single change here.
demo_file = "modified.txt"

handler = TextFileHandling(demo_file)

# Earlier exercises (read_text, write_text_file, the `with`-based variants and
# the os-module experiments) have been retired; only the exception demo remains.
print(handler.playing_with_exception())
59,954 | ugneokmanaite/FileHandling_ClassDemo | refs/heads/master | /text_file.py | class TextFileHandling:
    def __init__(self, file_path, text_storage=None):
        """Remember which file to operate on and optionally seed the text buffer.

        :param file_path: path of the file this handler reads.
        :param text_storage: initial contents of the text buffer (None until populated).
        """
        self.file_path = file_path
        self.text_storage = text_storage

    # This demo class illustrates text-file handling several ways: explicit
    # open/read/close, the `with` context-manager form, a few `os`-module
    # experiments, and basic try/except/else/finally handling.
def read_text(self):
# read file
# self.text_storage = file.read()
# self.text_storage = file.read(3) - #will print 3 first characters
# self.text_storage = file.readline() # will read first line from current position
#self.text_storage = file.readline()
# self.file.seek(0)
# self.file.seek(0)
# self.text_storage = file.readlines() # will read the rest of the lines from current position
#file.seek(0)
#file.seek(3,2)
# self.text_storage = file.readline()
# print(file.tell()) # the pointer is at the current position and will start reading from there
# close file
#file.close()
return self.text_storage
    def write_text_file(self):
        """Demonstrate write ("w") and append-and-read ("a+") modes on writer.txt.

        Creates/overwrites writer.txt, appends a second line, reads everything
        back into the buffer, and returns it. Also prints the file object's
        closed/name/mode attributes to illustrate that they remain queryable
        after close().
        """
        # "w" creates the file or truncates it if it already exists
        file = open("writer.txt", "w")
        file.write("My first python created file")
        file.close()
        # file = open("writer.txt", "a") #append mode; a+ means append and read
        file = open("writer.txt", "a+") # append and read
        file.write("\n I am overriding the file")
        file.seek(0)  # rewind so the read below starts at the beginning
        self.text_storage = file.read() # storing what I read from the file to the instance variable
        file.close()
        print(file.closed) # gives me the status of closure, # true because its closed
        print(file.name) # which file were working on currently
        print(file.mode)  # the mode the file was last opened with ("a+")
        return self.text_storage
def read_text_file_using_with(self):
# to reduce the overhead of closing files
# open the file and read it. No overhead of closing
# automatically closes the file and also closes it during times of exception being raised
with open("order.txt", "r") as file:
self.text_storage = file.read()
return self.text_storage
def write_text_file_using(self):
with open("writer.txt", "w+") as file: # w+ means write and read mode
file.write("Using writer with functionality")
print(file.tell()) # just to see where we are at - tells current position of your pointer
file.seek(0) # repositioning the pointer to the beginning
self.text_storage = file.read()
return self.text_storage
    def playing_with_python_OS_module(self):
        """Classroom experiments with the `os` module (cwd, remove, rename, chdir).

        NOTE(review): the chdir below uses a machine-specific absolute Windows
        path and will raise FileNotFoundError on any other machine -- confirm
        this is intentional demo code.
        """
        import os
        print(os.getcwd()) # current working directory
        # os.remove("writer.txt") # this will remove the file
        # os.rmdir() # this will remove the folder
        # print(os.listdir()) # listing files and directories
        # os.rename('order.txt','modified.txt') #renaming
        #os.chdir("C:/Users/Ugne/Desktop/Holiday pictures")
        #print((os.getcwd()))
        # os.mkdir("Ugne") # adding directory
        # os.rmdir("Ugne") # removing directory
        os.chdir("C:/Users/Ugne/PycharmProjects/FileHandling_ClassDemo/")
        print((os.getcwd()))
def playing_with_exception(self):
try:
file = open(self.file_path, 'r')
except Exception as e:
print(e)
print("File is not present")
else:
self.text_storage = file.read()
file.close()
finally:
print("Will run for sure")
return self.text_storage
| {"/requests.py": ["/text_file.py"]} |
59,956 | mbroihier/ml | refs/heads/master | /xorExample.py | import getopt
import sys
import matplotlib.pyplot as plt
import tensorflow as tf
import Model
def main(bypass, cycles):
    '''
    Analysis of XOR learning operation: trains a 2-layer network on the four
    XOR input patterns, prints predictions and weights, saves the model, and
    (unless bypass is set) plots the outputs and per-layer errors.

    :param bypass: when True, skip data collection and all graphic display.
    :param cycles: number of training epochs; defaults to 1250 when None.
    '''
    if cycles is None:
        cycles = 1250
    tf.random.set_seed(1671)  # fixed seed for reproducible weight initialization
    # Create a model with two neural layers. The first layer, "0", has two inputs and three neurons/outputs with a learning factor/rate of 0.5.
    # The second layer, "1", has 3 inputs and one neuron/output with a learning factor/rate of 0.5. The model is stored at xorModel0 and
    # xorModel1 in the current directory. If it exists, the weights stored in the file are used as the initial weights.
    xorModel = Model.Model([(2, 3, 0.5), (3, 1, 0.5)], filePath="xorModel")
    print("Initial Weights for layer 0:\n {}".format(xorModel.layers[0].weights))
    print("Initial Weights for layer 1:\n {}".format(xorModel.layers[1].weights))
    oo = []  # Stores the output of response when input is 0 0
    ol = []  #                 //                           0 1
    lo = []  #                 //                           1 0
    ll = []  #                 //                           1 1
    layer0Error = []  # per-pattern error history of layer 0
    layer1Error = []  # per-pattern error history of layer 1
    # Each epoch presents all four XOR patterns and backpropagates the target.
    for iteration in range(cycles):
        result = xorModel.feedForward([1.0, 1.0])  # input is 1 1 so the expected output is 0
        xorModel.updateWeights([0.0])
        if not bypass:
            ll.append(result)
            layer0Error.append(xorModel.layers[0].error.numpy())
            layer1Error.append(xorModel.layers[1].error.copy())
        result = xorModel.feedForward([1.0, 0.0])  # input is 1 0 so the expected output is 1
        xorModel.updateWeights([1.0])
        if not bypass:
            lo.append(result)
            layer0Error.append(xorModel.layers[0].error.numpy())
            layer1Error.append(xorModel.layers[1].error.copy())
        result = xorModel.feedForward([0.0, 1.0])  # input is 0 1 so the expected output is 1
        xorModel.updateWeights([ 1.0])
        if not bypass:
            ol.append(result)
            layer0Error.append(xorModel.layers[0].error.numpy())
            layer1Error.append(xorModel.layers[1].error.copy())
        result = xorModel.feedForward([0.0, 0.0])  # input is 0 0 so the expected output is 0
        xorModel.updateWeights([0.0])
        if not bypass:
            oo.append(result)
            layer0Error.append(xorModel.layers[0].error.numpy())
            layer1Error.append(xorModel.layers[1].error.copy())
    print("model is predicting:\n {}".format(xorModel.feedForward([1.0, 1.0])))  # lets see how close we got
    print("model is predicting:\n {}".format(xorModel.feedForward([0.0, 0.0])))  # lets see how close we got
    print("model is predicting:\n {}".format(xorModel.feedForward([1.0, 0.0])))  # lets see how close we got
    print("model is predicting:\n {}".format(xorModel.feedForward([0.0, 1.0])))  # lets see how close we got
    print("Final Weights for layer 0:\n {}".format(xorModel.layers[0].weights))
    print("Final Weights for layer 1:\n {}".format(xorModel.layers[1].weights))
    xorModel.storeModel("xorModel")  # store the current results of the model
    if bypass:
        sys.exit(0)  # nothing was collected, so there is nothing to plot
    # Plot the per-pattern outputs over the training cycles.
    fig = plt.figure()  # NOTE: fig is unused; kept for parity with the other plots
    plt.plot(oo)
    plt.plot(ol, "*")
    plt.plot(lo, "^")
    plt.plot(ll, "+")
    plt.title("Output Each Cycle")
    plt.xlabel("Cycle")
    plt.ylabel("Value")
    plt.savefig("LearningXOR.png")
    plt.show()
    # Plot the error history of each layer.
    fig = plt.figure()
    plt.plot(layer0Error)
    plt.title("Layer 0 Error Over Time")
    plt.xlabel("Epoch")
    plt.ylabel("Value")
    plt.show()
    fig = plt.figure()
    plt.plot(layer1Error)
    plt.title("Layer 1 Error Over Time")
    plt.xlabel("Epoch")
    plt.ylabel("Value")
    plt.show()
if __name__ == '__main__':
    # Command-line front end: parse -c <cycles> and --bypass, then run main().
    bypass = False
    cycles = None
    usage = "python3 xorExample.py [-c <number of cycles>] [--bypass]\n bypass - bypass graphic display\n -c - learning cycles to perform"
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:", ["bypass"])
        for opt, arg in opts:
            if opt == "-c":
                print("Setting cycles to {}".format(arg))
                cycles = int(arg)
            elif opt == "--bypass":
                bypass = True
            else:
                # getopt only yields registered options, so this is defensive.
                print("options given were: {} {}".format(opt, arg))
                print(usage)
                sys.exit(-1)
        for arg in args:
            # Positional arguments are not accepted.
            print("invalid argument observed: {}".format(arg))
            print(usage)
            sys.exit(-1)
    except getopt.GetoptError:
        print("GetoptError exception")
        print(usage)
        sys.exit(-1)
    main(bypass, cycles)
59,957 | mbroihier/ml | refs/heads/master | /NeuralLayer.py | import pickle
import tensorflow as tf
class NeuralLayer:
    '''
    Class for defining a layer of neurons that can have multiple inputs and neurons/outputs - a neuron can only
    have one output, but it may serve as input to many neurons in the next layer. The equations used in this
    class assume that the bias term is included in the weights vector and that the input to the weight is 1.0.
    The equations assume that the loss function is the sum of the squares of the error where error is defined as
    the difference between a known target value and the output of the Psi function. The Psi function is
    1/(1+e^-z). And z, also known as net, is the sum of product of the weights and inputs (which includes the bias).
    '''
    def __init__(self, numberOfInputs, numberOfOutputs, learningFactor, id="me", debug=False, filePath=None):
        '''
        Neuron layer constructor - uses tensors - last weight row is the bias term

        numberOfInputs  -- number of inputs feeding this layer (the bias input is added internally)
        numberOfOutputs -- number of neurons in the layer (one output per neuron)
        learningFactor  -- step size applied to the weight deltas in updateWeights
        id              -- label used in debug printouts
        debug           -- when True, print diagnostic information during training
        filePath        -- optional path to previously pickled weights; loaded if the file exists
        '''
        self.id = id
        self.debug = debug
        self.numberOfInputs = numberOfInputs
        self.numberOfNeurons = numberOfOutputs
        # until a higher layer back-propagates error into this layer,
        # errorWRTPsi computes the error locally from the target (output layer case)
        self.backPropagatedErrorNotSet = True
        self.learningFactor = learningFactor
        # gain on the error term (the derivative of the squared-error loss contributes a factor of 2)
        self.normalizer = 2.0
        # NOTE(review): self.delta appears unused within this class - confirm before removing
        self.delta = [0.0] * self.numberOfNeurons
        # weights are (numberOfInputs + 1) x numberOfOutputs; the extra row pairs with the constant 1.0 bias input
        self.weights = tf.random.uniform([numberOfInputs+1, numberOfOutputs], minval=-0.5, maxval=0.5,
                                         dtype=tf.dtypes.float32)
        self.error = [0.0] * numberOfOutputs
        self.filePath = filePath
        if filePath is not None:
            try:
                fileHandle = open(filePath, "rb")
                self.weights = pickle.load(fileHandle)
                fileHandle.close()
            except FileNotFoundError:
                pass  # no stored weights yet - keep the random initialization
    def storeLayer(self, filePath):
        '''
        Store the weights that have been trained (pickled to filePath)
        '''
        fileHandle = open(filePath, "wb")
        pickle.dump(self.weights, fileHandle)
        fileHandle.close()
    def calculateOutput(self, inputs):
        '''
        Given the inputs, calculate the outputs; also caches inputs/outputs for the backward pass
        '''
        # append the constant 1.0 input that pairs with the bias weight row
        self.inputs = tf.concat([inputs, [1.0]], 0)
        self.outputs = self.psi(self.netAKAz())
        return self.outputs
    def netAKAz(self):
        '''
        Calculate the sum of the product of the weights and the inputs (bias included) - this is net AKA z
        '''
        return tf.tensordot(self.inputs, self.weights, 1)
    def psi(self, z):
        '''
        Apply the logistic function, ψ, to the outputs
        '''
        return 1.0 / (1.0 + tf.exp(-z))
    def netWRTWeight(self, index):
        '''
        ∂zᵢ/∂wᵢ = inputᵢ -- the change in neuron output with respect to a weight
        '''
        return self.inputs[index]
    def netWRTWeightVector(self):
        '''
        ∂zᵢ/∂wᵢ = inputᵢ -- the change in neuron output with respect to a weight - this is a vector
        '''
        return self.inputs
    def psiWRTz(self, index):
        '''
        ∂ψᵢ/∂zᵢ = ψᵢ*(1-ψᵢ) where ψ = 1 / (1 + e^(-z)) -- the partial change of ψ with respect to z - this
        is a scalar - must designate output index
        '''
        return self.outputs[index]*(1 - self.outputs[index])
    def errorWRTPsi(self, targetArray, index):
        '''
        ∂Eᵢ/∂ψᵢ = -(targetOutput - ψᵢ) # assuming that E is square of the error -
        this is a scalar - must designate output index
        '''
        # NOTE(review): the gain (normalizer = 2) IS applied below even though the
        # original comment said it was ignored - confirm intended loss scaling
        if (self.backPropagatedErrorNotSet):
            # output layer: compute the error directly from the target
            targetOutput = targetArray[index]
            self.error[index] = - (self.normalizer * (targetOutput - self.outputs[index]))
        else:
            pass  # hidden layer: self.error was set by the next layer via setPropagationError
        return self.error[index]
    def updateWeights(self, target=None, deltas=None):
        '''
        Update the weights to minimize the loss - if in batch mode, the deltas have been accumulated by updateDeltas
        '''
        if deltas is None:
            deltas = self.updateDeltas(target)
        # gradient-descent step; deltas are (neurons x inputs), weights are (inputs x neurons)
        self.weights -= self.learningFactor * tf.transpose(deltas)
    def updateDeltas(self, target, deltas=None):
        '''
        Update the deltas during batch processing - builds a (neurons x inputs+1) matrix of
        ∂E/∂w terms for this sample and accumulates it into deltas (if supplied)
        '''
        for neuron in range(self.numberOfNeurons):
            if neuron == 0:
                deltaDeltas = tf.reshape(tf.convert_to_tensor(self.errorWRTPsi(target, neuron)
                                                              * self.psiWRTz(neuron)
                                                              * self.netWRTWeightVector()),
                                         [1, len(self.netWRTWeightVector())])  # make a 1 by n vector
            else:
                deltaDeltas = tf.concat((deltaDeltas, [self.errorWRTPsi(target, neuron)
                                                       * self.psiWRTz(neuron)
                                                       * self.netWRTWeightVector()]), 0)  # tack on a new row
            if self.debug:
                print("updateDeltas - layer {}, neuron {}, weight deltaDeltas\n{}".
                      format(self.id, neuron, deltaDeltas))
        if deltas is None:
            deltas = deltaDeltas
        else:
            deltas += deltaDeltas
        self.propagateError()  # do this before updating weights
        return deltas
    def propagateError(self):
        '''
        Determine error to send to previous layers
        For each neuron, determine the amount of error at its output that needs to be applied to the input
        which is the output of the previous level. Those individual neuron amounts then need to be summed
        across all neurons.
        '''
        # NOTE(review): this vector has numberOfInputs + 1 entries (bias included);
        # the previous layer only indexes its first numberOfNeurons entries, so the
        # trailing bias entry is effectively ignored - confirm this is intended
        previousLayerNeuronError = [0.0] * (self.numberOfInputs + 1)
        for thisLayerNeuron in range(self.numberOfNeurons):
            error = self.error[thisLayerNeuron]
            amountForEachPreviousLayerNeuron = error * self.weights[:, thisLayerNeuron] * self.psiWRTz(thisLayerNeuron)
            if self.debug:
                print("sum of weights for neurons at this layer: {}".
                      format(tf.reduce_sum(self.weights[:, thisLayerNeuron])))
                print("propagateError - in layer {}, neuron {}, contribution:{}".
                      format(self.id, thisLayerNeuron, amountForEachPreviousLayerNeuron))
                print("propagateError - Error {}, weights {}".format(error, self.weights[:, thisLayerNeuron]))
            previousLayerNeuronError += amountForEachPreviousLayerNeuron
        self.errorForNextLayer = previousLayerNeuronError
        if self.debug:
            print("propagateError - in layer {}, the next layer's error will be\n {}".
                  format(self.id, previousLayerNeuronError))
    def setPropagationError(self, error):
        '''
        From a higher layer, set the error propagating back to this layer
        '''
        self.error = error
        if self.debug:
            print("setPropagationError - setting propagation error in layer {} to\n {}".
                  format(self.id, self.error))
        # flag errorWRTPsi to use the injected error instead of computing it from the target
        self.backPropagatedErrorNotSet = False
    def setLearningFactor(self, factor):
        '''
        Setter for learning factor
        '''
        self.learningFactor = factor
| {"/xorExample.py": ["/Model.py"], "/Model.py": ["/NeuralLayer.py"]} |
59,958 | mbroihier/ml | refs/heads/master | /Model.py | import tensorflow as tf
import NeuralLayer as nl
class Model:
    '''
    A stack of fully interconnected NeuralLayer objects.  The input layer is
    implicit (it is simply the input vector), so every entry of the layer
    specification list produces exactly one NeuralLayer instance.
    '''
    def __init__(self, inputOutputList, debug=False, filePath=None):
        '''
        Model constructor - builds one NeuralLayer per specification tuple.

        Each tuple is (inputs, outputs, learningFactor[, id]); a trailing
        string, when present, becomes the layer id.  When filePath is given,
        layer N attempts to restore its weights from filePath + str(N).
        '''
        self.layers = []
        for position, spec in enumerate(inputOutputList):
            label = spec[-1] if isinstance(spec[-1], str) else None
            weightsFile = None if filePath is None else filePath + str(position)
            self.layers.append(
                nl.NeuralLayer(spec[0], spec[1], spec[2], label, debug, weightsFile))
    def storeModel(self, filePath):
        '''
        Persist every layer's weights to filePath + layer index
        '''
        for position, layer in enumerate(self.layers):
            layer.storeLayer(filePath + str(position))
    def feedForward(self, inputs):
        '''
        Propagate the inputs through every layer and return the final outputs
        '''
        signal = inputs
        for layer in self.layers:
            signal = layer.calculateOutput(signal)
        return signal
    def updateDeltas(self, target, deltas=None):
        '''
        Accumulate weight deltas in every layer, working from the output layer
        back to the input layer and handing each layer's error to the one below
        '''
        backToFront = self.layers[::-1]
        last = len(backToFront) - 1
        accumulate = deltas is not None
        if not accumulate:
            deltas = []  # one delta tensor per (reversed) layer
        for position, layer in enumerate(backToFront):
            if accumulate:
                deltas[position] = layer.updateDeltas(target, deltas=deltas[position])
            else:
                deltas.append(layer.updateDeltas(target))
            if position != last:
                backToFront[position + 1].setPropagationError(layer.errorForNextLayer)
        return deltas
    def updateWeights(self, target=None, deltas=None):
        '''
        Update the weights of all layers (output layer first); when pre-accumulated
        deltas are supplied, error propagation was already done by updateDeltas
        '''
        backToFront = self.layers[::-1]
        last = len(backToFront) - 1
        for position, layer in enumerate(backToFront):
            if deltas is None:
                layer.updateWeights(target)
                if position != last:
                    backToFront[position + 1].setPropagationError(layer.errorForNextLayer)
            else:
                layer.updateWeights(deltas=deltas[position])
| {"/xorExample.py": ["/Model.py"], "/Model.py": ["/NeuralLayer.py"]} |
59,959 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/elasticity_1_step.py | """
Compute an approximate elasticity of unemployment with respect to unemployment benefits by age.
The elasticity is approximated by simulation: the solution routine (see solve_model_old2.py)
is run with N=10000; this code can be used in the calibration procedure.
See elasticity_exact.py for a more precise computation of the elasticity by age.
"""
#####################################################
# IMPORTS
#####################################################
import copy
import json
import multiprocessing
import sys
import numpy as np
from bld.project_paths import project_paths_join as ppj
from src.model_analysis.run_utils import _solve_run
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def elasticity_1_step(controls, calibration):
    """Approximate the elasticity of non-employment to UI benefits by age.

    Solves the model for three benefit configurations (replacement rate
    shocked down, baseline, shocked up; see ``shock_direction``) and computes
    a two-sided finite-difference elasticity of the share of non-employed
    with respect to the UI replacement rate, both by type and averaged
    over types.

    Parameters
    ----------
    controls : dict
        solver controls; "n_parallel_jobs" is used here, everything is
        passed through to the solution routine (_solve_run)
    calibration : dict
        model calibration; must contain "n_periods_working", "n_types",
        "type_weights" and "ui_replacement_rate_vector"

    Returns
    -------
    dict
        elasticities, welfare and non-employment shares (values converted
        to nested lists so the result is JSON-serializable)
    """
    # set controls
    n_runs = 3  # down / baseline / up
    n_parallel_jobs = controls["n_parallel_jobs"]
    shock_size = 0.05
    # load variables
    n_periods_working = calibration["n_periods_working"]
    n_types = calibration["n_types"]
    type_weights = np.array(calibration["type_weights"])
    ui_replacement_rate_vector = np.array(calibration["ui_replacement_rate_vector"])
    # calculate derived variables
    n_years_working = int(n_periods_working / 4)  # 4 periods (quarters) per year
    # initialize objects
    value_at_birth = np.full((n_types, n_runs), np.nan)
    share_nonemployed = np.full((n_types, n_periods_working, n_runs), np.nan)
    # generate shocked input vectors
    shock_direction = np.array([-1, 0, 1])
    shock_timing = np.zeros((n_types, n_periods_working))
    direction = 1
    # only the first year (4 periods) of every 5-year bracket is shocked,
    # with alternating sign across brackets
    # NOTE(review): the np.full(4, ...) assignment assumes each shocked year
    # has 4 full periods, i.e. n_periods_working is a multiple of 4 - confirm
    for year_idx in range(0, n_years_working, 5):
        period_idx_start = int(year_idx * 4)
        period_idx_end = int(min(period_idx_start + 4, n_periods_working))
        shock_timing[:, period_idx_start:period_idx_end] = np.full(4, direction)
        direction = -direction  # invert direction of shock for every bracket
    ui_replacement_rate_vector_all = np.repeat(
        ui_replacement_rate_vector, n_runs
    ).reshape((n_types, n_periods_working, n_runs))
    for run_idx in range(n_runs):
        ui_replacement_rate_vector_all[:, :, run_idx] += (
            shock_timing * 0.5 * shock_size * shock_direction[run_idx]
        )
    # define program for parallel computation
    inputs = []
    for run_idx in range(n_runs):
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": ui_replacement_rate_vector_all[
                        :, :, run_idx
                    ].tolist()
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_parallel_jobs) as pool:
        out = pool.starmap(_solve_run, inputs)
    # extract results
    for run_idx in range(n_runs):
        value_at_birth[:, run_idx] = np.array(out[run_idx]["welfare"])
        share_nonemployed[:, :, run_idx] = np.array(out[run_idx]["share_nonemployed"])
    # average over types
    average_value_at_birth = np.average(value_at_birth, weights=type_weights, axis=0)
    average_share_nonemployed = np.average(
        share_nonemployed, weights=type_weights, axis=0
    )
    average_ui_replacement_rate = np.average(
        ui_replacement_rate_vector, weights=type_weights, axis=0
    )
    # calculate elasticities
    age_min = 20
    age_bins = [[20, 30], [35, 45], [50, 60]]
    n_bins = len(age_bins)
    # averaged over types
    # runs are ordered [down, baseline, up], so index 1 is the baseline
    average_elasticity_unemployment_up = (
        (average_share_nonemployed[:, 2] - average_share_nonemployed[:, 1])
        * average_ui_replacement_rate
        / (average_share_nonemployed[:, 1] * 0.5 * shock_size * shock_timing[0, :])
    )
    average_elasticity_unemployment_down = (
        (average_share_nonemployed[:, 0] - average_share_nonemployed[:, 1])
        * average_ui_replacement_rate
        / (
            average_share_nonemployed[:, 1]
            * 0.5
            * shock_size
            * shock_timing[0, :]
            * (-1)
        )
    )
    average_elasticity_unemployment = (
        0.5 * average_elasticity_unemployment_down
        + 0.5 * average_elasticity_unemployment_up
    )
    # zero out infinite entries produced by division through a zero denominator
    # (periods without a shock); 0/0 entries become NaN and are handled by nansum
    average_elasticity_unemployment = average_elasticity_unemployment * (
        1 - np.isinf(average_elasticity_unemployment)
    )
    # weighted mean elasticity per age bin (weighted by the non-employment share)
    average_elasticity_unemployment_mean = np.full(n_bins, np.nan)
    for bin_idx, age_bin in enumerate(age_bins):
        average_elasticity_unemployment_mean[bin_idx] = np.nansum(
            average_elasticity_unemployment[
                (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4
            ]
            * average_share_nonemployed[
                (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4, 2
            ]
        ) / np.sum(
            average_share_nonemployed[
                (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4, 2
            ]
            * (
                1
                - (
                    shock_timing[
                        0, (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4
                    ]
                    == 0
                )
            )
        )
    # by type
    elasticity_unemployment_up = (
        (share_nonemployed[:, :, 2] - share_nonemployed[:, :, 1])
        * ui_replacement_rate_vector
        / (share_nonemployed[:, :, 1] * 0.5 * shock_size * shock_timing)
    )
    elasticity_unemployment_down = (
        (share_nonemployed[:, :, 0] - share_nonemployed[:, :, 1])
        * ui_replacement_rate_vector
        / (share_nonemployed[:, :, 1] * 0.5 * shock_size * shock_timing * (-1))
    )
    elasticity_unemployment = (
        0.5 * elasticity_unemployment_down + 0.5 * elasticity_unemployment_up
    )
    elasticity_unemployment = elasticity_unemployment * (
        1 - np.isinf(elasticity_unemployment)
    )
    elasticity_unemployment_mean = np.full((n_types, n_bins), np.nan)
    for bin_idx, age_bin in enumerate(age_bins):
        elasticity_unemployment_mean[:, bin_idx] = np.nansum(
            elasticity_unemployment[
                :, (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4
            ]
            * share_nonemployed[
                :, (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4, 2
            ],
            axis=1,
        ) / np.sum(
            share_nonemployed[
                :, (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4, 2
            ]
            * (
                1
                - (
                    shock_timing[
                        :, (age_bin[0] - age_min) * 4 : (age_bin[1] - age_min + 1) * 4
                    ]
                    == 0
                )
            ),
            axis=1,
        )
    # print and return results
    print(
        "=====================================\n" " unemployment elasticity to benefits"
    )
    for bin_idx, age_bin in enumerate(age_bins):
        print(
            " {}-{} \t\t\t\t\t {:9.2f}".format(
                age_bin[0], age_bin[1], average_elasticity_unemployment_mean[bin_idx]
            )
        )
    print("=====================================")
    out = {
        "age_bins": age_bins,
        "average_elasticity_unemployment_mean": average_elasticity_unemployment_mean,
        "average_pv_utility_simulated_corrected": average_value_at_birth,
        "average_share_nonemployed": average_share_nonemployed,
        "elasticity_unemployment_mean": elasticity_unemployment_mean,
        "pv_utility_simulated_corrected": value_at_birth,
        "share_nonemployed": share_nonemployed,
        "ui_replacement_rate_vector_all": ui_replacement_rate_vector_all,
    }
    # convert numpy arrays to nested lists so the result can be json.dump'ed
    for item in out:
        try:
            out[item] = out[item].tolist()
        except AttributeError:
            pass
    return out
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # Command-line entry point: argv[1] selects the calibration setup,
    # argv[2] the interpolation method.
    # NOTE(review): if only one argument is supplied, BOTH fall back to their
    # defaults (the IndexError on argv[2] resets setup_name too) - confirm
    # this is the intended behavior.
    try:
        setup_name = sys.argv[1]
        method = sys.argv[2]
    except IndexError:
        setup_name = "base_combined"
        method = "linear"
    # load calibration and set some variables
    calibration = json.load(
        open(ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json"))
    )
    # set controls
    controls = {
        "interpolation_method": method,
        "n_iterations_solve_max": 20,
        "n_simulations": int(1e6),
        "n_parallel_jobs": 3,
        "run_simulation": True,
        "seed_simulation": 3405,
        "show_progress_solve": False,
        "show_summary": False,
        "tolerance_solve": 1e-7,
    }
    # approximate elasticity
    elast_1_step = elasticity_1_step(controls, calibration)
    # store results
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_" + setup_name + "_elasticity_approx_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(elast_1_step, outfile, ensure_ascii=False, indent=2)
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,960 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/first_best_3_agents_individual.py | """ Compute first best solution.
This module computes the first best solution.
"""
#####################################################
# IMPORTS
#####################################################
import json
import sys
import numpy as np
from bld.project_paths import project_paths_join as ppj
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def _get_results_3_agents_individual():
    """Combine the per-education-type first-best results into one result set.

    Loads the individually optimized first-best results for the three
    education types (high, medium, low), stacks every per-type quantity into
    an array with the education type on the first axis (squeezing singleton
    dimensions), and additionally computes type-weighted averages (weights
    taken from the base calibration) for a subset of the quantities.

    Returns
    -------
    dict
        combined results; numpy arrays are converted to (nested) lists so
        the dictionary can be serialized to JSON directly
    """
    # quantities read from each of the three result files and stacked with
    # the education type on axis 0 (then np.squeeze'd)
    stacked_keys = (
        "consumption_opt_first_best",
        "consumption_opt_first_best_idx",
        "effort_searching_all_aggregated",
        "effort_searching_aggregated",
        "effort_searching_loss_aggregated",
        "effort_searching_all_mean",
        "effort_searching_mean",
        "effort_searching_loss_mean",
        "wage_loss_factor_vector",
        "income_tax_rate_vector_first_best",
        "interpolation_weight",
        "share_unemployed_mean",
        "share_unemployed_loss_mean",
        "ui_replacement_rate_vector_first_best",
        "wage_employed_mean",
        "wage_pre_displacement_nonemployed_mean",
        "wage_unemployed_loss_mean",
        "wage_hc_factor_vector",
        "wealth",
        "wealth_simulated",
        "welfare",
        "welfare_simulated",
        "pv_income_employed",
        "pv_income_searching",
        "pv_income_searching_loss",
        "pv_search_cost_employed",
        "pv_search_cost_searching",
        "pv_search_cost_searching_loss",
        "pv_utils_employed",
        "pv_utils_searching",
        "pv_utils_searching_loss",
        "share_unemployed",
        "share_unemployed_loss",
    )
    # quantities for which a type-weighted average ("<key>_aggregated") is
    # computed in addition to the stacked per-type values
    aggregated_keys = (
        "pv_income_employed",
        "pv_income_searching",
        "pv_income_searching_loss",
        "pv_search_cost_employed",
        "pv_search_cost_searching",
        "pv_search_cost_searching_loss",
        "pv_utils_employed",
        "pv_utils_searching",
        "pv_utils_searching_loss",
        "share_unemployed",
        "share_unemployed_loss",
    )
    # load the base calibration (for the type weights) and the per-type
    # results; use context managers so file handles are closed deterministically
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_base_individual.json")
    ) as infile:
        calibration_base = json.load(infile)
    results_by_type = []
    for education in ("high", "medium", "low"):
        with open(
            ppj(
                "OUT_RESULTS",
                "analytics",
                "analytics_edu_" + education + "_opt_rate_only_first_best_linear.json",
            )
        ) as infile:
            results_by_type.append(json.load(infile))
    type_weights = np.array(calibration_base["type_weights"])
    # type-independent quantities are taken from the first (high education) results
    values = {
        "consumption_grid": np.array(results_by_type[0]["consumption_grid"]),
        "n_simulations": results_by_type[0]["n_simulations"],
    }
    for key in stacked_keys:
        values[key] = np.squeeze(np.array([res[key] for res in results_by_type]))
    for key in aggregated_keys:
        values[key + "_aggregated"] = np.average(
            values[key], weights=type_weights, axis=0
        )
    # assemble the output in the pre-refactoring key order so that serialized
    # result files remain directly comparable
    output_key_order = (
        "consumption_grid",
        "consumption_opt_first_best",
        "consumption_opt_first_best_idx",
        "effort_searching_all_aggregated",
        "effort_searching_aggregated",
        "effort_searching_loss_aggregated",
        "effort_searching_all_mean",
        "effort_searching_mean",
        "effort_searching_loss_mean",
        "wage_loss_factor_vector",
        "income_tax_rate_vector_first_best",
        "interpolation_weight",
        "n_simulations",
        "pv_income_employed",
        "pv_income_searching",
        "pv_income_searching_loss",
        "pv_search_cost_employed",
        "pv_search_cost_searching",
        "pv_search_cost_searching_loss",
        "pv_utils_employed",
        "pv_utils_searching",
        "pv_utils_searching_loss",
        "share_unemployed",
        "share_unemployed_loss",
        "pv_income_employed_aggregated",
        "pv_income_searching_aggregated",
        "pv_income_searching_loss_aggregated",
        "pv_search_cost_employed_aggregated",
        "pv_search_cost_searching_aggregated",
        "pv_search_cost_searching_loss_aggregated",
        "pv_utils_employed_aggregated",
        "pv_utils_searching_aggregated",
        "pv_utils_searching_loss_aggregated",
        "share_unemployed_aggregated",
        "share_unemployed_loss_aggregated",
        "share_unemployed_mean",
        "share_unemployed_loss_mean",
        "ui_replacement_rate_vector_first_best",
        "wage_employed_mean",
        "wage_pre_displacement_nonemployed_mean",
        "wage_unemployed_loss_mean",
        "wage_hc_factor_vector",
        "wealth",
        "wealth_simulated",
        "welfare",
        "welfare_simulated",
    )
    out = {key: values[key] for key in output_key_order}
    # convert numpy arrays to nested lists for JSON serialization
    for item in out:
        try:
            out[item] = out[item].tolist()
        except AttributeError:
            pass
    return out
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # NOTE(review): the interpolation method is read from sys.argv[2], not
    # argv[1]; presumably the calling convention passes a setup name as the
    # first argument (as in the sibling scripts) - confirm against the build
    try:
        method = sys.argv[2]
    except IndexError:
        method = "linear"
    # combine the per-education-type first-best results
    results = _get_results_3_agents_individual()
    # store the combined results as JSON
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_base_individual_first_best_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(results, outfile, ensure_ascii=False, indent=2)
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,961 | simonjheiler/ui_human_capital | refs/heads/main | /src/utilities/model_utils.py | """ Utilities for numerical modelling.
This module contains standardized functions used
throughout the analytical part of the project for
handling the numerical routines.
"""
#####################################################
# IMPORTS
#####################################################
import sys
import numpy as np
#####################################################
# PARAMETERS
#####################################################
global optim_options
optim_options = {}
#####################################################
# FUNCTIONS
#####################################################
def optstep(f, x0, f0, g0, d, method=3, MaxIters=100):
    """Solve a one dimensional optimal step length problem.

    Used to compute step lengths in multidimensional optimization.

    Parameters
    ----------
    f : callable
        objective function being minimized; called as ``f(x)``
    x0 : array_like
        starting point
    f0 : float
        function value at the starting point, ``f(x0)``
    g0 : array_like
        gradient of f at the starting point
    d : array_like
        the search direction
    method : int
        method used to calculate the step length; options:
        1 - accept a unit step if it decreases f, else fall back to STEPGOLD
        2 - STEPBHHH (falls back to STEPGOLD on failure)
        3 - STEPBT (default; falls back to STEPGOLD on failure)
        4 - STEPGOLD
    MaxIters : int
        the maximum number of iterations before trying something else

    Returns
    -------
    s : float
        the optimal step length in direction d
    fx : float
        the function value at ``x0 + s * d``
    iters : int
        the number of iterations used
    errcode : int
        non-zero if the line search failed to converge

    Raises
    ------
    ValueError
        if ``method`` is not one of 1, 2, 3, 4

    Notes
    -----
    Ported from the CompEcon toolbox.
    Copyright(c) 1997 - 2000, Paul L. Fackler & Mario J. Miranda
    paul_fackler @ ncsu.edu, miranda .4 @ osu.edu
    """
    if method == 1:
        # BUG FIX: was ``eval(f, x0 + d)`` - in Python that passes ``x0 + d``
        # as eval's globals mapping (a MATLAB feval mistranslation) and always
        # raises TypeError; call the objective function directly instead
        fx = f(x0 + d)
        if fx < f0:
            # the full step already decreases the objective - accept it
            s = 1
            iters = 1
            errcode = 0
        else:
            (s, fx, iters, errcode) = stepgold(f, x0, f0, g0, d, MaxIters)
    elif method == 2:
        (s, fx, iters, errcode) = stepbhhh(f, x0, f0, g0, d, MaxIters)
        if errcode:
            # BHHH failed - retry with the golden-section search
            (s, fx, iters2, errcode) = stepgold(f, x0, f0, g0, d, MaxIters)
            iters = iters + iters2
    elif method == 3:
        (s, fx, iters, errcode) = stepbt(f, x0, f0, g0, d, MaxIters)
        if errcode:
            # STEPBT failed - retry with the golden-section search
            (s, fx, iters2, errcode) = stepgold(f, x0, f0, g0, d, MaxIters)
            iters = iters + iters2
    elif method == 4:
        (s, fx, iters, errcode) = stepgold(f, x0, f0, g0, d, MaxIters)
    else:
        raise ValueError("method must be 1, 2, 3 or 4, got {}".format(method))
    return s, fx, iters, errcode
def stepbhhh(f, x0, f0, g0, d, MaxIters):
    """Compute approximate optimal step length using the BHHH algorithm.

    Parameters
    ----------
    f : callable or str
        the objective function; a callable, or the name of a function
        available in the current scope
    x0 : float
        the current value of the parameters
    f0 :
        the value of f(x0)
    g0 :
        the gradient vector of f at x0
    d :
        the search direction
    MaxIters : int or None
        the maximum number of function evaluations allowed per phase;
        pass None to use the default of 25

    Returns
    -------
    s: the step in the direction d
    fs: the value of f at x + s * d,
    iteration: the number of iterations used
    errcode: equals 1 if maximum iterations are exceeded

    STEPBHHH uses an algorithm based on one discussed in Berndt, et.al., Annals of
    Economic and Social Measurement, 1974, pp. 653 - 665. This procedure specifies a
    cone of convergence in the plane defined by the direction vector, d, and the
    value of the objective function. The cone is defined by the lines through the
    origin (x, f(x)) with slopes (d'g) * delta and (d'g) * (1-delta). Delta must lie on
    (0, 0.5). The procedure iterates until a point is found on the objective function
    that lies within the cone. In general, the wider the cone, the faster a "suitable"
    step size will be found. If a trial point lies above the cone the step size will
    be increased and if it lies below the cone the step size is decreased.
    """
    # INITIALIZATIONS
    # Fixed: the previous guard `len(sys.argv) < 6 or np.isempty(MaxIters)`
    # was a mistranslation of MATLAB's nargin check -- it inspected the
    # interpreter command line (so any passed MaxIters was always clobbered
    # to 25) and np.isempty does not exist.
    if MaxIters is None:
        MaxIters = 25
    func = f if callable(f) else eval(f)  # resolve feval-style name to callable
    delta = optget("optstep", "bhhhcone", 0.0001)
    dg = -d * g0  # directional derivative
    tol1 = dg * delta
    tol0 = dg * (1 - delta)
    s = 1
    ds = 1
    iteration = None
    temp = None
    fs = None
    errcode = 0
    # first bracket the cone
    for i in range(MaxIters):
        iteration = i
        x = x0 + s * d
        fs = func(x)
        temp = (f0 - fs) / s
        if temp < tol0:
            ds = 2 * ds
            s = s + ds
        else:
            break
    # NOTE(review): for dg > 0 this interval [tol0, tol1] is empty, since
    # tol0 > tol1; the acceptance region is kept exactly as in the MATLAB
    # original -- confirm the min/max sign convention of the caller.
    if tol0 <= temp <= tol1:
        return s, fs, iteration, errcode
    ds = ds / 2
    s = s - ds
    # then use bisection to get inside it
    for _ in range(MaxIters):
        iteration += 1
        ds = ds / 2
        x = x0 + s * d
        fs = func(x)
        temp = (f0 - fs) / s
        if temp > tol1:
            s = s - ds
        elif temp < tol0:
            s = s + ds
        else:
            return s, fs, iteration, errcode
    errcode = 1
    return s, fs, iteration, errcode
def stepbt(f, x0, f0, g0, d, MaxIters):
    """Compute approximate optimal step length by backtracking.

    Parameters
    ----------
    f : callable or str
        the objective function; a callable, or the name of a function
        available in the current scope
    x0 :
        the current value of the parameters
    f0 :
        the value of f(x0); this is passed as argument to save one function
        evaluation
    g0 : np.array()
        the gradient vector of f at x0
    d :
        the search direction
    MaxIters : int
        the maximum number of "backsteps" of the step length

    Returns
    -------
    s : np.float
        the step in the direction d
    fs : np.float
        the value of f at x + s * d
    iteration : int
        the number of iterations used
    errcode : int
        equals 1 if STEPBT fails to find a suitable step length, 2 if the
        cubic approximation finds a negative discriminant (complex root)

    STEPBT uses a backtracking method similar to Algorithm 6.3.5 in Dennis and
    Schnabel, Numerical Methods for Unconstrained Optimization and Nonlinear
    Equations, or LNSRCH in sec 9.7 of Press, et al., Numerical Recipes. The
    algorithm approximates the function with a cubic using the function value
    and derivative at the initial point and two additional points. It determines
    the minimum of the approximation. If this is acceptable it returns, otherwise
    it uses the current and previous point to form a new approximation. The
    convergence criterion is similar to that discussed in Berndt, et.al.,
    Annals of Economic and Social Measurement, 1974, pp. 653 - 665 (see description
    of BHHHSTEP). The change in the step size is also limited to ensure that
    lb * s(k) <= s(k + 1) <= ub * s(k) (defaults: lb = 0.1, ub = 0.5).
    """
    # initialisations
    func = f if callable(f) else eval(f)  # resolve feval-style name to callable
    delta = 1e-4  # Defines cone of convergence; must be on (0, 1 / 2)
    ub = 0.5  # Upper bound on acceptable reduction in s
    lb = 0.1  # Lower bound on acceptable reduction in s
    dg = -d * g0  # directional derivative
    tol1 = delta * dg
    tol0 = (1 - delta) * dg
    errcode = 0
    # full step
    s = 1
    fs = func(x0 + d)
    if -fs + f0 <= tol1:
        iteration = 1
        return s, fs, iteration, errcode
    # quadratic approximation
    s2 = s
    fs2 = fs
    s = -0.5 * dg / (-fs + f0 - dg)
    s = max(s, lb)
    fs = func(x0 + s * d)
    temp = (-fs + f0) / s
    # Fixed: `tol0 <= temp & temp <= tol1` parsed as `tol0 <= (temp & temp)`
    # and raised TypeError on floats; chained comparison is the intent.
    if tol0 <= temp <= tol1:
        iteration = 2
        return s, fs, iteration, errcode
    # cubic approximation
    iteration = 2  # two function evaluations already spent
    for _ in range(MaxIters):
        iteration += 1
        temp = (s - s2) * np.array((s * s, s2 * s2))
        temp = np.array(((-fs + f0 - dg * s), (-fs2 + f0 - dg * s2))) / temp
        # cubic coefficients; MATLAB temp(1), temp(2) are temp[0], temp[1]
        # here (the original kept 1-based indices, raising IndexError)
        a = temp[0] - temp[1]
        b = s * temp[1] - s2 * temp[0]
        s2 = s
        fs2 = fs
        if a == 0:  # quadratic fits exactly
            s = -0.5 * dg / b
        else:
            disc = b * b - 3 * a * dg
            if disc < 0:
                errcode = 2  # complex root
                return s, fs, iteration, errcode
            s = (np.sqrt(disc) - b) / (3 * a)
        s = max(min(s, ub * s2), lb * s2)  # ensures acceptable step size
        fs = func(x0 + s * d)
        temp = (-fs + f0) / s
        if tol0 <= temp <= tol1:
            return s, fs, iteration, errcode
    errcode = 1
    return s, fs, iteration, errcode
def stepgold(f, x0, f0, g0, d, MaxIters):
    """Compute approximate optimal step length using golden search.

    Parameters
    ----------
    f : callable or str
        the objective function; a callable, or the name of a function
        available in the current scope
    x0 :
        the current value of the parameters
    f0 :
        the value of f(x0)
    g0 :
        the gradient vector of f at x0 (note: not used)
    d :
        the search direction
    MaxIters :
        the maximum number of function evaluations allowed

    Returns
    -------
    s :
        the step in the direction d
    fs :
        the value of f at x + s * d
    iteration :
        the number of iterations used
    errcode :
        equals 1 if maximum iterations are exceeded

    STEPGOLD uses step doubling to find an initial bracket and then uses the
    golden search method to find an optimum within the bracket.
    Iterations cease if the bracket is less than TOL or a maximum number of
    iterations is reached.
    """
    func = f if callable(f) else eval(f)  # resolve feval-style name to callable
    alpha1 = (3 - np.sqrt(5)) / 2
    alpha2 = (np.sqrt(5) - 1) / 2
    tol = 1e-4  # tolerance used for Golden search algorithm
    tol = tol * (alpha1 * alpha2)  # the bracket will be bracket / (alpha1 * alpha2)
    s = 1
    errcode = 1  # 1 if the search is unsuccessful; otherwise 0
    iteration = 0
    s0 = 0
    # Find a bracketing interval (renamed `len` -> `bracket`: the original
    # shadowed the builtin)
    fs = func(x0 + d)
    if f0 >= fs:
        bracket = alpha1
    else:
        for _ in range(MaxIters):
            iteration += 1
            s = 2 * s
            fl = fs
            fs = func(x0 + s * d)
            if fs <= fl:
                bracket = alpha1 * (s - s0)
                break
            else:
                f0 = fl
                s0 = s / 2
        if iteration >= MaxIters:
            # no bracket found within the iteration budget
            s = s / 2
            fs = fl
            return s, fs, iteration, errcode
    xl = x0 + (s0 + bracket) * d
    xs = x0 + (s - bracket) * d
    s = s - bracket
    bracket = bracket * alpha2  # now measures relative distance between xl and xs
    fs = func(xs)
    fl = func(xl)
    # Golden search within the bracket
    while iteration < MaxIters:
        iteration += 1
        if fs < fl:
            s = s - bracket
            bracket = bracket * alpha2
            xs = xl
            xl = xl - bracket * d
            fs = fl
            fl = func(xl)
        else:
            bracket = bracket * alpha2
            s = s + bracket
            xl = xs
            xs = xs + bracket * d
            fl = fs
            fs = func(xs)
        if bracket < tol:
            errcode = 0
            break
    # keep the better of the two interior points
    if fl > fs:
        fs = fl
        s = s - bracket
    # Fixed: the original returned the builtin `iter` instead of the count.
    return s, fs, iteration, errcode
def optget(funcname, optname, *args):
    """Get previously set function default values.

    Parameters
    ----------
    funcname : string
        name of function (case-insensitive)
    optname : string
        name of option (case-insensitive)
    *args : float
        default option value, used (and stored) if the option is not set yet

    Returns
    -------
    optvalue : float
        the current value of the option

    If there does not yet exist a dictionary entry for
    options[funcname][optname], it will be set to the first
    argument after 'optname'. If the dictionary entry has
    already been set, all arguments after 'optname' have no effect
    and the existing value will be returned. Use 'optset' to
    change a previously set field.

    Copyright (c) 1997-2000, Paul L. Fackler & Mario J. Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu
    """
    global optim_options
    try:
        optim_options
    except NameError:
        # allow use even before the module-level dict has been created
        optim_options = {}
    funcname = funcname.lower()
    optname = optname.lower()
    try:
        optvalue = optim_options[funcname][optname]
    except KeyError:
        optvalue = args[0]
        # Fixed: a plain `optim_options[funcname][optname] = optvalue`
        # raised KeyError again when the funcname entry itself was missing.
        optim_options.setdefault(funcname, {})[optname] = optvalue
    return optvalue
def optset(funcname, optname, optvalue):
    """Set function options.

    Parameters
    ----------
    funcname : string
        name of function (case-insensitive)
    optname : string
        name of option (case-insensitive)
    optvalue : float
        option value

    Returns
    -------
    None

    If optname = 'defaults' the current settings of the options for funcname
    will be cleared. The next time the function is called, the default
    options will be restored.

    Copyright (c) 1997-2002, Paul L. Fackler & Mario J. Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu
    """
    global optim_options
    try:
        optim_options
    except NameError:
        # allow use even before the module-level dict has been created
        optim_options = {}
    funcname = funcname.lower()  # name of function entry
    optname = optname.lower()  # name of option field
    if optname == "defaults":
        optim_options.pop(funcname, None)  # clears all stored options
    else:
        # Fixed for consistency with optget, which reads
        # optim_options[funcname][optname]; previously values were stored
        # under the flat key funcname + "_options" and never found again.
        optim_options.setdefault(funcname, {})[optname] = optvalue
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,962 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/first_best.py | """ Compute first best solution.
This module computes the first best solution.
"""
#####################################################
# IMPORTS
#####################################################
import json
import sys
import warnings
import numba as nb
import numpy as np
from scipy import interpolate
from bld.project_paths import project_paths_join as ppj
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def conditional_mean(array, condition, axis):
if axis == 0:
signature = "i...,i... -> ..."
elif axis == 1:
signature = "ij..., ij... -> i..."
elif axis == 2:
signature = "ijk..., ijk... -> ij..."
else:
signature = None
print("axis parameter unknown; select on of [0, 1, 2]")
return np.einsum(signature, array, condition) / condition.sum(axis)
def _get_pv_utils(
pv_utils_employed_next,
pv_utils_searching_next,
pv_utils_searching_loss_next,
wage_hc_factor_vector,
wage_loss_reference_vector,
period_idx,
):
pv_utils_employed_now = (
wage_level
* np.repeat(wage_hc_factor_vector[:, :-1], consumption_grid_size).reshape(
(n_types, hc_grid_size - 1, consumption_grid_size)
)
* np.tile(
marginal_consumption_utility(consumption_grid),
(hc_grid_size - 1) * n_types,
).reshape((n_types, hc_grid_size - 1, consumption_grid_size))
+ discount_factor
* np.einsum(
"i, ijk -> ijk",
(1 - separation_rate_vector[:, period_idx]),
pv_utils_employed_next[:, 1:, :],
)
+ discount_factor
* np.einsum(
"i, ijk -> ijk",
separation_rate_vector[:, period_idx],
pv_utils_searching_next[:, 1:, :],
)
)
pv_utils_unemployed_now = discount_factor * np.einsum(
"i, ijk -> ijk", (1 - hc_loss_probability), pv_utils_searching_next[:, :-1, :]
) + discount_factor * np.einsum(
"i, ijk -> ijk", hc_loss_probability, pv_utils_searching_loss_next[:, :-1, :]
)
pv_utils_unemployed_loss_now = (
discount_factor * pv_utils_searching_loss_next[:, :-1, :]
)
if period_idx > 0:
pv_utils_employed_loss_now = _interpolate_hc_loss(
pv_utils_employed_now,
wage_loss_reference_vector,
period_idx,
)
else:
pv_utils_employed_loss_now = pv_utils_employed_now
# original code
# (
# policy_effort_searching_now,
# policy_effort_searching_loss_now,
# pv_utils_searching_now,
# pv_utils_searching_loss_now
# ) = _get_search_effort_policy(
# pv_utils_employed_now,
# pv_utils_employed_loss_now,
# pv_utils_unemployed_now,
# pv_utils_unemployed_loss_now,
# )
# numba
(policy_effort_searching_now, pv_utils_searching_now) = _solve_searching_iter(
pv_utils_employed_now,
pv_utils_unemployed_now,
)
(
policy_effort_searching_loss_now,
pv_utils_searching_loss_now,
) = _solve_searching_iter(pv_utils_employed_loss_now, pv_utils_unemployed_loss_now)
# FOC
# (policy_effort_searching_now, pv_utils_searching_now) = _solve_searching_foc(
# pv_utils_employed_now, pv_utils_unemployed_now,
# )
# (policy_effort_searching_loss_now, pv_utils_searching_loss_now) = _solve_searching_foc(
# pv_utils_employed_loss_now, pv_utils_unemployed_loss_now
# )
return (
pv_utils_employed_now,
pv_utils_searching_now,
pv_utils_searching_loss_now,
policy_effort_searching_now,
policy_effort_searching_loss_now,
)
def _get_search_effort_policy(
pv_utils_employed_now,
pv_utils_employed_loss_now,
pv_utils_unemployed_now,
pv_utils_unemployed_loss_now,
):
pv_utils_searching_now = np.full(
(n_types, hc_grid_size - 1, consumption_grid_size), np.nan
)
pv_utils_searching_loss_now = np.full(
(n_types, hc_grid_size - 1, consumption_grid_size), np.nan
)
policy_effort_searching_now = np.full(
(n_types, hc_grid_size - 1, consumption_grid_size), np.nan
)
policy_effort_searching_loss_now = np.full(
(n_types, hc_grid_size - 1, consumption_grid_size), np.nan
)
for type_idx in range(n_types):
for consumption_level in range(consumption_grid_size):
# value function of searching
returns_searching = (
np.tile(leisure_utility_on_search_grid, hc_grid_size - 1).reshape(
hc_grid_size - 1, search_effort_grid_size
)
+ np.outer(
pv_utils_employed_now[type_idx, :, consumption_level],
job_finding_probability_grid,
)
+ np.outer(
pv_utils_unemployed_now[type_idx, :, consumption_level],
(1 - job_finding_probability_grid),
)
)
search_effort_idx = returns_searching.argmax(axis=1)
pv_utils_searching_now[type_idx, :, consumption_level] = np.array(
[
returns_searching[row, col]
for row, col in enumerate(search_effort_idx)
]
)
policy_effort_searching_now[
type_idx, :, consumption_level
] = search_effort_grid[search_effort_idx].T
# value function of searching low HK unemployed
returns_searching_loss = (
np.tile(leisure_utility_on_search_grid, hc_grid_size - 1).reshape(
hc_grid_size - 1, search_effort_grid_size
)
+ np.outer(
pv_utils_employed_loss_now[type_idx, :, consumption_level],
job_finding_probability_grid,
)
+ np.outer(
pv_utils_unemployed_loss_now[type_idx, :, consumption_level],
(1 - job_finding_probability_grid),
)
)
search_effort_idx = returns_searching_loss.argmax(axis=1)
pv_utils_searching_loss_now[type_idx, :, consumption_level] = np.array(
[
returns_searching_loss[row, col]
for row, col in enumerate(search_effort_idx)
]
)
policy_effort_searching_loss_now[
type_idx, :, consumption_level
] = search_effort_grid[search_effort_idx].T
return (
policy_effort_searching_now,
policy_effort_searching_loss_now,
pv_utils_searching_now,
pv_utils_searching_loss_now,
)
def _get_pv_income(
pv_income_employed_next,
pv_income_searching_next,
pv_income_searching_loss_next,
policy_effort_searching_now,
policy_effort_searching_loss_now,
wage_hc_factor_vector,
wage_loss_reference_vector,
period_idx,
):
pv_income_employed_now = (
wage_level
* np.repeat(wage_hc_factor_vector[:, :-1], consumption_grid_size).reshape(
(n_types, hc_grid_size - 1, consumption_grid_size)
)
+ discount_factor
* np.einsum(
"i, ijk -> ijk",
(1 - separation_rate_vector[:, period_idx]),
pv_income_employed_next[:, 1:, :],
)
+ discount_factor
* np.einsum(
"i, ijk -> ijk",
separation_rate_vector[:, period_idx],
pv_income_searching_next[:, 1:, :],
)
)
pv_income_unemployed_now = discount_factor * np.einsum(
"i, ijk -> ijk", (1 - hc_loss_probability), pv_income_searching_next[:, :-1, :]
) + discount_factor * np.einsum(
"i, ijk -> ijk", hc_loss_probability, pv_income_searching_loss_next[:, :-1, :]
)
pv_income_unemployed_loss_now = (
discount_factor * pv_income_searching_loss_next[:, :-1, :]
)
if period_idx > 0:
pv_income_employed_loss_now = _interpolate_hc_loss(
pv_income_employed_now,
wage_loss_reference_vector,
period_idx,
)
else:
pv_income_employed_loss_now = pv_income_employed_now
pv_income_searching_now = (
policy_effort_searching_now * pv_income_employed_now
+ (1 - policy_effort_searching_now) * pv_income_unemployed_now
)
pv_income_searching_loss_now = (
policy_effort_searching_loss_now * pv_income_employed_loss_now
+ (1 - policy_effort_searching_loss_now) * pv_income_unemployed_loss_now
)
return (
pv_income_employed_now,
pv_income_searching_now,
pv_income_searching_loss_now,
)
def _get_pv_search_cost(
pv_search_cost_employed_next,
pv_search_cost_searching_next,
pv_search_cost_searching_loss_next,
policy_effort_searching_now,
policy_effort_searching_loss_now,
wage_loss_reference_vector,
period_idx,
):
pv_search_cost_employed_now = discount_factor * np.einsum(
"i, ijk -> ijk",
(1 - separation_rate_vector[:, period_idx]),
pv_search_cost_employed_next[:, 1:, :],
) + discount_factor * np.einsum(
"i, ijk -> ijk",
separation_rate_vector[:, period_idx],
pv_search_cost_searching_next[:, 1:, :],
)
pv_search_cost_unemployed_now = discount_factor * np.einsum(
"i, ijk -> ijk",
(1 - hc_loss_probability),
pv_search_cost_searching_next[:, :-1, :],
) + discount_factor * np.einsum(
"i, ijk -> ijk",
hc_loss_probability,
pv_search_cost_searching_loss_next[:, :-1, :],
)
pv_search_cost_unemployed_loss_now = (
discount_factor * pv_search_cost_searching_loss_next[:, :-1, :]
)
if period_idx > 0:
pv_search_cost_employed_loss_now = _interpolate_hc_loss(
pv_search_cost_employed_now,
wage_loss_reference_vector,
period_idx,
)
else:
pv_search_cost_employed_loss_now = pv_search_cost_employed_now
pv_search_cost_searching_now = (
leisure_utility_interpolated(policy_effort_searching_now)
+ policy_effort_searching_now * pv_search_cost_employed_now
+ (1 - policy_effort_searching_now) * pv_search_cost_unemployed_now
)
pv_search_cost_searching_loss_now = (
leisure_utility_interpolated(policy_effort_searching_loss_now)
+ policy_effort_searching_loss_now * pv_search_cost_employed_loss_now
+ (1 - policy_effort_searching_loss_now) * pv_search_cost_unemployed_loss_now
)
return (
pv_search_cost_employed_now,
pv_search_cost_searching_now,
pv_search_cost_searching_loss_now,
)
def _interpolate_hc_loss(
array_in,
wage_loss_reference_vector,
period_idx,
):
# initiate output array
array_out = np.full(array_in.shape, np.nan)
# get human capital after depreciation
hc_after_loss = _hc_after_loss_n_agents(
np.tile(hc_grid[: period_idx + 1], array_in.shape[0]).reshape(
array_in.shape[0], -1
),
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx,
)
# interpolate output array
for type_idx in range(n_types):
for consumption_idx in range(consumption_grid_size):
array_out[
type_idx, : period_idx + 1, consumption_idx
] = interpolate.interp1d(
hc_grid[: period_idx + 1],
array_in[type_idx, : period_idx + 1, consumption_idx],
kind=interpolation_method,
)(
hc_after_loss[type_idx, :]
)
return array_out
@nb.njit
def _solve_searching_iter(
value_employed_now,
value_unemployed_now,
):
# initiate objects
policy = np.full((n_types, hc_grid_size - 1, consumption_grid_size), np.nan)
value = np.full((n_types, hc_grid_size - 1, consumption_grid_size), np.nan)
# solve for optimal search effort using grid search method
for type_idx in range(n_types):
for experience_level in range(hc_grid_size - 1):
for consumptions_level in range(consumption_grid_size):
search_returns = (
leisure_utility_on_search_grid
+ job_finding_probability_grid
* np.full(
search_effort_grid_size,
value_employed_now[
type_idx, experience_level, consumptions_level
],
)
+ (1 - job_finding_probability_grid)
* np.full(
search_effort_grid_size,
value_unemployed_now[
type_idx, experience_level, consumptions_level
],
)
)
search_effort_idx = search_returns.argmax()
value[type_idx, experience_level, consumptions_level] = search_returns[
search_effort_idx
]
policy[
type_idx, experience_level, consumptions_level
] = search_effort_grid[search_effort_idx]
return policy, value
@nb.njit
def on_grid(x):
return search_effort_grid[np.abs(x - search_effort_grid).argmin()]
def on_grid_vectorized(x):
return np.vectorize(on_grid)(x)
@nb.njit
def on_grid_iter(array_in):
dims = array_in.shape
array_out = np.full(array_in.shape, np.nan)
for x_idx in range(dims[0]):
for y_idx in range(dims[1]):
for z_idx in range(dims[2]):
array_out[x_idx, y_idx, z_idx] = search_effort_grid[
np.abs(array_in[x_idx, y_idx, z_idx] - search_effort_grid).argmin()
]
return array_out
def _solve_searching_foc(value_employed, value_unemployed):
# optimal effort from FOC / constraints
effort_off_grid = leisure_utility_dx_inverted(value_unemployed - value_employed)
effort_off_grid[
(value_unemployed - value_employed) > leisure_utility_dx_min
] = search_effort_grid[0]
effort_off_grid[
(value_unemployed - value_employed) < leisure_utility_dx_max
] = search_effort_grid[-1]
# get nearest values on grid
policy = on_grid_iter(effort_off_grid)
value = (
leisure_utility_interpolated(policy)
+ job_finding_probability(policy) * value_employed
+ (1 - job_finding_probability(policy)) * value_unemployed
)
return policy.astype(float), value.astype(float)
@nb.njit
def consumption_utility(x):
if risk_aversion_coefficient == 1:
return np.log(x)
else:
return x ** (1 - risk_aversion_coefficient) / (1 - risk_aversion_coefficient)
def _hc_after_loss_n_agents(
hc_before_loss,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx,
):
hc_after_loss = np.full(hc_before_loss.shape, np.nan)
for type_idx in range(n_types):
hc_after_loss[type_idx, ...] = _hc_after_loss_1_agent(
hc_before_loss[type_idx, ...],
wage_loss_factor_vector[type_idx, :],
wage_loss_reference_vector[type_idx, :],
period_idx,
)
return hc_after_loss
def _hc_after_loss_1_agent(
hc_before_loss,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx,
):
func = interpolate.interp1d(
wage_loss_reference_vector,
hc_grid,
kind="linear",
bounds_error=False,
fill_value=0.0,
)
val = np.maximum(
wage_hc_factor_interpolated_1_agent(
np.minimum(hc_before_loss, hc_max), wage_loss_reference_vector
)
* wage_loss_factor_vector[period_idx],
wage_hc_factor_interpolated_1_agent(0, wage_loss_reference_vector),
)
return func(val)
def wage_hc_factor_interpolated_1_agent(x, wage_hc_factor_vector):
return interpolate.interp1d(
hc_grid,
wage_hc_factor_vector,
kind="linear",
bounds_error=False,
fill_value="extrapolate",
)(x)
@nb.njit
def job_finding_probability(x):
return contact_rate * x
def leisure_utility_dx_interpolated(x):
return interpolate.interp1d(
leisure_grid, leisure_utility_dx, kind=interpolation_method
)(x)
def leisure_utility_dxdx_interpolated(x):
return interpolate.interp1d(
leisure_grid, leisure_utility_dxdx, kind=interpolation_method
)(x)
def leisure_utility_interpolated(x):
return interpolate.interp1d(
leisure_grid, leisure_utility, kind=interpolation_method
)(x)
def leisure_utility_dx_inverted(x):
return interpolate.interp1d(
leisure_utility_dx,
leisure_grid,
kind=interpolation_method,
bounds_error=False,
fill_value=np.nan,
)(x)
@nb.njit
def marginal_consumption_utility(x):
return x ** (-risk_aversion_coefficient)
def wage_hc_factor_interpolated(hc_in, wage_hc_factor_vector):
wage_hc_factor_out = np.full(hc_in.shape, np.nan)
# iterate over types and interpolate wage_hc_factor_vector
for idx in range(hc_in.shape[0]):
wage_hc_factor_out[idx, :] = interpolate.interp1d(
hc_grid,
wage_hc_factor_vector[idx, :],
kind="linear",
bounds_error=False,
fill_value="extrapolate",
)(hc_in[idx, ...])
return wage_hc_factor_out
def effort_searching_interpolated(
hc_in, period_idx, policy_effort_searching, consumption_opt_idx
):
effort_out = np.full(hc_in.shape, np.nan)
for type_idx in range(hc_in.shape[0]):
interpolator_lower = interpolate.interp1d(
hc_grid[:-1],
policy_effort_searching[
type_idx, :-1, int(consumption_opt_idx - 1), period_idx
],
kind=interpolation_method,
)
interpolator_upper = interpolate.interp1d(
hc_grid[:-1],
policy_effort_searching[
type_idx, :-1, int(consumption_opt_idx), period_idx
],
kind=interpolation_method,
)
effort_out[type_idx, :] = (1 - interpolation_weight) * interpolator_lower(
hc_in[type_idx, :]
) + interpolation_weight * interpolator_upper(hc_in[type_idx, :])
return effort_out
def _solve_first_best(calibration, controls):
global contact_rate
global consumption_grid
global consumption_grid_size
global discount_factor
global hc_grid
global hc_grid_size
global hc_max
global hc_loss_probability
global interpolation_method
global interpolation_weight
global job_finding_probability_grid
global leisure_grid
global leisure_utility
global leisure_utility_dx
global leisure_utility_dxdx
global leisure_utility_dx_max
global leisure_utility_dx_min
global leisure_utility_on_search_grid
global n_types
global risk_aversion_coefficient
global search_effort_grid
global search_effort_grid_size
global separation_rate_vector
global wage_hc_factor_vector_average
global wage_level
global wage_loss_factor_vector
# load controls
interpolation_method = controls["interpolation_method"]
n_simulations = controls["n_simulations"]
seed_simulations = controls["seed_simulation"]
show_summary = controls["show_summary"]
# load calibration
contact_rate = calibration["contact_rate"]
discount_factor = calibration["discount_factor"]
hc_loss_probability = np.array(calibration["hc_loss_probability"])
leisure_utility = np.array(calibration["leisure_utility"])
leisure_utility_dx = np.array(calibration["leisure_utility_dx"])
leisure_utility_dxdx = np.array(calibration["leisure_utility_dxdx"])
n_periods_retired = calibration["n_periods_retired"]
n_periods_working = calibration["n_periods_working"]
n_types = calibration["n_types"]
risk_aversion_coefficient = calibration["risk_aversion_coefficient"]
search_effort_grid_size = calibration["search_effort_grid_size"]
search_effort_max = calibration["search_effort_max"]
search_effort_min = calibration["search_effort_min"]
separation_rate_vector = np.array(calibration["separation_rate_vector"])
type_weights = np.array(calibration["type_weights"])
wage_hc_factor_vector = np.array(calibration["wage_hc_factor_vector"])
wage_level = calibration["wage_level"]
wage_loss_factor_vector = np.array(calibration["wage_loss_factor_vector"])
wage_loss_reference_vector = np.array(calibration["wage_loss_reference_vector"])
consumption_grid_size = 51
# calculate derived parameters
hc_grid = np.arange(n_periods_working + 1)
hc_grid_size = len(hc_grid)
hc_max = np.amax(hc_grid)
leisure_grid = np.linspace(
search_effort_min, search_effort_max, len(leisure_utility)
)
leisure_utility_dx_max = leisure_utility_dx_interpolated(search_effort_max)
leisure_utility_dx_min = leisure_utility_dx_interpolated(search_effort_min)
search_effort_grid = np.linspace(
search_effort_min, search_effort_max, search_effort_grid_size
)
job_finding_probability_grid = job_finding_probability(search_effort_grid)
leisure_utility_on_search_grid = leisure_utility_interpolated(search_effort_grid)
wage_hc_factor_vector_average = np.average(
wage_hc_factor_vector, weights=type_weights, axis=0
)
dif0h = np.linspace(0.01, 1, int((consumption_grid_size - 1) / 2)) ** 1.8
dif0l = -np.linspace(1, 0.01, int((consumption_grid_size - 1) / 2)) ** 1.8
consumption_grid = np.concatenate(
(
-dif0l * 0.7 + (1 + dif0l) * 1.371663,
np.array([1.371663]),
(1 - dif0h) * 1.371663 + dif0h * 1.9,
)
) # this vector is more detailed around 1.371663
# (I) solution
# initiate objects to store value functions and policy functions
pv_utils_employed = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_utils_searching = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_utils_searching_loss = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
policy_effort_searching = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
policy_effort_searching_loss = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_income_employed = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_income_searching = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_income_searching_loss = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_search_cost_employed = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_search_cost_searching = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
pv_search_cost_searching_loss = np.full(
(n_types, hc_grid_size, consumption_grid_size, n_periods_working + 1),
np.nan,
)
# initiate first period of retirement
pv_utils_employed[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_utils_searching[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_utils_searching_loss[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_income_employed[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_income_searching[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_income_searching_loss[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_search_cost_employed[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_search_cost_searching[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
pv_search_cost_searching_loss[:, :, :, -1] = np.zeros(
(n_types, hc_grid_size, consumption_grid_size)
)
# iterate over periods
period = n_periods_working
period_idx = period - 1
while period > 0:
# load policy and pv functions for the next period
pv_utils_employed_next = pv_utils_employed[:, :, :, period_idx + 1]
pv_utils_searching_next = pv_utils_searching[:, :, :, period_idx + 1]
pv_utils_searching_loss_next = pv_utils_searching_loss[:, :, :, period_idx + 1]
pv_income_employed_next = pv_income_employed[:, :, :, period_idx + 1]
pv_income_searching_next = pv_income_searching[:, :, :, period_idx + 1]
pv_income_searching_loss_next = pv_income_searching_loss[
:, :, :, period_idx + 1
]
pv_search_cost_employed_next = pv_search_cost_employed[:, :, :, period_idx + 1]
pv_search_cost_searching_next = pv_search_cost_searching[
:, :, :, period_idx + 1
]
pv_search_cost_searching_loss_next = pv_search_cost_searching_loss[
:, :, :, period_idx + 1
]
# present value functions of income in utils
(
pv_utils_employed_now,
pv_utils_searching_now,
pv_utils_searching_loss_now,
policy_effort_searching_now,
policy_effort_searching_loss_now,
) = _get_pv_utils(
pv_utils_employed_next,
pv_utils_searching_next,
pv_utils_searching_loss_next,
wage_hc_factor_vector,
wage_loss_reference_vector,
period_idx,
)
# present value of income in consumption units functions
(
pv_income_employed_now,
pv_income_searching_now,
pv_income_searching_loss_now,
) = _get_pv_income(
pv_income_employed_next,
pv_income_searching_next,
pv_income_searching_loss_next,
policy_effort_searching_now,
policy_effort_searching_loss_now,
wage_hc_factor_vector,
wage_loss_reference_vector,
period_idx,
)
# present value of search effort cost in utils functions
(
pv_search_cost_employed_now,
pv_search_cost_searching_now,
pv_search_cost_searching_loss_now,
) = _get_pv_search_cost(
pv_search_cost_employed_next,
pv_search_cost_searching_next,
pv_search_cost_searching_loss_next,
policy_effort_searching_now,
policy_effort_searching_loss_now,
wage_loss_reference_vector,
period_idx,
)
# store results
pv_utils_employed[:, :-1, :, period_idx] = pv_utils_employed_now
pv_utils_searching[:, :-1, :, period_idx] = pv_utils_searching_now
pv_utils_searching_loss[:, :-1, :, period_idx] = pv_utils_searching_loss_now
policy_effort_searching[:, :-1, :, period_idx] = policy_effort_searching_now
policy_effort_searching_loss[
:, :-1, :, period_idx
] = policy_effort_searching_loss_now
pv_income_employed[:, :-1, :, period_idx] = pv_income_employed_now
pv_income_searching[:, :-1, :, period_idx] = pv_income_searching_now
pv_income_searching_loss[:, :-1, :, period_idx] = pv_income_searching_loss_now
pv_search_cost_employed[:, :-1, :, period_idx] = pv_search_cost_employed_now
pv_search_cost_searching[:, :-1, :, period_idx] = pv_search_cost_searching_now
pv_search_cost_searching_loss[
:, :-1, :, period_idx
] = pv_search_cost_searching_loss_now
# initiate next iteration
period -= 1
period_idx = period - 1
# average over types
# pv_utils_employed_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_utils_employed
# )
# pv_utils_searching_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_utils_searching
# )
# pv_utils_searching_loss_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_utils_searching_loss
# )
#
# pv_income_employed_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_income_employed
# )
pv_income_searching_aggregated = np.einsum(
"i, ijkl -> jkl", type_weights, pv_income_searching
)
# pv_income_searching_loss_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_income_searching_loss
# )
#
# pv_search_cost_employed_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_search_cost_employed
# )
pv_search_cost_searching_aggregated = np.einsum(
"i, ijkl -> jkl", type_weights, pv_search_cost_searching
)
# pv_search_cost_searching_loss_aggregated = np.einsum(
# "i, ijkl -> jkl", type_weights, pv_search_cost_searching_loss
# )
# compute some outcomes
# Finding the consumption that makes income = consumption
consumption_opt_first_best = interpolate.interp1d(
pv_income_searching_aggregated[0, :, 0]
- consumption_grid
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor),
consumption_grid,
kind=interpolation_method,
)(0)
consumption_opt_idx = interpolate.interp1d(
pv_income_searching_aggregated[0, 0, 0]
- consumption_grid
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor),
np.linspace(1, consumption_grid_size, consumption_grid_size),
kind=interpolation_method,
)(0)
consumption_opt_idx = np.ceil(consumption_opt_idx)
pv_consumption_computed = (
consumption_opt_first_best
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor)
)
pv_income_computed = interpolate.interp1d(
pv_income_searching_aggregated[0, :, 0]
- consumption_grid
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor),
pv_income_searching_aggregated[0, :, 0],
kind=interpolation_method,
)(0)
average_pv_utility_computed = consumption_utility(consumption_opt_first_best) * (
1 - discount_factor ** (n_periods_working + n_periods_retired)
) / (1 - discount_factor) + interpolate.interp1d(
consumption_grid,
pv_search_cost_searching_aggregated[0, :, 0],
kind=interpolation_method,
)(
consumption_opt_first_best
)
pv_utility_computed = np.repeat(average_pv_utility_computed, n_types).reshape(
(n_types,)
)
if show_summary:
summary_solve = np.array(
(
("optimal consumption level", np.round(consumption_opt_first_best, 5)),
("PV consumption (computed)", np.round(pv_consumption_computed, 5)),
("PV income (computed)", np.round(pv_income_computed, 5)),
("PV utility (computed)", np.round(average_pv_utility_computed, 5)),
)
)
print(summary_solve)
# (II) simulation
# initiate simulation study
np.random.seed(seed_simulations)
interpolation_weight = (
consumption_grid[int(consumption_opt_idx - 1)] - consumption_opt_first_best
) / (
consumption_grid[int(consumption_opt_idx - 1)]
- consumption_grid[int(consumption_opt_idx)]
)
# status tracker
employed_simulated = np.full((n_types, n_simulations), 0.0).astype(bool)
searching_simulated = np.full((n_types, n_simulations), 1.0).astype(bool)
searching_loss_simulated = np.full((n_types, n_simulations), 0.0).astype(bool)
# human capital tracker
hc_simulated = np.full((n_types, n_simulations), 0.0)
hc_pre_displacement_simulated = np.full((n_types, n_simulations), 0.0)
# tracker for present value of income and search cost
pv_income_simulated = np.full((n_types, n_simulations), 0.0)
pv_search_cost_simulated = np.full((n_types, n_simulations), 0.0)
# summary statistics
effort_searching_all_mean = np.full((n_types, n_periods_working), np.nan)
effort_searching_mean = np.full((n_types, n_periods_working), np.nan)
effort_searching_loss_mean = np.full((n_types, n_periods_working), np.nan)
share_unemployed_mean = np.full((n_types, n_periods_working), np.nan)
share_unemployed_loss_mean = np.full((n_types, n_periods_working), np.nan)
wage_employed_mean = np.full((n_types, n_periods_working), np.nan)
wage_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
wage_unemployed_loss_mean = np.full((n_types, n_periods_working), np.nan)
wage_pre_displacement_nonemployed_mean = np.full(
(n_types, n_periods_working), np.nan
)
# iterate forward
period = 0
while period < n_periods_working:
period += 1
period_idx = period - 1
# (i) search phase
# simulate search effort
effort_searching_simulated = effort_searching_interpolated(
hc_simulated,
period_idx,
policy_effort_searching,
consumption_opt_idx,
)
effort_searching_loss_simulated = effort_searching_interpolated(
hc_simulated,
period_idx,
policy_effort_searching_loss,
consumption_opt_idx,
)
job_finding_probability_searching_simulated = job_finding_probability(
effort_searching_simulated
)
job_finding_probability_searching_loss_simulated = job_finding_probability(
effort_searching_loss_simulated
)
# compute search phase statistics
effort_searching_mean[:, period_idx] = conditional_mean(
effort_searching_simulated, searching_simulated, axis=1
)
effort_searching_loss_mean[:, period_idx] = conditional_mean(
effort_searching_loss_simulated, searching_loss_simulated, axis=1
)
effort_searching_all_mean[:, period_idx] = np.average(
np.array(
[
effort_searching_mean[:, period_idx],
effort_searching_loss_mean[:, period_idx],
]
),
weights=np.array(
[
np.sum(searching_simulated, axis=1),
np.sum(searching_loss_simulated, axis=1),
]
),
axis=0,
)
# update present value of simulated search cost (in utils)
pv_search_cost_simulated += discount_factor ** period_idx * (
searching_simulated
* leisure_utility_interpolated(
effort_searching_interpolated(
hc_simulated,
period_idx,
policy_effort_searching,
consumption_opt_idx,
)
)
+ searching_loss_simulated
* leisure_utility_interpolated(
effort_searching_interpolated(
hc_simulated,
period_idx,
policy_effort_searching_loss,
consumption_opt_idx,
)
)
)
# generate transition events
job_finding_event_searching_simulated = (
job_finding_probability_searching_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_finding_event_searching_loss_simulated = (
job_finding_probability_searching_loss_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# simulate hc transition to consumption phase
hc_simulated = (
hc_simulated
- (
hc_simulated
- _hc_after_loss_n_agents(
hc_simulated,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx,
)
)
* searching_loss_simulated
* job_finding_event_searching_loss_simulated
) # hc loss materializes upon reemployment
# simulate state transition to consumption phase
employed_simulated = (
employed_simulated
+ searching_simulated * job_finding_event_searching_simulated
+ searching_loss_simulated * job_finding_event_searching_loss_simulated
)
unemployed_simulated = searching_simulated * (
1 - job_finding_event_searching_simulated
)
unemployed_loss_simulated = searching_loss_simulated * (
1 - job_finding_event_searching_loss_simulated
)
nonemployed_simulated = unemployed_simulated + unemployed_loss_simulated
# check for error in state simulation
if (
np.sum(
unemployed_simulated + unemployed_loss_simulated + employed_simulated
)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from search phase "
f"to consumption phase in period {period_idx}"
)
# (ii) consumption phase
# compute statistics for consumption phase
share_unemployed_mean[:, period_idx] = np.mean(unemployed_simulated, axis=1)
share_unemployed_loss_mean[:, period_idx] = np.mean(
unemployed_loss_simulated, axis=1
)
wage_simulated = wage_level * wage_hc_factor_interpolated(
hc_simulated, wage_hc_factor_vector
)
wage_pre_displacement_simulated = wage_level * wage_hc_factor_interpolated(
hc_pre_displacement_simulated, wage_hc_factor_vector
)
wage_employed_mean[:, period_idx] = conditional_mean(
wage_simulated, employed_simulated, axis=1
)
wage_nonemployed_mean[:, period_idx] = conditional_mean(
wage_simulated, nonemployed_simulated, axis=1
)
wage_unemployed_loss_mean[:, period_idx] = conditional_mean(
wage_simulated, unemployed_loss_simulated, axis=1
)
wage_pre_displacement_nonemployed_mean[:, period_idx] = conditional_mean(
wage_pre_displacement_simulated, nonemployed_simulated, axis=1
)
# update present value of simulated income (in consumption units)
pv_income_simulated = (
pv_income_simulated
+ discount_factor ** period_idx
* wage_hc_factor_interpolated(
hc_simulated, wage_level * wage_hc_factor_vector
)
* employed_simulated
)
# generate transition events
hc_loss_event_simulated = (
np.repeat(hc_loss_probability, n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_loss_event_simulated = (
np.repeat(separation_rate_vector[:, period_idx], n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# simulate hc transition to next period
hc_simulated = (
hc_simulated + np.full((n_types, n_simulations), 1.0) * employed_simulated
)
hc_pre_displacement_simulated = (
hc_pre_displacement_simulated * nonemployed_simulated
+ hc_simulated * employed_simulated
)
# simulate state transition to search phase in next period
searching_loss_simulated = (
unemployed_loss_simulated + unemployed_simulated * hc_loss_event_simulated
).astype(bool)
searching_simulated = (
unemployed_simulated * (1 - hc_loss_event_simulated)
+ employed_simulated * job_loss_event_simulated
).astype(bool)
employed_simulated = (
employed_simulated * (1 - job_loss_event_simulated)
).astype(bool)
# check for error in state simulation
if (
np.sum(searching_simulated + searching_loss_simulated + employed_simulated)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from consumption phase "
f"in period {period_idx} to search phase in {period_idx + 1}"
)
# average over types
share_unemployed_aggregated = np.average(
share_unemployed_mean, weights=type_weights, axis=0
)
share_unemployed_loss_aggregated = np.average(
share_unemployed_loss_mean, weights=type_weights, axis=0
)
effort_searching_aggregated = np.average(
effort_searching_mean, weights=type_weights, axis=0
)
effort_searching_loss_aggregated = np.average(
effort_searching_loss_mean, weights=type_weights, axis=0
)
effort_searching_all_aggregated = np.average(
effort_searching_all_mean, weights=type_weights, axis=0
)
# compute some outcomes
# consumption (as computed)
pv_consumption_simulated = np.repeat(
consumption_opt_first_best
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor),
n_types,
)
average_pv_consumption_simulated = np.average(
pv_consumption_simulated, weights=type_weights, axis=0
)
# income
pv_income_mean = np.mean(pv_income_simulated, axis=1)
average_pv_income_simulated = np.average(
pv_income_mean, weights=type_weights, axis=0
)
# search cost
pv_search_cost_mean = np.mean(pv_search_cost_simulated, axis=1)
average_pv_search_cost_simulated = np.average(
pv_search_cost_mean, weights=type_weights, axis=0
)
# utility
pv_utility_simulated = (
np.repeat(
consumption_utility(consumption_opt_first_best)
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor),
n_types,
)
+ pv_search_cost_mean
)
average_pv_utility_simulated = (
consumption_utility(consumption_opt_first_best)
* (1 - discount_factor ** (n_periods_working + n_periods_retired))
/ (1 - discount_factor)
+ average_pv_search_cost_simulated
)
if show_summary:
summary_simulate = np.array(
(
("number of simulations", n_simulations),
(
"PV consumption (simulated)",
np.round(average_pv_consumption_simulated, 5),
),
("PV income (simulated)", np.round(average_pv_income_simulated, 5)),
("PV utility (simulated)", np.round(average_pv_utility_simulated, 5)),
)
)
print(summary_simulate)
# implied optimal policies
ui_replacement_rate_vector_first_best = (
consumption_opt_first_best / wage_pre_displacement_nonemployed_mean
)
income_tax_rate_vector_first_best = (
1 - consumption_opt_first_best / wage_employed_mean
)
# store some results
out = {
"consumption_grid": consumption_grid,
"consumption_opt_first_best": consumption_opt_first_best,
"consumption_opt_first_best_idx": consumption_opt_idx,
"effort_searching_all_aggregated": effort_searching_all_aggregated,
"effort_searching_aggregated": effort_searching_aggregated,
"effort_searching_loss_aggregated": effort_searching_loss_aggregated,
"effort_searching_all_mean": effort_searching_all_mean,
"effort_searching_mean": effort_searching_mean,
"effort_searching_loss_mean": effort_searching_loss_mean,
"wage_loss_factor_vector": wage_loss_factor_vector,
"income_tax_rate_vector_first_best": income_tax_rate_vector_first_best,
"interpolation_weight": interpolation_weight,
"n_simulations": n_simulations,
"share_unemployed": share_unemployed_mean,
"share_unemployed_loss": share_unemployed_loss_mean,
"share_unemployed_aggregated": share_unemployed_aggregated,
"share_unemployed_loss_aggregated": share_unemployed_loss_aggregated,
"share_unemployed_mean": share_unemployed_mean,
"share_unemployed_loss_mean": share_unemployed_loss_mean,
"ui_replacement_rate_vector_first_best": ui_replacement_rate_vector_first_best,
"wage_employed_mean": wage_employed_mean,
"wage_pre_displacement_nonemployed_mean": wage_pre_displacement_nonemployed_mean,
"wage_unemployed_loss_mean": wage_unemployed_loss_mean,
"wage_hc_factor_vector": wage_hc_factor_vector,
"wealth": pv_income_computed,
"wealth_simulated": pv_income_mean,
"welfare": pv_utility_computed,
"welfare_simulated": pv_utility_simulated,
}
for item in out:
try:
out[item] = out[item].tolist()
except AttributeError:
pass
return out
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # CLI: optional argv[1] selects the calibration setup, argv[2] the
    # interpolation method; fall back to defaults when either is absent.
    try:
        setup_name = sys.argv[1]
        method = sys.argv[2]
    except IndexError:
        setup_name = "base_combined"
        method = "linear"
    # Load the calibration for the chosen setup.
    # Fix: the original passed a bare open() to json.load and never closed
    # the handle; a context manager closes it deterministically.
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json")
    ) as calibration_file:
        calibration = json.load(calibration_file)
    # Controls consumed by the solver/simulation.
    controls = {
        "interpolation_method": method,
        "n_iterations_solve_max": 20,
        "n_simulations": int(1e6),
        "run_simulation": True,
        # NOTE(review): the solver uses a variable `seed_simulations`
        # (plural, see np.random.seed call) — confirm this key name matches
        # what the solver actually reads from `controls`.
        "seed_simulation": 3405,
        "show_summary": True,
    }
    results = _solve_first_best(calibration, controls)
    # Persist the results as pretty-printed JSON.
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_" + setup_name + "_first_best_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(results, outfile, ensure_ascii=False, indent=2)
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,963 | simonjheiler/ui_human_capital | refs/heads/main | /src/utilities/run_utils.py | import subprocess
import matlab.engine
from bld.project_paths import project_paths_join as ppj
#####################################################
# PARAMETERS
#####################################################
# Absolute paths to the external interpreters invoked by the run_* helpers
# below. NOTE(review): machine-specific Windows paths — these must be
# adjusted per environment; consider moving them to configuration.
python_path = r"C:\Programs\Anaconda3\envs\labor-markets\python.exe"
stata_path = r"C:\Program Files\stata14\StataSE-64"
r_path = r"C:\Programs\Anaconda3\envs\labor-markets\Scripts\RScript.exe"
#####################################################
# FUNCTIONS
#####################################################
def run_py_script(filename, params):
    """Run a Python script in a subprocess and return its exit code.

    filename -- path of the script to execute with the configured interpreter
    params -- iterable of extra command-line arguments appended to the call
    """
    command_line = [python_path, filename, *params]
    return subprocess.call(command_line)
def run_r_script(filename, params):
    """Run an R script via ``Rscript --vanilla`` and return its exit code.

    filename -- path of the R script to execute
    params -- iterable of extra command-line arguments appended to the call
    """
    command_line = [r_path, "--vanilla", filename]
    command_line.extend(params)
    return subprocess.call(command_line)
def run_stata_script(filename, params):
    """Run a Stata do-file in batch mode (``/e``) and return its exit code.

    filename -- path of the do-file to execute
    params -- iterable of extra command-line arguments appended to the call
    """
    return subprocess.call([stata_path, "/e", "do", filename, *params])
def run_matlab_script(filename, params):
    """Run a MATLAB script through the MATLAB Engine API for Python.

    filename -- path of the ``.m`` file to execute
    params -- accepted for signature parity with the other run_* helpers;
        not forwarded to MATLAB (the engine call takes no script arguments)

    Bug fix: the original returned ``eng.filename(nargout=0)``, which asks
    MATLAB to execute a function literally named "filename" rather than the
    script passed in. Use MATLAB's built-in ``run`` to execute the file by
    path instead, and shut the engine down in a ``finally`` block so the
    background MATLAB process does not linger after the call.
    """
    eng = matlab.engine.start_matlab()
    try:
        return eng.run(filename, nargout=0)
    finally:
        eng.quit()
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # Select the runner by toggling exactly one of these flags; priority
    # (Python > R > Stata > MATLAB) matches the original if/elif order.
    run_py = False
    run_r = False
    run_stata = False
    run_matlab = True
    # Candidate scripts kept for reference:
    # script = ppj("IN_DATA_MGMT", "format_data_cps_monthly.py")
    # script = ppj("PROJECT_ROOT", "src", "model_analysis", "probit_regression.r")
    # script = ppj("PROJECT_ROOT", "src", "model_analysis", "aggregate_regression_mr_old.do")
    script = ppj("PROJECT_ROOT", "src", "model_analysis", "analytics", "Main.m")
    parameters = []
    dispatch = (
        (run_py, run_py_script),
        (run_r, run_r_script),
        (run_stata, run_stata_script),
        (run_matlab, run_matlab_script),
    )
    for enabled, runner in dispatch:
        if enabled:
            runner(script, parameters)
            break
    else:
        # no flag set: nothing to run
        print("select program to run script")
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,964 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/elasticity_exact.py | import copy
import json
import src.utilities.istarmap_3_8 # noqa, noreorder
import multiprocessing
import sys
import numpy as np
import tqdm
from bld.project_paths import project_paths_join as ppj
from src.model_analysis.run_utils import _solve_run
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def elasticity_exact(controls, calibration):
# load controls
show_progress = controls["show_progress"]
n_parallel_jobs = controls["n_parallel_jobs"]
shock_size = controls["step_size_elasticity"]
n_simulations = controls["n_simulations"]
# load variables
n_periods_working = calibration["n_periods_working"]
n_periods_retired = calibration["n_periods_retired"]
n_types = calibration["n_types"]
type_weights = np.array(calibration["type_weights"])
ui_replacement_rate_vector = np.array(calibration["ui_replacement_rate_vector"])
# compute derived variables
n_years_working = int(n_periods_working / 4)
n_runs = (n_years_working + 1) * 2 # no shock + up/downward shock in every year
# initialize objects
job_finding_rate_searching_all = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_nonemployed = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_unemployed_loss = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_searching = np.full((n_types, n_periods_working, int(n_runs / 2), 2), np.nan)
wage_hc_factor_pre_displacement = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
total_benefits = np.full((n_types, int(n_runs / 2), 2), np.nan)
pv_government_spending = np.full((n_types, int(n_runs / 2), 2), np.nan)
net_government_spending_working = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
net_government_spending_all = np.full(
(n_types, n_periods_working + n_periods_retired, int(n_runs / 2), 2), np.nan
)
marginal_utility_nonemployed = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
# generate shocked input vectors
shock_vector = np.array([shock_size, -shock_size])
index_start = np.full(n_years_working, np.nan, dtype=int)
index_end = np.full(n_years_working, np.nan, dtype=int)
for year_idx in range(n_years_working):
period_idx_start = int(year_idx * 4)
period_idx_end = int(min(period_idx_start + 4, n_periods_working))
index_start[year_idx] = period_idx_start
index_end[year_idx] = period_idx_end
ui_replacement_rate_vector_all = np.repeat(
ui_replacement_rate_vector, n_runs
).reshape((n_types, n_periods_working, n_runs))
for year_idx in range(n_years_working):
for shock_idx, shock in enumerate(shock_vector):
ui_replacement_rate_vector_all[
:,
index_start[year_idx] : index_end[year_idx],
(
year_idx + 1 + (n_years_working + 1) * shock_idx
), # run without shock first
] += shock
# define program for parallel computation
inputs = []
for run_idx in range(n_runs):
inputs += [
(
{
"ui_replacement_rate_vector": ui_replacement_rate_vector_all[
:, :, run_idx
]
},
copy.deepcopy(controls),
copy.deepcopy(calibration),
)
]
# solve for all runs of the program (in parallel)
with multiprocessing.Pool(n_parallel_jobs) as pool:
if show_progress:
out = tuple(
tqdm.tqdm(
pool.istarmap(_solve_run, inputs),
total=n_runs,
desc="Elasticity",
ascii=True,
ncols=94,
)
)
else:
out = pool.starmap(_solve_run, inputs)
# extract results
for run_idx in range(int(n_runs / 2)):
for shock_idx in range(2):
tmp = out[run_idx + (n_years_working + 1) * shock_idx]
job_finding_rate_searching_all[:, :, run_idx, shock_idx] = np.array(
tmp["job_finding_rate_searching_all_mean"]
)
marginal_utility_nonemployed[:, :, run_idx, shock_idx] = np.array(
tmp["marginal_utility_nonemployed_mean"]
)
net_government_spending_working[:, :, run_idx, shock_idx] = np.array(
tmp["net_government_spending_working"]
)
net_government_spending_all[:, :, run_idx, shock_idx] = np.array(
tmp["net_government_spending_all"]
)
pv_government_spending[:, run_idx, shock_idx] = np.array(
tmp["pv_government_spending"]
)
share_nonemployed[:, :, run_idx, shock_idx] = np.array(
tmp["share_nonemployed"]
)
share_unemployed_loss[:, :, run_idx, shock_idx] = np.array(
tmp["share_unemployed_loss"]
)
share_searching[:, :, run_idx, shock_idx] = np.array(tmp["share_searching"])
total_benefits[:, run_idx, shock_idx] = np.array(tmp["total_benefits"])
wage_hc_factor_pre_displacement[:, :, run_idx, shock_idx] = np.array(
tmp["wage_hc_factor_pre_displacement_mean"]
)
# average over types
average_ui_replacement_rate_vector = np.average(
ui_replacement_rate_vector, weights=type_weights, axis=0
)
average_job_finding_rate_searching_all = np.average(
job_finding_rate_searching_all, weights=type_weights, axis=0
)
average_marginal_utility_nonemployed = np.average(
marginal_utility_nonemployed, weights=type_weights, axis=0
)
average_net_government_spending_working = np.average(
net_government_spending_working, weights=type_weights, axis=0
)
average_net_government_spending_all = np.average(
net_government_spending_all, weights=type_weights, axis=0
)
average_pv_government_spending = np.average(
pv_government_spending, weights=type_weights, axis=0
)
average_share_nonemployed = np.average(
share_nonemployed, weights=type_weights, axis=0
)
average_share_unemployed_loss = np.average(
share_unemployed_loss, weights=type_weights, axis=0
)
average_share_searching = np.average(share_searching, weights=type_weights, axis=0)
average_total_benefits = np.average(total_benefits, weights=type_weights, axis=0)
average_wage_hc_factor_pre_displacement = np.average(
wage_hc_factor_pre_displacement, weights=type_weights, axis=0
)
# calculate elasticities
average_share_nonemployed_base = average_share_nonemployed[:, 0, 0]
average_share_searching_base = average_share_searching[:, 0, 0]
average_wage_hc_factor_pre_displacement_base = (
average_wage_hc_factor_pre_displacement[:, 0, 0]
)
share_nonemployed_base = share_nonemployed[:, :, 0, 0]
share_searching_base = share_searching[:, :, 0, 0]
wage_hc_factor_pre_displacement_base = wage_hc_factor_pre_displacement[:, :, 0, 0]
# Computing means of variables by groups of age
average_ui_replacement_rate_yearly = np.full(n_years_working, np.nan)
average_job_finding_rate_yearly_base = np.full(n_years_working, np.nan)
average_share_nonemployed_yearly_base = np.full(n_years_working, np.nan)
average_share_unemployed_loss_yearly_base = np.full(n_years_working, np.nan)
average_share_searching_yearly_base = np.full(n_years_working, np.nan)
average_job_finding_rate_yearly_shocked = np.full((n_years_working, 2), np.nan)
average_share_nonemployed_yearly_shocked = np.full((n_years_working, 2), np.nan)
average_share_unemployed_loss_yearly_shocked = np.full((n_years_working, 2), np.nan)
average_share_searching_yearly_shocked = np.full((n_years_working, 2), np.nan)
ui_replacement_rate_yearly = np.full((n_types, n_years_working), np.nan)
job_finding_rate_yearly_base = np.full((n_types, n_years_working), np.nan)
share_nonemployed_yearly_base = np.full((n_types, n_years_working), np.nan)
share_unemployed_loss_yearly_base = np.full((n_types, n_years_working), np.nan)
share_searching_yearly_base = np.full((n_types, n_years_working), np.nan)
job_finding_rate_yearly_shocked = np.full((n_types, n_years_working, 2), np.nan)
share_nonemployed_yearly_shocked = np.full((n_types, n_years_working, 2), np.nan)
share_unemployed_loss_yearly_shocked = np.full(
(n_types, n_years_working, 2), np.nan
)
share_searching_yearly_shocked = np.full((n_types, n_years_working, 2), np.nan)
for i in range(n_years_working):
idx_start = index_start[i]
idx_end = index_end[i]
average_ui_replacement_rate_yearly[i] = np.mean(
average_ui_replacement_rate_vector[idx_start:idx_end]
)
average_job_finding_rate_yearly_base[i] = np.mean(
average_job_finding_rate_searching_all[idx_start:idx_end, 0, 0]
)
average_share_nonemployed_yearly_base[i] = np.mean(
average_share_nonemployed[idx_start:idx_end, 0, 0]
)
average_share_unemployed_loss_yearly_base[i] = np.mean(
average_share_unemployed_loss[idx_start:idx_end, 0, 0]
)
average_share_searching_yearly_base[i] = np.mean(
average_share_searching[idx_start:idx_end, 0, 0]
)
for z in range(2):
average_job_finding_rate_yearly_shocked[i, z] = np.mean(
average_job_finding_rate_searching_all[idx_start:idx_end, i + 1, z]
)
average_share_nonemployed_yearly_shocked[i, z] = np.mean(
average_share_nonemployed[idx_start:idx_end, i + 1, z]
)
average_share_unemployed_loss_yearly_shocked[i, z] = np.mean(
average_share_unemployed_loss[idx_start:idx_end, i + 1, z]
)
average_share_searching_yearly_shocked[i, z] = np.mean(
average_share_searching[idx_start:idx_end, i + 1, z]
)
ui_replacement_rate_yearly[:, i] = np.mean(
ui_replacement_rate_vector[:, idx_start:idx_end], axis=1
)
job_finding_rate_yearly_base[:, i] = np.mean(
job_finding_rate_searching_all[:, idx_start:idx_end, 0, 0], axis=1
)
share_nonemployed_yearly_base[:, i] = np.mean(
share_nonemployed[:, idx_start:idx_end, 0, 0], axis=1
)
share_unemployed_loss_yearly_base[:, i] = np.mean(
share_unemployed_loss[:, idx_start:idx_end, 0, 0], axis=1
)
share_searching_yearly_base[:, i] = np.mean(
share_searching[:, idx_start:idx_end, 0, 0], axis=1
)
for z in range(2):
job_finding_rate_yearly_shocked[:, i, z] = np.mean(
job_finding_rate_searching_all[:, idx_start:idx_end, i + 1, z], axis=1
)
share_nonemployed_yearly_shocked[:, i, z] = np.mean(
share_nonemployed[:, idx_start:idx_end, i + 1, z], axis=1
)
share_unemployed_loss_yearly_shocked[:, i, z] = np.mean(
share_unemployed_loss[:, idx_start:idx_end, i + 1, z], axis=1
)
share_searching_yearly_shocked[:, i, z] = np.mean(
share_searching[:, idx_start:idx_end, i + 1, z], axis=1
)
# Computing elasticities and cross elasticities
delta_average_unemployment = (
average_share_nonemployed[:, :, 0] - average_share_nonemployed[:, :, 1]
)
delta_average_job_finding_rate_yearly = (
average_job_finding_rate_yearly_shocked[:, 0]
- average_job_finding_rate_yearly_shocked[:, 1]
)
delta_average_share_nonemployed_yearly = (
average_share_nonemployed_yearly_shocked[:, 0]
- average_share_nonemployed_yearly_shocked[:, 1]
)
delta_average_share_unemployed_loss_yearly = (
average_share_unemployed_loss_yearly_shocked[:, 0]
- average_share_unemployed_loss_yearly_shocked[:, 1]
)
delta_average_share_searching_yearly = (
average_share_searching_yearly_shocked[:, 0]
- average_share_searching_yearly_shocked[:, 1]
)
delta_unemployment = share_nonemployed[:, :, :, 0] - share_nonemployed[:, :, :, 1]
delta_job_finding_rate_yearly = (
job_finding_rate_yearly_shocked[:, :, 0]
- job_finding_rate_yearly_shocked[:, :, 1]
)
delta_share_nonemployed_yearly = (
share_nonemployed_yearly_shocked[:, :, 0]
- share_nonemployed_yearly_shocked[:, :, 1]
)
delta_share_unemployed_loss_yearly = (
share_unemployed_loss_yearly_shocked[:, :, 0]
- share_unemployed_loss_yearly_shocked[:, :, 1]
)
delta_share_searching_yearly = (
share_searching_yearly_shocked[:, :, 0]
- share_searching_yearly_shocked[:, :, 1]
)
average_elasticity_job_finding_rate_yearly = (
delta_average_job_finding_rate_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * average_job_finding_rate_yearly_base)
average_elasticity_share_nonemployed_yearly = (
delta_average_share_nonemployed_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * average_share_nonemployed_yearly_base)
average_elasticity_share_unemployed_loss_yearly = (
delta_average_share_unemployed_loss_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * average_share_unemployed_loss_yearly_base)
average_elasticity_share_searching_yearly = (
delta_average_share_searching_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * average_share_searching_yearly_base)
elasticity_job_finding_rate_yearly = (
delta_job_finding_rate_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * job_finding_rate_yearly_base)
elasticity_share_nonemployed_yearly = (
delta_share_nonemployed_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * share_nonemployed_yearly_base)
elasticity_share_unemployed_loss_yearly = (
delta_share_unemployed_loss_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * share_unemployed_loss_yearly_base)
elasticity_share_searching_yearly = (
delta_share_searching_yearly * ui_replacement_rate_yearly
) / (2 * shock_size * share_searching_yearly_base)
# more precise computation of elasticity of unemployment
average_elasticity_unemployment = (
delta_average_unemployment
* np.repeat(average_ui_replacement_rate_vector, (n_years_working + 1)).reshape(
(n_periods_working, (n_years_working + 1))
)
) / (
2
* shock_size
* np.repeat(average_share_nonemployed[:, 0, 0], (n_years_working + 1)).reshape(
(n_periods_working, (n_years_working + 1))
)
)
average_elasticity_unemployment_mean = np.full(n_years_working, np.nan)
for year_idx in range(n_years_working):
average_elasticity_unemployment_mean[year_idx] = np.sum(
average_elasticity_unemployment[
index_start[year_idx] : index_end[year_idx], year_idx + 1
]
* average_share_nonemployed[
index_start[year_idx] : index_end[year_idx], 0, 0
]
) / np.sum(
average_share_nonemployed[index_start[year_idx] : index_end[year_idx], 0, 0]
)
elasticity_unemployment = (
delta_unemployment
* np.repeat(ui_replacement_rate_vector, (n_years_working + 1)).reshape(
(n_types, n_periods_working, (n_years_working + 1))
)
) / (
2
* shock_size
* np.repeat(share_nonemployed[:, :, 0, 0], (n_years_working + 1)).reshape(
(n_types, n_periods_working, (n_years_working + 1))
)
)
elasticity_unemployment_mean = np.full((n_types, n_years_working), np.nan)
for year_idx in range(n_years_working):
elasticity_unemployment_mean[:, year_idx] = np.sum(
elasticity_unemployment[
:, index_start[year_idx] : index_end[year_idx], year_idx + 1
]
* share_nonemployed[:, index_start[year_idx] : index_end[year_idx], 0, 0],
axis=1,
) / np.sum(
share_nonemployed[:, index_start[year_idx] : index_end[year_idx], 0, 0],
axis=1,
)
# cross elasticities for j age groups
# simulation
wage_level = calibration["wage_level"]
discount_factor = calibration["discount_factor"]
discount_factor_compounded_vector = discount_factor ** np.linspace(
0, 179, n_periods_working
)
average_cross_elasticity_benefits_yearly = np.full(n_years_working, np.nan)
average_cross_elasticity_benefits_discounted_yearly = np.full(
n_years_working, np.nan
)
average_cross_elasticity_total_benefits_yearly = np.full(n_years_working, np.nan)
average_cross_elasticity_pv_government_spending_yearly = np.full(
n_years_working, np.nan
)
average_cross_elasticity_net_government_spending_working_yearly = np.full(
n_years_working, np.nan
)
average_cross_elasticity_net_government_spending_all_yearly = np.full(
n_years_working, np.nan
)
average_adjustment_factor = np.full(n_years_working, np.nan)
average_adjustment_factor_discounted = np.full(n_years_working, np.nan)
average_marginal_utility_nonemployed_yearly = np.full(
(n_years_working, n_years_working + 1), np.nan
)
cross_elasticity_benefits_yearly = np.full((n_types, n_years_working), np.nan)
cross_elasticity_benefits_discounted_yearly = np.full(
(n_types, n_years_working), np.nan
)
cross_elasticity_total_benefits_yearly = np.full((n_types, n_years_working), np.nan)
cross_elasticity_pv_government_spending_yearly = np.full(
(n_types, n_years_working), np.nan
)
cross_elasticity_net_government_spending_working_yearly = np.full(
(n_types, n_years_working), np.nan
)
cross_elasticity_net_government_spending_all_yearly = np.full(
(n_types, n_years_working), np.nan
)
adjustment_factor = np.full((n_types, n_years_working), np.nan)
adjustment_factor_discounted = np.full((n_types, n_years_working), np.nan)
marginal_utility_nonemployed_yearly = np.full(
(n_types, n_years_working, n_years_working + 1), np.nan
)
for i in range(n_years_working):
idx_start = index_start[i]
idx_end = index_end[i]
average_cross_elasticity_benefits_yearly[i] = np.sum(
(
average_share_nonemployed[:, i + 1, 0]
- average_share_nonemployed[:, i + 1, 1]
)
* average_ui_replacement_rate_vector
* wage_level
* average_wage_hc_factor_pre_displacement_base
) / np.sum(
average_share_nonemployed_base[idx_start:idx_end]
* 2
* shock_size
* wage_level
* average_wage_hc_factor_pre_displacement_base[idx_start:idx_end],
)
average_cross_elasticity_benefits_discounted_yearly[i] = np.sum(
discount_factor_compounded_vector
* (
average_share_nonemployed[:, i + 1, 0]
- average_share_nonemployed[:, i + 1, 1]
)
* average_ui_replacement_rate_vector
* average_wage_hc_factor_pre_displacement_base
* wage_level
) / np.sum(
discount_factor_compounded_vector[idx_start:idx_end]
* average_share_nonemployed_base[idx_start:idx_end]
* 2
* shock_size
* wage_level
* average_wage_hc_factor_pre_displacement_base[idx_start:idx_end]
)
average_cross_elasticity_total_benefits_yearly[i] = (
average_total_benefits[i + 1, 0] - average_total_benefits[i + 1, 1]
) / 2
average_cross_elasticity_pv_government_spending_yearly[i] = (
average_pv_government_spending[i + 1, 0]
- average_pv_government_spending[i + 1, 1]
) / 2
average_cross_elasticity_net_government_spending_working_yearly[i] = (
np.sum(
average_net_government_spending_working[:, i + 1, 0]
- average_net_government_spending_working[:, i + 1, 1]
)
/ 2
)
average_cross_elasticity_net_government_spending_all_yearly[i] = (
np.sum(
average_net_government_spending_all[:, i + 1, 0]
- average_net_government_spending_all[:, i + 1, 1]
)
/ 2
)
average_adjustment_factor[i] = np.sum(
shock_size
* wage_level
* average_wage_hc_factor_pre_displacement_base[idx_start:idx_end]
* average_share_nonemployed_base[idx_start:idx_end]
)
average_adjustment_factor_discounted[i] = np.sum(
shock_size
* wage_level
* discount_factor_compounded_vector[idx_start:idx_end]
* average_wage_hc_factor_pre_displacement_base[idx_start:idx_end]
* average_share_nonemployed_base[idx_start:idx_end]
)
average_marginal_utility_nonemployed_yearly[i, :] = np.mean(
average_marginal_utility_nonemployed[idx_start:idx_end, :, 0]
)
cross_elasticity_benefits_yearly[:, i] = np.sum(
(share_nonemployed[:, :, i + 1, 0] - share_nonemployed[:, :, i + 1, 1])
* ui_replacement_rate_vector
* wage_level
* wage_hc_factor_pre_displacement_base,
axis=1,
) / np.sum(
share_nonemployed_base[:, idx_start:idx_end]
* 2
* shock_size
* wage_level
* wage_hc_factor_pre_displacement_base[:, idx_start:idx_end],
axis=1,
)
cross_elasticity_benefits_discounted_yearly[:, i] = np.sum(
discount_factor_compounded_vector
* (share_nonemployed[:, :, i + 1, 0] - share_nonemployed[:, :, i + 1, 1])
* ui_replacement_rate_vector
* wage_hc_factor_pre_displacement_base
* wage_level,
axis=1,
) / np.sum(
discount_factor_compounded_vector[idx_start:idx_end]
* share_nonemployed_base[:, idx_start:idx_end]
* 2
* shock_size
* wage_level
* wage_hc_factor_pre_displacement_base[:, idx_start:idx_end],
axis=1,
)
cross_elasticity_total_benefits_yearly[:, i] = (
total_benefits[:, i + 1, 0] - total_benefits[:, i + 1, 1]
) / 2
cross_elasticity_pv_government_spending_yearly[:, i] = (
pv_government_spending[:, i + 1, 0] - pv_government_spending[:, i + 1, 1]
) / 2
cross_elasticity_net_government_spending_working_yearly[:, i] = (
np.sum(
net_government_spending_working[:, :, i + 1, 0]
- net_government_spending_working[:, :, i + 1, 1]
)
/ 2
)
cross_elasticity_net_government_spending_all_yearly[:, i] = (
np.sum(
net_government_spending_all[:, :, i + 1, 0]
- net_government_spending_all[:, :, i + 1, 1]
)
/ 2
)
adjustment_factor[:, i] = np.sum(
shock_size
* wage_level
* wage_hc_factor_pre_displacement_base[:, idx_start:idx_end]
* share_nonemployed_base[:, idx_start:idx_end]
)
adjustment_factor_discounted[:, i] = np.sum(
shock_size
* wage_level
* discount_factor_compounded_vector[idx_start:idx_end]
* wage_hc_factor_pre_displacement_base[:, idx_start:idx_end]
* share_nonemployed_base[:, idx_start:idx_end]
)
marginal_utility_nonemployed_yearly[:, i, :] = np.mean(
marginal_utility_nonemployed[:, idx_start:idx_end, :, 0], axis=1
)
average_cross_elasticity_total_benefits_yearly = (
average_cross_elasticity_total_benefits_yearly
/ (average_adjustment_factor * n_simulations)
- 1
)
average_cross_elasticity_pv_government_spending_discounted_yearly = (
-average_cross_elasticity_pv_government_spending_yearly
/ (average_adjustment_factor_discounted * n_simulations)
- 1
)
average_cross_elasticity_pv_government_spending_yearly = (
-average_cross_elasticity_pv_government_spending_yearly
/ (average_adjustment_factor * n_simulations)
- 1
)
average_cross_elasticity_net_government_spending_working_yearly = (
-average_cross_elasticity_net_government_spending_working_yearly
/ (average_adjustment_factor * n_simulations)
- 1
)
average_cross_elasticity_net_government_spending_all_yearly = (
-average_cross_elasticity_net_government_spending_all_yearly
/ (average_adjustment_factor * n_simulations)
- 1
)
cross_elasticity_total_benefits_yearly = (
cross_elasticity_total_benefits_yearly / (adjustment_factor * n_simulations) - 1
)
cross_elasticity_pv_government_spending_discounted_yearly = (
-cross_elasticity_pv_government_spending_yearly
/ (adjustment_factor_discounted * n_simulations)
- 1
)
cross_elasticity_pv_government_spending_yearly = (
-cross_elasticity_pv_government_spending_yearly
/ (adjustment_factor * n_simulations)
- 1
)
cross_elasticity_net_government_spending_working_yearly = (
-cross_elasticity_net_government_spending_working_yearly
/ (adjustment_factor * n_simulations)
- 1
)
cross_elasticity_net_government_spending_all_yearly = (
-cross_elasticity_net_government_spending_all_yearly
/ (adjustment_factor * n_simulations)
- 1
)
average_cross_elasticity_total_benefits_mean = np.mean(
average_cross_elasticity_total_benefits_yearly
)
average_cross_elasticity_benefits_mean = np.mean(
average_cross_elasticity_benefits_yearly
)
average_cross_elasticity_pv_government_spending_mean = np.mean(
average_cross_elasticity_pv_government_spending_yearly
)
average_cross_elasticity_net_government_spending_working_mean = np.mean(
average_cross_elasticity_net_government_spending_working_yearly
)
average_elasticity_share_nonemployed_mean = np.mean(
average_elasticity_share_nonemployed_yearly
)
average_elasticity_share_nonemployed_std = np.std(
average_elasticity_share_nonemployed_yearly
)
average_elasticity_job_finding_rate_std = np.std(
average_elasticity_job_finding_rate_yearly
)
average_elasticity_share_searching_std = np.std(
average_elasticity_share_searching_yearly
)
average_cross_elasticity_benefits_std = np.std(
average_cross_elasticity_benefits_yearly
)
average_cross_elasticity_total_benefits_std = np.std(
average_cross_elasticity_total_benefits_yearly
)
average_cross_elasticity_pv_government_spending_std = np.std(
average_cross_elasticity_pv_government_spending_yearly
)
average_cross_elasticity_net_government_spending_working_std = np.std(
average_cross_elasticity_net_government_spending_working_yearly
)
cross_elasticity_total_benefits_mean = np.mean(
cross_elasticity_total_benefits_yearly, axis=1
)
cross_elasticity_benefits_mean = np.mean(cross_elasticity_benefits_yearly, axis=1)
cross_elasticity_pv_government_spending_mean = np.mean(
cross_elasticity_pv_government_spending_yearly, axis=1
)
cross_elasticity_net_government_spending_working_mean = np.mean(
cross_elasticity_net_government_spending_working_yearly, axis=1
)
elasticity_share_nonemployed_mean = np.mean(
elasticity_share_nonemployed_yearly, axis=1
)
elasticity_share_nonemployed_std = np.std(
elasticity_share_nonemployed_yearly, axis=1
)
elasticity_job_finding_rate_std = np.std(elasticity_job_finding_rate_yearly, axis=1)
elasticity_share_searching_std = np.std(elasticity_share_searching_yearly, axis=1)
cross_elasticity_benefits_std = np.std(cross_elasticity_benefits_yearly, axis=1)
cross_elasticity_total_benefits_std = np.std(
cross_elasticity_total_benefits_yearly, axis=1
)
cross_elasticity_pv_government_spending_std = np.std(
cross_elasticity_pv_government_spending_yearly, axis=1
)
cross_elasticity_net_government_spending_working_std = np.std(
cross_elasticity_net_government_spending_working_yearly, axis=1
)
# store results
out = {
"average_cross_elasticity_benefits_discounted_yearly": average_cross_elasticity_benefits_discounted_yearly, # noqa:B950
"average_cross_elasticity_benefits_mean": average_cross_elasticity_benefits_mean,
"average_cross_elasticity_benefits_std": average_cross_elasticity_benefits_std,
"average_cross_elasticity_benefits_yearly": average_cross_elasticity_benefits_yearly,
"average_cross_elasticity_net_government_spending_all_yearly": average_cross_elasticity_net_government_spending_all_yearly, # noqa:B950
"average_cross_elasticity_net_government_spending_working_mean": average_cross_elasticity_net_government_spending_working_mean, # noqa:B950
"average_cross_elasticity_net_government_spending_working_std": average_cross_elasticity_net_government_spending_working_std, # noqa:B950
"average_cross_elasticity_net_government_spending_working_yearly": average_cross_elasticity_net_government_spending_working_yearly, # noqa:B950
"average_cross_elasticity_pv_government_spending_discounted_yearly": average_cross_elasticity_pv_government_spending_discounted_yearly, # noqa:B950
"average_cross_elasticity_pv_government_spending_mean": average_cross_elasticity_pv_government_spending_mean, # noqa:B950
"average_cross_elasticity_pv_government_spending_std": average_cross_elasticity_pv_government_spending_std, # noqa:B950
"average_cross_elasticity_pv_government_spending_yearly": average_cross_elasticity_pv_government_spending_yearly, # noqa:B950
"average_cross_elasticity_total_benefits_mean": average_cross_elasticity_total_benefits_mean, # noqa:B950
"average_cross_elasticity_total_benefits_std": average_cross_elasticity_total_benefits_std, # noqa:B950
"average_cross_elasticity_total_benefits_yearly": average_cross_elasticity_total_benefits_yearly, # noqa:B950
"average_adjustment_factor": average_adjustment_factor,
"average_adjustment_factor_discounted": average_adjustment_factor_discounted,
"average_elasticity_unemployment_mean": average_elasticity_unemployment_mean,
"average_elasticity_job_finding_rate_yearly": average_elasticity_job_finding_rate_yearly, # noqa:B950
"average_elasticity_share_nonemployed_yearly": average_elasticity_share_nonemployed_yearly, # noqa:B950
"average_elasticity_share_unemployed_loss_yearly": average_elasticity_share_unemployed_loss_yearly, # noqa:B950
"average_elasticity_share_searching_yearly": average_elasticity_share_searching_yearly,
"average_elasticity_share_nonemployed_mean": average_elasticity_share_nonemployed_mean,
"average_elasticity_share_nonemployed_std": average_elasticity_share_nonemployed_std,
"average_elasticity_job_finding_rate_std": average_elasticity_job_finding_rate_std,
"average_elasticity_share_searching_std": average_elasticity_share_searching_std,
"average_job_finding_rate_searching_all": average_job_finding_rate_searching_all,
"average_job_finding_rate_yearly_base": average_job_finding_rate_yearly_base,
"average_job_finding_rate_yearly_shocked": average_job_finding_rate_yearly_shocked,
"average_marginal_utility_nonemployed_yearly": average_marginal_utility_nonemployed_yearly, # noqa:B950
"average_marginal_utility_nonemployed": average_marginal_utility_nonemployed,
"average_net_government_spending_all": average_net_government_spending_all,
"average_net_government_spending_working": average_net_government_spending_working,
"average_pv_government_spending": average_pv_government_spending,
"average_share_nonemployed": average_share_nonemployed,
"average_share_nonemployed_base": average_share_nonemployed_base,
"average_share_nonemployed_yearly_base": average_share_nonemployed_yearly_base,
"average_share_nonemployed_yearly_shocked": average_share_nonemployed_yearly_shocked,
"average_share_searching": average_share_searching,
"average_share_searching_base": average_share_searching_base,
"average_share_searching_yearly_base": average_share_searching_yearly_base,
"average_share_searching_yearly_shocked": average_share_searching_yearly_shocked,
"average_share_unemployed_loss": average_share_unemployed_loss,
"average_share_unemployed_loss_yearly_base": average_share_unemployed_loss_yearly_base,
"average_share_unemployed_loss_yearly_shocked": average_share_unemployed_loss_yearly_shocked, # noqa:B950
"average_total_benefits": average_total_benefits,
"average_wage_hc_factor_pre_displacement": average_wage_hc_factor_pre_displacement,
"average_wage_hc_factor_pre_displacement_base": average_wage_hc_factor_pre_displacement_base, # noqa:B950
"cross_elasticity_benefits_discounted_yearly": cross_elasticity_benefits_discounted_yearly, # noqa:B950
"cross_elasticity_benefits_mean": cross_elasticity_benefits_mean,
"cross_elasticity_benefits_std": cross_elasticity_benefits_std,
"cross_elasticity_benefits_yearly": cross_elasticity_benefits_yearly,
"cross_elasticity_net_government_spending_all_yearly": cross_elasticity_net_government_spending_all_yearly, # noqa:B950
"cross_elasticity_net_government_spending_working_mean": cross_elasticity_net_government_spending_working_mean, # noqa:B950
"cross_elasticity_net_government_spending_working_std": cross_elasticity_net_government_spending_working_std, # noqa:B950
"cross_elasticity_net_government_spending_working_yearly": cross_elasticity_net_government_spending_working_yearly, # noqa:B950
"cross_elasticity_pv_government_spending_discounted_yearly": cross_elasticity_pv_government_spending_discounted_yearly, # noqa:B950
"cross_elasticity_pv_government_spending_mean": cross_elasticity_pv_government_spending_mean, # noqa:B950
"cross_elasticity_pv_government_spending_std": cross_elasticity_pv_government_spending_std, # noqa:B950
"cross_elasticity_pv_government_spending_yearly": cross_elasticity_pv_government_spending_yearly, # noqa:B950
"cross_elasticity_total_benefits_mean": cross_elasticity_total_benefits_mean,
"cross_elasticity_total_benefits_std": cross_elasticity_total_benefits_std,
"cross_elasticity_total_benefits_yearly": cross_elasticity_total_benefits_yearly,
"adjustment_factor": adjustment_factor,
"adjustment_factor_discounted": adjustment_factor_discounted,
"elasticity_unemployment_mean": elasticity_unemployment_mean,
"elasticity_job_finding_rate_yearly": elasticity_job_finding_rate_yearly,
"elasticity_share_nonemployed_yearly": elasticity_share_nonemployed_yearly,
"elasticity_share_unemployed_loss_yearly": elasticity_share_unemployed_loss_yearly,
"elasticity_share_searching_yearly": elasticity_share_searching_yearly,
"elasticity_share_nonemployed_mean": elasticity_share_nonemployed_mean,
"elasticity_share_nonemployed_std": elasticity_share_nonemployed_std,
"elasticity_job_finding_rate_std": elasticity_job_finding_rate_std,
"elasticity_share_searching_std": elasticity_share_searching_std,
"job_finding_rate_searching_all": job_finding_rate_searching_all,
"job_finding_rate_yearly_base": job_finding_rate_yearly_base,
"job_finding_rate_yearly_shocked": job_finding_rate_yearly_shocked,
"marginal_utility_nonemployed_yearly": marginal_utility_nonemployed_yearly,
"marginal_utility_nonemployed": marginal_utility_nonemployed,
"net_government_spending_all": net_government_spending_all,
"net_government_spending_working": net_government_spending_working,
"pv_government_spending": pv_government_spending,
"share_nonemployed": share_nonemployed,
"share_nonemployed_base": share_nonemployed_base,
"share_nonemployed_yearly_base": share_nonemployed_yearly_base,
"share_nonemployed_yearly_shocked": share_nonemployed_yearly_shocked,
"share_searching": share_searching,
"share_searching_base": share_searching_base,
"share_searching_yearly_base": share_searching_yearly_base,
"share_searching_yearly_shocked": share_searching_yearly_shocked,
"share_unemployed_loss": share_unemployed_loss,
"share_unemployed_loss_yearly_base": share_unemployed_loss_yearly_base,
"share_unemployed_loss_yearly_shocked": share_unemployed_loss_yearly_shocked,
"shock_size": shock_size,
"shock_vector": shock_vector,
"total_benefits": total_benefits,
"ui_replacement_rate_vector_all": ui_replacement_rate_vector_all,
"wage_hc_factor_pre_displacement": wage_hc_factor_pre_displacement,
"wage_hc_factor_pre_displacement_base": wage_hc_factor_pre_displacement_base,
}
for item in out:
try:
out[item] = out[item].tolist()
except AttributeError:
pass
return out
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # CLI: optional positional arguments select the calibration setup and
    # the interpolation method; fall back to defaults if either is missing.
    try:
        setup_name = sys.argv[1]
        method = sys.argv[2]
    except IndexError:
        setup_name = "base_test_1"
        method = "linear"
    # load calibration (context manager so the file handle is closed;
    # the original `json.load(open(...))` leaked it)
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json")
    ) as calibration_file:
        calibration = json.load(calibration_file)
    # set controls for the model solution / simulation run
    controls = {
        "interpolation_method": method,
        "n_iterations_solve_max": 1,
        "n_parallel_jobs": 12,
        "n_simulations": int(1e6),
        "run_simulation": True,
        "seed_simulation": 3405,
        "show_progress": True,
        "show_progress_solve": False,
        "show_summary": False,
        "step_size_elasticity": 0.01,
        "tolerance_solve": 1e-7,
    }
    # compute elasticity
    elast_exact = elasticity_exact(controls, calibration)
    # store results as JSON alongside the other analytics outputs
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_" + setup_name + "_elasticity_exact_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(elast_exact, outfile, ensure_ascii=False, indent=2)
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,965 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_calibration/adjust_calibration.py | import copy
import json
import src.utilities.istarmap_3_8 # noqa, noreorder
import multiprocessing
import sys # noqa:F401
import tqdm # noqa:F401
import warnings # noqa:F401
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import interpolate
from scipy import optimize
from bld.project_paths import project_paths_join as ppj
from src.model_analysis.run_utils import _solve_run
from src.utilities.optimization_utils import get_step_size
from src.utilities.plot_utils import _average_by_age_group
#####################################################
# PARAMETERS
#####################################################
# Age-group definitions used to aggregate period-level model output into
# statistics by age bracket (thresholds are bin edges; the open ends are
# marked with +/- infinity).
age_thresholds = [-np.inf, 24, 29, 34, 39, 44, 49, 54, 59, np.inf]
age_groups = [f"{age} to {age + 4}" for age in range(20, 65, 5)]
# finer grouping that isolates the entry age (20) and the oldest workers
age_thresholds_full = [-np.inf, 21, 25, 30, 35, 40, 45, 50, 55, 60, np.inf]
age_groups_full = (
    ["20", "21 to 24"]
    + [f"{age} to {age + 4}" for age in range(25, 60, 5)]
    + ["60 and older"]
)
# grouping used for education-specific statistics (starts at age 25)
age_thresholds_edu = [-np.inf, 30, 35, 40, 45, 50, 55, 60, np.inf]
age_groups_edu = [f"{age} to {age + 4}" for age in range(25, 60, 5)] + [
    "60 and older"
]
# the unemployment-rate grouping coincides with the full grouping
age_thresholds_urate = list(age_thresholds_full)
age_groups_urate = list(age_groups_full)
# Age coefficient estimates from the CPS empirics step (full specification
# and the specification with reduced education groups); presumably age
# profiles from the wage/employment regressions -- TODO confirm against
# the empirics code that writes these files.
df_age_coefficients_full = pd.read_csv(
    ppj("OUT_RESULTS", "empirics", "cps_age_coefficients_full.csv"), index_col=0
)
df_age_coefficients_edu_reduced = pd.read_csv(
    ppj("OUT_RESULTS", "empirics", "cps_age_coefficients_edu_reduced.csv"),
    index_col=0,
)
# Mean labor-market transition statistics, indexed by age and education group.
transition_statistics_mean = pd.read_csv(
    ppj("OUT_RESULTS", "empirics", "cps_transition_statistics.csv"),
    index_col=["age_group", "education_group"],
)
# Estimated transition statistics by age and education group: effects and
# estimated marginal means (emmeans) for 1-month / 3-month EU and UE
# transitions, read from the CSVs written by the empirics step.
transition_statistics_estimated = {}
_column_map = {
    "predicted": "prediction",
    "std.error": "std",
    "conf.low": "ci_lower",
    "conf.high": "ci_upper",
}
for transition in ("eu", "ue"):
    for statistic in (
        f"{horizon}_{transition}_{kind}"
        for horizon in ("1m", "3m")
        for kind in ("effects", "emmeans")
    ):
        df_stat = pd.read_csv(
            ppj(
                "OUT_RESULTS",
                "empirics",
                "cps_transition_probability_" + statistic + ".csv",
            ),
            index_col=["x", "group"],
        ).rename(columns=_column_map)
        df_stat.index = df_stat.index.rename(["age_group", "education_group"])
        # recode the pooled education group "1" to the label "overall"
        df_stat.index = pd.MultiIndex.from_frame(
            df_stat.index.to_frame().replace("1", "overall")
        )
        transition_statistics_estimated[statistic] = df_stat
# point predictions of all statistics collected into one wide DataFrame
# (column order follows dict insertion order)
transition_statistics_estimated_mean = pd.concat(
    [
        transition_statistics_estimated[name].prediction.rename(name)
        for name in transition_statistics_estimated
    ],
    axis=1,
)
# Baseline calibration used as the starting point for the adjustment.
# Context manager closes the file handle; the original
# `json.load(open(...))` leaked it.
with open(
    ppj("IN_MODEL_SPECS", "analytics_calibration_base_flat_no_caps.json")
) as calibration_file:
    calibration_old = json.load(calibration_file)
# Empirical calibration targets from the CPS, by age and (reduced)
# education group.
targets_transitions = pd.read_csv(
    ppj("OUT_RESULTS", "empirics", "cps_transition_probabilities.csv"),
    index_col=["age_group", "education_reduced"],
)
targets_unemployment = pd.read_csv(
    ppj("OUT_RESULTS", "empirics", "cps_unemployment_probabilities.csv"),
    index_col=["age_group", "education_reduced"],
)
# show full column content when printing summary tables
pd.options.display.max_colwidth = 100
# NOTE(review): `global` at module scope is a no-op; kept only as a hint
# that a module-level spline object `tck` is assigned elsewhere.
global tck
#####################################################
# FUNCTIONS
#####################################################
def _eval_fit(coefficients, controls, calibration):
    """Evaluate the calibration fit for a given leisure-utility profile.

    Wrapper function for maximization over the derivative of the leisure
    utility function defined by *coefficients*: the model is solved once
    and the resulting age profile of unemployment rates is compared to the
    empirical target profile.

    :parameter:
        coefficients : array
            Values of the derivative of leisure utility at the spline nodes
            ``calibration["leisure_base"]``.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in
            *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients* (negative fit, so
            maximizing the objective minimizes the distance to the targets).
        equilibrium_instrument_rate : float
            Value of instrument rate that ensures balanced budget at
            *coefficients*
    """
    # load calibration
    age_min = calibration["age_min"]
    leisure_base = np.array(calibration["leisure_base"])
    leisure_grid = np.array(calibration["leisure_grid"])
    type_weights = np.array(calibration["type_weights"])
    # interpolate node values onto the full leisure grid with a
    # shape-preserving (monotone) cubic spline
    leisure_utility_dx_new = interpolate.PchipInterpolator(leisure_base, coefficients)(
        leisure_grid
    )
    # update calibration
    # NOTE(review): this mutates the caller's `calibration` dict in place
    calibration["leisure_utility_dx"] = leisure_utility_dx_new.tolist()
    # solve model at the updated calibration
    results = _solve_run({}, controls, calibration)
    # extract outputs by skill type (columns: high / medium / low)
    share_nonemployed_mean = pd.DataFrame(
        np.array(results["share_nonemployed"]).T, columns=["high", "medium", "low"]
    )
    share_searching_mean = pd.DataFrame(
        np.array(results["share_searching"]).T, columns=["high", "medium", "low"]
    )
    unemployment_rate = (share_searching_mean + share_nonemployed_mean) / 2
    equilibrium_instrument_rate = results["equilibrium_instrument_rate"]
    # aggregate to age groups and drop the open-ended boundary groups
    # (the previously computed job-finding aggregates were unused: removed)
    unemployment_rate_by_age_group = _average_by_age_group(
        unemployment_rate, age_min, age_thresholds_urate, age_groups_urate
    )
    unemployment_rate_by_age_group = unemployment_rate_by_age_group.drop(
        ["20", "60 and older"]
    )
    # compute objective for MAXIMIZATION: type-weighted root of the summed
    # squared deviations from the target unemployment-rate profile
    # (`target_unemployment` is a module-level global -- presumably set by
    # the calling script before optimization; TODO confirm)
    fit = np.average(
        np.sqrt(
            np.sum(np.square(unemployment_rate_by_age_group - target_unemployment))
        ),
        weights=type_weights,
    )
    objective = -fit
    return objective, equilibrium_instrument_rate
def _jacobian_fit(coefficients, controls, calibration):
    """Compute the gradient of the calibration fit w.r.t. *coefficients*.

    Two-sided (central) finite-difference gradient of the fit objective:
    the model is solved once for each up- and each down-perturbed
    coefficient (2 * len(coefficients) runs), in parallel across workers.

    :parameter:
        coefficients : array
            Coordinates (leisure-utility derivative at the spline nodes) at
            which to compute the gradient.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in
            *solve_model*)
    :returns:
        jacobian : array
            Gradient of the objective function at the point described by
            *coefficients*.
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # load calibration
    age_min = calibration["age_min"]
    leisure_base = np.array(calibration["leisure_base"])
    leisure_grid = np.array(calibration["leisure_grid"])
    type_weights = np.array(calibration["type_weights"])
    # calculate control variables: one up- and one down-perturbation per
    # coefficient
    n_coefficients = coefficients.shape[0]
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian: relative step size, floored at the
    # absolute initial step for small coefficients
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # one column per run; column idx perturbs coefficient idx upward,
    # column idx + n_coefficients perturbs it downward
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # actual spread between the up- and down-perturbed coefficient
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    # map each perturbed node vector onto the full leisure grid via a
    # monotone cubic spline; cap at zero so the derivative stays non-positive
    leisure_utility_dx_all = np.full((len(leisure_grid), n_runs), np.nan)
    for run_idx in range(n_runs):
        leisure_utility_dx_tmp = interpolate.PchipInterpolator(
            leisure_base, coefficients_all[:, run_idx]
        )(leisure_grid)
        leisure_utility_dx_tmp = np.minimum(leisure_utility_dx_tmp, 0.0)
        leisure_utility_dx_all[:, run_idx] = leisure_utility_dx_tmp
    # one argument tuple per run; deep copies so parallel workers do not
    # share mutable state
    inputs = []
    for run_idx in range(n_runs):
        inputs += [
            (
                {"leisure_utility_dx": leisure_utility_dx_all[:, run_idx]},
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel); `istarmap` is
    # monkey-patched onto Pool by src.utilities.istarmap_3_8 to allow a
    # tqdm progress bar over starmap-style inputs
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results: recompute the fit objective for each perturbed run
    for run_idx in range(n_runs):
        share_nonemployed_mean = pd.DataFrame(
            np.array(out[run_idx]["share_nonemployed"]).T,
            columns=["high", "medium", "low"],
        )
        share_searching_mean = pd.DataFrame(
            np.array(out[run_idx]["share_searching"]).T,
            columns=["high", "medium", "low"],
        )
        unemployment_rate = (share_searching_mean + share_nonemployed_mean) / 2
        unemployment_rate_by_age_group = _average_by_age_group(
            unemployment_rate, age_min, age_thresholds_urate, age_groups_urate
        )
        unemployment_rate_by_age_group = unemployment_rate_by_age_group.drop(
            ["20", "60 and older"]
        )
        # type-weighted distance to the target unemployment profile
        # (`target_unemployment` is a module-level global -- presumably set
        # by the calling script; TODO confirm)
        fit = np.average(
            np.sqrt(
                np.sum(np.square(unemployment_rate_by_age_group - target_unemployment))
            ),
            weights=type_weights,
        )
        fx[run_idx] = -fit
    # reshape to (n_coefficients, 2): column 0 = up-shift, column 1 = down-shift
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    # central difference quotient per coefficient
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def qnewton(func, jac, x_ini, controls, *args):
    """
    Solve an unconstrained maximization problem using a quasi-Newton (BFGS)
    method with steepest-ascent safeguards.

    :parameter:
        func : callable
            Objective function to maximize; called as
            ``fx, instr_eq = func(x, controls, *args)``.
        jac : callable
            Returns the Jacobian (gradient) of the objective; called as
            ``g = jac(x, controls, *args)``.
        x_ini : array
            Initial guess for the coefficients of the local maximum.
        controls : dict
            Algorithm controls; this routine reads:
            - interpolation_method : key into the calibration's instrument dict
            - n_iterations_opt_max : maximum major iterations
            - tolerance_bfgs_update : threshold below which the BFGS update is
              considered numerically unsafe and the Hessian is reset
            - tolerance_convergence_gradient : gradient convergence tolerance
            - tolerance_convergence_marquardt : Marquardt convergence tolerance
            - tolerance_slope_min : minimum directional slope before reverting
              to steepest ascent
            - zero_factor_convergence_marquardt : zero factor in Marquardt test
            (further entries are consumed by *func*, *jac* and the step search)
    :returns:
        x : array
            Coefficients of the local maximum found.
        fx : float
            Value of the objective function at x.
        g : array
            Gradient of the objective function at x.
        hessian : array [len(x) x len(x)]
            Approximation of the inverse Hessian at x.
        instr_eq : float
            Equilibrium instrument rate at x.
    :raises:
        ValueError : NaNs or INFs in coefficients.

    NOTE(review): ``calibration`` (instrument name and coefficient bounds,
    and the instrument-rate cache written back each iteration) is read from
    the enclosing module scope, not from the argument list -- confirm this
    coupling is intended.

    Modified from the corresponding file by Paul L. Fackler & Mario J. Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu
    """
    # load controls
    n_iterations_opt_max = controls["n_iterations_opt_max"]
    interpolation_method = controls["interpolation_method"]
    tolerance_bfgs_update = controls["tolerance_bfgs_update"]
    tolerance_convergence_gradient = controls["tolerance_convergence_gradient"]
    tolerance_convergence_marquardt = controls["tolerance_convergence_marquardt"]
    tolerance_slope_min = controls["tolerance_slope_min"]
    zero_factor_convergence_marquardt = controls["zero_factor_convergence_marquardt"]
    # load calibration (module-level global, see docstring note)
    instrument = calibration["instrument"]
    bounds_lower = calibration["bounds_lower"]
    bounds_upper = calibration["bounds_upper"]
    ####################
    # initiate algorithm
    iteration_opt = 0
    k = x_ini.shape[0]
    reset = True
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: start                                    \n"
        "################################################"
        "##############################################\n"
    )
    print("compute initial function value")
    fx0, instr_eq = func(x_ini, controls, *args)
    # update equilibrium instrument rate
    if instrument == "consumption_tax":
        calibration["consumption_tax_rate_init"][interpolation_method] = instr_eq
    elif instrument == "income_tax_rate":
        calibration["income_tax_rate_init"][interpolation_method] = instr_eq
    print("compute initial Jacobian")
    g0 = jac(x_ini, controls, *args)
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: initialization \n"
        "    iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        "    starting coefficient vector"
        + " " * (63 - len("[" + ", ".join(f"{i:1.5f}" for i in x_ini) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x_ini)
        + "]\n"
        "    starting value of objective function"
        + " " * (54 - len(f"{fx0:1.5f}"))
        + f"{fx0:1.5f}\n"
        "    starting gradient norm"
        + " " * (68 - len(f"{np.linalg.norm(g0):9.4f}"))
        + f"{np.linalg.norm(g0):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    # initial inverse-Hessian approximation: scaled identity (steepest ascent)
    hessian = -np.identity(k) / max(abs(fx0), 1)
    if np.all(abs(g0) < tolerance_convergence_gradient):
        print("Gradient tolerance reached at starting value")
        return x_ini, fx0, g0, hessian, instr_eq
    ####################
    # start iteration
    x = x_ini
    fx = fx0
    g = g0
    d = 0
    while iteration_opt <= n_iterations_opt_max:
        iteration_opt += 1
        d = -np.dot(hessian, g0)  # search direction
        # if increase in objective in the direction of search is too low,
        # revert to steepest ascent (B = I)
        if np.dot(d, g0) / np.dot(d, d) < tolerance_slope_min:
            hessian = -np.identity(k) / max(abs(fx0), 1)
            d = g0 / max(abs(fx0), 1)
            # FIX: was `reset = 1`; use a bool for consistency with the other
            # assignments to this flag (behavior unchanged, 1 is truthy)
            reset = True
        print("compute optimal step length")
        s, fx, instr_eq, iterations, err = get_step_size(
            func, x, fx0, g0, d, controls, *args
        )
        # check for step search failure
        if fx <= fx0:
            if reset:  # if already using steepest ascent, break
                warnings.warn("Iterations stuck in qnewton")
                return x, fx0, g0, hessian, instr_eq
            else:  # else, try again with steepest ascent
                hessian = -np.identity(k) / max(abs(fx0), 1)
                d = g0 / max(abs(fx0), 1)
                s, fx, instr_eq, iterations, err = get_step_size(
                    func, x, fx0, g0, d, controls, *args
                )
                if err:
                    warnings.warn("Cannot find suitable step in qnewton")
                    return x, fx0, g0, hessian, instr_eq
        # run some checks, then update step and current coefficient vector
        if np.logical_or(np.any(np.isnan(x + (s * d))), np.any(np.isinf(x + (s * d)))):
            raise ValueError("NaNs or INFs in coefficients.")
        elif np.logical_or(
            np.any(x + (s * d) < bounds_lower), np.any(x + (s * d) > bounds_upper)
        ):
            warnings.warn("Coefficient values out of bounds")
            break
        else:
            d = s * d
            x = x + d
        # update equilibrium instrument rate
        if instrument == "consumption_tax":
            calibration["consumption_tax_rate_init"][interpolation_method] = instr_eq
        elif instrument == "income_tax_rate":
            calibration["income_tax_rate_init"][interpolation_method] = instr_eq
        # compute Jacobian
        print("compute jacobian after step")
        g = jac(x, controls, *args)
        print(
            "\n###############################################"
            "###############################################\n"
            "QNEWTON: optimization \n"
            "    iteration"
            + " " * (81 - len(f"{iteration_opt:4d}"))
            + f"{iteration_opt:4d}\n"
            "    current coefficient vector"
            + " " * (64 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
            + "["
            + ", ".join(f"{i:1.5f}" for i in x)
            + "]\n"
            "    current value of objective function"
            + " " * (55 - len(f"{fx:1.5f}"))
            + f"{fx:1.5f}\n"
            "    current step norm"
            + " " * (73 - len(f"{np.linalg.norm(d):9.4f}"))
            + f"{np.linalg.norm(d):9.4f}\n"
            "    current gradient norm"
            + " " * (69 - len(f"{np.linalg.norm(g):9.4f}"))
            + f"{np.linalg.norm(g):9.4f}\n"
            "################################################"
            "##############################################\n"
        )
        # test convergence using Marquardt's criterion and gradient test
        if np.logical_or(
            np.logical_and(
                (fx - fx0) / (abs(fx) + zero_factor_convergence_marquardt)
                < tolerance_convergence_marquardt,
                np.all(
                    abs(d) / (abs(x) + zero_factor_convergence_marquardt)
                    < tolerance_convergence_marquardt
                ),
            ),
            np.all(abs(g) < tolerance_convergence_gradient),
        ):
            print("converged")
            break
        # update inverse Hessian approximation
        u = g - g0
        ud = np.dot(u, d)
        # if update could be numerically inaccurate, revert to steepest ascent,
        # otherwise use BFGS update
        if (abs(ud) / (np.linalg.norm(d) * np.linalg.norm(u))) < tolerance_bfgs_update:
            hessian = -np.identity(k) / max(abs(fx), 1)
            reset = True
        else:
            w = d - np.dot(hessian, u)
            wd = np.outer(w, d)
            hessian = (
                hessian + ((wd + wd.T) - (np.dot(u, w) * np.outer(d, d)) / ud) / ud
            )
            reset = False
        # update objects for iteration
        fx0 = fx
        g0 = g
    ####################
    # iteration complete
    # FIX(off-by-one): the loop increments the counter before the body, so a
    # fully exhausted budget leaves iteration_opt == n_iterations_opt_max + 1;
    # the original equality test (== n_iterations_opt_max) could never fire
    # on budget exhaustion.
    if iteration_opt > n_iterations_opt_max:
        warnings.warn("Maximum iterations exceeded in qnewton")
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: complete \n"
        "    iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        "    final coefficient vector"
        + " " * (66 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x)
        + "]\n"
        "    final value of objective function"
        + " " * (57 - len(f"{fx:1.5f}"))
        + f"{fx:1.5f}\n"
        "    final step norm"
        + " " * (75 - len(f"{np.linalg.norm(d):9.4f}"))
        + f"{np.linalg.norm(d):9.4f}\n"
        "    final gradient norm"
        + " " * (71 - len(f"{np.linalg.norm(g):9.4f}"))
        + f"{np.linalg.norm(g):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    return x, fx, g, hessian, instr_eq
def _sse(coefficients, base_values, target):
"""
Compute sum of squared differences between cubic spline through *coefficients* and *target*.
"""
# compute spline
x = np.array(range(0, 181, 20))
coefficients = np.pad(
coefficients, (0, 2), "constant", constant_values=(coefficients[-1])
)
delta_fit = interpolate.PchipInterpolator(x, coefficients)
# compute sum of squared differences
sse = np.sum(np.square(delta_fit(base_values) - target))
return sse
def _calibrate_separations(statistic):
    # Calibrate per-period job-separation rates by education type to CPS
    # employment-to-unemployment transition targets, using monotone (PCHIP)
    # interpolation over age-group midpoints, and plot fit diagnostics.
    # Returns (separations_out, target): dict of per-period separation-rate
    # lists keyed by education type, and the target DataFrame used for the fit.
    #
    # NOTE(review): *statistic* is never used; the target column is hard-coded
    # to "p_eu_3m_computed" -- confirm whether the parameter should select it.
    # Reads module-level globals: targets_transitions, age_thresholds,
    # age_groups, ppj, plt.
    # define interpolation base
    # 180 model periods; the `index // 4 + 20` mapping below implies
    # 4 periods per age-year starting at age 20
    x_range = np.arange(0, 180)
    # period midpoints of the 9 empirical age groups
    x_val = np.array([10, 30, 50, 70, 90, 110, 130, 150, 170])
    # load calibration targets
    target = targets_transitions.loc[:, "p_eu_3m_computed"].unstack(level=1)
    # interpolate separation rates (shape-preserving PCHIP per education type)
    separation_rates = pd.DataFrame(index=x_range)
    for var in ["low", "medium", "high"]:
        separation_rates.loc[:, var] = interpolate.PchipInterpolator(
            x_val, target.loc[:, var]
        )(x_range)
    # calculate averages by age group
    separation_rates["age"] = separation_rates.index // 4 + 20
    separation_rates["age_group"] = pd.cut(
        separation_rates.loc[:, "age"],
        age_thresholds,
        right=True,
        labels=age_groups,
    )
    separation_rates = separation_rates.drop("age", axis=1)
    separation_rates_mean = separation_rates.groupby("age_group").mean()
    # plot calibrated values vs. targets
    for split in ["low", "medium", "high"]:
        # compare age-group means with targets
        fig, ax = plt.subplots()
        ax.plot(x_val, separation_rates_mean.loc[:, split])
        ax.plot(x_val, target.loc[:, split])
        fig.savefig(
            ppj(
                "OUT_FIGURES",
                "sandbox",
                "calibration_separation_rates_fit_mean_" + split + ".pdf",
            )
        )
        plt.close()
        # compare full per-period vector with targets
        fig, ax = plt.subplots()
        ax.plot(x_range, separation_rates.loc[:, split])
        ax.plot(x_val, target.loc[:, split])
        fig.savefig(
            ppj(
                "OUT_FIGURES",
                "sandbox",
                "calibration_separation_rates_fit_vector_" + split + ".pdf",
            )
        )
        plt.close()
    # collect output as plain lists (JSON-serializable downstream)
    separations_out = {
        "low": separation_rates.loc[:, "low"].tolist(),
        "medium": separation_rates.loc[:, "medium"].tolist(),
        "high": separation_rates.loc[:, "high"].tolist(),
    }
    return separations_out, target
def _calibrate_wages():
    # Calibrate wage human-capital factor profiles (overall and by education
    # type) so that simulated average wages by age group match empirical
    # age-wage regression coefficients; plot fit diagnostics along the way.
    # Returns (wage_hc_factor_out, targets): dict of per-period wage factor
    # lists keyed by type, and the target DataFrame.
    # Reads module-level globals: df_age_coefficients_full,
    # df_age_coefficients_edu_reduced, age_thresholds_full/_edu,
    # age_groups_full/_edu, _solve_run, _average_by_age_group, ppj, plt.
    #
    # Weights applied to the ("high", "medium", "low") columns when averaging;
    # presumably population shares of the education types -- TODO confirm.
    type_weights = np.array([0.3118648, 0.5777581, 0.1103771])
    # CONSTRUCT TARGETS
    # no split by education
    # NOTE(review): this is an alias, not a copy -- the .loc writes below
    # mutate the module-level DataFrame in place; confirm this is intended.
    targets_full = df_age_coefficients_full
    targets_full.loc[:, "age_group"] = pd.cut(
        targets_full.index, age_thresholds_full, right=False, labels=age_groups_full
    )
    # np.exp suggests the regression coefficients are in logs -- TODO confirm
    targets_full.coefficient = np.exp(targets_full.coefficient)
    targets_full["weighted_coefficient"] = (
        targets_full.coefficient * targets_full.weight
    )
    # weighted average coefficient per age group
    targets_full = targets_full.groupby(["age_group"])[
        ["weighted_coefficient", "weight"]
    ].sum()
    targets_full["coefficient"] = (
        targets_full.weighted_coefficient / targets_full.weight
    )
    # normalize the youngest group to 1.0
    targets_full.loc["20", "coefficient"] = 1.0
    targets_full = targets_full.rename(columns={"coefficient": "overall"})
    # split by education (same construction, grouped by age group and type)
    # NOTE(review): alias again -- mutates df_age_coefficients_edu_reduced.
    targets_edu = df_age_coefficients_edu_reduced
    targets_edu.loc[:, "age_group"] = pd.cut(
        targets_edu.index, age_thresholds_edu, right=False, labels=age_groups_edu
    )
    targets_edu.coefficient = np.exp(targets_edu.coefficient)
    targets_edu["weighted_coefficient"] = targets_edu.coefficient * targets_edu.weight
    targets_edu = targets_edu.groupby(["age_group", "type"])[
        ["weighted_coefficient", "weight"]
    ].sum()
    targets_edu["coefficient"] = targets_edu.weighted_coefficient / targets_edu.weight
    targets_edu = targets_edu["coefficient"].unstack()
    # combine data frames
    targets = pd.merge(
        targets_full["overall"],
        targets_edu,
        left_index=True,
        right_index=True,
        how="left",
    )
    targets = targets[["overall", "high", "medium", "low"]]
    # construct wage level by type for ages 20 to 24
    # compute average wage growth (first two age groups relative to "25 to 29")
    factors = targets.iloc[:2, 0] / targets.iloc[2, 0]
    targets.iloc[:2, 1:] = (
        np.tile(targets.loc["25 to 29", ["high", "medium", "low"]], 2).reshape((2, 3))
        * np.tile(factors, 3).reshape((3, 2)).T
    )
    # adjust s.t. aggregate wage at 20 = 1.0
    scaling = np.average(
        targets.loc["20", ["high", "medium", "low"]], weights=type_weights
    )
    targets.iloc[:, 1:] = targets.iloc[:, 1:] / scaling
    # CALIBRATION
    # load setup
    setup_name = "base_combined_recalibrated"
    method = "linear"
    # set controls for the model solution / simulation
    controls = {
        "interpolation_method": method,
        "n_iterations_solve_max": 20,
        "n_simulations": int(1e4),
        "run_simulation": True,
        "seed_simulation": 3405,
        "show_progress_solve": True,
        "show_summary": True,
        "tolerance_solve": 1e-4,
    }
    # load calibration and set some variables
    # NOTE(review): file handle from open() is never closed; consider `with`.
    calibration_old = json.load(
        open(ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json"))
    )
    age_min = calibration_old["age_min"]
    wage_in = pd.DataFrame(
        np.array(calibration_old["wage_hc_factor_vector"]).T,
        columns=["high", "medium", "low"],
    )
    wage_in.loc[:, "overall"] = np.average(
        wage_in.loc[:, ["high", "medium", "low"]], weights=type_weights, axis=1
    )
    # simulate economy (no calibration overrides for this baseline run)
    results = _solve_run({}, controls, calibration_old)
    # extract simulated hc levels and wage levels
    experience_mean = pd.DataFrame(
        np.array(results["hc_employed_mean"]).T,
        columns=["high", "medium", "low"],
    )
    wage_mean = pd.DataFrame(
        np.array(results["wage_hc_factor_employed_mean"]).T,
        columns=["high", "medium", "low"],
    )
    # construct statistics (type-weighted aggregates and age-group averages)
    experience_mean.loc[:, "overall"] = np.average(
        experience_mean.loc[:, ["high", "medium", "low"]], weights=type_weights, axis=1
    )
    experience_by_age_group = _average_by_age_group(
        experience_mean, age_min, age_thresholds_full, age_groups_full
    )
    experience_by_age_group.iloc[0, :] = 0.0
    wage_mean.loc[:, "overall"] = np.average(
        wage_mean.loc[:, ["high", "medium", "low"]], weights=type_weights, axis=1
    )
    wage_by_age_group = _average_by_age_group(
        wage_mean, age_min, age_thresholds_full, age_groups_full
    )
    # create some plots (simulated age profile vs. target)
    x_range = np.arange(0, 181)
    x_val = np.array([20, 22.5, 27, 32, 37, 42, 47, 52, 57, 62])
    for split in ["overall", "high", "medium", "low"]:
        fig, ax = plt.subplots()
        ax.plot(x_val, wage_by_age_group.loc[:, split])
        ax.plot(x_val, targets.loc[:, split])
        fig.savefig(
            ppj("OUT_FIGURES", "sandbox", "calibration_wages_fit_" + split + ".pdf")
        )
        plt.close()
    # fit wage hc vectors
    x_ini = np.arange(1.0, 1.8, 0.1)
    # monotonicity constraints: each coefficient at least as large as the last
    cons = tuple(
        {"type": "ineq", "fun": lambda x, i=i: x[i + 1] - x[i]} for i in range(7)
    )
    wage_hc_factor_new = pd.DataFrame(index=np.arange(0, 181))
    x_opt = pd.DataFrame(index=np.arange(0, 8))
    for split in ["overall", "high", "medium", "low"]:
        # fit wage hc curve to minimize difference at simulated
        # hc levels at targets
        opt = optimize.minimize(
            _sse,
            x_ini,
            args=(
                experience_by_age_group.loc[:, split],
                targets.loc[:, split],
            ),
            constraints=cons,
        )
        x_opt.loc[:, split] = opt.x
        # interpolate complete wage hc vector (pad so the curve flattens out)
        wage_hc_factor_new.loc[:, split] = interpolate.PchipInterpolator(
            np.arange(0, 181, 20),
            np.pad(opt.x, (0, 2), "constant", constant_values=(opt.x[-1])),
        )(x_range)
    # plot for visual comparison
    for split in ["overall", "high", "medium", "low"]:
        # wage in vs. wage out
        fig, ax = plt.subplots()
        ax.plot(x_range, wage_in.loc[:, split])
        ax.plot(x_range, wage_hc_factor_new.loc[:, split])
        fig.savefig(
            ppj(
                "OUT_FIGURES",
                "sandbox",
                "calibration_wages_fit_in_out_" + split + ".pdf",
            )
        )
        plt.close()
        # wage out vs targets
        fig, ax = plt.subplots()
        ax.plot((x_val - 20) * 4, targets.loc[:, split])
        ax.plot(x_range, wage_hc_factor_new.loc[:, split])
        fig.savefig(
            ppj(
                "OUT_FIGURES",
                "sandbox",
                "calibration_wages_fit_out_target_" + split + ".pdf",
            )
        )
        plt.close()
    # collect output as plain lists (JSON-serializable downstream)
    wage_hc_factor_out = {
        "overall": wage_hc_factor_new.loc[:, "overall"].tolist(),
        "low": wage_hc_factor_new.loc[:, "low"].tolist(),
        "medium": wage_hc_factor_new.loc[:, "medium"].tolist(),
        "high": wage_hc_factor_new.loc[:, "high"].tolist(),
    }
    return wage_hc_factor_out, targets
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # Calibration driver: (optionally) fit the marginal leisure-utility
    # coefficients via quasi-Newton, then recalibrate separation rates and
    # wage human-capital factors, and write all results to disk.
    # load calibration and set variables
    # NOTE(review): file handle from open() is never closed; consider `with`.
    calibration = json.load(
        open(
            ppj(
                "IN_MODEL_SPECS",
                "analytics_calibration_base_combined.json",
            )
        )
    )
    # calibrate leisure utility function
    # set some variables
    leisure_grid = np.linspace(0, 1, 1001)  # evaluation grid on [0, 1]
    leisure_base = np.array([0.0, 0.25, 0.5, 0.75, 1.0])  # spline anchor points
    # set starting point for optimization (marginal utility at the anchors;
    # presumably the result of a previous optimization run -- TODO confirm)
    x_ini = np.array(
        [
            -0.3820180873012038,
            -0.417167939474483,
            -3.096225236446147,
            -11.067836992907717,
            -349.5588188146288,
        ]
    )
    # flip to True to re-run the quasi-Newton fit instead of using x_ini as-is
    run_optimization = False
    if run_optimization:
        # set controls for solver, simulation, and optimizer
        controls = {
            "interpolation_method": "linear",
            "n_iterations_jacobian_max": 10,
            "n_iterations_opt_max": 50,
            "n_iterations_solve_max": 20,
            "n_iterations_step_max": 20,
            "n_simulations": int(1e5),
            "n_workers": 15,
            "run_simulation": True,
            "seed_simulation": 3405,
            "show_progress": True,
            "show_progress_solve": False,
            "show_summary": False,
            "step_method": "bt",
            "step_size_jacobian": 0.025,
            "tolerance_bfgs_update": 1e-9,
            "tolerance_convergence_gradient": 1e-6,
            "tolerance_convergence_marquardt": 1e-4,
            "tolerance_solve": 1e-5,
            "tolerance_slope_min": 1e-6,
            "zero_factor_convergence_marquardt": 1,
        }
        # load calibration target
        targets_transitions = pd.read_csv(
            ppj("OUT_RESULTS", "empirics", "cps_transition_probabilities.csv"),
            index_col=["age_group", "education_reduced"],
        )
        targets_unemployment = pd.read_csv(
            ppj("OUT_RESULTS", "empirics", "cps_unemployment_probabilities.csv"),
            index_col=["age_group", "education_reduced"],
        )
        target_unemployment = targets_unemployment.loc[:, "estimate"].unstack(level=1)
        # NOTE(review): target_finding is constructed but not used below
        target_finding = targets_transitions.loc[:, "p_ue_3m_computed"].unstack(level=1)
        target_unemployment = target_unemployment.drop("60 to 64")
        target_finding = target_finding.drop("60 to 64")
        # adjust calibration: seed leisure-utility derivative from x_ini
        leisure_utility_dx_new = interpolate.PchipInterpolator(
            leisure_base, x_ini, extrapolate=True
        )(leisure_grid)
        calibration["leisure_utility_dx"] = leisure_utility_dx_new.tolist()
        calibration["leisure_base"] = leisure_base.tolist()
        calibration["leisure_grid"] = leisure_grid.tolist()
        calibration["bounds_lower"] = [-np.inf] * len(leisure_base)
        calibration["bounds_upper"] = [0.0] * len(leisure_base)
        # run optimization
        x_opt, fx_opt, g_opt, hessian, instr_eq = qnewton(
            _eval_fit, _jacobian_fit, x_ini, controls, calibration
        )
    else:
        x_opt = x_ini
    # build the leisure-utility function, its first derivative, and its second
    # derivative from the (optimized) anchor coefficients
    leisure_utility_dx_interpolator = interpolate.PchipInterpolator(
        leisure_base, x_opt, extrapolate=True
    )
    leisure_utility_new = leisure_utility_dx_interpolator.antiderivative()(leisure_grid)
    leisure_utility_dx_new = leisure_utility_dx_interpolator(leisure_grid)
    leisure_utility_dxdx_new = leisure_utility_dx_interpolator.derivative()(
        leisure_grid
    )
    leisure_utility_functions = {
        "leisure_utility": leisure_utility_new.tolist(),
        "leisure_utility_dx": leisure_utility_dx_new.tolist(),
        "leisure_utility_dxdx": leisure_utility_dxdx_new.tolist(),
    }
    # run calibration
    # NOTE(review): the argument is currently ignored by _calibrate_separations
    separations_new, targets_separations = _calibrate_separations(
        "transition_probability_3m_eu"
    )
    wage_hc_factors_new, targets_wages = _calibrate_wages()
    # store results (JSON for calibration vectors, CSV for target tables)
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "calibration_separations_new.json",
        ),
        "w",
    ) as outfile:
        json.dump(separations_new, outfile, ensure_ascii=False, indent=2)
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "calibration_wage_hc_factors_new.json",
        ),
        "w",
    ) as outfile:
        json.dump(wage_hc_factors_new, outfile, ensure_ascii=False, indent=2)
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "calibration_leisure_utility_functions_no_inctax.json",
        ),
        "w",
    ) as outfile:
        json.dump(leisure_utility_functions, outfile, ensure_ascii=False, indent=2)
    targets_separations.to_csv(
        ppj("OUT_RESULTS", "empirics", "calibration_targets_separations.csv")
    )
    targets_wages.to_csv(
        ppj("OUT_RESULTS", "empirics", "calibration_targets_wages.csv")
    )
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,966 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/run_utils.py | """ Utilities for solving and simulating the model_analysis.
This modules contains standardized functions for solving
and simulating throughout the analytical part of the
project.
"""
#####################################################
# IMPORTS
#####################################################
import numpy as np
from scipy import interpolate
from src.model_analysis.solve_model import _solve_and_simulate
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def _solve_run(program_run, controls, calibration):
    """
    Solve and simulate the model for a single program run.

    :parameter:
        program_run : dict
            Calibration overrides for this run; written into *calibration*.
        controls : dict
            Solver / simulation controls, passed through unchanged.
        calibration : dict
            Model calibration. NOTE: mutated in place with the entries of
            *program_run*; callers that reuse a calibration across runs
            should pass a copy (see the per-run deep copies at call sites).
    :returns:
        results : dict
            Output of the model solution and simulation.
    """
    # implement program_run (idiomatic in-place merge, same semantics as the
    # previous key-by-key loop)
    calibration.update(program_run)
    # solve and simulate
    results = _solve_and_simulate(controls, calibration)
    return results
def interpolate_1d(x_old, y_old, x_new, method="cubic"):
    """Evaluate a 1D spline of kind *method* through (x_old, y_old) at x_new."""
    interpolator = interpolate.interp1d(x_old, y_old, kind=method)
    return interpolator(x_new)
def interpolate_2d_ordered_to_unordered(
    x_old, y_old, z_old, x_new, y_new, method="cubic"
):
    """
    Interpolate 2D data over ordered base points at unordered target points.

    :parameter:
        x_old : array [m x n]
            X-component of base points
        y_old : array [m x n]
            Y-component of base points
        z_old : array [m x n]
            Function values at base points
        x_new : array [o x p]
            X-component of target points
        y_new : array [o x p]
            Y-component of target points
        method : string
            Degree of interpolator; options
            - "linear": 1st degree spline interpolation
            - "cubic": 3rd degree spline interpolation
    :returns:
        z_new : array [o x p]
            Interpolated function values at target points

    Wrapper function applying scipy.interpolate.RectBivariateSpline.
    First column of *x_old* and first row of *y_old* are used to construct
    the interpolator (implementation designed for interchangeability of
    methods).
    """
    # map the method name to the spline degree; reject anything else
    if method not in ("linear", "cubic"):
        raise ValueError("Unknown interpolation method %r" % (method))
    degree = 1 if method == "linear" else 3
    # the base grid is rectangular, so the axes are fully described by the
    # first column / first row of the coordinate arrays
    spline = interpolate.RectBivariateSpline(
        x_old[:, 0], y_old[0, :], z_old, kx=degree, ky=degree
    )
    # pointwise evaluation at the (possibly unordered) target coordinates
    return spline.ev(x_new, y_new)
def interpolate_2d_unordered_to_unordered(
    x_old, y_old, z_old, x_new, y_new, method="cubic"
):
    """
    Interpolate 2D data over unordered base points at unordered target points.

    :parameter:
        x_old : array [m x n]
            X-component of base points
        y_old : array [m x n]
            Y-component of base points
        z_old : array [m x n]
            Function values at base points
        x_new : array [o x p]
            X-component of target points
        y_new : array [o x p]
            Y-component of target points
        method : string
            Degree of interpolator; options
            - "linear": 1st degree spline interpolation
            - "cubic": 3rd degree spline interpolation
    :returns:
        z_new : array [o x p]
            Interpolated function values at target points

    Wrapper function applying scipy.interpolate.griddata. Base points,
    target points and function values are handled as 2d-arrays, although
    they don't need to be on a regular grid (implementation designed for
    interchangeability of methods).
    """
    # remember the 2D layout of the query points for the final reshape
    target_shape = (x_new.shape[0], x_new.shape[1])
    # flatten base and target coordinates into (N, 2) point lists (row-major,
    # matching the original dstack/reshape ordering)
    base_points = np.column_stack((x_old.ravel(), y_old.ravel()))
    base_values = z_old.reshape(-1, 1)
    query_points = np.column_stack((x_new.ravel(), y_new.ravel()))
    # scattered-data interpolation at the query points
    interpolated = interpolate.griddata(
        base_points, base_values, query_points, method=method
    )
    return interpolated.reshape(target_shape)
def interpolate_2d_unordered_to_unordered_iter(
    x_old, y_old, z_old, x_new, y_new, method="cubic"
):
    """Interpolate row by row: for each row of the base data, run a 1D
    interpolation of z over y (via interpolate_uex) at the new y points.

    Output has the shape of *x_new*; rows beyond x_old.shape[0], if any,
    remain NaN.
    """
    result = np.full(x_new.shape, np.nan)
    n_rows = x_old.shape[0]
    for row in range(n_rows):
        result[row, :] = interpolate_uex(
            y_old[row, :], z_old[row, :], y_new[row, :], method=method
        )
    return result
def interpolate_extrapolate(x_old, y_old, x_new, method="cubic"):
    """
    Interpolate 1D data with *method* and extrapolate linearly.

    :parameter:
        x_old : array [m x 1]
            Base points
        y_old : array [m x 1]
            Function values at base points
        x_new : array [n x 1]
            Target points
        method : string
            Degree of interpolator; options
            - "linear": 1st degree spline interpolation
            - "cubic": 3rd degree spline interpolation
    :returns:
        y_new : array [n x 1]
            Interpolated function values at target points; points outside
            [min(x_old), max(x_old)] are extrapolated linearly.
    """
    x_min = min(x_old)
    x_max = max(x_old)
    f_interpolate = interpolate.interp1d(
        x_old, y_old, kind=method, bounds_error=False, fill_value=np.nan
    )
    f_extrapolate = interpolate.interp1d(
        x_old, y_old, kind="linear", bounds_error=False, fill_value="extrapolate"
    )
    x_new = np.asarray(x_new)
    inside = (x_new > x_min) & (x_new < x_max)
    # FIX: evaluate the spline only on clipped abscissae. The previous
    # mask-by-multiplication form evaluated f_interpolate at out-of-range
    # points, where it returns NaN, and 0 * NaN is still NaN -- so every
    # extrapolated point came out NaN instead of the intended linear value.
    out = np.where(
        inside,
        f_interpolate(np.clip(x_new, x_min, x_max)),
        f_extrapolate(x_new),
    )
    return out
def interpolate_uex(x_old, y_old, x_new, method="cubic"):
    """Interpolate 1D data with *method*; extend linearly above the grid and
    with the nearest (first) value below the grid.
    """
    lower = min(x_old)
    upper = max(x_old)
    # spline for interior points, straight line for upward extrapolation
    spline = interpolate.interp1d(
        x_old, y_old, kind=method, bounds_error=False, fill_value="extrapolate"
    )
    line = interpolate.interp1d(
        x_old, y_old, kind="linear", bounds_error=False, fill_value="extrapolate"
    )
    # clamp abscissae from below before evaluating either interpolant
    clamped = np.maximum(x_new, np.amin(x_old))
    # piecewise selection: nearest value below the grid, linear extrapolation
    # at/above the top of the grid, spline interpolation in between
    below_values = np.full(len(x_new), y_old[0])
    out = np.where(
        x_new <= lower,
        below_values,
        np.where(x_new >= upper, line(clamped), spline(clamped)),
    )
    return out
def interpolate_uex2(x_old, y_old, x_new, method="cubic"):
    """Interpolate 1D data with *method*; extrapolate linearly above the grid
    and with the nearest (first) value below the grid.

    Single-expression variant of interpolate_uex: the three mutually
    exclusive boolean masks (interior / above / below) zero out all but one
    branch per target point.
    """
    out = (
        interpolate.interp1d(
            x_old, y_old, kind=method, bounds_error=False, fill_value="extrapolate"
        )(np.maximum(x_new, min(x_old)))
        * (x_new > min(x_old))
        * (x_new < max(x_old))
        + interpolate.interp1d(
            x_old, y_old, kind="linear", bounds_error=False, fill_value="extrapolate"
        )(np.maximum(x_new, min(x_old)))
        * (x_new >= max(x_old))
        # FIX: size the below-grid branch by the target points, not the base
        # data -- np.full(len(y_old), ...) broke (or silently misbroadcast)
        # whenever len(x_new) != len(y_old); interpolate_uex already sizes
        # this branch by len(x_new)
        + np.full(len(x_new), y_old[0]) * (x_new <= min(x_old))
    )
    return out
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,967 | simonjheiler/ui_human_capital | refs/heads/main | /src/utilities/optimization_utils.py | """
Utilities for optimization routines.
"""
#####################################################
# IMPORTS
#####################################################
import warnings
import numpy as np
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def get_step_size(f, x0, f0, g0, d, controls, *args):
    """
    Solve a one dimensional optimal step length problem.

    :parameter
        f : functional
            Objective function of maximization problem.
        x0 : array
            Starting value for step.
        f0 : float
            Value of objective function evaluated at starting value.
        g0 : array
            Gradient of the objective function at starting value.
        d : array
            Search direction vector.
        controls : dict
            Dictionary of function controls (details see below)
        args : tuple
            Additional arguments for objective function.
    :returns:
        s : float
            Optimal step length in direction d
        fx : float
            Value of objective function after optimal step (i.e. at x0 + s * d)
        instr_eq : float
            Equilibrium instrument rate after optimal step (i.e. at x0 + s * d)
        iteration : int
            Number of iterations conducted in step size calculation
        errcode : bool
            Error flag: TRUE if step method returns error flag

    Wrapper function to compute step lengths in multidimensional optimization
    problems.

    Controls (relevant for this routine):
        step_method : string
            Method to calculate optimal step length. Available options
            - "full" : step length is set to 1
            - "bhhh" : BHHH STEP (falls back to GOLD on failure)
            - "bt"   : BT STEP (falls back to GOLD on failure)
            - "gold" : GOLD STEP
        n_iterations_step_max : int
            Maximum number of iterations for step length calculation

    Modified from the corresponding file by Paul L. Fackler & Mario J. Miranda
    Copyright(c) 1997 - 2000, Paul L. Fackler & Mario J. Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu
    """
    method = controls["step_method"]
    if method == "full":
        fx, instr_eq = f(x0 + d, controls, *args)
        # FIX: accept the full Newton step only if it IMPROVES the objective
        # (this is a maximization). The original tested `fx < f0`, which
        # accepted deteriorating steps and sent improving ones to the golden
        # search fallback -- inverted relative to the CompEcon optstep logic
        # and to the caller, which treats fx <= f0 as step-search failure.
        if fx > f0:
            s = 1
            iteration = 1
            errcode = False
            print("full step")
            return s, fx, instr_eq, iteration, errcode
        else:
            s, fx, instr_eq, iteration, errcode = step_size_gold(
                f, x0, f0, g0, d, controls, *args
            )
            print("gold step")
            return s, fx, instr_eq, iteration, errcode
    elif method == "bhhh":
        s, fx, instr_eq, iteration, errcode = step_size_bhhh(
            f, x0, f0, g0, d, controls, *args
        )
        if not errcode:
            print("bhhh step")
            return s, fx, instr_eq, iteration, errcode
        else:
            # BHHH failed: retry with golden-section search
            s, fx, instr_eq, iterations_2, errcode = step_size_gold(
                f, x0, f0, g0, d, controls, *args
            )
            iteration = iteration + iterations_2
            print("gold step")
            return s, fx, instr_eq, iteration, errcode
    elif method == "bt":
        s, fx, instr_eq, iteration, errcode = step_size_bt(
            f, x0, f0, g0, d, controls, *args
        )
        if not errcode:
            print("BT step")
            return s, fx, instr_eq, iteration, errcode
        else:
            # backtracking failed: retry with golden-section search
            s, fx, instr_eq, iterations_2, errcode = step_size_gold(
                f, x0, f0, g0, d, controls, *args
            )
            iteration = iteration + iterations_2
            print("gold step")
            return s, fx, instr_eq, iteration, errcode
    elif method == "gold":
        s, fx, instr_eq, iteration, errcode = step_size_gold(
            f, x0, f0, g0, d, controls, *args
        )
        print("gold step")
        return s, fx, instr_eq, iteration, errcode
    else:
        # FIX: error message now lists all supported methods (bhhh included)
        raise ValueError(
            "Step method unknown; please select one of "
            "['full', 'bhhh', 'bt', 'gold']"
        )
def step_size_bt(f, x0, f0, g0, d, controls, *args):
    """
    Compute approximate minimum step length via backtracking.
    :parameter
        f : functional
            Objective function of the maximization problem
        x0 : array
            Starting point for the current step
        f0 : float
            Value of the objective function at the starting point
        g0 : array
            Gradient vector of the objective function at the starting
            point
        d : array
            Search direction vector
        controls : dict
            Dictionary of function controls (details see below)
        args : tuple
            Additional arguments passed to objective function
    :returns
        s : float
            Optimal step size in direction d
        fs : float
            Value of the objective function after optimal step (i.e. at x0 + s * d)
        instr_eq : float
            Equilibrium instrument rate after optimal step (i.e. at x0 + s * d)
        iterations : int
            Number of iterations conducted to find optimal step size
        errcode: bool / int
            Error flag: truthy if
            - function fails to find a suitable step length (True), or
            - cubic approximation finds a complex root (2)
    *step_size_bt* uses a backtracking method similar to Algorithm 6.3 .5 in
    Dennis and Schnabel, Numerical Methods for Unconstrained Optimization
    and Nonlinear Equations or *LNSRCH* in sec 9.7 of Press, et al.,
    Numerical Recipes. The algorithm approximates the function with a cubic
    using the function value and derivative at the initial point and two
    additional points. It determines the minimum of the approximation. If this
    is acceptable it returns, otherwise it uses the current and previous point
    to form a new approximation. The convergence criteria is similar to that
    discussed in Berndt, et.al., Annals of Economic and Social Measurement,
    1974, pp. 653 - 665 (see description of *step_size_bhhh*). The change in the
    step size is also limited to ensure that
        lb * s(k) <= s(k + 1) <= ub * s(k)
    (defaults: lb = 0.1, ub = 0.5).
    Controls (relevant for this routine):
        n_iterations_step_max : int
            Maximum number of iterations for step length calculation
    """
    # Initializations
    n_iterations_step_max = controls["n_iterations_step_max"]
    delta = 1e-4  # Defines cone of convergence; must be on (0, 1 / 2)
    ub = 0.5  # Upper bound on acceptable reduction in s.
    lb = 0.1  # Lower bound on acceptable reduction in s.
    errcode = False
    dg = np.dot(d, g0)  # directional derivative
    slope_bound_lower = delta * dg
    slope_bound_upper = (1 - delta) * dg
    iteration = 0
    # (I) FULL STEP IN THE DIRECTION OF D
    step = 1
    fs, instr_eq = f(x0 + step * d, controls, *args)
    iteration += 1
    # check if value of objective after step is in cone of convergence
    # (full step only requires sufficient increase, no upper slope bound)
    slope = (fs - f0) / step
    if slope_bound_lower <= slope:
        return step, fs, instr_eq, iteration, errcode
    # (II) QUADRATIC APPROXIMATION OF OBJECTIVE FUNCTION
    #   AND OPTIMAL STEP IN THE SEARCH DIRECTION
    # use f(x), f'(x) and f(x + s * d) to approximate f
    # with 2nd degree polynomial:
    #   f(x + s * d) = h(s) = a * s^2 + b * s + c
    #   <=> h is maximal at s = - b / (2a)
    c = f0
    b = dg
    a = (fs - step * b - c) / step ** 2
    # store initial step and value after initial step
    # for use in cubic approximation
    step_2 = step
    fs_2 = fs
    step_1 = -b / (2 * a)
    step_1 = max(step_1, lb)  # ensure lower bound on step length
    fs_1, instr_eq = f(x0 + step_1 * d, controls, *args)
    iteration += 1
    # check if value of objective after step is in cone of convergence
    slope = (fs_1 - f0) / step_1
    if slope_bound_lower <= slope <= slope_bound_upper:
        return step_1, fs_1, instr_eq, iteration, errcode
    # (III) CUBIC APPROXIMATION OF OBJECTIVE FUNCTION
    #   AND OPTIMAL STEP IN THE SEARCH DIRECTION
    # (i) use f(x), f'(x), f(x + s1 * d), and f(x + s2 * d) to
    #   approximate with 3rd degree polynomial:
    #   f(x + s * d) = h(s) = a * s^3 + b * s^2 + c * s + d0
    # (ii) check if value of objective function is within cone of
    #   convergence after optimal candidate step; if not, update
    #   s_2 to s_1 and compute new candidate step
    while iteration < n_iterations_step_max:
        # NOTE: the constant term of the cubic is called d0 here; the
        # original implementation assigned it to "d", which clobbered the
        # search direction vector and broke every subsequent evaluation of
        # f(x0 + step * d) in this loop.
        d0 = f0
        c = dg
        b = step_1 * (fs_2 - c * step_2 - d0) / (
            step_2 ** 2 * (step_1 - step_2)
        ) - step_2 * (fs_1 - c * step_1 - d0) / (step_1 ** 2 * (step_1 - step_2))
        a = (fs_1 - c * step_1 - d0) / (step_1 ** 2 * (step_1 - step_2)) - (
            fs_2 - c * step_2 - d0
        ) / (step_2 ** 2 * (step_1 - step_2))
        # store current step and value after current step
        # for use in cubic approximation
        step_2 = step_1
        fs_2 = fs_1
        if a == 0:  # quadratic fits exactly
            step_1 = -c / (2 * b)
        else:
            # optimal step is given by root of first derivative
            # at s = (-b + sqrt( b^2 - 3 * a * c)) / (3 * a)
            # check for complex root in solution of polynomial
            if (b ** 2 - 3 * a * c) < 0:
                errcode = 2  # truthy sentinel for the complex-root case
                return step_1, fs_1, instr_eq, iteration, errcode
            else:
                step_1 = (-b + np.sqrt(b ** 2 - 3 * a * c)) / (3 * a)
        # ensure acceptable step size
        step_1 = max(min(step_1, ub * step_2), lb * step_2)
        fs_1, instr_eq = f(x0 + step_1 * d, controls, *args)
        iteration += 1
        # check if value of objective after step is in cone of convergence
        slope = (fs_1 - f0) / step_1
        if slope_bound_lower <= slope <= slope_bound_upper:
            return step_1, fs_1, instr_eq, iteration, errcode
    # the loop can only be left without returning when the iteration budget
    # is exhausted (robust to n_iterations_step_max <= 2, where the original
    # equality check could fall through and return None)
    warnings.warn("maximum number of step size iterations " "in BT search reached")
    errcode = True
    return step_1, fs_1, instr_eq, iteration, errcode
def step_size_bhhh(f, x0, f0, g0, d, controls, *args):
    """
    Compute an approximate minimum step length (BHHH criterion).

    WARNING: this routine is an unimplemented stub. It currently falls
    through and returns ``None`` instead of the 5-tuple documented below;
    do not dispatch to it until implemented.

    :parameter
        f : functional
            Objective function of the maximization problem
        x0 : array
            Starting point for the current step
        f0 : float
            Value of the objective function at the starting point
        g0 : array
            Gradient vector of the objective function at the starting
            point (note: not used)
        d : array
            Search direction vector
        controls : dict
            Dictionary of function controls (details see below)
        args : tuple
            Additional arguments passed to objective function
    :returns (intended, once implemented)
        s : float
            Optimal step size in direction d
        fs : float
            Value of the objective function after optimal step (i.e. at x0 + s * d)
        instr_eq : float
            Equilibrium instrument rate after optimal step (i.e. at x0 + s * d)
        iterations : int
            Number of iterations conducted to find optimal step size
        errcode: bool
            Error flag: TRUE if maximum iterations are exceeded
    Controls (relevant for this routine):
        n_iterations_step_max : int
            Maximum number of iterations for step length calculation
    """
    # todo: implement BHHH step length search; placeholder returns None
    pass
def step_size_gold(f, x0, f0, g0, d, controls, *args):
    """
    Find an approximately optimal step length by golden-section search.
    :parameter
        f : functional
            Objective function of the maximization problem
        x0 : array
            Starting point for the current step
        f0 : float
            Value of the objective function at the starting point
        g0 : array
            Gradient vector at the starting point (not used by this routine)
        d : array
            Search direction vector
        controls : dict
            Dictionary of function controls (details see below)
        args : tuple
            Additional arguments passed to objective function
    :returns
        s : float
            Optimal step size in direction d
        fs : float
            Value of the objective function after optimal step (i.e. at x0 + s * d)
        instr_eq : float
            Equilibrium instrument rate after optimal step (i.e. at x0 + s * d)
        iterations : int
            Number of iterations conducted to find optimal step size
        errcode: bool
            Error flag: TRUE if maximum iterations are exceeded
    Step doubling is used to bracket a maximum first; golden-section
    search then narrows the bracket until its width falls below an
    internal tolerance or the iteration budget is exhausted.
    Controls (relevant for this routine):
        n_iterations_step_max : int
            Maximum number of iterations for step length calculation
    """
    max_iter = controls["n_iterations_step_max"]
    tol = 1e-4  # tolerance used for Golden search algorithm
    errcode = True  # flipped to False only on successful convergence

    # --- phase 1: bracket the maximum by step doubling ------------------
    lo, hi = 0, 1
    f_lo = f0
    f_hi, instr_eq = f(x0 + d, controls, *args)
    iteration = 1
    if f_lo <= f_hi:
        # objective has not fallen back below its starting value yet:
        # keep doubling the upper bound (comparison stays anchored at f0)
        while iteration < max_iter:
            hi *= 2
            f_hi, instr_eq = f(x0 + hi * d, controls, *args)
            iteration += 1
            if f_lo >= f_hi:
                break
    # abort if the bracket search consumed the whole iteration budget
    if iteration == max_iter:
        warnings.warn("Maximum number of iterations exceeded in bracket search.")
        return hi, f_hi, instr_eq, iteration, errcode

    # --- phase 2: golden-section search on [lo, hi] ---------------------
    ratio_minor = (3 - np.sqrt(5)) / 2  # 1 - 1/phi
    ratio_major = (np.sqrt(5) - 1) / 2  # 1/phi
    tol = tol * (ratio_minor * ratio_major)

    s_a = lo + ratio_minor * (hi - lo)
    s_b = lo + ratio_major * (hi - lo)
    width = hi - lo
    f_a, instr_eq_a = f(x0 + s_a * d, controls, *args)
    f_b, instr_eq_b = f(x0 + s_b * d, controls, *args)
    iteration += 2

    while iteration < max_iter:
        if f_a > f_b:
            # maximum lies left of s_b: discard the right segment
            width = width * ratio_major
            hi = s_b
            s_b = s_a
            s_a = lo + ratio_minor * width
            f_b = f_a
            f_a, instr_eq_a = f(x0 + s_a * d, controls, *args)
            iteration += 1
        elif f_a < f_b:
            # maximum lies right of s_a: discard the left segment
            width = width * ratio_major
            lo = s_a
            s_a = s_b
            s_b = lo + ratio_major * width
            f_a = f_b
            f_b, instr_eq_b = f(x0 + s_b * d, controls, *args)
            iteration += 1
        else:
            warnings.warn("objective function value identical at both interior points")
            break
        if width < tol:
            print("minimum bracket size reached")
            errcode = False
            break

    # report whichever interior point attains the larger value
    if f_a > f_b:
        return s_a, f_a, instr_eq_a, iteration, errcode
    return s_b, f_b, instr_eq_b, iteration, errcode
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,968 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/welfare.py | """ Compute consumption equivalents.
This module computes the equivalent consumption changes required
to compensate for welfare differentials.
"""
#####################################################
# IMPORTS
#####################################################
import json
import sys
import numba as nb
import numpy as np
import pandas as pd
from scipy import interpolate
from bld.project_paths import project_paths_join as ppj
from src.utilities.interpolation_utils import interpolate_2d_ordered_to_unordered
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
@nb.njit
def consumption_utility(x):
    """CRRA per-period utility of consumption.

    Uses the module-level global ``risk_aversion_coefficient`` (set inside
    ``_get_pv_consumption_utility``): log utility in the limit case of a
    coefficient of 1, power utility otherwise.

    NOTE(review): under ``@nb.njit`` globals are treated as compile-time
    constants, frozen at the first call; if ``risk_aversion_coefficient``
    is changed afterwards (e.g. for another calibration) the jitted
    function keeps the old value — confirm, and consider passing the
    coefficient as an explicit argument.
    """
    if risk_aversion_coefficient == 1:
        return np.log(x)
    else:
        return x ** (1 - risk_aversion_coefficient) / (1 - risk_aversion_coefficient)
def _hc_after_loss_n_agents(
    hc_before_loss,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """Apply the human-capital loss mapping type by type.

    Delegates to ``_hc_after_loss_1_agent`` for each of the ``n_types``
    agent types (first axis of ``hc_before_loss``) and stacks the results.
    """
    out = np.empty(hc_before_loss.shape)
    out[:] = np.nan
    for idx in range(n_types):
        out[idx, ...] = _hc_after_loss_1_agent(
            hc_before_loss[idx, ...],
            wage_loss_factor_vector[idx, :],
            wage_loss_reference_vector[idx, :],
            period_idx,
        )
    return out
def _hc_after_loss_1_agent(
    hc_before_loss,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """Map pre-loss human capital to post-loss human capital for one type.

    The hc stock is translated into wage-factor units, scaled by the
    period-specific wage-loss factor (floored at the wage factor of an
    agent with zero hc), and mapped back into hc units by inverting the
    reference wage-factor profile.
    """
    # wage factor at the (capped) pre-loss hc level, scaled by the loss factor
    factor_after_loss = (
        wage_hc_factor_interpolated_1_agent(
            np.minimum(hc_before_loss, hc_max), wage_loss_reference_vector
        )
        * wage_loss_factor_vector[period_idx]
    )
    # the post-loss wage factor cannot fall below that of zero human capital
    factor_after_loss = np.maximum(
        factor_after_loss,
        wage_hc_factor_interpolated_1_agent(0, wage_loss_reference_vector),
    )
    # invert the reference profile to recover hc (0 outside the grid range)
    to_hc_units = interpolate.interp1d(
        wage_loss_reference_vector,
        hc_grid,
        kind="linear",
        bounds_error=False,
        fill_value=0.0,
    )
    return to_hc_units(factor_after_loss)
@nb.njit
def job_finding_probability(x):
    """Probability of finding a job given search effort ``x``.

    Linear matching technology: module-global ``contact_rate`` (set in
    ``_get_pv_consumption_utility``) times effort.

    NOTE(review): same numba caveat as ``consumption_utility`` —
    ``@nb.njit`` freezes ``contact_rate`` at first compilation; later
    changes to the global are not picked up by the jitted function.
    """
    return contact_rate * x
def simulate_ui_benefits(
    pre_displacement_wage, replacement_rate_vector, floor, cap, period_idx
):
    """Compute unemployment benefits for all agent types.

    Applies ``_ui_benefits`` along the first (type) axis of
    ``pre_displacement_wage`` and stacks the per-type results.
    """
    out = np.empty(pre_displacement_wage.shape)
    out[:] = np.nan
    for idx in range(pre_displacement_wage.shape[0]):
        out[idx] = _ui_benefits(
            pre_displacement_wage[idx, ...],
            replacement_rate_vector[idx, ...],
            floor,
            cap,
            period_idx,
        )
    return out
def _ui_benefits(
pre_displacement_wage,
replacement_rate_vector,
floor,
cap,
period_idx,
):
benefits = replacement_rate_vector[..., period_idx] * pre_displacement_wage
benefits = np.minimum(cap, benefits)
benefits = np.maximum(floor, benefits)
return benefits
def wage_hc_factor_interpolated_1_agent(x, wage_hc_factor_vector):
    """Evaluate the wage/hc factor profile at hc level(s) ``x``.

    Linear interpolation over the module-global ``hc_grid``, with linear
    extrapolation beyond the grid's range.
    """
    interpolant = interpolate.interp1d(
        hc_grid,
        wage_hc_factor_vector,
        kind="linear",
        bounds_error=False,
        fill_value="extrapolate",
    )
    return interpolant(x)
def _get_pv_consumption_utility(calibration, results, controls):
    """Compute the present value of consumption utility at labor market entry.

    Solves backwards over the working life for the expected discounted sum
    of per-period consumption utility implied by the households' policy
    functions, by employment state (employed / unemployed / unemployed
    with human capital loss), and evaluates the resulting value for a
    labor market entrant (first period, zero assets) of each type.

    Two fixes relative to the previous revision (both consistent with the
    legacy implementation that used to be kept as commented-out code at
    the bottom of this function, now removed):
    - the employed asset law of motion subtracts the *employed*
      consumption policy (it previously subtracted the unemployed-with-
      loss policy, a copy-paste error);
    - the entry-period searching value weighs employment vs. unemployment
      by ``job_finding_probability(effort)``, consistent with every
      within-loop update (previously raw effort was used as the weight).

    :parameter
        calibration : dict
            Model calibration (grids, rates, type weights, UI parameters, ...)
        results : dict
            Solved policy functions and the equilibrium instrument rate
        controls : dict
            Numerical controls; only "interpolation_method" is used here
    :returns
        pv_consumption_utility_at_entry : pd.DataFrame
            PV of consumption utility at entry by type ("high", "medium",
            "low"; assumes n_types == 3) plus the type-weighted "overall"
            value

    Side effect: sets the module globals (contact_rate, hc_grid, hc_max,
    interpolation_method, risk_aversion_coefficient, n_types) consumed by
    the helper functions in this module.
    """
    global contact_rate
    global hc_grid
    global interpolation_method
    global risk_aversion_coefficient
    global n_types
    global hc_max
    # load controls
    interpolation_method = controls["interpolation_method"]
    # load calibration
    assets_grid = np.array(calibration["assets_grid"])
    assets_min = calibration["assets_min"]
    contact_rate = calibration["contact_rate"]
    discount_factor = calibration["discount_factor"]
    n_periods_retired = calibration["n_periods_retired"]
    n_periods_working = calibration["n_periods_working"]
    n_types = calibration["n_types"]
    hc_grid_reduced = np.array(calibration["hc_grid_reduced"])
    hc_loss_probability = np.array(calibration["hc_loss_probability"])
    risk_aversion_coefficient = calibration["risk_aversion_coefficient"]
    separation_rate_vector = np.array(calibration["separation_rate_vector"])
    type_weights = np.array(calibration["type_weights"])
    ui_cap = calibration["ui_cap"]
    ui_floor = calibration["ui_floor"]
    ui_replacement_rate_vector = np.array(calibration["ui_replacement_rate_vector"])
    wage_hc_factor_vector = np.array(calibration["wage_hc_factor_vector"])
    wage_level = calibration["wage_level"]
    wage_loss_factor_vector = np.array(calibration["wage_loss_factor_vector"])
    wage_loss_reference_vector = np.array(calibration["wage_loss_reference_vector"])
    # load results
    policy_consumption_employed = np.array(results["policy_consumption_employed"])
    policy_consumption_unemployed = np.array(results["policy_consumption_unemployed"])
    policy_consumption_unemployed_loss = np.array(
        results["policy_consumption_unemployed_loss"]
    )
    policy_effort_searching = np.array(results["policy_effort_searching"])
    policy_effort_searching_loss = np.array(results["policy_effort_searching_loss"])
    income_tax_rate = np.array(results["equilibrium_instrument_rate"])
    # compute derived parameters
    hc_grid = np.arange(n_periods_working + 1)
    hc_max = np.amax(hc_grid)
    income_tax_rate_vector = np.repeat(income_tax_rate, n_periods_working).reshape(
        (n_types, n_periods_working)
    )
    assets_grid_size = len(assets_grid)
    hc_grid_reduced_size = len(hc_grid_reduced)
    interest_rate = (1 - discount_factor) / discount_factor
    borrowing_limit_n_e_a = np.full(
        (n_types, hc_grid_reduced_size, assets_grid_size), assets_min
    )
    # (hc, assets) grid for interpolation and its (type, hc, assets) version
    assets_grid_e_a = (
        np.repeat(assets_grid, hc_grid_reduced_size)
        .reshape(assets_grid_size, hc_grid_reduced_size)
        .T
    )
    assets_grid_n_e_a = np.tile(assets_grid, n_types * hc_grid_reduced_size).reshape(
        (n_types, hc_grid_reduced_size, assets_grid_size)
    )
    hc_grid_reduced_e_a = np.repeat(hc_grid_reduced, assets_grid_size).reshape(
        hc_grid_reduced_size, assets_grid_size
    )
    wage_hc_factor_vector_reduced = np.array(
        [wage_hc_factor_vector[i, hc_grid_reduced] for i in range(n_types)]
    )
    wage_hc_factor_grid_n_e_a = np.repeat(
        wage_hc_factor_vector_reduced,
        assets_grid_size,
    ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
    # calibration files encode "no cap / no floor" as the string "None"
    if ui_cap == "None":
        ui_cap = np.Inf
    if ui_floor == "None":
        ui_floor = 0.0
    # initiate backward induction at the last working period; continuation
    # values at retirement are annuities of retirement-period consumption
    # utility over the retirement horizon
    period_idx = n_periods_working - 1
    discount_factor_retirement = (1 - discount_factor ** n_periods_retired) / (
        1 - discount_factor
    )
    pv_consumption_utility_employed_next = (
        discount_factor_retirement
        * consumption_utility(policy_consumption_employed[:, :, :, period_idx + 1])
    )
    pv_consumption_utility_unemployed_next = (
        discount_factor_retirement
        * consumption_utility(policy_consumption_unemployed[:, :, :, period_idx + 1])
    )
    pv_consumption_utility_unemployed_loss_next = (
        discount_factor_retirement
        * consumption_utility(
            policy_consumption_unemployed_loss[:, :, :, period_idx + 1]
        )
    )
    while period_idx >= 0:
        # (1) unemployed with human capital loss
        assets_unemployed_loss_next = np.maximum(
            (1 + interest_rate) * assets_grid_n_e_a
            + simulate_ui_benefits(
                wage_level * wage_hc_factor_grid_n_e_a,
                ui_replacement_rate_vector,
                ui_floor,
                ui_cap,
                period_idx,
            )
            - policy_consumption_unemployed_loss[:, :, :, period_idx],
            borrowing_limit_n_e_a,
        )
        effort_searching_loss_next = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_employed_loss = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_unemployed_loss = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        for type_idx in range(n_types):
            effort_searching_loss_next[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                policy_effort_searching_loss[type_idx, :, :, period_idx + 1],
                hc_grid_reduced_e_a,
                assets_unemployed_loss_next[type_idx, :, :],
                interpolation_method,
            )
            # on re-employment the hc stock is the post-loss stock
            continuation_value_employed_loss[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_employed_next[type_idx, :, :],
                _hc_after_loss_1_agent(
                    hc_grid_reduced_e_a,
                    wage_loss_factor_vector[type_idx, :],
                    wage_loss_reference_vector[type_idx, :],
                    period_idx + 1,
                ),
                assets_unemployed_loss_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_unemployed_loss[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_unemployed_loss_next[type_idx, :, :],
                hc_grid_reduced_e_a,
                assets_unemployed_loss_next[type_idx, :, :],
                interpolation_method,
            )
        pv_consumption_utility_unemployed_loss_now = (
            consumption_utility(policy_consumption_unemployed_loss[:, :, :, period_idx])
            + discount_factor
            * job_finding_probability(effort_searching_loss_next)
            * continuation_value_employed_loss
            + discount_factor
            * (1 - job_finding_probability(effort_searching_loss_next))
            * continuation_value_unemployed_loss
        )
        # (2) unemployed (no loss yet; loss may realize next period)
        assets_unemployed_next = np.maximum(
            (1 + interest_rate) * assets_grid_n_e_a
            + simulate_ui_benefits(
                wage_level * wage_hc_factor_grid_n_e_a,
                ui_replacement_rate_vector,
                ui_floor,
                ui_cap,
                period_idx,
            )
            - policy_consumption_unemployed[:, :, :, period_idx],
            borrowing_limit_n_e_a,
        )
        effort_searching_next = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        effort_searching_loss_next = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_employed = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_employed_loss = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_unemployed = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_unemployed_loss = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        for type_idx in range(n_types):
            effort_searching_next[type_idx, :, :] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                policy_effort_searching[type_idx, :, :, period_idx + 1],
                hc_grid_reduced_e_a,
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
            effort_searching_loss_next[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                policy_effort_searching_loss[type_idx, :, :, period_idx + 1],
                hc_grid_reduced_e_a,
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_employed[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_employed_next[type_idx, :, :],
                hc_grid_reduced_e_a,
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_employed_loss[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_employed_next[type_idx, :, :],
                _hc_after_loss_1_agent(
                    hc_grid_reduced_e_a,
                    wage_loss_factor_vector[type_idx, :],
                    wage_loss_reference_vector[type_idx, :],
                    period_idx + 1,
                ),
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_unemployed[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_unemployed_next[type_idx, :, :],
                hc_grid_reduced_e_a,
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_unemployed_loss[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_unemployed_loss_next[type_idx, :, :],
                hc_grid_reduced_e_a,
                assets_unemployed_next[type_idx, :, :],
                interpolation_method,
            )
        # expectation over the hc-loss shock and the job-finding outcome
        pv_consumption_utility_unemployed_now = (
            consumption_utility(policy_consumption_unemployed[:, :, :, period_idx])
            + discount_factor
            * np.repeat(
                (1 - hc_loss_probability), hc_grid_reduced_size * assets_grid_size
            ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
            * (
                job_finding_probability(effort_searching_next)
                * continuation_value_employed
                + (1 - job_finding_probability(effort_searching_next))
                * continuation_value_unemployed
            )
            + discount_factor
            * np.repeat(
                hc_loss_probability, hc_grid_reduced_size * assets_grid_size
            ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
            * (
                job_finding_probability(effort_searching_loss_next)
                * continuation_value_employed_loss
                + (1 - job_finding_probability(effort_searching_loss_next))
                * continuation_value_unemployed_loss
            )
        )
        # (3) employed
        # FIX: subtract the employed consumption policy (previously the
        # unemployed-with-loss policy was subtracted here by mistake)
        assets_employed_next = np.maximum(
            (1 + interest_rate) * assets_grid_n_e_a
            + np.repeat(
                (1 - income_tax_rate_vector[:, period_idx]),
                (hc_grid_reduced_size * assets_grid_size),
            ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
            * wage_level
            * wage_hc_factor_grid_n_e_a
            - policy_consumption_employed[:, :, :, period_idx],
            borrowing_limit_n_e_a,
        )
        continuation_value_employed = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        continuation_value_unemployed = np.full(
            (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
        )
        for type_idx in range(n_types):
            # hc accumulates by one unit while employed (capped at hc_max);
            # the search-effort interpolation the previous revision did
            # here was dead code (its result was never used) and is removed
            continuation_value_employed[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_employed_next[type_idx, :, :],
                np.minimum(hc_grid_reduced_e_a + 1, hc_max),
                assets_employed_next[type_idx, :, :],
                interpolation_method,
            )
            continuation_value_unemployed[
                type_idx, :, :
            ] = interpolate_2d_ordered_to_unordered(
                hc_grid_reduced_e_a,
                assets_grid_e_a,
                pv_consumption_utility_unemployed_next[type_idx, :, :],
                np.minimum(hc_grid_reduced_e_a + 1, hc_max),
                assets_employed_next[type_idx, :, :],
                interpolation_method,
            )
        pv_consumption_utility_employed_now = (
            consumption_utility(policy_consumption_employed[:, :, :, period_idx])
            + discount_factor
            * np.repeat(
                (1 - separation_rate_vector[:, period_idx]),
                hc_grid_reduced_size * assets_grid_size,
            ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
            * continuation_value_employed
            + discount_factor
            * np.repeat(
                separation_rate_vector[:, period_idx],
                hc_grid_reduced_size * assets_grid_size,
            ).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
            * continuation_value_unemployed
        )
        # initiate next iteration
        pv_consumption_utility_employed_next = pv_consumption_utility_employed_now
        pv_consumption_utility_unemployed_next = pv_consumption_utility_unemployed_now
        pv_consumption_utility_unemployed_loss_next = (
            pv_consumption_utility_unemployed_loss_now
        )
        period_idx -= 1
    # iteration complete: value of a searching entrant weighs employment
    # vs. unemployment by the job finding probability implied by the
    # entry-period search effort
    # FIX: the effort policy must be mapped through job_finding_probability,
    # consistent with every within-loop update above
    pv_consumption_utility_searching = (
        job_finding_probability(policy_effort_searching[:, :, :, 0])
        * pv_consumption_utility_employed_now
        + (1 - job_finding_probability(policy_effort_searching[:, :, :, 0]))
        * pv_consumption_utility_unemployed_now
    )
    # evaluate at zero assets and the lowest hc grid point for each type
    pv_consumption_utility_at_entry = np.full(n_types, np.nan)
    for type_idx in range(n_types):
        pv_consumption_utility_at_entry[type_idx] = interpolate.interp1d(
            assets_grid,
            pv_consumption_utility_searching[type_idx, 0, :],
            kind=interpolation_method,
        )(0.0)
    # package as a DataFrame; the labels assume n_types == 3
    pv_consumption_utility_at_entry = pd.DataFrame(
        data=pv_consumption_utility_at_entry,
        index=["high", "medium", "low"],
        columns=["pv consumption utility"],
    ).T
    pv_consumption_utility_at_entry.loc[
        "pv consumption utility", "overall"
    ] = np.average(
        pv_consumption_utility_at_entry.loc[
            "pv consumption utility", ["high", "medium", "low"]
        ],
        weights=type_weights,
    )
    return pv_consumption_utility_at_entry
def _get_consumption_equivalents(setup_base, results_files, controls):
    """Compute consumption-equivalent welfare changes relative to baseline.

    :parameter:
        setup_base : str
            Name of the baseline setup, used to locate the calibration file
            "analytics_calibration_<setup_base>.json".
        results_files : dict
            Mapping from setup label (e.g. "Baseline", "First best") to the
            stem of the corresponding results file.
        controls : dict
            Collection of control variables; must contain
            "interpolation_method".
    :returns:
        welfare_all : pandas.DataFrame
            One row per setup with welfare levels, differences to the
            baseline setup and consumption equivalents (in percent).
    :raises:
        ValueError : if the calibration specifies an unsupported number of
            types (only 1 and 3 are implemented).
    """
    # read the interpolation method from controls instead of relying on the
    # module-level global `method` set in the __main__ block
    interpolation_method = controls["interpolation_method"]
    # load calibration of baseline economy
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_base + ".json")
    ) as calibration_file:
        calibration_base = json.load(calibration_file)
    n_types = calibration_base["n_types"]
    type_weights = np.array(calibration_base["type_weights"])
    # load results of all setups
    results = {}
    for setup in results_files.keys():
        with open(
            ppj(
                "OUT_RESULTS",
                "analytics",
                "analytics_"
                + results_files[setup]
                + "_"
                + interpolation_method
                + ".json",
            )
        ) as results_file:
            results[setup] = json.load(results_file)
    results_base = results["Baseline"]
    # extract some variables
    risk_aversion_coefficient = calibration_base["risk_aversion_coefficient"]
    if n_types == 1:
        split = ["welfare_overall"]
    elif n_types == 3:
        split = ["welfare_high", "welfare_medium", "welfare_low"]
    else:
        # previously `split` was left undefined here, causing a confusing
        # NameError further down; fail early with a clear message instead
        raise ValueError(f"unsupported number of types: {n_types}")
    welfare_all = pd.DataFrame(
        index=results_files.keys(),
        data=[results[setup]["welfare"] for setup in results_files.keys()],
        columns=split,
    )
    if n_types == 1:
        welfare_all.loc[:, "delta_overall"] = (
            welfare_all.loc[:, "welfare_overall"]
            - welfare_all.loc["Baseline", "welfare_overall"]
        )
    elif n_types == 3:
        # aggregate type-specific welfare with population weights
        welfare_all.loc[:, "welfare_overall"] = np.average(
            welfare_all.loc[:, split], weights=type_weights, axis=1
        )
        for group in ["overall", "high", "medium", "low"]:
            welfare_all.loc[:, "delta_" + group] = (
                welfare_all.loc[:, "welfare_" + group]
                - welfare_all.loc["Baseline", "welfare_" + group]
            )
    # compute present value of consumption utility in baseline economy
    pv_consumption_utility = _get_pv_consumption_utility(
        calibration_base, results_base, controls
    )
    # compute consumption equivalents for all setups
    if n_types == 1:
        # BUGFIX: this branch previously read the non-existent column
        # "delta to baseline"; the difference to baseline computed above is
        # stored in "delta_overall".
        welfare_all.loc[:, "consumption equivalent"] = (
            (
                1.0
                + welfare_all.loc[:, "delta_overall"]
                / pv_consumption_utility.loc["pv consumption utility", "overall"]
            )
            ** (1.0 / (1.0 - risk_aversion_coefficient))
            - 1.0
        ) * 100.0
        # NOTE(review): this branch subtracts 1 before scaling, while the
        # n_types == 3 branch does not — confirm which convention is intended
        welfare_all.loc[:, "consumption equivalent relative to first best"] = (
            welfare_all.loc[:, "consumption equivalent"]
            / welfare_all.loc["First best", "consumption equivalent"]
            - 1
        ) * 100
    elif n_types == 3:
        for group in ["overall", "high", "medium", "low"]:
            welfare_all.loc[:, "ce_" + group] = (
                (
                    1.0
                    + welfare_all.loc[:, "delta_" + group]
                    / pv_consumption_utility.loc["pv consumption utility", group]
                )
                ** (1.0 / (1.0 - risk_aversion_coefficient))
                - 1.0
            ) * 100.0
            welfare_all.loc[:, "ce_relative_" + group] = (
                welfare_all.loc[:, "ce_" + group]
                / welfare_all.loc["First best", "ce_" + group]
            ) * 100.0
    return welfare_all
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # Read command-line arguments with per-argument fallbacks.
    # BUGFIX: the previous try/except reset BOTH values to their defaults
    # whenever sys.argv[2] was missing, silently discarding a user-supplied
    # method; each argument now falls back independently.
    method = sys.argv[1] if len(sys.argv) > 1 else "linear"
    equilibrium_condition = sys.argv[2] if len(sys.argv) > 2 else "combined"

    # map setup labels to the stems of the result files to compare
    analysis_plan = {
        "baseline": "base_" + equilibrium_condition,
        "results": {
            "First best": "base_" + equilibrium_condition + "_first_best",
            "Age and type dependent": "opt_rate_both_"
            + equilibrium_condition
            + "_results",
            "Age and type dependent (fixed budget)": "opt_rate_both_"
            + "fixed_budget"
            + "_results",
            "Age dependent": "opt_rate_age_" + equilibrium_condition + "_results",
            "Constant rate, floor and cap": "opt_rate_floor_cap_"
            + equilibrium_condition
            + "_results",
            "Baseline": "base_" + equilibrium_condition + "_results",
        },
    }
    setup_baseline = analysis_plan["baseline"]
    results_files = analysis_plan["results"]

    # set controls
    controls = {
        "interpolation_method": method,
    }

    # compute consumption equivalents
    consumption_equivalents = _get_consumption_equivalents(
        setup_baseline, results_files, controls
    )

    # store results
    consumption_equivalents.to_csv(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_welfare_comparison_"
            + equilibrium_condition
            + "_"
            + method
            + ".csv",
        )
    )
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,969 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/optimization.py | """
Find optimal life cycle profiles of UI benefit replacement rates and tax rates.
"""
#####################################################
# IMPORTS
#####################################################
import copy
import json
import src.utilities.istarmap_3_8 # noqa, noreorder
import multiprocessing
import sys
import warnings
import numpy as np
import tqdm
from scipy import interpolate
from bld.project_paths import project_paths_join as ppj
from src.model_analysis.run_utils import _solve_run
from src.utilities.optimization_utils import get_step_size
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def _eval_rate(coefficients, controls, calibration):
    """
    Wrapper function for minimization over [ui_rate].
    :parameter:
        coefficients : array
            Value of [ui_rate] at which to solve the model_analysis.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in
            *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients*
        equilibrium_quantities : dict
            Instrument rate and transfer levels that ensure a balanced
            budget at *coefficients*
    """
    # read the working-life horizon from the calibration, consistent with
    # the other _eval_* wrappers, instead of relying on a module global
    n_periods_working = calibration["n_periods_working"]
    # update calibration: one constant replacement rate for all types/ages
    # (n_types is a module-level global set in the __main__ block)
    calibration["ui_replacement_rate_vector"] = np.full(
        (n_types, n_periods_working), coefficients[0]
    )
    # solve model_analysis
    results = _solve_run({}, controls, calibration)
    # extract outputs
    objective = results["average_pv_utility_computed_corrected"]
    equilibrium_quantities = {
        "instrument_rate": results["equilibrium_instrument_rate"],
        "transfers_pensions": results["equilibrium_transfers_pensions"],
        "transfers_lumpsum": results["equilibrium_transfers_lumpsum"],
    }
    return objective, equilibrium_quantities
def _eval_rate_floor_cap(coefficients, controls, calibration):
    """
    Wrapper function for minimization over [ui_rate, ui_floor, ui_cap].
    :parameter:
        coefficients : array
            Values of [ui_rate, ui_floor, ui_cap] at which to solve the
            model_analysis.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in
            *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients*
        equilibrium_quantities : dict
            Instrument rate and transfer levels that ensure a balanced
            budget at *coefficients*
    """
    # read the working-life horizon from the calibration, consistent with
    # the other _eval_* wrappers, instead of relying on a module global
    n_periods_working = calibration["n_periods_working"]
    # update calibration: constant rate plus benefit floor and cap
    # (n_types is a module-level global set in the __main__ block)
    calibration["ui_replacement_rate_vector"] = np.full(
        (n_types, n_periods_working), coefficients[0]
    )
    calibration["ui_floor"] = coefficients[1]
    calibration["ui_cap"] = coefficients[2]
    # solve model_analysis
    results = _solve_run({}, controls, calibration)
    # extract outputs
    objective = results["average_pv_utility_computed_corrected"]
    equilibrium_quantities = {
        "instrument_rate": results["equilibrium_instrument_rate"],
        "transfers_pensions": results["equilibrium_transfers_pensions"],
        "transfers_lumpsum": results["equilibrium_transfers_lumpsum"],
    }
    return objective, equilibrium_quantities
def _eval_rate_type(coefficients, controls, calibration):
    """
    Evaluate the objective for one constant UI replacement rate per type.

    :parameter:
        coefficients : array
            One replacement rate per worker type; each rate is held constant
            over the working life.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description
            in *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients*
        equilibrium_quantities : dict
            Budget-balancing instrument rate and transfer levels
    """
    n_periods_working = calibration["n_periods_working"]
    # replicate each type's rate across all working-life periods
    rate_matrix = np.repeat(coefficients, n_periods_working).reshape(
        (n_types, n_periods_working)
    )
    calibration["ui_replacement_rate_vector"] = rate_matrix.tolist()
    # solve model_analysis
    model_output = _solve_run({}, controls, calibration)
    # collect the budget-balancing quantities alongside the objective
    equilibrium_quantities = {
        name: model_output["equilibrium_" + name]
        for name in ("instrument_rate", "transfers_pensions", "transfers_lumpsum")
    }
    return (
        model_output["average_pv_utility_computed_corrected"],
        equilibrium_quantities,
    )
def _eval_rate_vector(coefficients, controls, calibration):
    """
    Evaluate the objective for an age-dependent UI replacement rate profile.

    The profile is a monotone (PCHIP) spline through the node values in
    *coefficients*, truncated from below at the minimum replacement rate and
    applied identically to every worker type.

    :parameter:
        coefficients : array
            UI replacement rates at the spline nodes ("ui_grid").
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description
            in *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients*
        equilibrium_quantities : dict
            Budget-balancing instrument rate and transfer levels
    """
    n_periods_working = calibration["n_periods_working"]
    spline_nodes = np.array(calibration["ui_grid"])
    rate_floor = calibration["ui_replacement_rate_min"]
    # evaluate the spline on the full age grid and enforce the floor
    age_grid = np.linspace(0, n_periods_working - 1, n_periods_working)
    age_profile = interpolate.PchipInterpolator(spline_nodes, coefficients)(age_grid)
    age_profile = np.maximum(age_profile, rate_floor)
    # same age profile for every type
    rate_matrix = np.tile(age_profile, n_types).reshape(
        (n_types, n_periods_working)
    )
    calibration["ui_replacement_rate_vector"] = rate_matrix.tolist()
    # solve model_analysis
    model_output = _solve_run({}, controls, calibration)
    equilibrium_quantities = {
        name: model_output["equilibrium_" + name]
        for name in ("instrument_rate", "transfers_pensions", "transfers_lumpsum")
    }
    return (
        model_output["average_pv_utility_computed_corrected"],
        equilibrium_quantities,
    )
def _eval_rate_age_type(coefficients, controls, calibration):
    """
    Evaluate the objective for age- and type-dependent UI replacement rates.

    *coefficients* holds one consecutive block of spline-node values per
    worker type; each block is interpolated over the working life with a
    monotone (PCHIP) spline and truncated from below at the minimum
    replacement rate.

    :parameter:
        coefficients : array
            Stacked per-type UI replacement rates at the spline nodes.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description
            in *solve_model*)
    :returns:
        objective : float
            Value of objective function at *coefficients*
        equilibrium_quantities : dict
            Budget-balancing instrument rate and transfer levels
    """
    n_periods_working = calibration["n_periods_working"]
    spline_nodes = np.array(calibration["ui_grid"])
    rate_floor = calibration["ui_replacement_rate_min"]
    age_grid = np.linspace(0, n_periods_working - 1, n_periods_working)
    n_nodes = len(spline_nodes)
    # build one age profile per type from its block of node values
    rate_matrix = np.full((n_types, n_periods_working), np.nan)
    for type_idx in range(n_types):
        node_values = coefficients[n_nodes * type_idx : n_nodes * (type_idx + 1)]
        rate_matrix[type_idx, :] = interpolate.PchipInterpolator(
            spline_nodes, node_values
        )(age_grid)
    rate_matrix = np.maximum(rate_matrix, rate_floor)
    calibration["ui_replacement_rate_vector"] = rate_matrix.tolist()
    # solve model_analysis
    model_output = _solve_run({}, controls, calibration)
    equilibrium_quantities = {
        name: model_output["equilibrium_" + name]
        for name in ("instrument_rate", "transfers_pensions", "transfers_lumpsum")
    }
    return (
        model_output["average_pv_utility_computed_corrected"],
        equilibrium_quantities,
    )
def _jacobian_rate(coefficients, controls, calibration):
    """
    Wrapper function to compute two-sided gradient of objective function
    w.r.t. [ui_rate] using finite differences.

    Each perturbed model solution runs in its own worker process; the
    gradient of coefficient *i* is the central difference
    (f(x + h_i) - f(x - h_i)) / (2 * h_i).

    :parameter:
        coefficients : array
            Value of [ui_rate] at which to compute the gradient.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*); keys read here: "show_progress",
            "n_workers", "step_size_jacobian".
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of objective function at point described by *coefficients*
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # calculate control variables
    n_coefficients = coefficients.shape[0]
    # two runs per coefficient: one forward shock, one backward shock
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # relative step size, floored at step_size_init for coefficients < 1
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # column run_idx holds the coefficient vector for one run; columns
    # 0..n_coefficients-1 carry the forward shocks, the rest the backward ones
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # delta[idx] is the total spread between forward and backward point
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    inputs = []
    for run_idx in range(n_runs):
        # deep copies keep the worker processes from sharing mutable state
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": np.full(
                        (n_types, n_periods_working), coefficients_all[0, run_idx]
                    ),
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            # pool.istarmap is patched onto Pool by the
            # src.utilities.istarmap_3_8 import; it streams results so
            # tqdm can show incremental progress
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results
    for run_idx in range(n_runs):
        fx[run_idx] = np.squeeze(out[run_idx]["average_pv_utility_computed_corrected"])
    # reshape: fx[idx, 0] = forward value, fx[idx, 1] = backward value
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def _jacobian_rate_floor_cap(coefficients, controls, calibration):
    """
    Wrapper function to compute two-sided gradient of objective function
    w.r.t. the vector [ui_rate, ui_floor, ui_cap] using finite differences.

    Each perturbed model solution runs in its own worker process; the
    gradient of coefficient *i* is the central difference
    (f(x + h_i) - f(x - h_i)) / (2 * h_i).

    :parameter:
        coefficients : array
            Values of [ui_rate, ui_floor, ui_cap] at which to compute the gradient.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*); keys read here: "show_progress",
            "n_workers", "step_size_jacobian".
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of objective function at point described by *coefficients*
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # calculate control variables
    n_coefficients = coefficients.shape[0]
    # two runs per coefficient: one forward shock, one backward shock
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # relative step size, floored at step_size_init for coefficients < 1
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # column run_idx holds the coefficient vector for one run; columns
    # 0..n_coefficients-1 carry the forward shocks, the rest the backward ones
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # delta[idx] is the total spread between forward and backward point
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    inputs = []
    for run_idx in range(n_runs):
        # row 0 = rate, row 1 = floor, row 2 = cap (matches _eval_rate_floor_cap);
        # deep copies keep the worker processes from sharing mutable state
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": np.full(
                        (n_types, n_periods_working), coefficients_all[0, run_idx]
                    ),
                    "ui_floor": coefficients_all[1, run_idx],
                    "ui_cap": coefficients_all[2, run_idx],
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            # pool.istarmap is patched onto Pool by the
            # src.utilities.istarmap_3_8 import; it streams results so
            # tqdm can show incremental progress
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results
    for run_idx in range(n_runs):
        fx[run_idx] = np.squeeze(out[run_idx]["average_pv_utility_computed_corrected"])
    # reshape: fx[idx, 0] = forward value, fx[idx, 1] = backward value
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def _jacobian_rate_type(coefficients, controls, calibration):
    """
    Compute two-sided gradient of a expected average value at model_analysis entry w.r.t. the
    parameters of the unemployment insurance rate using finite differences.

    One coefficient per worker type; each perturbed model solution runs in
    its own worker process and the gradient of coefficient *i* is the
    central difference (f(x + h_i) - f(x - h_i)) / (2 * h_i).

    :parameter:
        coefficients : array
            Coordinates at which to compute gradient (one rate per type).
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*); keys read here: "show_progress",
            "n_workers", "step_size_jacobian".
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of objective function at point described by *coefficients*
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # load calibration
    n_periods_working = calibration["n_periods_working"]
    # calculate control variables
    n_coefficients = coefficients.shape[0]
    # two runs per coefficient: one forward shock, one backward shock
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # relative step size, floored at step_size_init for coefficients < 1
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # column run_idx holds the coefficient vector for one run; columns
    # 0..n_coefficients-1 carry the forward shocks, the rest the backward ones
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # delta[idx] is the total spread between forward and backward point
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    # expand each run's per-type rates over the full working life
    # (n_types is a module-level global set in the __main__ block)
    ui_replacement_rate_vector_all = np.full(
        (n_types, n_periods_working, n_runs), np.nan
    )
    for run_idx in range(n_runs):
        ui_replacement_rate_vector_tmp = np.repeat(
            coefficients_all[:, run_idx], n_periods_working
        ).reshape((n_types, n_periods_working))
        ui_replacement_rate_vector_all[:, :, run_idx] = ui_replacement_rate_vector_tmp
    inputs = []
    for run_idx in range(n_runs):
        # deep copies keep the worker processes from sharing mutable state
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": ui_replacement_rate_vector_all[
                        :, :, run_idx
                    ]
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            # pool.istarmap is patched onto Pool by the
            # src.utilities.istarmap_3_8 import; it streams results so
            # tqdm can show incremental progress
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results
    for run_idx in range(n_runs):
        fx[run_idx] = np.squeeze(out[run_idx]["average_pv_utility_computed_corrected"])
    # reshape: fx[idx, 0] = forward value, fx[idx, 1] = backward value
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def _jacobian_rate_vector(coefficients, controls, calibration):
    """
    Compute two-sided gradient of a expected average value at model_analysis entry w.r.t. the
    parameters of the unemployment insurance rate using finite differences.

    The coefficients are UI spline-node values (age-dependent profile shared
    by all types); each perturbed model solution runs in its own worker
    process and the gradient of coefficient *i* is the central difference
    (f(x + h_i) - f(x - h_i)) / (2 * h_i).

    :parameter:
        coefficients : array
            Coordinates at which to compute gradient (spline-node values).
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*); keys read here: "show_progress",
            "n_workers", "step_size_jacobian".
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of objective function at point described by *coefficients*
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # load calibration
    n_periods_working = calibration["n_periods_working"]
    ui_replacement_rate_grid_reduced = np.array(calibration["ui_grid"])
    ui_replacement_rate_min = calibration["ui_replacement_rate_min"]
    # calculate control variables
    n_coefficients = coefficients.shape[0]
    # two runs per coefficient: one forward shock, one backward shock
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # relative step size, floored at step_size_init for coefficients < 1
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # column run_idx holds the coefficient vector for one run; columns
    # 0..n_coefficients-1 carry the forward shocks, the rest the backward ones
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # delta[idx] is the total spread between forward and backward point
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    # build the full age profile for each run: monotone spline through the
    # perturbed node values, floored at the minimum rate, same for all types
    # (n_types is a module-level global set in the __main__ block)
    ui_replacement_rate_vector_all = np.full(
        (n_types, n_periods_working, n_runs), np.nan
    )
    for run_idx in range(n_runs):
        ui_replacement_rate_vector_tmp = interpolate.PchipInterpolator(
            ui_replacement_rate_grid_reduced, coefficients_all[:, run_idx]
        )(np.linspace(0, n_periods_working - 1, n_periods_working))
        ui_replacement_rate_vector_tmp = np.maximum(
            ui_replacement_rate_vector_tmp, ui_replacement_rate_min
        )
        ui_replacement_rate_vector_tmp = np.tile(
            ui_replacement_rate_vector_tmp, n_types
        ).reshape((n_types, n_periods_working))
        ui_replacement_rate_vector_all[:, :, run_idx] = ui_replacement_rate_vector_tmp
    inputs = []
    for run_idx in range(n_runs):
        # deep copies keep the worker processes from sharing mutable state
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": ui_replacement_rate_vector_all[
                        :, :, run_idx
                    ]
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            # pool.istarmap is patched onto Pool by the
            # src.utilities.istarmap_3_8 import; it streams results so
            # tqdm can show incremental progress
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results
    for run_idx in range(n_runs):
        fx[run_idx] = np.squeeze(out[run_idx]["average_pv_utility_computed_corrected"])
    # reshape: fx[idx, 0] = forward value, fx[idx, 1] = backward value
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def _jacobian_rate_age_type(coefficients, controls, calibration):
    """
    Compute two-sided gradient of a expected average value at model_analysis entry w.r.t. the
    parameters of the unemployment insurance rate using finite differences.

    The coefficients hold one block of UI spline-node values per worker type
    (age- and type-dependent profile); each perturbed model solution runs in
    its own worker process and the gradient of coefficient *i* is the
    central difference (f(x + h_i) - f(x - h_i)) / (2 * h_i).

    :parameter:
        coefficients : array
            Coordinates at which to compute gradient (stacked per-type
            spline-node values).
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*); keys read here: "show_progress",
            "n_workers", "step_size_jacobian".
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of objective function at point described by *coefficients*
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # load calibration
    n_periods_working = calibration["n_periods_working"]
    ui_replacement_rate_grid_reduced = np.array(calibration["ui_grid"])
    ui_replacement_rate_min = calibration["ui_replacement_rate_min"]
    # compute derived variables
    ui_replacement_rate_grid = np.linspace(0, n_periods_working - 1, n_periods_working)
    ui_replacement_rate_grid_reduced_size = len(ui_replacement_rate_grid_reduced)
    # calculate control variables
    n_coefficients = len(coefficients)
    # two runs per coefficient: one forward shock, one backward shock
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # relative step size, floored at step_size_init for coefficients < 1
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full((n_coefficients, 2), np.nan)
    # column run_idx holds the coefficient vector for one run; columns
    # 0..n_coefficients-1 carry the forward shocks, the rest the backward ones
    coefficients_all = np.repeat(coefficients, n_runs).reshape((n_coefficients, n_runs))
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        # delta[idx] is the total spread between forward and backward point
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    # build the full rate matrix for each run: one monotone spline per type
    # through that type's block of perturbed node values, floored at the
    # minimum rate (n_types is a module-level global set in __main__)
    ui_replacement_rate_vector_all = np.full(
        (n_types, n_periods_working, n_runs), np.nan
    )
    for run_idx in range(n_runs):
        ui_replacement_rate_vector_tmp = np.full((n_types, n_periods_working), np.nan)
        for type_idx in range(n_types):
            idx_start = ui_replacement_rate_grid_reduced_size * type_idx
            idx_end = ui_replacement_rate_grid_reduced_size * (type_idx + 1)
            ui_replacement_rate_vector_tmp[type_idx, :] = interpolate.PchipInterpolator(
                ui_replacement_rate_grid_reduced,
                coefficients_all[idx_start:idx_end, run_idx],
            )(ui_replacement_rate_grid)
        ui_replacement_rate_vector_tmp = np.maximum(
            ui_replacement_rate_vector_tmp, ui_replacement_rate_min
        )
        ui_replacement_rate_vector_all[:, :, run_idx] = ui_replacement_rate_vector_tmp
    inputs = []
    for run_idx in range(n_runs):
        # deep copies keep the worker processes from sharing mutable state
        inputs += [
            (
                {
                    "ui_replacement_rate_vector": ui_replacement_rate_vector_all[
                        :, :, run_idx
                    ]
                },
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            # pool.istarmap is patched onto Pool by the
            # src.utilities.istarmap_3_8 import; it streams results so
            # tqdm can show incremental progress
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results
    # fx[i, 0] = forward-shocked value, fx[i, 1] = backward-shocked value
    for coefficient_idx in range(n_coefficients):
        for shock_idx in range(2):
            fx[coefficient_idx, shock_idx] = np.array(
                out[coefficient_idx + n_coefficients * shock_idx][
                    "average_pv_utility_computed_corrected"
                ]
            )
    # compute jacobian
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def qnewton(func, jac, x_ini, controls, *args):
    """
    Maximize an objective function with a quasi-Newton (BFGS) method.

    Maintains an approximation of the inverse Hessian, builds a search
    direction, performs a line search via ``get_step_size`` and iterates
    until either Marquardt's criterion or the gradient test signals
    convergence. Falls back to steepest ascent whenever the BFGS direction
    or update looks unreliable.

    :parameter:
        func : callable
            Objective function; called as
            ``fx, equilibrium_quantities = func(x, controls, *args)``.
        jac : callable
            Gradient of the objective; called as ``g = jac(x, controls, *args)``.
        x_ini : array
            Initial guess for the coefficient vector.
        controls : dict
            Algorithm controls; the entries read here are
            "n_iterations_opt_max", "interpolation_method",
            "tolerance_bfgs_update", "tolerance_convergence_gradient",
            "tolerance_convergence_marquardt", "tolerance_slope_min" and
            "zero_factor_convergence_marquardt".
        *args : tuple
            Additional arguments passed through to ``func`` and ``jac``.
    :returns:
        x : array
            Coefficients of the local maximum found.
        fx : float
            Value of the objective function at x.
        g : array [len(x) x 1]
            Gradient of the objective function at x.
        hessian : array [len(x) x len(x)]
            Approximation of the inverse Hessian of the objective at x.
        equilibrium_quantities : dict
            Equilibrium objects returned by the last successful call of
            ``func``.
    :raises:
        ValueError : NaNs or INFs in coefficients.

    NOTE(review): this function also reads and mutates the module-level
    ``calibration`` dict ("instrument", "bounds_lower", "bounds_upper", and
    the ``*_init`` entries) without receiving it as a parameter — confirm
    this side channel is intended.

    Modified from the corresponding file by Paul L. Fackler & Mario J.Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu
    """
    # load controls
    n_iterations_opt_max = controls["n_iterations_opt_max"]
    interpolation_method = controls["interpolation_method"]
    tolerance_bfgs_update = controls["tolerance_bfgs_update"]
    tolerance_convergence_gradient = controls["tolerance_convergence_gradient"]
    tolerance_convergence_marquardt = controls["tolerance_convergence_marquardt"]
    tolerance_slope_min = controls["tolerance_slope_min"]
    zero_factor_convergence_marquardt = controls["zero_factor_convergence_marquardt"]
    # load calibration (module-level global, see NOTE in the docstring)
    instrument = calibration["instrument"]
    bounds_lower = calibration["bounds_lower"]
    bounds_upper = calibration["bounds_upper"]
    ####################
    # initiate algorithm
    iteration_opt = 0
    k = x_ini.shape[0]
    reset = True
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: start \n"
        "################################################"
        "##############################################\n"
    )
    print("compute initial function value")
    fx0, equilibrium_quantities = func(x_ini, controls, *args)
    # update equilibrium instrument rate so subsequent solves start from the
    # equilibrium found for the initial coefficient vector
    if instrument == "tax_consumption_rate":
        calibration["tax_consumption_init"][
            interpolation_method
        ] = equilibrium_quantities["instrument_rate"]
    elif instrument == "tax_ui_rate":
        calibration["tax_ui_init"][interpolation_method] = equilibrium_quantities[
            "instrument_rate"
        ]
    calibration["transfers_pensions_init"] = equilibrium_quantities[
        "transfers_pensions"
    ]
    calibration["transfers_lumpsum_init"] = equilibrium_quantities["transfers_lumpsum"]
    # compute Jacobian
    print("compute initial Jacobian")
    g0 = jac(x_ini, controls, *args)
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: initialization \n"
        " iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        " starting coefficient vector"
        + " " * (63 - len("[" + ", ".join(f"{i:1.5f}" for i in x_ini) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x_ini)
        + "]\n"
        " starting value of objective function"
        + " " * (54 - len(f"{fx0:1.5f}"))
        + f"{fx0:1.5f}\n"
        " starting gradient norm"
        + " " * (68 - len(f"{np.linalg.norm(g0):9.4f}"))
        + f"{np.linalg.norm(g0):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    # get approximate hessian (scaled negative identity = steepest ascent seed)
    hessian = -np.identity(k) / max(abs(fx0), 1)
    if np.all(abs(g0) < tolerance_convergence_gradient):
        print("Gradient tolerance reached at starting value")
        return x_ini, fx0, g0, hessian, equilibrium_quantities
    ####################
    # start iteration
    x = x_ini
    fx = fx0
    g = g0
    d = 0
    while iteration_opt <= n_iterations_opt_max:
        iteration_opt += 1
        d = -np.dot(hessian, g0) # search direction
        # if increase in objective in the direction of search is too low,
        # revert to steepest ascent (B = I)
        if np.dot(d, g0) / np.dot(d, d) < tolerance_slope_min:
            hessian = -np.identity(k) / max(abs(fx0), 1)
            d = g0 / max(abs(fx0), 1)
            # NOTE(review): elsewhere `reset` is a bool (True/False); 1 is
            # truthy, so the later `if reset:` behaves the same
            reset = 1
        print("compute optimal step length")
        s, fx, equilibrium_quantities, iterations, err = get_step_size(
            func, x, fx0, g0, d, controls, *args
        )
        # check for step search failure (no improvement over fx0)
        if fx <= fx0:
            if reset: # if already using steepest ascent, break
                warnings.warn("Iterations stuck in qnewton")
                return x, fx0, g0, hessian, equilibrium_quantities
            else: # else, try again with steepest ascent
                hessian = -np.identity(k) / max(abs(fx0), 1)
                d = g0 / max(abs(fx0), 1)
                s, fx, equilibrium_quantities, iterations, err = get_step_size(
                    func, x, fx0, g0, d, controls, *args
                )
                if err:
                    warnings.warn("Cannot find suitable step in qnewton")
                    return x, fx0, g0, hessian, equilibrium_quantities
        # run some checks, then update step and current coefficient vector
        if np.logical_or(np.any(np.isnan(x + (s * d))), np.any(np.isinf(x + (s * d)))):
            raise ValueError("NaNs or INFs in coefficients.")
        elif np.logical_or(
            np.any(x + (s * d) < bounds_lower), np.any(x + (s * d) > bounds_upper)
        ):
            warnings.warn("Coefficient values out of bounds")
            break
        else:
            d = s * d
            x = x + d
        # update equilibrium instrument rate (warm start for the next solve)
        if instrument == "tax_consumption_rate":
            calibration["tax_consumption_init"][
                interpolation_method
            ] = equilibrium_quantities["instrument_rate"]
        elif instrument == "tax_ui_rate":
            calibration["tax_ui_init"][interpolation_method] = equilibrium_quantities[
                "instrument_rate"
            ]
        calibration["transfers_pensions_init"] = equilibrium_quantities[
            "transfers_pensions"
        ]
        calibration["transfers_lumpsum_init"] = equilibrium_quantities[
            "transfers_lumpsum"
        ]
        # compute Jacobian
        print("compute jacobian after step")
        g = jac(x, controls, *args)
        print(
            "\n###############################################"
            "###############################################\n"
            "QNEWTON: optimization \n"
            " iteration"
            + " " * (81 - len(f"{iteration_opt:4d}"))
            + f"{iteration_opt:4d}\n"
            " current coefficient vector"
            + " " * (64 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
            + "["
            + ", ".join(f"{i:1.5f}" for i in x)
            + "]\n"
            " current value of objective function"
            + " " * (55 - len(f"{fx:1.5f}"))
            + f"{fx:1.5f}\n"
            " current step norm"
            + " " * (73 - len(f"{np.linalg.norm(d):9.4f}"))
            + f"{np.linalg.norm(d):9.4f}\n"
            " current gradient norm"
            + " " * (69 - len(f"{np.linalg.norm(g):9.4f}"))
            + f"{np.linalg.norm(g):9.4f}\n"
            "################################################"
            "##############################################\n"
        )
        # test convergence using Marquardt's criterion and gradient test
        if np.logical_or(
            np.logical_and(
                (fx - fx0) / (abs(fx) + zero_factor_convergence_marquardt)
                < tolerance_convergence_marquardt,
                np.all(
                    abs(d) / (abs(x) + zero_factor_convergence_marquardt)
                    < tolerance_convergence_marquardt
                ),
            ),
            np.all(abs(g) < tolerance_convergence_gradient),
        ):
            print("converged")
            break
        # update inverse Hessian approximation
        u = g - g0
        ud = np.dot(u, d)
        # if update could be numerically inaccurate, revert to steepest ascent,
        # otherwise use BFGS update
        if (abs(ud) / (np.linalg.norm(d) * np.linalg.norm(u))) < tolerance_bfgs_update:
            hessian = -np.identity(k) / max(abs(fx), 1)
            reset = True
        else:
            w = d - np.dot(hessian, u)
            wd = np.outer(w, d)
            hessian = (
                hessian + ((wd + wd.T) - (np.dot(u, w) * np.outer(d, d)) / ud) / ud
            )
            reset = False
        # update objects for iteration
        fx0 = fx
        g0 = g
    ####################
    # iteration complete
    # NOTE(review): when the loop exhausts all iterations it exits with
    # iteration_opt == n_iterations_opt_max + 1, so this warning only fires
    # when a `break` happened exactly at the last allowed iteration —
    # confirm the intended condition (likely `>=` after exhaustion).
    if iteration_opt == n_iterations_opt_max:
        warnings.warn("Maximum iterations exceeded in qnewton")
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: complete \n"
        " iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        " final coefficient vector"
        + " " * (66 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x)
        + "]\n"
        " final value of objective function"
        + " " * (57 - len(f"{fx:1.5f}"))
        + f"{fx:1.5f}\n"
        " final step norm"
        + " " * (75 - len(f"{np.linalg.norm(d):9.4f}"))
        + f"{np.linalg.norm(d):9.4f}\n"
        " final gradient norm"
        + " " * (71 - len(f"{np.linalg.norm(g):9.4f}"))
        + f"{np.linalg.norm(g):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    return x, fx, g, hessian, equilibrium_quantities
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # command-line arguments: setup name and interpolation method; fall back
    # to defaults when the script is invoked without arguments
    try:
        setup_name = sys.argv[1]
        method = sys.argv[2]
    except IndexError:
        setup_name = "opt_rate_both_fixed_budget"
        method = "linear"
    # load calibration and set variables
    # (context manager closes the file handle; json.load(open(...)) leaked it)
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json")
    ) as calibration_file:
        calibration = json.load(calibration_file)
    # set controls for the solution / optimization routines
    controls = {
        "interpolation_method": method,
        "n_iterations_jacobian_max": 10,
        "n_iterations_opt_max": 50,
        "n_iterations_solve_max": 20,
        "n_iterations_step_max": 20,
        "n_simulations": int(1e4),
        "n_workers": 15,
        "run_simulation": False,
        "seed_simulation": 3405,
        "show_progress": True,
        "show_progress_solve": False,
        "show_summary": False,
        "step_method": "bt",
        "step_size_jacobian": 0.015,
        "tolerance_bfgs_update": 1e-9,
        "tolerance_convergence_gradient": 1e-6,
        "tolerance_convergence_marquardt": 1e-4,
        "tolerance_solve": 1e-4,
        "tolerance_slope_min": 1e-6,
        "zero_factor_convergence_marquardt": 1,
    }
    # load some variables from calibration
    n_periods_working = calibration["n_periods_working"]
    n_types = calibration["n_types"]
    # get starting value for the optimizer; fall back to setup-specific
    # defaults when the calibration file does not provide one
    try:
        x_ini = np.array(calibration["ui_params_opt_init"])
    except KeyError:
        if "rate_only" in setup_name:
            x_ini = np.array([0.5])
        elif "rate_age" in setup_name:
            x_ini = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
        elif "rate_type" in setup_name:
            x_ini = np.array([0.5, 0.5, 0.5])
        elif "rate_both" in setup_name:
            # one coefficient per (type, age-gridpoint) combination
            x_ini = np.full(15, 0.5)
        elif "rate_floor_cap" in setup_name:
            x_ini = np.array([0.5, 0.2, 0.8])
        else:
            raise ValueError(
                "Scope of optimization unclear; "
                "please select setup name containing one of "
                "['rate_only', 'rate_age', 'rate_type', 'rate_both', 'rate_floor_cap']"
            )
    # update calibration with the initial UI policy implied by x_ini
    if "rate_only" in setup_name:
        calibration["ui_replacement_rate_vector"] = np.full(
            (n_types, n_periods_working), x_ini[0]
        ).tolist()
        calibration["bounds_lower"] = np.array([0.0])
        calibration["bounds_upper"] = np.array([1.0])
    elif "rate_age" in setup_name:
        ui_grid = np.array(calibration["ui_grid"])
        ui_replacement_rate_min = calibration["ui_replacement_rate_min"]
        # interpolate the coefficients (defined on ui_grid) to all working ages
        ui_vector = interpolate.PchipInterpolator(ui_grid, x_ini)(
            np.linspace(0, n_periods_working - 1, n_periods_working)
        )
        ui_vector = np.maximum(ui_vector, ui_replacement_rate_min)
        ui_vector = np.tile(ui_vector, n_types).reshape((n_types, n_periods_working))
        calibration["ui_replacement_rate_vector"] = ui_vector.tolist()
        calibration["bounds_lower"] = np.full(len(x_ini), -0.2)
        calibration["bounds_upper"] = np.full(len(x_ini), 1.0)
    elif "rate_type" in setup_name:
        # store as a nested list for consistency with the other branches
        # (they all call .tolist(); the original left a numpy array here)
        calibration["ui_replacement_rate_vector"] = (
            np.repeat(x_ini, n_periods_working)
            .reshape((n_types, n_periods_working))
            .tolist()
        )
        calibration["bounds_lower"] = np.array([0.0, 0.0, 0.0])
        calibration["bounds_upper"] = np.array([1.0, 1.0, 1.0])
    elif "rate_both" in setup_name:
        ui_grid = np.array(calibration["ui_grid"])
        ui_replacement_rate_min = calibration["ui_replacement_rate_min"]
        ui_vector = np.full((n_types, n_periods_working), np.nan)
        # interpolate each type's coefficient slice over the age dimension
        for type_idx in range(n_types):
            ui_vector[type_idx, :] = interpolate.PchipInterpolator(
                ui_grid,
                x_ini[len(ui_grid) * type_idx : len(ui_grid) * (type_idx + 1)],
            )(np.linspace(0, n_periods_working - 1, n_periods_working))
        ui_vector = np.maximum(ui_vector, ui_replacement_rate_min)
        calibration["ui_replacement_rate_vector"] = ui_vector.tolist()
        # NOTE(review): bounds are (n_types, len(x_ini))-shaped while the
        # coefficient vector is 1-D; the comparison in qnewton broadcasts —
        # confirm this shape is intended
        calibration["bounds_lower"] = np.full((n_types, len(x_ini)), -0.25)
        calibration["bounds_upper"] = np.full((n_types, len(x_ini)), 1.2)
    elif "rate_floor_cap" in setup_name:
        calibration["ui_replacement_rate_vector"] = np.full(
            (n_types, n_periods_working), x_ini[0]
        ).tolist()
        calibration["ui_floor"] = x_ini[1]
        calibration["ui_cap"] = x_ini[2]
        calibration["bounds_lower"] = np.array([0.0, 0.0, 0.0])
        calibration["bounds_upper"] = np.array([1.0, 2.0, 2.0])
    else:
        raise ValueError(
            "Scope of optimization unclear; "
            "please select setup name containing one of "
            "['rate_only', 'rate_age', 'rate_type', 'rate_both', 'rate_floor_cap']"
        )
    # select objective function and jacobian for the chosen optimization scope
    if "rate_only" in setup_name:
        func = _eval_rate
        jac = _jacobian_rate
    elif "rate_age" in setup_name:
        func = _eval_rate_vector
        jac = _jacobian_rate_vector
    elif "rate_type" in setup_name:
        func = _eval_rate_type
        jac = _jacobian_rate_type
    elif "rate_both" in setup_name:
        func = _eval_rate_age_type
        jac = _jacobian_rate_age_type
    elif "rate_floor_cap" in setup_name:
        func = _eval_rate_floor_cap
        jac = _jacobian_rate_floor_cap
    else:
        raise ValueError(
            "Scope of optimization unclear; "
            "please select setup name containing one of "
            "['rate_only', 'rate_age', 'rate_type', 'rate_both', 'rate_floor_cap']"
        )
    # run optimization
    x_opt, fx_opt, g_opt, hessian, equilibrium_quantities = qnewton(
        func, jac, x_ini, controls, calibration
    )
    # compile & store results; convert numpy arrays to plain lists so they
    # are JSON-serializable
    results = {
        "optimization": method,
        "ui_coefficients_opt": x_opt,
        "welfare_opt": fx_opt,
        "equilibrium_quantities": equilibrium_quantities,
    }
    for item in results:
        try:
            results[item] = results[item].tolist()
        except AttributeError:
            pass
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_" + setup_name + "_optimization_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(results, outfile, ensure_ascii=False, indent=2)
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,970 | simonjheiler/ui_human_capital | refs/heads/main | /src/model_analysis/solve_model.py | import copy
import json
import sys
import warnings
import numba as nb
import numpy as np
import numpy_financial as npf
from scipy import interpolate
from bld.project_paths import project_paths_join as ppj
from src.utilities.interpolation_utils import interpolate_1d
from src.utilities.interpolation_utils import interpolate_2d_ordered_to_unordered
from src.utilities.interpolation_utils import interpolate_2d_unordered_to_unordered_iter
from src.utilities.interpolation_utils import interpolate_n_h_a_ordered_to_unordered
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def conditional_mean(array, condition, axis):
    """Return the `condition`-weighted mean of `array` along `axis`.

    Computes sum(array * condition) / sum(condition) over the given axis via
    ``np.einsum``, i.e. the average of `array` restricted to the entries where
    `condition` is 1 (more generally, a weighted mean).

    :parameter:
        array : np.ndarray
            Values to average.
        condition : np.ndarray
            Indicator (or weight) array, same leading shape as `array`.
        axis : int
            Axis to average over; must be one of [0, 1, 2].
    :returns:
        np.ndarray with the conditional mean; the averaged axis is removed.
    :raises:
        ValueError : if `axis` is not one of [0, 1, 2].
    """
    if axis == 0:
        signature = "i...,i... -> ..."
    elif axis == 1:
        signature = "ij..., ij... -> i..."
    elif axis == 2:
        signature = "ijk..., ijk... -> ij..."
    else:
        # fail loudly instead of passing signature=None to np.einsum,
        # which would raise an uninformative TypeError after the print
        raise ValueError("axis parameter unknown; select one of [0, 1, 2]")
    return np.einsum(signature, array, condition) / condition.sum(axis)
def simulate_ui_benefits(
    pre_displacement_wage, replacement_rate_vector, floor, cap, period_idx
):
    """Compute UI benefits for every agent type in period `period_idx`.

    Applies ``_ui_benefits`` row by row (one row per type, axis 0) and
    returns an array with the same shape as `pre_displacement_wage`.
    """
    benefits = np.full(pre_displacement_wage.shape, np.nan)
    for type_idx, wages_of_type in enumerate(pre_displacement_wage):
        benefits[type_idx, :] = _ui_benefits(
            wages_of_type,
            replacement_rate_vector[type_idx, ...],
            floor,
            cap,
            period_idx,
        )
    return benefits
# @nb.njit
def _ui_benefits(
pre_displacement_wage,
replacement_rate_vector,
floor,
cap,
period_idx,
):
benefits = replacement_rate_vector[..., period_idx] * pre_displacement_wage
benefits = np.minimum(cap, benefits)
benefits = np.maximum(floor, benefits)
return benefits
def _apply_borrowing_limit_employed(
    consumption_on_grid,
    assets_next,
    wage_hc_factor_grid,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    period_idx,
):
    """Enforce the borrowing limit on an employed agent's policies.

    Wherever next-period assets fall below the borrowing limit, assets are
    set to the limit and consumption is recomputed from the budget
    constraint (net labor income + assets + after-tax asset income +
    transfers - limit). Relies on the module-level objects
    ``borrowing_limit_h_a``, ``assets_grid_h_a``, ``wage_level`` and
    ``interest_rate_raw``.
    """
    within_limit = assets_next >= borrowing_limit_h_a
    below_limit = assets_next < borrowing_limit_h_a
    # consumption implied by the budget constraint when assets are at the limit
    consumption_at_limit = (
        (1 - tax_ss - tax_ui[period_idx] - tax_income)
        * wage_level
        * wage_hc_factor_grid
        + assets_grid_h_a
        + (1 - tax_income) * interest_rate_raw * assets_grid_h_a
        + transfers_lumpsum
        - borrowing_limit_h_a
    )
    # mask-multiply (not np.where) to preserve NaN propagation exactly
    consumption_corrected = (
        within_limit * consumption_on_grid + below_limit * consumption_at_limit
    )
    assets_next_corrected = np.maximum(assets_next, borrowing_limit_h_a)
    return consumption_corrected, assets_next_corrected
def _apply_borrowing_limit_unemployed(
    consumption_on_grid,
    assets_next,
    wage_hc_factor_grid,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Enforce the borrowing limit on an unemployed agent's policies.

    Wherever next-period assets fall below the borrowing limit, assets are
    set to the limit and consumption is recomputed from the budget
    constraint (UI benefits + assets + after-tax asset income + transfers -
    limit). Relies on the module-level objects ``borrowing_limit_h_a``,
    ``assets_grid_h_a``, ``wage_level`` and ``interest_rate_raw``.
    """
    within_limit = assets_next >= borrowing_limit_h_a
    below_limit = assets_next < borrowing_limit_h_a
    # consumption implied by the budget constraint when assets are at the limit
    consumption_at_limit = (
        _ui_benefits(
            wage_level * wage_hc_factor_grid,
            ui_replacement_rate_vector,
            ui_floor,
            ui_cap,
            period_idx,
        )
        + assets_grid_h_a
        + (1 - tax_income) * interest_rate_raw * assets_grid_h_a
        + transfers_lumpsum
        - borrowing_limit_h_a
    )
    # mask-multiply (not np.where) to preserve NaN propagation exactly
    consumption_corrected = (
        within_limit * consumption_on_grid + below_limit * consumption_at_limit
    )
    assets_next_corrected = np.maximum(assets_next, borrowing_limit_h_a)
    return consumption_corrected, assets_next_corrected
def _foc_employed(
    policy_effort_searching_next,
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    separation_rate_vector,
    tax_income,
    period_idx,
):
    """Back out current consumption of an employed agent from the Euler equation.

    Next-period policies (defined on the (hc, assets) grid) are first shifted
    to account for the human-capital gain of one unit while employed (capped
    at ``hc_max``); inverting the marginal utility of the expected discounted
    next-period marginal utility then yields consumption off-grid.

    Relies on module-level objects: ``hc_grid_reduced_h_a``,
    ``assets_grid_h_a``, ``hc_max``, ``interpolation_method``,
    ``discount_factor``, ``interest_rate_raw``, ``job_finding_probability``,
    ``_consumption_utility_dx`` and ``_consumption_utility_dx_inverted``.
    """
    # interpolate next period consumption and search effort policies
    # to account for hc increase
    policy_consumption_employed_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_consumption_employed_next,
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        assets_grid_h_a,
        method=interpolation_method,
    )
    policy_consumption_unemployed_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_consumption_unemployed_next,
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        assets_grid_h_a,
        method=interpolation_method,
    )
    policy_effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        assets_grid_h_a,
        method=interpolation_method,
    )
    # consumption via FOC [hc x assets]
    # expectation over separation (weight separation_rate_vector[period_idx],
    # in which case the agent searches next period) vs. staying employed
    consumption_employed_off_grid = _consumption_utility_dx_inverted(
        discount_factor
        * (1 + (1 - tax_income) * interest_rate_raw)
        * (
            separation_rate_vector[period_idx]
            * (
                job_finding_probability(policy_effort_searching_plus_next)
                * _consumption_utility_dx(policy_consumption_employed_plus_next)
                + (1 - job_finding_probability(policy_effort_searching_plus_next))
                * _consumption_utility_dx(policy_consumption_unemployed_plus_next)
            )
            + (1 - separation_rate_vector[period_idx])
            * _consumption_utility_dx(policy_consumption_employed_plus_next)
        )
    )
    return consumption_employed_off_grid
def _foc_unemployed(
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    policy_consumption_unemployed_loss_next,
    hc_loss_probability,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    period_idx,
):
    """Back out current consumption of an unemployed agent from the Euler equation.

    The expectation mixes the no-hc-loss branch (weight
    ``1 - hc_loss_probability``) and the hc-loss branch (weight
    ``hc_loss_probability``); in each branch, job finding depends on next
    period's search effort. For the loss branch, next-period employed
    consumption is re-interpolated onto the post-loss hc grid.

    Relies on module-level objects: ``hc_grid_reduced_h_a``,
    ``assets_grid_h_a``, ``interpolation_method``, ``discount_factor``,
    ``interest_rate_raw``, ``job_finding_probability``,
    ``_hc_after_loss_1_agent``, ``_consumption_utility_dx`` and
    ``_consumption_utility_dx_inverted``.
    """
    # interpolate next period consumption policies to account for hc loss
    policy_consumption_employed_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_consumption_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        assets_grid_h_a,
        method=interpolation_method,
    )
    # back out optimal consumption via FOC
    consumption_unemployed_off_grid = _consumption_utility_dx_inverted(
        discount_factor
        * (1 + (1 - tax_income) * interest_rate_raw)
        * (
            (1 - hc_loss_probability)
            * (
                job_finding_probability(policy_effort_searching_next)
                * _consumption_utility_dx(policy_consumption_employed_next)
                + (1 - job_finding_probability(policy_effort_searching_next))
                * _consumption_utility_dx(policy_consumption_unemployed_next)
            )
            + hc_loss_probability
            * (
                job_finding_probability(policy_effort_searching_loss_next)
                * _consumption_utility_dx(policy_consumption_employed_loss_next)
                + (1 - job_finding_probability(policy_effort_searching_loss_next))
                * _consumption_utility_dx(policy_consumption_unemployed_loss_next)
            )
        )
    )
    return consumption_unemployed_off_grid
def _foc_unemployed_loss(
    policy_effort_searching_loss_next,
    policy_consumption_employed_next,
    policy_consumption_unemployed_loss_next,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    period_idx,
):
    """Back out current consumption of an unemployed agent who already
    suffered the hc loss, via the Euler equation.

    Same structure as ``_foc_unemployed`` but with the loss branch only:
    next-period employed consumption is interpolated onto the post-loss hc
    grid, and the expectation is over job finding given next period's
    post-loss search effort.

    Relies on the same module-level objects as ``_foc_unemployed``.
    """
    # interpolate next period consumption policies to account for hc loss
    policy_consumption_employed_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_consumption_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        assets_grid_h_a,
        method=interpolation_method,
    )
    # back out optimal consumption via FOC
    consumption_unemployed_loss_off_grid = _consumption_utility_dx_inverted(
        discount_factor
        * (1 + (1 - tax_income) * interest_rate_raw)
        * (
            job_finding_probability(policy_effort_searching_loss_next)
            * _consumption_utility_dx(policy_consumption_employed_loss_next)
            + (1 - job_finding_probability(policy_effort_searching_loss_next))
            * _consumption_utility_dx(policy_consumption_unemployed_loss_next)
        )
    )
    return consumption_unemployed_loss_off_grid
def _get_cost_ui_employed(
    cost_ui_employed_next,
    cost_ui_unemployed_next,
    policy_effort_searching_next,
    policy_assets_employed_now,
    separation_rate_vector,
    period_idx,
):
    """Expected discounted cost of UI payments for a currently employed agent.

    No benefit accrues this period; the value is the discounted expectation
    of next period's cost over the separation / job-finding outcomes,
    evaluated at the chosen asset policy and the hc level after the
    employment gain (capped at ``hc_max``). The last asset column is
    duplicated via ``np.append`` before interpolating — presumably constant
    extrapolation beyond the last asset gridpoint onto the ``*_h_a1`` grids;
    TODO confirm against the grid definitions.
    """
    # interpolate next period cost functions and search effort policy
    # to account for increase in hc and choice of assets next period
    cost_ui_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            cost_ui_employed_next, cost_ui_employed_next[:, -1, np.newaxis], axis=1
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    cost_ui_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            cost_ui_unemployed_next, cost_ui_unemployed_next[:, -1, np.newaxis], axis=1
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            policy_effort_searching_next,
            policy_effort_searching_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): the continuation terms weight by the raw
    # effort_searching_plus_next, whereas _get_cost_ui_unemployed wraps
    # effort in job_finding_probability(...) — confirm the wrapper is
    # intentionally omitted here.
    cost_ui_employed = (
        1
        / (1 + interest_rate_raw)
        * (1 - separation_rate_vector[period_idx])
        * cost_ui_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * effort_searching_plus_next
        * cost_ui_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * (1 - effort_searching_plus_next)
        * cost_ui_unemployed_next_interpolated
    )
    return cost_ui_employed
def _get_cost_ui_unemployed(
    cost_ui_employed_next,
    cost_ui_unemployed_next,
    cost_ui_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_now,
    hc_loss_probability,
    wage_hc_factor_vector,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Expected discounted cost of UI payments for a currently unemployed agent.

    Current cost is this period's UI benefit plus the discounted expectation
    of next period's cost over the hc-loss / no-loss and job-finding
    outcomes, all evaluated at the chosen asset policy. For the loss branch,
    the employed cost function is re-interpolated onto the post-loss hc grid.

    Relies on module-level objects: ``hc_grid_reduced_h_a``,
    ``assets_grid_h_a``, ``hc_grid_reduced``, ``hc_grid_reduced_size``,
    ``assets_grid_size``, ``wage_level``, ``interest_rate_raw``,
    ``interpolation_method``, ``job_finding_probability`` and
    ``_hc_after_loss_1_agent``.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    cost_ui_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_employed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    cost_ui_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_unemployed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    cost_ui_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    cost_ui_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # calculate current period cost:
    # benefits paid now (broadcast over the assets grid) + discounted
    # expected continuation cost over loss / job-finding outcomes
    cost_ui_unemployed = (
        np.repeat(
            _ui_benefits(
                wage_level * wage_hc_factor_vector[hc_grid_reduced],
                ui_replacement_rate_vector,
                ui_floor,
                ui_cap,
                period_idx,
            ),
            assets_grid_size,
        ).reshape(hc_grid_reduced_size, assets_grid_size)
        + 1
        / (1 + interest_rate_raw)
        * (1 - hc_loss_probability)
        * (
            job_finding_probability(effort_searching_next)
            * cost_ui_employed_next_interpolated
            + (1 - job_finding_probability(effort_searching_next))
            * cost_ui_unemployed_next_interpolated
        )
        + 1
        / (1 + interest_rate_raw)
        * hc_loss_probability
        * (
            job_finding_probability(effort_searching_loss_next)
            * cost_ui_employed_loss_next_interpolated
            + (1 - job_finding_probability(effort_searching_loss_next))
            * cost_ui_unemployed_loss_next_interpolated
        )
    )
    return cost_ui_unemployed
def _get_cost_ui_unemployed_loss(
    cost_ui_employed_next,
    cost_ui_unemployed_loss_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_loss_now,
    wage_hc_factor_vector,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Expected discounted cost of UI payments for an unemployed agent who
    has already suffered the hc loss.

    Same structure as ``_get_cost_ui_unemployed`` with the loss branch only:
    this period's benefit plus the discounted expected continuation cost
    over the job-finding outcome, at the chosen asset policy; the employed
    cost function is re-interpolated onto the post-loss hc grid.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    cost_ui_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    cost_ui_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        cost_ui_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): the continuation terms weight by the raw
    # effort_searching_loss_next, whereas _get_cost_ui_unemployed wraps
    # effort in job_finding_probability(...) — confirm the wrapper is
    # intentionally omitted here.
    cost_ui_unemployed_loss = np.repeat(
        _ui_benefits(
            wage_level * wage_hc_factor_vector[hc_grid_reduced],
            ui_replacement_rate_vector,
            ui_floor,
            ui_cap,
            period_idx,
        ),
        assets_grid_size,
    ).reshape(hc_grid_reduced_size, assets_grid_size) + 1 / (1 + interest_rate_raw) * (
        effort_searching_loss_next * cost_ui_employed_loss_next_interpolated
        + (1 - effort_searching_loss_next) * cost_ui_unemployed_loss_next_interpolated
    )
    return cost_ui_unemployed_loss
def _get_revenue_ss_employed(
    revenue_ss_employed_next,
    revenue_ss_unemployed_next,
    policy_effort_searching_next,
    policy_assets_employed_now,
    separation_rate_vector,
    wage_hc_factor_vector,
    tax_ss,
    period_idx,
):
    """Expected discounted social-security revenue from a currently employed agent.

    Current revenue is the payroll tax on this period's wage (broadcast over
    the assets grid) plus the discounted expectation of next period's
    revenue over the separation / search outcomes, evaluated at the chosen
    asset policy and the hc level after the employment gain (capped at
    ``hc_max``). As in ``_get_cost_ui_employed``, the last asset column is
    duplicated via ``np.append`` before interpolating onto the ``*_h_a1``
    grids — presumably constant extrapolation; TODO confirm.
    """
    # interpolate next period cost functions and search effort policy
    # to account for increase in hc and choice of assets next period
    revenue_ss_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_ss_employed_next,
            revenue_ss_employed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    revenue_ss_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_ss_unemployed_next,
            revenue_ss_unemployed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            policy_effort_searching_next,
            policy_effort_searching_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): as in _get_cost_ui_employed, the continuation terms use
    # the raw effort_searching_plus_next instead of
    # job_finding_probability(...) — confirm this is intended.
    revenue_ss_employed = (
        np.repeat(
            (tax_ss * wage_level * wage_hc_factor_vector[hc_grid_reduced]),
            assets_grid_size,
        ).reshape(hc_grid_reduced_size, assets_grid_size)
        + 1
        / (1 + interest_rate_raw)
        * (1 - separation_rate_vector[period_idx])
        * revenue_ss_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * effort_searching_plus_next
        * revenue_ss_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * (1 - effort_searching_plus_next)
        * revenue_ss_unemployed_next_interpolated
    )
    return revenue_ss_employed
def _get_revenue_ss_unemployed(
    revenue_ss_employed_next,
    revenue_ss_unemployed_next,
    revenue_ss_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_now,
    hc_loss_probability,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """One backward-induction step for expected ss tax revenue from a
    currently unemployed agent (hc loss not yet realized).

    There is no flow revenue while unemployed; the continuation value mixes
    the no-loss and hc-loss branches (weighted by ``hc_loss_probability``),
    each split into job-found / still-unemployed via the job-finding
    probability implied by next period's search effort.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_ss_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_employed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_ss_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_unemployed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_ss_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # employed-after-loss revenue: evaluate the employed revenue function
    # at the post-loss hc level returned by _hc_after_loss_1_agent
    revenue_ss_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # discounted mixture: (1 - loss prob) * [find job / stay unemployed]
    #                   +      loss prob  * [find job / stay unemployed, post-loss]
    revenue_ss_unemployed = 1 / (1 + interest_rate_raw) * (1 - hc_loss_probability) * (
        job_finding_probability(effort_searching_next)
        * revenue_ss_employed_next_interpolated
        + (1 - job_finding_probability(effort_searching_next))
        * revenue_ss_unemployed_next_interpolated
    ) + 1 / (1 + interest_rate_raw) * hc_loss_probability * (
        job_finding_probability(effort_searching_loss_next)
        * revenue_ss_employed_loss_next_interpolated
        + (1 - job_finding_probability(effort_searching_loss_next))
        * revenue_ss_unemployed_loss_next_interpolated
    )
    return revenue_ss_unemployed
def _get_revenue_ss_unemployed_loss(
    revenue_ss_employed_next,
    revenue_ss_unemployed_loss_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_loss_now,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """One backward-induction step for expected ss tax revenue from an
    unemployed agent whose hc loss has already been realized.

    No flow revenue; the continuation value splits into finding a job (at
    post-loss hc) versus remaining unemployed after the loss.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_ss_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    revenue_ss_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ss_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): the raw search effort is used as the job-finding weight
    # here, whereas _get_revenue_ss_unemployed wraps effort in
    # job_finding_probability() — confirm this asymmetry is intentional.
    revenue_ss_unemployed_loss = (
        1
        / (1 + interest_rate_raw)
        * (
            effort_searching_loss_next * revenue_ss_employed_loss_next_interpolated
            + (1 - effort_searching_loss_next)
            * revenue_ss_unemployed_loss_next_interpolated
        )
    )
    return revenue_ss_unemployed_loss
def _get_revenue_ui_employed(
    revenue_ui_employed_next,
    revenue_ui_unemployed_next,
    policy_effort_searching_next,
    policy_assets_employed_now,
    separation_rate_vector,
    wage_hc_factor_vector,
    tax_ui,
    period_idx,
):
    """One backward-induction step for expected UI tax revenue from a
    currently employed agent.

    Analogous to _get_revenue_ss_employed, but the UI tax rate is
    period-indexed (``tax_ui[period_idx]``) rather than a constant scalar.
    """
    # interpolate next period cost functions and search effort policy
    # to account for increase in hc and choice of assets next period
    # (last column duplicated so arrays match the extended *_h_a1 grids)
    revenue_ui_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_ui_employed_next,
            revenue_ui_employed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    revenue_ui_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_ui_unemployed_next,
            revenue_ui_unemployed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            policy_effort_searching_next,
            policy_effort_searching_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # flow UI revenue plus discounted continuation:
    # keep job / separate-and-refind / separate.
    # NOTE(review): raw search effort used as the refind weight — see the
    # note in _get_revenue_ss_employed.
    revenue_ui_employed = (
        np.repeat(
            (tax_ui[period_idx] * wage_level * wage_hc_factor_vector[hc_grid_reduced]),
            assets_grid_size,
        ).reshape(hc_grid_reduced_size, assets_grid_size)
        + 1
        / (1 + interest_rate_raw)
        * (1 - separation_rate_vector[period_idx])
        * revenue_ui_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * effort_searching_plus_next
        * revenue_ui_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * (1 - effort_searching_plus_next)
        * revenue_ui_unemployed_next_interpolated
    )
    return revenue_ui_employed
def _get_revenue_ui_unemployed(
    revenue_ui_employed_next,
    revenue_ui_unemployed_next,
    revenue_ui_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_now,
    hc_loss_probability,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """One backward-induction step for expected UI tax revenue from a
    currently unemployed agent (hc loss not yet realized).

    Mirrors _get_revenue_ss_unemployed: no flow revenue, continuation value
    mixes the no-loss and hc-loss branches via ``hc_loss_probability`` and
    the job-finding probability implied by next period's search effort.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_ui_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_employed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_ui_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_unemployed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_ui_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # employed-after-loss revenue: employed revenue function evaluated at
    # the post-loss hc level
    revenue_ui_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    revenue_ui_unemployed = 1 / (1 + interest_rate_raw) * (1 - hc_loss_probability) * (
        job_finding_probability(effort_searching_next)
        * revenue_ui_employed_next_interpolated
        + (1 - job_finding_probability(effort_searching_next))
        * revenue_ui_unemployed_next_interpolated
    ) + 1 / (1 + interest_rate_raw) * hc_loss_probability * (
        job_finding_probability(effort_searching_loss_next)
        * revenue_ui_employed_loss_next_interpolated
        + (1 - job_finding_probability(effort_searching_loss_next))
        * revenue_ui_unemployed_loss_next_interpolated
    )
    return revenue_ui_unemployed
def _get_revenue_ui_unemployed_loss(
    revenue_ui_employed_next,
    revenue_ui_unemployed_loss_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_loss_now,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """One backward-induction step for expected UI tax revenue from an
    unemployed agent whose hc loss has already been realized.

    Mirrors _get_revenue_ss_unemployed_loss for the UI tax.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_ui_employed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_employed_next,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx + 1,
        ),
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    revenue_ui_unemployed_loss_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_ui_unemployed_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): raw search effort used as job-finding weight here,
    # unlike the non-loss variant which applies job_finding_probability()
    # — confirm this asymmetry is intentional.
    revenue_ui_unemployed_loss = (
        1
        / (1 + interest_rate_raw)
        * (
            effort_searching_loss_next * revenue_ui_employed_loss_next_interpolated
            + (1 - effort_searching_loss_next)
            * revenue_ui_unemployed_loss_next_interpolated
        )
    )
    return revenue_ui_unemployed_loss
def _get_revenue_lumpsum_employed(
    revenue_lumpsum_employed_next,
    revenue_lumpsum_unemployed_next,
    policy_effort_searching_next,
    policy_assets_employed_now,
    separation_rate_vector,
    wage_hc_factor_vector,
    tax_income,
    period_idx,
):
    """One backward-induction step for the expected present value of income
    tax revenue (the "lumpsum" scheme) from a currently employed agent.

    Flow revenue is the income tax on the wage plus the income tax on
    interest earnings; continuation logic matches the other *_employed
    revenue functions.
    """
    # interpolate next period cost functions and search effort policy
    # to account for increase in hc and choice of assets next period
    # (last column duplicated so arrays match the extended *_h_a1 grids)
    revenue_lumpsum_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_lumpsum_employed_next,
            revenue_lumpsum_employed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    revenue_lumpsum_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            revenue_lumpsum_unemployed_next,
            revenue_lumpsum_unemployed_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            policy_effort_searching_next,
            policy_effort_searching_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # flow: income tax on wage + income tax on capital income;
    # continuation: keep job / separate-and-refind / separate.
    revenue_lumpsum_employed = (
        np.repeat(
            (tax_income * wage_level * wage_hc_factor_vector[hc_grid_reduced]),
            assets_grid_size,
        ).reshape(hc_grid_reduced_size, assets_grid_size)
        + tax_income * interest_rate_raw * assets_grid_h_a
        + 1
        / (1 + interest_rate_raw)
        * (1 - separation_rate_vector[period_idx])
        * revenue_lumpsum_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * effort_searching_plus_next
        * revenue_lumpsum_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * (1 - effort_searching_plus_next)
        * revenue_lumpsum_unemployed_next_interpolated
    )
    return revenue_lumpsum_employed
def _get_revenue_lumpsum_unemployed(
    revenue_lumpsum_employed_next,
    revenue_lumpsum_unemployed_next,
    revenue_lumpsum_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_now,
    hc_loss_probability,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    period_idx,
):
    """One backward-induction step for expected income tax revenue from a
    currently unemployed agent (hc loss not yet realized).

    Flow revenue is only the income tax on interest earnings (no wage);
    continuation mixes the no-loss and hc-loss branches as in
    _get_revenue_ss_unemployed.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_lumpsum_employed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_lumpsum_employed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_lumpsum_unemployed_next_interpolated = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        revenue_lumpsum_unemployed_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    revenue_lumpsum_unemployed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_lumpsum_unemployed_loss_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    # employed-after-loss revenue: employed revenue function evaluated at
    # the post-loss hc level
    revenue_lumpsum_employed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_lumpsum_employed_next,
            _hc_after_loss_1_agent(
                hc_grid_reduced_h_a,
                wage_loss_factor_vector,
                wage_loss_reference_vector,
                period_idx + 1,
            ),
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    effort_searching_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    revenue_lumpsum_unemployed = (
        tax_income * interest_rate_raw * assets_grid_h_a
        + 1
        / (1 + interest_rate_raw)
        * (1 - hc_loss_probability)
        * (
            job_finding_probability(effort_searching_next)
            * revenue_lumpsum_employed_next_interpolated
            + (1 - job_finding_probability(effort_searching_next))
            * revenue_lumpsum_unemployed_next_interpolated
        )
        + 1
        / (1 + interest_rate_raw)
        * hc_loss_probability
        * (
            job_finding_probability(effort_searching_loss_next)
            * revenue_lumpsum_employed_loss_next_interpolated
            + (1 - job_finding_probability(effort_searching_loss_next))
            * revenue_lumpsum_unemployed_loss_next_interpolated
        )
    )
    return revenue_lumpsum_unemployed
def _get_revenue_lumpsum_unemployed_loss(
    revenue_lumpsum_employed_next,
    revenue_lumpsum_unemployed_loss_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_loss_now,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    period_idx,
):
    """One backward-induction step for expected income tax revenue from an
    unemployed agent whose hc loss has already been realized.

    Flow revenue is the income tax on interest earnings; continuation
    splits into finding a job (at post-loss hc) versus staying unemployed.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_lumpsum_employed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_lumpsum_employed_next,
            _hc_after_loss_1_agent(
                hc_grid_reduced_h_a,
                wage_loss_factor_vector,
                wage_loss_reference_vector,
                period_idx + 1,
            ),
            policy_assets_unemployed_loss_now,
            method=interpolation_method,
        )
    )
    revenue_lumpsum_unemployed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_lumpsum_unemployed_loss_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_loss_now,
            method=interpolation_method,
        )
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): raw search effort used as job-finding weight — see the
    # note in _get_revenue_ss_unemployed_loss.
    revenue_lumpsum_unemployed_loss = (
        tax_income * interest_rate_raw * assets_grid_h_a
        + 1
        / (1 + interest_rate_raw)
        * (
            effort_searching_loss_next * revenue_lumpsum_employed_loss_next_interpolated
            + (1 - effort_searching_loss_next)
            * revenue_lumpsum_unemployed_loss_next_interpolated
        )
    )
    return revenue_lumpsum_unemployed_loss
def _get_revenue_consumption_employed(
    revenue_consumption_employed_next,
    revenue_consumption_unemployed_next,
    policy_consumption_employed_now,
    policy_effort_searching_next,
    policy_assets_employed_now,
    separation_rate_vector,
    tax_consumption,
    period_idx,
):
    """One backward-induction step for expected consumption tax revenue
    from a currently employed agent.

    Flow revenue is the consumption tax applied to this period's
    consumption policy; continuation logic matches the other *_employed
    revenue functions.
    """
    # interpolate next period cost functions and search effort policy
    # to account for increase in hc and choice of assets next period
    # (last column duplicated so arrays match the extended *_h_a1 grids)
    revenue_consumption_employed_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a1,
            assets_grid_h_a1,
            np.append(
                revenue_consumption_employed_next,
                revenue_consumption_employed_next[:, -1, np.newaxis],
                axis=1,
            ),
            np.minimum(hc_grid_reduced_h_a + 1, hc_max),
            policy_assets_employed_now,
            method=interpolation_method,
        )
    )
    revenue_consumption_unemployed_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a1,
            assets_grid_h_a1,
            np.append(
                revenue_consumption_unemployed_next,
                revenue_consumption_unemployed_next[:, -1, np.newaxis],
                axis=1,
            ),
            np.minimum(hc_grid_reduced_h_a + 1, hc_max),
            policy_assets_employed_now,
            method=interpolation_method,
        )
    )
    effort_searching_plus_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            policy_effort_searching_next,
            policy_effort_searching_next[:, -1, np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        policy_assets_employed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # flow: consumption tax on current consumption;
    # continuation: keep job / separate-and-refind / separate.
    revenue_consumption_employed = (
        tax_consumption * policy_consumption_employed_now
        + 1
        / (1 + interest_rate_raw)
        * (1 - separation_rate_vector[period_idx])
        * revenue_consumption_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * effort_searching_plus_next
        * revenue_consumption_employed_next_interpolated
        + 1
        / (1 + interest_rate_raw)
        * separation_rate_vector[period_idx]
        * (1 - effort_searching_plus_next)
        * revenue_consumption_unemployed_next_interpolated
    )
    return revenue_consumption_employed
def _get_revenue_consumption_unemployed(
    revenue_consumption_employed_next,
    revenue_consumption_unemployed_next,
    revenue_consumption_unemployed_loss_next,
    policy_consumption_unemployed_now,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_now,
    hc_loss_probability,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_consumption,
    period_idx,
):
    """One backward-induction step for expected consumption tax revenue
    from a currently unemployed agent (hc loss not yet realized).

    Flow revenue is the consumption tax on this period's consumption
    policy; continuation mixes the no-loss and hc-loss branches as in
    _get_revenue_ss_unemployed.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_consumption_employed_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_employed_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    revenue_consumption_unemployed_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_unemployed_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    revenue_consumption_unemployed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_unemployed_loss_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    # employed-after-loss revenue: employed revenue function evaluated at
    # the post-loss hc level
    revenue_consumption_employed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_employed_next,
            _hc_after_loss_1_agent(
                hc_grid_reduced_h_a,
                wage_loss_factor_vector,
                wage_loss_reference_vector,
                period_idx + 1,
            ),
            policy_assets_unemployed_now,
            method=interpolation_method,
        )
    )
    effort_searching_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_now,
        method=interpolation_method,
    )
    # calculate current period cost
    revenue_consumption_unemployed = (
        tax_consumption * policy_consumption_unemployed_now
        + 1
        / (1 + interest_rate_raw)
        * (1 - hc_loss_probability)
        * (
            job_finding_probability(effort_searching_next)
            * revenue_consumption_employed_next_interpolated
            + (1 - job_finding_probability(effort_searching_next))
            * revenue_consumption_unemployed_next_interpolated
        )
        + 1
        / (1 + interest_rate_raw)
        * hc_loss_probability
        * (
            job_finding_probability(effort_searching_loss_next)
            * revenue_consumption_employed_loss_next_interpolated
            + (1 - job_finding_probability(effort_searching_loss_next))
            * revenue_consumption_unemployed_loss_next_interpolated
        )
    )
    return revenue_consumption_unemployed
def _get_revenue_consumption_unemployed_loss(
    revenue_consumption_employed_next,
    revenue_consumption_unemployed_loss_next,
    policy_consumption_unemployed_loss_now,
    policy_effort_searching_loss_next,
    policy_assets_unemployed_loss_now,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_consumption,
    period_idx,
):
    """One backward-induction step for expected consumption tax revenue
    from an unemployed agent whose hc loss has already been realized.

    Flow revenue is the consumption tax on this period's consumption
    policy; continuation splits into finding a job (at post-loss hc)
    versus staying unemployed.
    """
    # interpolate next period cost functions and search effort policy
    # to account for hc loss and choice of assets next period
    revenue_consumption_employed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_employed_next,
            _hc_after_loss_1_agent(
                hc_grid_reduced_h_a,
                wage_loss_factor_vector,
                wage_loss_reference_vector,
                period_idx + 1,
            ),
            policy_assets_unemployed_loss_now,
            method=interpolation_method,
        )
    )
    revenue_consumption_unemployed_loss_next_interpolated = (
        interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a,
            assets_grid_h_a,
            revenue_consumption_unemployed_loss_next,
            hc_grid_reduced_h_a,
            policy_assets_unemployed_loss_now,
            method=interpolation_method,
        )
    )
    effort_searching_loss_next = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        policy_effort_searching_loss_next,
        hc_grid_reduced_h_a,
        policy_assets_unemployed_loss_now,
        method=interpolation_method,
    )
    # calculate current period cost
    # NOTE(review): raw search effort used as job-finding weight — see the
    # note in _get_revenue_ss_unemployed_loss.
    revenue_consumption_unemployed_loss = (
        tax_consumption * policy_consumption_unemployed_loss_now
        + 1
        / (1 + interest_rate_raw)
        * (
            effort_searching_loss_next
            * revenue_consumption_employed_loss_next_interpolated
            + (1 - effort_searching_loss_next)
            * revenue_consumption_unemployed_loss_next_interpolated
        )
    )
    return revenue_consumption_unemployed_loss
@nb.njit
def _get_duration_weeks(
    job_finding_probability_quarter,
    duration_quarter,
):
    """Convert a quarterly unemployment duration into an expected weekly one.

    Treats a quarter as 13 weeks with a constant weekly job-finding hazard
    implied by the quarterly probability, and adds the expected number of
    extra weeks spent unemployed within the exit quarter.

    Parameters
    ----------
    job_finding_probability_quarter : array of quarterly job-finding
        probabilities.
    duration_quarter : array of completed unemployment durations in quarters.

    Returns
    -------
    Expected unemployment duration in weeks.
    """
    # transform quarterly job finding probability to weekly job finding probability
    job_finding_probability_week = 1 - (1 - job_finding_probability_quarter) ** (1 / 13)
    # calculate expected additional unemployment duration in weeks
    additional_duration_weeks = (
        (1 - job_finding_probability_week)
        - (1 + job_finding_probability_week * 12)
        * (1 - job_finding_probability_week) ** 13
    ) / (job_finding_probability_week * (1 - (1 - job_finding_probability_week) ** 13))
    # correct for small probabilities (expression approaches 6 in the limit,
    # but gets numerically unstable for small probabilities).
    # FIX: the previous 0/1-mask multiplication did not remove NaNs, because
    # 0 * nan == nan — e.g. a probability of exactly zero produced 0/0 above
    # and the fallback value was never applied. np.where selects instead of
    # multiplying and therefore discards the unstable branch entirely.
    additional_duration_weeks = np.where(
        job_finding_probability_quarter > 0.001,
        additional_duration_weeks,
        np.full_like(additional_duration_weeks, 6.0),
    )
    # transform quarterly duration to weekly duration
    duration_weeks = duration_quarter * 13
    # add expected additional weeks
    duration_weeks += additional_duration_weeks
    return duration_weeks
def _get_value_employed(
    policy_consumption_employed_now,
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    value_searching_next,
    value_employed_next,
    assets_next,
    separation_rate_vector,
    period_idx,
):
    """Value function of a currently employed agent.

    Combines current consumption utility with the discounted continuation
    values of staying employed versus being separated and searching,
    weighted by the period's separation rate. Before interpolating, the
    value functions are extended one column past the top of the asset grid
    using an annuity-based extrapolation (npf = numpy-financial).
    """
    # expand interpolation grid
    # consumption_diff: the annuity payment equivalent of the extra wealth
    # between assets_max and the top grid point, spread over the remaining
    # working + retired lifetime (npf.pmt returns a negative payment for
    # positive principal, hence the leading minus)
    consumption_diff = -npf.pmt(
        (1 - discount_factor) / discount_factor,
        n_periods_retired + (n_periods_working - (period_idx + 1)),
        assets_max - np.amax(assets_grid),
    )
    # value_employed_diff / value_unemployed_diff: present value of the
    # per-period utility gain from that extra annuity consumption, used to
    # extrapolate the value functions beyond the top asset grid point
    value_employed_diff = -npf.pv(
        (1 - discount_factor) / discount_factor,
        n_periods_retired + (n_periods_working - (period_idx + 1)),
        consumption_utility(policy_consumption_employed_next[:, -1] + consumption_diff)
        - consumption_utility(policy_consumption_employed_next[:, -1]),
    )
    value_unemployed_diff = -npf.pv(
        (1 - discount_factor) / discount_factor,
        n_periods_retired + (n_periods_working - (period_idx + 1)),
        consumption_utility(
            policy_consumption_unemployed_next[:, -1] + consumption_diff
        )
        - consumption_utility(policy_consumption_unemployed_next[:, -1]),
    )
    # interpolate continuation value on grid to reflect increase in
    # hc and choice for assets next period
    continuation_value_employed = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            value_employed_next,
            (value_employed_next[:, -1] + value_employed_diff)[..., np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        assets_next,
        method=interpolation_method,
    )
    continuation_value_searching = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a1,
        assets_grid_h_a1,
        np.append(
            value_searching_next,
            (value_searching_next[:, -1] + value_unemployed_diff)[..., np.newaxis],
            axis=1,
        ),
        np.minimum(hc_grid_reduced_h_a + 1, hc_max),
        assets_next,
        method=interpolation_method,
    )
    # calculate value function from consumption level and continuation value
    value = (
        consumption_utility(policy_consumption_employed_now)
        + discount_factor
        * (1 - separation_rate_vector[period_idx])
        * continuation_value_employed
        + discount_factor
        * separation_rate_vector[period_idx]
        * continuation_value_searching
    )
    return value
def _get_value_unemployed(
    policy_consumption_unemployed_now,
    value_searching_next,
    value_searching_loss_next,
    assets_next,
    wage_loss_probability,
):
    """Value function of a currently unemployed agent (no hc loss yet).

    Flow utility from current consumption plus the discounted continuation
    values of searching next period with and without a human-capital loss,
    weighted by the loss probability.
    """
    # continuation values, evaluated at the chosen asset level next period
    continuation_no_loss = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        value_searching_next,
        hc_grid_reduced_h_a,
        assets_next,
        method=interpolation_method,
    )
    continuation_with_loss = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        value_searching_loss_next,
        hc_grid_reduced_h_a,
        assets_next,
        method=interpolation_method,
    )
    # flow utility plus probability-weighted, discounted continuation value
    return (
        consumption_utility(policy_consumption_unemployed_now)
        + discount_factor * (1 - wage_loss_probability) * continuation_no_loss
        + discount_factor * wage_loss_probability * continuation_with_loss
    )
def _get_value_unemployed_loss(
    policy_consumption,
    value_searching_next,
    assets_next,
):
    """Value function of an unemployed agent after the hc loss is realized.

    Flow utility from current consumption plus the discounted continuation
    value of searching next period, evaluated at the chosen asset level.
    """
    # continuation value at the chosen asset level next period
    continuation = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        value_searching_next,
        hc_grid_reduced_h_a,
        assets_next,
        method=interpolation_method,
    )
    # flow utility plus discounted continuation value
    return consumption_utility(policy_consumption) + discount_factor * continuation
def _interpolate_consumption_on_grid(
    consumption_off_grid,
    assets_off_grid,
):
    """Map an off-grid consumption policy back onto the regular asset grid.

    The (hc, assets) pairs produced by inverting the budget constraint do
    not lie on the fixed asset grid; this interpolates the associated
    consumption values onto it.
    """
    return interpolate_2d_unordered_to_unordered_iter(
        hc_grid_reduced_h_a,
        assets_off_grid.astype(float),
        consumption_off_grid.astype(float),
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        method=interpolation_method,
    )
def _invert_bc_employed(
    consumption_off_grid,
    wage_hc_factor_grid,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    period_idx,
):
    """Invert the employed budget constraint.

    Given consumption today and next period's assets (the asset grid),
    recover the implied assets held today. Mirror image of
    _solve_bc_employed, so the net-of-tax wage coefficient must be
    identical in both functions.

    Returns the implied current-period asset holdings (off-grid).
    """
    # FIX: the net-wage coefficient previously read
    # (1 - tax_ss + tax_ui[period_idx] - tax_income), inconsistent with
    # _solve_bc_employed which subtracts all three taxes. The UI tax
    # reduces the net wage, so it must enter with a minus sign here too.
    assets_off_grid = (
        consumption_off_grid
        + assets_grid_h_a
        - transfers_lumpsum
        - (1 - tax_ss - tax_ui[period_idx] - tax_income)
        * wage_level
        * wage_hc_factor_grid
    ) / (1 + (1 - tax_income) * interest_rate_raw)
    return assets_off_grid
def _invert_bc_unemployed(
    consumption_off_grid,
    wage_hc_factor_grid,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Invert the unemployed budget constraint.

    Given consumption today and next period's assets (the asset grid),
    recover the implied assets held today, taking UI benefits and lump-sum
    transfers into account.
    """
    # UI benefits implied by the (pre-loss) wage at each hc level
    ui_income = _ui_benefits(
        wage_level * wage_hc_factor_grid,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # resources that must have been carried into the period, discounted by
    # the net-of-tax gross return
    gross_return_net = 1 + (1 - tax_income) * interest_rate_raw
    resources_required = (
        consumption_off_grid + assets_grid_h_a - transfers_lumpsum - ui_income
    )
    return resources_required / gross_return_net
def _solve_bc_employed(
    consumption_now,
    wage_hc_factor_grid,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    period_idx,
):
    """Employed budget constraint, solved forward.

    Given current assets (the asset grid), net labor income, transfers and
    the consumption choice, return the assets carried into next period.
    """
    # after-tax gross return on current asset holdings
    capital_resources = assets_grid_h_a * (1 + (1 - tax_income) * interest_rate_raw)
    # wage net of ss, UI and income taxes
    net_wage_income = (
        (1 - tax_ss - tax_ui[period_idx] - tax_income)
        * wage_level
        * wage_hc_factor_grid
    )
    return capital_resources + net_wage_income + transfers_lumpsum - consumption_now
def _solve_bc_unemployed(
    consumption_now,
    wage_hc_factor_grid,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Unemployed budget constraint, solved forward.

    Given current assets (the asset grid), UI benefits, transfers and the
    consumption choice, return the assets carried into next period.
    """
    # UI benefits implied by the wage at each hc level
    ui_income = _ui_benefits(
        wage_level * wage_hc_factor_grid,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # after-tax gross return on current asset holdings
    capital_resources = assets_grid_h_a * (1 + (1 - tax_income) * interest_rate_raw)
    return capital_resources + ui_income + transfers_lumpsum - consumption_now
def _solve_one_period(
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    policy_consumption_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    value_employed_next,
    value_searching_next,
    value_searching_loss_next,
    cost_ui_employed_next,
    cost_ui_unemployed_next,
    cost_ui_unemployed_loss_next,
    hc_loss_probability,
    revenue_ss_employed_next,
    revenue_ss_unemployed_next,
    revenue_ss_unemployed_loss_next,
    revenue_ui_employed_next,
    revenue_ui_unemployed_next,
    revenue_ui_unemployed_loss_next,
    revenue_lumpsum_employed_next,
    revenue_lumpsum_unemployed_next,
    revenue_lumpsum_unemployed_loss_next,
    separation_rate_vector,
    wage_hc_factor_grid,
    wage_hc_factor_vector,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Backward-induction step: solve one working-life period.

    Takes next-period (period_idx + 1) consumption / search policies,
    value functions, and the expected-discounted fiscal objects (UI
    cost, SS / UI / lump-sum revenue), and returns the corresponding
    current-period objects on the [hc x assets] grid.

    The order of the sub-solutions matters: unemployed-after-hc-loss is
    solved first, then unemployed, then employed, and finally the two
    search problems, which combine the three value functions computed
    above.  The fiscal updates afterwards reuse the asset policies
    obtained in the first three steps.

    Returns a 22-element tuple of current-period policies, values,
    cost and revenue functions, in the order listed in the return
    statement below.
    """
    # SOLVE FOR: unemployed after hc loss
    (
        policy_consumption_unemployed_loss_now,
        policy_assets_unemployed_loss_now,
        value_unemployed_loss_now,
    ) = _solve_unemployed_loss(
        policy_consumption_employed_next,
        policy_consumption_unemployed_loss_next,
        policy_effort_searching_loss_next,
        value_searching_loss_next,
        wage_hc_factor_grid,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # SOLVE FOR: unemployed
    (
        policy_consumption_unemployed_now,
        policy_assets_unemployed_now,
        value_unemployed_now,
    ) = _solve_unemployed(
        policy_consumption_employed_next,
        policy_consumption_unemployed_next,
        policy_consumption_unemployed_loss_next,
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        value_searching_next,
        value_searching_loss_next,
        hc_loss_probability,
        wage_hc_factor_grid,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # SOLVE FOR: employed
    (
        policy_consumption_employed_now,
        policy_assets_employed_now,
        value_employed_now,
    ) = _solve_employed(
        policy_consumption_employed_next,
        policy_consumption_unemployed_next,
        policy_effort_searching_next,
        value_employed_next,
        value_searching_next,
        separation_rate_vector,
        wage_hc_factor_grid,
        tax_ss,
        tax_ui,
        tax_income,
        transfers_lumpsum,
        period_idx,
    )
    # SOLVE FOR: searching and searching with hc loss
    (
        policy_effort_searching_now,
        policy_effort_searching_loss_now,
        value_searching_now,
        value_searching_loss_now,
    ) = _solve_searching(
        value_employed_now,
        value_unemployed_now,
        value_unemployed_loss_now,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        period_idx,
    )
    # update revenue and cost functions
    cost_ui_unemployed_loss_now = _get_cost_ui_unemployed_loss(
        cost_ui_employed_next,
        cost_ui_unemployed_loss_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_loss_now,
        wage_hc_factor_vector,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    cost_ui_unemployed_now = _get_cost_ui_unemployed(
        cost_ui_employed_next,
        cost_ui_unemployed_next,
        cost_ui_unemployed_loss_next,
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_now,
        hc_loss_probability,
        wage_hc_factor_vector,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    cost_ui_employed_now = _get_cost_ui_employed(
        cost_ui_employed_next,
        cost_ui_unemployed_next,
        policy_effort_searching_next,
        policy_assets_employed_now,
        separation_rate_vector,
        period_idx,
    )
    revenue_ss_unemployed_loss_now = _get_revenue_ss_unemployed_loss(
        revenue_ss_employed_next,
        revenue_ss_unemployed_loss_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_loss_now,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        period_idx,
    )
    revenue_ss_unemployed_now = _get_revenue_ss_unemployed(
        revenue_ss_employed_next,
        revenue_ss_unemployed_next,
        revenue_ss_unemployed_loss_next,
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_now,
        hc_loss_probability,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        period_idx,
    )
    revenue_ss_employed_now = _get_revenue_ss_employed(
        revenue_ss_employed_next,
        revenue_ss_unemployed_next,
        policy_effort_searching_next,
        policy_assets_employed_now,
        separation_rate_vector,
        wage_hc_factor_vector,
        tax_ss,
        period_idx,
    )
    revenue_ui_unemployed_loss_now = _get_revenue_ui_unemployed_loss(
        revenue_ui_employed_next,
        revenue_ui_unemployed_loss_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_loss_now,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        period_idx,
    )
    revenue_ui_unemployed_now = _get_revenue_ui_unemployed(
        revenue_ui_employed_next,
        revenue_ui_unemployed_next,
        revenue_ui_unemployed_loss_next,
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_now,
        hc_loss_probability,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        period_idx,
    )
    revenue_ui_employed_now = _get_revenue_ui_employed(
        revenue_ui_employed_next,
        revenue_ui_unemployed_next,
        policy_effort_searching_next,
        policy_assets_employed_now,
        separation_rate_vector,
        wage_hc_factor_vector,
        tax_ui,
        period_idx,
    )
    revenue_lumpsum_unemployed_loss_now = _get_revenue_lumpsum_unemployed_loss(
        revenue_lumpsum_employed_next,
        revenue_lumpsum_unemployed_loss_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_loss_now,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        period_idx,
    )
    revenue_lumpsum_unemployed_now = _get_revenue_lumpsum_unemployed(
        revenue_lumpsum_employed_next,
        revenue_lumpsum_unemployed_next,
        revenue_lumpsum_unemployed_loss_next,
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        policy_assets_unemployed_now,
        hc_loss_probability,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        period_idx,
    )
    revenue_lumpsum_employed_now = _get_revenue_lumpsum_employed(
        revenue_lumpsum_employed_next,
        revenue_lumpsum_unemployed_next,
        policy_effort_searching_next,
        policy_assets_employed_now,
        separation_rate_vector,
        wage_hc_factor_vector,
        tax_income,
        period_idx,
    )
    return (
        policy_consumption_employed_now,
        policy_consumption_unemployed_now,
        policy_consumption_unemployed_loss_now,
        policy_effort_searching_now,
        policy_effort_searching_loss_now,
        value_employed_now,
        value_unemployed_now,
        value_unemployed_loss_now,
        value_searching_now,
        value_searching_loss_now,
        cost_ui_employed_now,
        cost_ui_unemployed_now,
        cost_ui_unemployed_loss_now,
        revenue_ss_employed_now,
        revenue_ss_unemployed_now,
        revenue_ss_unemployed_loss_now,
        revenue_ui_employed_now,
        revenue_ui_unemployed_now,
        revenue_ui_unemployed_loss_now,
        revenue_lumpsum_employed_now,
        revenue_lumpsum_unemployed_now,
        revenue_lumpsum_unemployed_loss_now,
    )
def _solve_employed(
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    policy_effort_searching_next,
    value_employed_next,
    value_searching_next,
    separation_rate_vector,
    wage_hc_factor_grid,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    period_idx,
):
    """Solve the employed worker's problem for one period via the
    endogenous-grid method.

    Steps: (1) consumption from the Euler equation, (2) invert the
    budget constraint to get implied current assets off grid,
    (3) interpolate consumption back onto the asset grid, (4) recover
    next-period assets from the forward budget constraint, (5) enforce
    the borrowing limit, (6) evaluate the value function.

    Returns the consumption policy, the next-period asset policy, and
    the value function, each on the [hc x assets] grid.
    """
    # consumption from FOC [hc x assets]
    consumption_employed_off_grid = _foc_employed(
        policy_effort_searching_next,
        policy_consumption_employed_next,
        policy_consumption_unemployed_next,
        separation_rate_vector,
        tax_income,
        period_idx,
    )
    # back out implicit current period asset levels (off grid)
    # for next period asset levels (on grid) [hc x assets]
    assets_off_grid = _invert_bc_employed(
        consumption_employed_off_grid,
        wage_hc_factor_grid,
        tax_ss,
        tax_ui,
        tax_income,
        transfers_lumpsum,
        period_idx,
    )
    # interpolate consumption levels for (on grid) asset levels [hc x assets]
    consumption_employed_on_grid = _interpolate_consumption_on_grid(
        consumption_employed_off_grid, assets_off_grid
    )
    # back out implicit asset level next period from budget constraint
    assets_employed_next = _solve_bc_employed(
        consumption_employed_on_grid,
        wage_hc_factor_grid,
        tax_ss,
        tax_ui,
        tax_income,
        transfers_lumpsum,
        period_idx,
    )
    # correct consumption for binding borrowing limit
    (
        policy_consumption_employed_now,
        assets_employed_next,
    ) = _apply_borrowing_limit_employed(
        consumption_employed_on_grid,
        assets_employed_next,
        wage_hc_factor_grid,
        tax_ss,
        tax_ui,
        tax_income,
        transfers_lumpsum,
        period_idx,
    )
    # calculate value function
    value_employed_now = _get_value_employed(
        policy_consumption_employed_now,
        policy_consumption_employed_next,
        policy_consumption_unemployed_next,
        value_searching_next,
        value_employed_next,
        assets_employed_next,
        separation_rate_vector,
        period_idx,
    )
    return policy_consumption_employed_now, assets_employed_next, value_employed_now
def _solve_unemployed(
    policy_consumption_employed_next,
    policy_consumption_unemployed_next,
    policy_consumption_unemployed_loss_next,
    policy_effort_searching_next,
    policy_effort_searching_loss_next,
    value_searching_next,
    value_searching_loss_next,
    hc_loss_probability,
    wage_hc_factor_grid,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Solve the unemployed worker's problem (before any hc loss) for
    one period via the endogenous-grid method.

    Mirrors _solve_employed but accounts for the probability of a
    human-capital loss shock (``hc_loss_probability``) in both the
    Euler equation and the continuation value.

    Returns the consumption policy, next-period asset policy, and value
    function on the [hc x assets] grid.
    """
    # consumption via FOC [hc x assets]
    consumption_unemployed_off_grid = _foc_unemployed(
        policy_effort_searching_next,
        policy_effort_searching_loss_next,
        policy_consumption_employed_next,
        policy_consumption_unemployed_next,
        policy_consumption_unemployed_loss_next,
        hc_loss_probability,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        period_idx,
    )
    # back out implicit current period asset levels (off grid)
    # for next period asset levels (on grid)
    assets_off_grid = _invert_bc_unemployed(
        consumption_unemployed_off_grid,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # interpolate consumption levels for (on grid) asset levels
    consumption_unemployed_on_grid = _interpolate_consumption_on_grid(
        consumption_unemployed_off_grid, assets_off_grid
    )
    # back out implicit asset level next period from budget constraint
    assets_unemployed_next = _solve_bc_unemployed(
        consumption_unemployed_on_grid,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # correct consumption and asset policy for binding borrowing limit
    (
        policy_consumption_unemployed_now,
        policy_assets_unemployed_now,
    ) = _apply_borrowing_limit_unemployed(
        consumption_unemployed_on_grid,
        assets_unemployed_next,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # calculate value function
    value_unemployed_now = _get_value_unemployed(
        policy_consumption_unemployed_now,
        value_searching_next,
        value_searching_loss_next,
        policy_assets_unemployed_now,
        hc_loss_probability,
    )
    return (
        policy_consumption_unemployed_now,
        policy_assets_unemployed_now,
        value_unemployed_now,
    )
def _solve_unemployed_loss(
    policy_consumption_employed_next,
    policy_consumption_unemployed_loss_next,
    policy_effort_searching_loss_next,
    value_searching_loss_next,
    wage_hc_factor_grid,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Solve the problem of an unemployed worker who has already
    suffered the human-capital loss, for one period via the
    endogenous-grid method.

    Shares the unemployed budget constraint (_invert_bc_unemployed /
    _solve_bc_unemployed) but uses the loss-specific Euler equation and
    continuation value.

    Returns the consumption policy, next-period asset policy, and value
    function on the [hc x assets] grid.
    """
    # consumption via FOC [hc level x asset level]
    consumption_unemployed_loss_off_grid = _foc_unemployed_loss(
        policy_effort_searching_loss_next,
        policy_consumption_employed_next,
        policy_consumption_unemployed_loss_next,
        wage_loss_factor_vector,
        wage_loss_reference_vector,
        tax_income,
        period_idx,
    )
    # back out implicit current period asset levels (off grid)
    # for next period asset levels (on grid)
    assets_off_grid = _invert_bc_unemployed(
        consumption_unemployed_loss_off_grid,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # interpolate consumption levels for (on grid) asset levels
    consumption_unemployed_loss_on_grid = _interpolate_consumption_on_grid(
        consumption_unemployed_loss_off_grid,
        assets_off_grid,
    )
    # back out implicit asset level next period from budget constraint
    assets_unemployed_loss_next = _solve_bc_unemployed(
        consumption_unemployed_loss_on_grid,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # correct consumption and asset policy for binding borrowing limit
    (
        policy_consumption_unemployed_loss_now,
        policy_assets_unemployed_loss_now,
    ) = _apply_borrowing_limit_unemployed(
        consumption_unemployed_loss_on_grid,
        assets_unemployed_loss_next,
        wage_hc_factor_grid,
        tax_income,
        transfers_lumpsum,
        ui_replacement_rate_vector,
        ui_floor,
        ui_cap,
        period_idx,
    )
    # calculate value function
    value_unemployed_loss_now = _get_value_unemployed_loss(
        policy_consumption_unemployed_loss_now,
        value_searching_loss_next,
        policy_assets_unemployed_loss_now,
    )
    return (
        policy_consumption_unemployed_loss_now,
        policy_assets_unemployed_loss_now,
        value_unemployed_loss_now,
    )
def _solve_searching(
    value_employed_now,
    value_unemployed_now,
    value_unemployed_loss_now,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """Solve both search problems (with and without prior hc loss).

    For the hc-loss case the value of employment is first evaluated at
    the depreciated human-capital levels via interpolation, then both
    problems are solved with the FOC-based solver
    (_solve_searching_foc); the grid-search alternative is kept below
    as commented-out code.

    Returns effort policies and searching values for both cases on the
    [hc x assets] grid.
    """
    # interpolate value of being employed at depreciated hc levels
    value_employed_loss_now = interpolate_2d_ordered_to_unordered(
        hc_grid_reduced_h_a,
        assets_grid_h_a,
        value_employed_now,
        _hc_after_loss_1_agent(
            hc_grid_reduced_h_a,
            wage_loss_factor_vector,
            wage_loss_reference_vector,
            period_idx,
        ),
        assets_grid_h_a,
        method=interpolation_method,
    )
    # # solve using grid search
    # (policy_effort_searching_now, value_searching_now) = _solve_searching_iter(
    #     value_employed_now, value_unemployed_now
    # )
    # (
    #     policy_effort_searching_loss_now,
    #     value_searching_loss_now,
    # ) = _solve_searching_iter(value_employed_loss_now, value_unemployed_loss_now)
    # solve using FOCs
    (policy_effort_searching_now, value_searching_now) = _solve_searching_foc(
        value_employed_now, value_unemployed_now
    )
    (policy_effort_searching_loss_now, value_searching_loss_now) = _solve_searching_foc(
        value_employed_loss_now, value_unemployed_loss_now
    )
    return (
        policy_effort_searching_now,
        policy_effort_searching_loss_now,
        value_searching_now,
        value_searching_loss_now,
    )
def _solve_searching_base(value_employed, value_unemployed):
    """Solve the search-effort problem by grid search.

    For every (hc, assets) state, evaluates leisure utility plus the
    job-finding-probability-weighted continuation values on the effort
    grid and picks the maximizing effort.

    Parameters
    ----------
    value_employed, value_unemployed : ndarray [hc x assets]
        Continuation values of employment and unemployment.

    Returns
    -------
    (policy, value) : ndarray [hc x assets] each
        Optimal search effort and the associated value of searching.
    """
    # initiate objects
    policy = np.full((hc_grid_reduced_size, assets_grid_size), np.nan)
    value = np.full((hc_grid_reduced_size, assets_grid_size), np.nan)
    # loop-invariant pieces hoisted out of the asset loop
    leisure_utility_effort = leisure_utility_interpolated(search_effort_grid)
    row_idx = np.arange(hc_grid_reduced_size)
    # solve for optimal search effort using grid search method
    for asset_level in range(assets_grid_size):
        # returns to search for every (hc, effort) pair via broadcasting
        # (replaces the former np.tile / np.repeat + reshape construction)
        search_returns = (
            leisure_utility_effort[np.newaxis, :]
            + job_finding_probability_grid
            * value_employed[:, asset_level][:, np.newaxis]
            + (1 - job_finding_probability_grid)
            * value_unemployed[:, asset_level][:, np.newaxis]
        )
        search_effort_idx = search_returns.argmax(axis=1)
        # vectorized gather of the row-wise maxima (replaces the former
        # Python-level list comprehension over rows)
        value[:, asset_level] = search_returns[row_idx, search_effort_idx]
        policy[:, asset_level] = search_effort_grid[search_effort_idx]
    return policy, value
def on_grid(x):
    """Snap a scalar effort value to the nearest point of search_effort_grid."""
    distances = np.abs(search_effort_grid - x)
    return search_effort_grid[distances.argmin()]
def on_grid_vectorized(x):
    """Element-wise version of on_grid, built with numpy.vectorize."""
    snap = np.vectorize(on_grid)
    return snap(x)
@nb.njit
def on_grid_iter(array_in):
    """Numba-compiled 2-D version of on_grid: snap every element of a
    2-D array to the nearest point of search_effort_grid.

    Kept as explicit nested loops because it is compiled with
    nb.njit (no np.vectorize under numba)."""
    dims = array_in.shape
    array_out = np.full(array_in.shape, np.nan)
    for x_idx in range(dims[0]):
        for y_idx in range(dims[1]):
            array_out[x_idx, y_idx] = search_effort_grid[
                np.abs(array_in[x_idx, y_idx] - search_effort_grid).argmin()
            ]
    return array_out
def _solve_searching_foc(value_employed, value_unemployed):
    """Solve the search-effort problem from the first-order condition.

    Inverts the leisure-utility derivative at the value gap between
    unemployment and employment to obtain interior optimal effort, then
    clamps effort to the grid end points where the gap leaves the
    invertible range.

    Returns the effort policy and the associated value of searching,
    both as float arrays on the [hc x assets] grid.
    """
    # optimal effort from FOC / constraints
    effort_off_grid = leisure_utility_dx_inverted(value_unemployed - value_employed)
    # NOTE(review): the clamps below assume value gaps above
    # leisure_utility_dx_min (resp. below leisure_utility_dx_max) map to
    # the corner efforts search_effort_grid[0] / [-1] -- confirm against
    # the monotonicity of the calibrated leisure_utility_dx.
    effort_off_grid[
        (value_unemployed - value_employed) > leisure_utility_dx_min
    ] = search_effort_grid[0]
    effort_off_grid[
        (value_unemployed - value_employed) < leisure_utility_dx_max
    ] = search_effort_grid[-1]
    # get nearest values on grid
    # policy = on_grid_iter(effort_off_grid)
    policy = effort_off_grid
    value = (
        leisure_utility_interpolated(policy)
        + job_finding_probability(policy) * value_employed
        + (1 - job_finding_probability(policy)) * value_unemployed
    )
    return policy.astype(float), value.astype(float)
def _solve_searching_vectorized(
    value_employed,
    value_unemployed,
):
    """Fully vectorized grid-search solver for the search-effort
    problem: builds the [hc x assets x effort] array of search returns
    via np.repeat / np.tile reshapes and maximizes over the effort
    axis.

    Returns (policy, value) on the [hc x assets] grid.
    """
    # solve for optimal search effort using grid search method
    search_returns = (
        np.repeat(
            leisure_utility_interpolated(search_effort_grid),
            hc_grid_reduced_size * assets_grid_size,
        )
        .reshape((search_effort_grid_size, assets_grid_size, hc_grid_reduced_size))
        .T
        + np.repeat(
            job_finding_probability_grid,
            hc_grid_reduced_size * assets_grid_size,
        )
        .reshape((search_effort_grid_size, assets_grid_size, hc_grid_reduced_size))
        .T
        * np.tile(value_employed, search_effort_grid_size)
        .T.reshape(search_effort_grid_size, assets_grid_size, hc_grid_reduced_size)
        .T
        + np.repeat(
            (1 - job_finding_probability_grid),
            hc_grid_reduced_size * assets_grid_size,
        )
        .reshape((search_effort_grid_size, assets_grid_size, hc_grid_reduced_size))
        .T
        * np.tile(value_unemployed, search_effort_grid_size)
        .T.reshape((search_effort_grid_size, assets_grid_size, hc_grid_reduced_size))
        .T
    )
    # maximize over the effort axis (axis 2)
    search_effort_idx = np.argmax(search_returns, axis=2)
    value = np.amax(search_returns, axis=2)
    policy = search_effort_grid[search_effort_idx]
    return policy, value
@nb.njit
def _solve_searching_iter(
    value_employed_now,
    value_unemployed_now,
):
    """Numba-compiled grid-search solver for the search-effort problem:
    loops over every (hc, assets) state and maximizes leisure utility
    plus probability-weighted continuation values over the effort grid.

    Returns (policy, value) on the [hc x assets] grid.
    """
    # initiate objects
    policy = np.full((hc_grid_reduced_size, assets_grid_size), np.nan)
    value = np.full((hc_grid_reduced_size, assets_grid_size), np.nan)
    # solve for optimal search effort using grid search method
    for hc_level in range(hc_grid_reduced_size):
        for asset_level in range(assets_grid_size):
            search_returns = (
                leisure_utility_on_search_grid
                + job_finding_probability_grid
                * np.full(
                    search_effort_grid_size,
                    value_employed_now[hc_level, asset_level],
                )
                + (1 - job_finding_probability_grid)
                * np.full(
                    search_effort_grid_size,
                    value_unemployed_now[hc_level, asset_level],
                )
            )
            search_effort_idx = search_returns.argmax()
            value[hc_level, asset_level] = search_returns[search_effort_idx]
            policy[hc_level, asset_level] = search_effort_grid[search_effort_idx]
    return policy, value
@nb.njit
def consumption_utility(x):
    """CRRA period utility of consumption; log utility in the
    unit-risk-aversion limit."""
    if risk_aversion_coefficient != 1:
        return x ** (1 - risk_aversion_coefficient) / (1 - risk_aversion_coefficient)
    return np.log(x)
def _hc_after_loss_n_agents(
    hc_before_loss,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """Apply the type-specific human-capital-loss mapping to every
    agent type by delegating to _hc_after_loss_1_agent row by row and
    stacking the results along the type axis."""
    per_type = [
        _hc_after_loss_1_agent(
            hc_before_loss[type_idx, ...],
            wage_loss_factor_vector[type_idx, :],
            wage_loss_reference_vector[type_idx, :],
            period_idx,
        )
        for type_idx in range(n_types)
    ]
    return np.stack(per_type, axis=0)
def _hc_after_loss_1_agent(
    hc_before_loss,
    wage_loss_factor_vector,
    wage_loss_reference_vector,
    period_idx,
):
    """Map pre-loss human capital to post-loss human capital for a
    single agent type.

    The depreciated wage factor (capped below by the factor at hc = 0)
    is mapped back to an hc level by inverting the reference
    wage-factor schedule via linear interpolation; values outside the
    schedule's range are filled with 0.
    """
    # inverse of the reference wage-factor schedule: wage factor -> hc
    func = interpolate.interp1d(
        wage_loss_reference_vector,
        hc_grid,
        kind="linear",
        bounds_error=False,
        fill_value=0.0,
    )
    # depreciated wage factor, floored at the hc = 0 wage factor
    val = np.maximum(
        wage_hc_factor_interpolated_1_agent(
            np.minimum(hc_before_loss, hc_max), wage_loss_reference_vector
        )
        * wage_loss_factor_vector[period_idx],
        wage_hc_factor_interpolated_1_agent(0, wage_loss_reference_vector),
    )
    return func(val)
@nb.njit
def job_finding_probability(x):
    """Job-finding probability implied by search effort x (linear in
    effort, scaled by the contact rate)."""
    return x * contact_rate
def leisure_utility_dx_interpolated(x):
    """Interpolate the first derivative of leisure utility at effort x."""
    interpolator = interpolate.interp1d(
        leisure_grid, leisure_utility_dx, kind=interpolation_method
    )
    return interpolator(x)
def leisure_utility_dxdx_interpolated(x):
    """Interpolate the second derivative of leisure utility at effort x."""
    interpolator = interpolate.interp1d(
        leisure_grid, leisure_utility_dxdx, kind=interpolation_method
    )
    return interpolator(x)
def leisure_utility_interpolated(x):
    """Interpolate the level of leisure utility at effort x."""
    interpolator = interpolate.interp1d(
        leisure_grid, leisure_utility, kind=interpolation_method
    )
    return interpolator(x)
def leisure_utility_dx_inverted(x):
    """Invert the leisure-utility derivative: map a marginal-value gap
    back to an effort level; NaN outside the tabulated range."""
    inverse_interpolator = interpolate.interp1d(
        leisure_utility_dx,
        leisure_grid,
        kind=interpolation_method,
        bounds_error=False,
        fill_value=np.nan,
    )
    return inverse_interpolator(x)
@nb.njit
def _consumption_utility_dx(x):
    """Marginal utility of consumption under CRRA preferences."""
    exponent = -risk_aversion_coefficient
    return x ** exponent
@nb.njit
def _consumption_utility_dx_inverted(x):
    """Inverse of CRRA marginal utility: consumption level implied by a
    marginal-utility value x."""
    exponent = -1 / risk_aversion_coefficient
    return x ** exponent
def wage_hc_factor_interpolated_1_agent(x, wage_hc_factor_vector):
    """Linearly interpolate (and extrapolate) the wage human-capital
    factor at experience level(s) x for a single agent type."""
    interpolator = interpolate.interp1d(
        hc_grid,
        wage_hc_factor_vector,
        kind="linear",
        bounds_error=False,
        fill_value="extrapolate",
    )
    return interpolator(x)
def _simulate_transition_consumption_searching(
    employed_simulated_now,
    unemployed_simulated_now,
    unemployed_loss_simulated_now,
    nonemployed_simulated_now,
    hc_simulated,
    duration_unemployed_simulated,
    duration_employed_simulated,
    period_idx,
    separation_rate_vector,
    wage_loss_probability,
):
    """Simulate one period of labor-market transitions.

    Draws separation and hc-loss shocks for every simulated agent,
    updates employment states (employed -> employed/searching;
    unemployed -> searching/searching-with-loss; unemployed-with-loss
    stays in the loss state), advances experience for the employed, and
    maintains the (un)employment-duration trackers.

    Mutates hc_simulated and the two duration arrays in place and also
    returns them alongside the new state indicators.
    """
    # simulate transition events
    # NOTE(review): job loss uses a strict ">" draw while hc loss uses
    # ">=" -- confirm the asymmetry is intentional.
    job_loss_event_simulated = np.array(
        [
            separation_rate_vector[i, period_idx] > np.random.rand(n_simulations)
            for i in range(n_types)
        ]
    )
    hc_loss_event_simulated = np.array(
        [
            wage_loss_probability[i] >= np.random.rand(n_simulations)
            for i in range(n_types)
        ]
    )
    # simulate transitions in employment status
    employed_simulated_next = employed_simulated_now * (
        1 - job_loss_event_simulated
    ).astype(bool)
    searching_simulated_next = (
        unemployed_simulated_now * (1 - hc_loss_event_simulated)
        + employed_simulated_now * job_loss_event_simulated
    ).astype(bool)
    searching_loss_simulated_next = (
        unemployed_loss_simulated_now
        + unemployed_simulated_now * hc_loss_event_simulated
    ).astype(bool)
    # simulate experience transition
    hc_simulated[
        employed_simulated_now
    ] += 1  # increase experience of employed workers by 1
    # update duration tracker
    duration_unemployed_simulated[
        nonemployed_simulated_now
    ] += 1  # increase duration of unemployed (with / without hc loss) by 1
    duration_unemployed_simulated[
        employed_simulated_now
    ] = 0  # set duration of now employed to 0
    duration_employed_simulated[employed_simulated_now] += 1
    duration_employed_simulated[nonemployed_simulated_now] = 0
    return (
        employed_simulated_next,
        searching_simulated_next,
        searching_loss_simulated_next,
        duration_unemployed_simulated,
        duration_employed_simulated,
        hc_simulated,
    )
def _simulate_consumption(
    policy_consumption_employed,
    policy_consumption_unemployed,
    policy_consumption_unemployed_loss,
    employed_simulated,
    unemployed_simulated,
    unemployed_loss_simulated,
    hc_simulated,
    assets_simulated,
    period_idx,
):
    """Evaluate period consumption for every simulated agent.

    Interpolates the three state-specific consumption policies at each
    agent's simulated (hc, assets) point and combines them using the
    employment-state indicators.  To cover simulated assets above the
    top of the asset grid, each policy is extended by one column equal
    to the last grid column plus an annuitized consumption increment
    (consumption_diff) computed from the remaining lifetime via
    npf.pmt.
    """
    # calculate consumption differential to increase asset grid
    consumption_diff = -npf.pmt(
        (1 - discount_factor) / discount_factor,
        n_periods_retired + (n_periods_working - period_idx + 1),
        assets_max - max(assets_grid),
    )
    # initiate intermediate objects
    consumption_employed_simulated = np.full((n_types, n_simulations), np.nan)
    consumption_unemployed_simulated = np.full((n_types, n_simulations), np.nan)
    consumption_unemployed_loss_simulated = np.full((n_types, n_simulations), np.nan)
    # interpolate consumption policies
    for type_idx in range(n_types):
        consumption_unemployed_simulated[
            type_idx, :
        ] = interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a1,
            assets_grid_h_a1,
            np.append(
                policy_consumption_unemployed[type_idx, :, :, period_idx],
                (
                    policy_consumption_unemployed[type_idx, :, -1, period_idx]
                    + consumption_diff
                )[..., np.newaxis],
                axis=1,
            ),
            hc_simulated[type_idx, :],
            assets_simulated[type_idx, :],
            method=interpolation_method,
        )
        consumption_unemployed_loss_simulated[
            type_idx, :
        ] = interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a1,
            assets_grid_h_a1,
            np.append(
                policy_consumption_unemployed_loss[type_idx, :, :, period_idx],
                (
                    policy_consumption_unemployed_loss[type_idx, :, -1, period_idx]
                    + consumption_diff
                )[..., np.newaxis],
                axis=1,
            ),
            hc_simulated[type_idx, :],
            assets_simulated[type_idx, :],
            method=interpolation_method,
        )
        consumption_employed_simulated[
            type_idx, :
        ] = interpolate_2d_ordered_to_unordered(
            hc_grid_reduced_h_a1,
            assets_grid_h_a1,
            np.append(
                policy_consumption_employed[type_idx, :, :, period_idx],
                (
                    policy_consumption_employed[type_idx, :, -1, period_idx]
                    + consumption_diff
                )[..., np.newaxis],
                axis=1,
            ),
            hc_simulated[type_idx, :],
            assets_simulated[type_idx, :],
            method=interpolation_method,
        )
    # construct combined array of simulated consumption levels
    consumption_simulated = (
        consumption_employed_simulated * employed_simulated
        + consumption_unemployed_simulated * unemployed_simulated
        + consumption_unemployed_loss_simulated * unemployed_loss_simulated
    )
    if np.any(np.isnan(consumption_simulated)):
        warnings.warn("NaN values in simulated consumption levels")
    return consumption_simulated
def _simulate_savings(
    employed_simulated,
    nonemployed_simulated,
    consumption_simulated,
    assets_simulated,
    wage_hc_factor_simulated,
    tax_ss,
    tax_ui,
    tax_income,
    transfers_lumpsum,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Compute next-period asset holdings for every simulated agent
    from the budget constraint, by employment state.

    Employed agents earn net wages; nonemployed agents receive UI
    benefits.  Resulting savings are clipped (with a warning) into
    [assets_min + eps, assets_max - eps] so they stay on the asset
    grid.
    """
    # compute next period asset holdings via bc
    savings_employed_simulated = (
        np.repeat((1 + (1 - tax_income) * interest_rate_raw), n_simulations).reshape(
            (n_types, n_simulations)
        )
        * assets_simulated
        + np.repeat(
            (1 - tax_ss - tax_ui[:, period_idx] - tax_income), n_simulations
        ).reshape((n_types, n_simulations))
        * wage_level
        * wage_hc_factor_simulated
        + np.repeat(transfers_lumpsum, n_simulations).reshape((n_types, n_simulations))
        - consumption_simulated
    ) * employed_simulated
    savings_nonemployed_simulated = (
        np.repeat((1 + (1 - tax_income) * interest_rate_raw), n_simulations).reshape(
            (n_types, n_simulations)
        )
        * assets_simulated
        + simulate_ui_benefits(
            wage_level * wage_hc_factor_simulated,
            ui_replacement_rate_vector,
            ui_floor,
            ui_cap,
            period_idx,
        )
        + np.repeat(transfers_lumpsum, n_simulations).reshape((n_types, n_simulations))
        - consumption_simulated
    ) * nonemployed_simulated
    # construct combined array of simulated consumption levels
    savings_simulated = savings_employed_simulated + savings_nonemployed_simulated
    # run some checks on savings, then return
    if np.any(np.isnan(savings_simulated)):
        warnings.warn("NaN values in simulated savings")
    if np.any(savings_simulated < assets_min):
        warnings.warn(
            "simulated savings below lower bound of asset grid; adjusting savings."
        )
        savings_simulated = np.maximum(savings_simulated, assets_min + eps)
    if np.any(savings_simulated > assets_max):
        warnings.warn(
            "simulated savings above upper bound of asset grid; adjusting savings."
        )
        savings_simulated = np.minimum(savings_simulated, assets_max - eps)
    return savings_simulated
def _get_statistics_consumption_phase(
    employed_simulated,
    unemployed_simulated,
    unemployed_loss_simulated,
    nonemployed_simulated,
    consumption_simulated,
    wage_hc_factor_simulated,
    wage_hc_factor_pre_displacement_simulated,
    duration_unemployed_simulated,
    duration_since_displacement_simulated,
    hc_simulated,
    assets_simulated,
    income_tax_rate_vector,
    ui_replacement_rate_vector,
    ui_floor,
    ui_cap,
    period_idx,
):
    """Compute cross-sectional summary statistics for one simulated
    working-life period.

    Covers labor-force shares, (log) consumption and marginal-utility
    means by employment state, human-capital and wage-factor means
    (including displaced vs. non-displaced profiles over the first 6
    durations), income and UI-benefit statistics (effective replacement
    rate, floor/cap binding shares), and wealth statistics including
    the asset distribution and the joint (hc, assets) distribution of
    the nonemployed.

    Returns a 30-element tuple of per-type statistics in the order of
    the return statement below.
    """
    # labor force status statistics
    share_employed = np.mean(employed_simulated, axis=1)
    share_unemployed = np.mean(unemployed_simulated, axis=1)
    share_unemployed_loss = np.mean(unemployed_loss_simulated, axis=1)
    share_nonemployed = np.mean(nonemployed_simulated, axis=1)
    # consumption statistics
    log_consumption_employed_mean = conditional_mean(
        np.log(consumption_simulated), employed_simulated, axis=1
    )
    log_consumption_nonemployed_mean = conditional_mean(
        np.log(consumption_simulated), nonemployed_simulated, axis=1
    )
    consumption_employed_mean = conditional_mean(
        consumption_simulated, employed_simulated, axis=1
    )
    consumption_nonemployed_mean = conditional_mean(
        consumption_simulated, nonemployed_simulated, axis=1
    )
    # consumption_nonemployed_stats = np.array(
    #     [
    #         np.mean(consumption_nonemployed_simulated[nonemployed_simulated]),
    #         np.median(consumption_nonemployed_simulated[nonemployed_simulated]),
    #         np.min(consumption_nonemployed_simulated[nonemployed_simulated]),
    #         np.std(consumption_nonemployed_simulated[nonemployed_simulated]),
    #     ]
    # )
    # utility statistics
    marginal_utility_nonemployed_mean = conditional_mean(
        _consumption_utility_dx(consumption_simulated), nonemployed_simulated, axis=1
    )
    # hc statistics
    hc_mean = np.mean(hc_simulated, axis=1)
    hc_employed_mean = conditional_mean(hc_simulated, employed_simulated, axis=1)
    hc_nonemployed_mean = conditional_mean(hc_simulated, nonemployed_simulated, axis=1)
    # wage statistics
    wage_hc_factor_mean = np.mean(wage_hc_factor_simulated, axis=1)
    wage_hc_factor_employed_mean = conditional_mean(
        wage_hc_factor_simulated, employed_simulated, axis=1
    )
    wage_hc_factor_unemployed_loss_mean = conditional_mean(
        wage_hc_factor_simulated, unemployed_loss_simulated, axis=1
    )
    wage_hc_factor_nonemployed_mean = conditional_mean(
        wage_hc_factor_simulated, nonemployed_simulated, axis=1
    )
    # displaced / non-displaced wage-factor profiles over the first 6
    # (un)employment durations
    wage_hc_factor_displaced_mean = np.full((n_types, 6), np.nan)
    wage_hc_factor_nondisplaced_mean = np.full((n_types, 6), np.nan)
    for time in range(6):
        wage_hc_factor_displaced_mean[:, time] = conditional_mean(
            wage_hc_factor_pre_displacement_simulated,
            np.logical_and(
                nonemployed_simulated,
                duration_unemployed_simulated == time,
            ),
            axis=1,
        )
        wage_hc_factor_nondisplaced_mean[:, time] = conditional_mean(
            wage_hc_factor_simulated,
            np.logical_and(
                employed_simulated,
                duration_since_displacement_simulated == time,
            ),
            axis=1,
        )
    wage_hc_factor_pre_displacement_mean = conditional_mean(
        wage_hc_factor_pre_displacement_simulated, nonemployed_simulated, axis=1
    )
    # income statistics
    labor_income_simulated = (
        wage_level
        * wage_hc_factor_simulated
        * np.repeat((1 - income_tax_rate_vector[:, period_idx]), n_simulations).reshape(
            (n_types, n_simulations)
        )
        * employed_simulated
    )
    pre_unemployment_wage_simulated = (
        wage_level * wage_hc_factor_pre_displacement_simulated * nonemployed_simulated
    )
    ui_benefits_simulated = (
        simulate_ui_benefits(
            pre_unemployment_wage_simulated,
            ui_replacement_rate_vector,
            ui_floor,
            ui_cap,
            period_idx,
        )
        * nonemployed_simulated
    )
    income_simulated = labor_income_simulated + ui_benefits_simulated
    income_median = np.median(income_simulated, axis=1)
    assets_over_income_mean = np.full(n_types, np.nan)
    for type_idx in range(n_types):
        assets_over_income_mean[type_idx] = np.mean(
            (assets_simulated[type_idx, :] / labor_income_simulated[type_idx, :])[
                employed_simulated[type_idx, :]
            ]
        )
    # UI statistics
    ui_benefits_mean = conditional_mean(
        ui_benefits_simulated, nonemployed_simulated, axis=1
    )
    ui_effective_replacement_rate = conditional_mean(
        ui_benefits_simulated, nonemployed_simulated, axis=1
    ) / conditional_mean(pre_unemployment_wage_simulated, nonemployed_simulated, axis=1)
    ui_share_floor_binding = conditional_mean(
        (ui_benefits_simulated == ui_floor).astype(int), nonemployed_simulated, axis=1
    )
    ui_share_cap_binding = conditional_mean(
        (ui_benefits_simulated == ui_cap).astype(int), nonemployed_simulated, axis=1
    )
    # wealth statistics
    assets_mean = np.mean(assets_simulated, axis=1)
    assets_nonemployed_mean = conditional_mean(
        assets_simulated, nonemployed_simulated, axis=1
    )
    assets_distribution = np.full((n_types, assets_grid_size), np.nan)
    distribution_hc_assets_nonemployed = np.full(
        (n_types, hc_grid_reduced_size, assets_grid_size), np.nan
    )
    for type_idx in range(n_types):
        assets_distribution[type_idx, :] = (
            np.histogram(
                a=assets_simulated[type_idx, :], bins=np.append(assets_grid, np.inf)
            )[0]
            / n_simulations
        )
        distribution_hc_assets_nonemployed[type_idx, :] = (
            np.histogram2d(
                x=np.squeeze(
                    hc_simulated[type_idx, nonemployed_simulated[type_idx, :]]
                ),
                y=np.squeeze(
                    assets_simulated[type_idx, nonemployed_simulated[type_idx, :]]
                ),
                bins=(
                    np.append(hc_grid_reduced, n_periods_working + 1),
                    np.append(assets_grid, np.inf),
                ),
            )[0]
            / np.sum(nonemployed_simulated[type_idx, :])
        )
    log_assets_over_income_nonemployed_mean = conditional_mean(
        np.log(assets_simulated / wage_hc_factor_simulated),
        np.logical_and(assets_simulated > 0, nonemployed_simulated),
        axis=1,
    )
    return (
        share_employed,
        share_unemployed,
        share_unemployed_loss,
        share_nonemployed,
        log_consumption_employed_mean,
        log_consumption_nonemployed_mean,
        consumption_employed_mean,
        consumption_nonemployed_mean,
        wage_hc_factor_mean,
        wage_hc_factor_employed_mean,
        wage_hc_factor_unemployed_loss_mean,
        wage_hc_factor_nonemployed_mean,
        wage_hc_factor_displaced_mean,
        wage_hc_factor_nondisplaced_mean,
        wage_hc_factor_pre_displacement_mean,
        marginal_utility_nonemployed_mean,
        income_median,
        hc_mean,
        hc_employed_mean,
        hc_nonemployed_mean,
        ui_benefits_mean,
        ui_effective_replacement_rate,
        ui_share_floor_binding,
        ui_share_cap_binding,
        assets_mean,
        assets_nonemployed_mean,
        assets_distribution,
        assets_over_income_mean,
        distribution_hc_assets_nonemployed,
        log_assets_over_income_nonemployed_mean,
    )
def _solve_and_simulate(controls, calibration):
global assets_grid_size
global assets_grid
global assets_grid_h_a
global assets_grid_h_a1
global assets_grid_n_h_a
global assets_grid_n_h_a1
global assets_max
global assets_min
global borrowing_limit_h_a
global contact_rate
global discount_factor
global eps
global hc_max
global hc_grid
global hc_grid_reduced
global hc_grid_reduced_h_a
global hc_grid_reduced_h_a1
global hc_grid_reduced_n_h_a
global hc_grid_reduced_n_h_a1
global hc_grid_reduced_size
global interest_rate_raw
global interpolation_method
global interpolation_method
global job_finding_probability_grid
global leisure_grid
global leisure_utility
global leisure_utility_dx
global leisure_utility_dxdx
global leisure_utility_dx_min
global leisure_utility_dx_max
global leisure_utility_on_search_grid
global n_periods_working
global n_periods_retired
global n_simulations
global n_types
global risk_aversion_coefficient
global search_effort_grid
global search_effort_grid_size
global wage_level
# load controls
eps = 0.0000000000001
interpolation_method = controls["interpolation_method"]
n_iter_solve_max = controls["n_iterations_solve_max"]
n_simulations = controls["n_simulations"]
seed_simulation = controls["seed_simulation"]
show_progress = controls["show_progress_solve"]
show_summary = controls["show_summary"]
tolerance_solve = controls["tolerance_solve"]
# load calibration
# general parameters
assets_grid = np.array(calibration["assets_grid"])
assets_max = calibration["assets_max"]
assets_min = calibration["assets_min"]
consumption_min = calibration["consumption_min"]
contact_rate = calibration["contact_rate"]
discount_factor = calibration["discount_factor"]
equilibrium_condition = calibration["equilibrium_condition"]
hc_grid_reduced = np.array(calibration["hc_grid_reduced"])
hc_loss_probability = np.array(calibration["hc_loss_probability"])
leisure_utility = np.array(calibration["leisure_utility"])
leisure_utility_dx = np.array(calibration["leisure_utility_dx"])
leisure_utility_dxdx = np.array(calibration["leisure_utility_dxdx"])
n_periods_retired = calibration["n_periods_retired"]
n_periods_working = calibration["n_periods_working"]
n_types = calibration["n_types"]
risk_aversion_coefficient = calibration["risk_aversion_coefficient"]
search_effort_grid_size = calibration["search_effort_grid_size"]
search_effort_max = calibration["search_effort_max"]
search_effort_min = calibration["search_effort_min"]
separation_rate_vector = np.array(calibration["separation_rate_vector"])
transfers_pensions = np.array(calibration["transfers_pensions_init"])
transfers_lumpsum = np.array(calibration["transfers_lumpsum_init"])
type_weights = np.array(calibration["type_weights"])
ui_cap = calibration["ui_cap"]
ui_floor = calibration["ui_floor"]
ui_replacement_rate_vector = np.array(calibration["ui_replacement_rate_vector"])
wage_hc_factor_vector = np.array(calibration["wage_hc_factor_vector"])
wage_level = calibration["wage_level"]
wage_loss_factor_vector = np.array(calibration["wage_loss_factor_vector"])
wage_loss_reference_vector = np.array(calibration["wage_loss_reference_vector"])
# exogenous taxes
tax_income = np.array(calibration["tax_income"])
tax_ss = np.array(calibration["tax_ss"])
# initial values for endogenous taxes
tax_consumption_init = np.array(
calibration["tax_consumption_init"][interpolation_method]
)
tax_ui_init = np.array(calibration["tax_ui_init"][interpolation_method])
instrument = calibration["instrument"]
# calculate derived parameters
assets_grid_size = len(assets_grid)
hc_grid_reduced_size = len(hc_grid_reduced)
hc_grid = np.arange(n_periods_working + 1)
hc_max = np.amax(hc_grid)
interest_rate_raw = (1 - discount_factor) / discount_factor
leisure_grid = np.linspace(
search_effort_min, search_effort_max, len(leisure_utility)
)
search_effort_grid = np.linspace(
search_effort_min, search_effort_max, search_effort_grid_size
)
leisure_utility_on_search_grid = leisure_utility_interpolated(search_effort_grid)
leisure_utility_dx_max = leisure_utility_dx_interpolated(search_effort_max)
leisure_utility_dx_min = leisure_utility_dx_interpolated(search_effort_min)
job_finding_probability_grid = job_finding_probability(search_effort_grid)
borrowing_limit_h_a = np.full((hc_grid_reduced_size, assets_grid_size), assets_min)
n_types = separation_rate_vector.shape[0]
# generate grids
assets_grid_h_a = (
np.repeat(assets_grid, hc_grid_reduced_size)
.reshape(assets_grid_size, hc_grid_reduced_size)
.T
)
assets_grid_h_a1 = np.append(
assets_grid_h_a, np.full((hc_grid_reduced_size, 1), assets_max), axis=1
)
assets_grid_n_h_a = np.tile(assets_grid, n_types * hc_grid_reduced_size).reshape(
(n_types, hc_grid_reduced_size, assets_grid_size)
)
assets_grid_n_h_a1 = np.append(
assets_grid_n_h_a,
np.full((n_types, hc_grid_reduced_size, 1), assets_max),
axis=2,
)
hc_grid_reduced_h_a = np.repeat(hc_grid_reduced, assets_grid_size).reshape(
hc_grid_reduced_size, assets_grid_size
)
hc_grid_reduced_h_a1 = np.append(
hc_grid_reduced_h_a, hc_grid_reduced[..., np.newaxis], axis=1
)
hc_grid_reduced_n_h_a = np.tile(
hc_grid_reduced, n_types * assets_grid_size
).reshape((n_types, assets_grid_size, hc_grid_reduced_size))
hc_grid_reduced_n_h_a = np.moveaxis(hc_grid_reduced_n_h_a, 2, 1)
hc_grid_reduced_n_h_a1 = np.append(
hc_grid_reduced_n_h_a,
np.tile(hc_grid_reduced, n_types).reshape((n_types, hc_grid_reduced_size, 1)),
axis=2,
)
wage_hc_factor_grid = np.repeat(
wage_hc_factor_vector[:, hc_grid_reduced],
assets_grid_size,
).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
# check instrument
if instrument not in [
"tax_ui_rate",
"tax_ui_shift",
"tax_consumption",
]:
raise ValueError(
"error in equilibrium instrument; choose one of "
"['tax_ui_rate', 'tax_ui_shift', 'tax_consumption']"
)
# load targets for fixed budget calibration
if equilibrium_condition == "fixed_budget":
pv_balance_lumpsum_target = np.array(calibration["pv_balance_lumpsum_target"])
pv_balance_ui_target = np.array(calibration["pv_balance_ui_target"])
pv_balance_ss_target = np.array(calibration["pv_balance_ss_target"])
# set initial values for policy rates
tax_consumption = tax_consumption_init
# check if initial instrument rate rate is consistent with equilibrium condition
if equilibrium_condition == "combined":
if instrument == "tax_ui_rate":
if not np.all(tax_income == tax_income[0]):
raise ValueError(
"error in input variable tax_ui;"
" with combined budget, income tax rate required to be equal across types"
)
else:
pass
elif instrument == "tax_ui_shift":
if not np.all(tax_income == tax_income[0]):
raise ValueError(
"error in input variable tax_ui;"
" with combined budget, income tax rate required"
" to be equal across types"
)
else:
pass
elif instrument == "tax_consumption":
if not np.all(tax_consumption_init == tax_consumption_init[0]):
raise ValueError(
"error in input variable tax_consumption_init;"
" with combined budget, consumption tax rate required"
" to be equal across types"
)
else:
pass
else:
pass
else:
pass
# construct ui tax rate vector from initial values
if tax_ui_init.shape == (n_types,):
tax_ui = tax_ui_init
tax_ui_vector = np.repeat(tax_ui, n_periods_working).reshape(
(n_types, n_periods_working)
)
elif tax_ui_init.shape == (n_types, n_periods_working):
tax_ui_vector = tax_ui_init
tax_ui_shift = 0.0
else:
raise ValueError("error in input variable tax_ui")
if ui_cap == "None":
ui_cap = np.Inf
if ui_floor == "None":
ui_floor = 0.0
# initiate objects for iteration
instrument_hist = []
n_iter = 0
#######################################################
# SOLUTION
while n_iter <= n_iter_solve_max:
# Initialize objects
# store current instrument rate
if instrument == "tax_ui_rate":
instrument_hist += [copy.deepcopy(tax_ui)]
elif instrument == "tax_ui_shift":
instrument_hist += [copy.deepcopy(tax_ui_shift)]
elif instrument == "tax_consumption":
instrument_hist += [copy.deepcopy(tax_consumption)]
# policy functions [type x hc x assets x age (working + first retirement period)]
policy_consumption_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
policy_consumption_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
policy_consumption_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
policy_effort_searching = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
policy_effort_searching_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
# value functions [type x hc x assets x age]
value_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
value_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
value_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
value_searching = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
value_searching_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
# government program cost and revenue functions [type x hc x assets x age]
cost_ui_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
cost_ui_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
cost_ui_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ss_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ss_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ss_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ui_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ui_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_ui_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_lumpsum_employed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_lumpsum_unemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
revenue_lumpsum_unemployed_loss = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working + 1,
),
np.nan,
)
# RETIREMENT PERIOD (discounted to first period of retirement)
# retirement consumption levels [type x hc x assets]
# (annuity income from assets + pension benefits - consumption tax)
annuity_factor = (
(1 - tax_income)
* interest_rate_raw
* (1 + (1 - tax_income) * interest_rate_raw) ** n_periods_retired
/ ((1 + (1 - tax_income) * interest_rate_raw) ** n_periods_retired - 1)
)
annuity_income = assets_grid_n_h_a * np.repeat(
annuity_factor, (hc_grid_reduced_size * assets_grid_size)
).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
pension_income = np.repeat(
transfers_pensions, (hc_grid_reduced_size * assets_grid_size)
).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
transfer_income = np.repeat(
transfers_lumpsum, (hc_grid_reduced_size * assets_grid_size)
).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
policy_consumption_retired = np.repeat(
1 / (1 + tax_consumption), (hc_grid_reduced_size * assets_grid_size)
).reshape((n_types, hc_grid_reduced_size, assets_grid_size)) * (
annuity_income + pension_income + transfer_income
)
policy_consumption_retired = np.maximum(
policy_consumption_retired, consumption_min
)
# value of being retired [hc full x assets]
value_retired = (
(1 - discount_factor ** n_periods_retired)
/ (1 - discount_factor)
* consumption_utility(policy_consumption_retired)
)
# revenue from income tax during retirement
revenue_factor_lumpsum_retirement = (
(1 - tax_income)
* (1 + (1 - tax_income) * interest_rate_raw) ** n_periods_retired
- (
(
1
- tax_income
* (1 + (1 - tax_income) * interest_rate_raw) ** n_periods_retired
)
* (1 + interest_rate_raw) ** n_periods_retired
)
) / (
((1 + (1 - tax_income) * interest_rate_raw) ** n_periods_retired - 1)
* (1 + interest_rate_raw) ** (n_periods_retired - 1)
)
revenue_lumpsum_retirement = assets_grid_n_h_a * np.repeat(
revenue_factor_lumpsum_retirement, (hc_grid_reduced_size * assets_grid_size)
).reshape((n_types, hc_grid_reduced_size, assets_grid_size))
# # cost to the government of paying pension benefits throughout retirement
# # by hc level (reduced) [type x hc x assets]
# cost_retired = (1 - discount_factor ** n_periods_retired) / (
# 1 - discount_factor
# ) / np.repeat(
# (1 + tax_consumption), (hc_grid_reduced_size * assets_grid_size)
# ).reshape(
# (n_types, hc_grid_reduced_size, assets_grid_size)
# ) * pension_income - 1 / discount_factor * np.repeat(
# tax_consumption / (1 + tax_consumption),
# (hc_grid_reduced_size * assets_grid_size),
# ).reshape(
# (n_types, hc_grid_reduced_size, assets_grid_size)
# ) * assets_grid_n_h_a
# Starting in the last period of the working life
period_idx = n_periods_working - 1 # zero-based indexing
# store policy, value, and cost functions for first period of
# retirement (hc loss materializes upon retirement)
policy_consumption_employed[:, :, :, -1] = policy_consumption_retired
policy_consumption_unemployed[:, :, :, -1] = policy_consumption_retired
policy_consumption_unemployed_loss[
:, :, :, -1
] = interpolate_n_h_a_ordered_to_unordered(
hc_grid_reduced_n_h_a,
assets_grid_n_h_a,
policy_consumption_retired,
_hc_after_loss_n_agents(
hc_grid_reduced_n_h_a,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx + 1,
),
assets_grid_n_h_a,
method=interpolation_method,
)
# workers transition to "employed" status in retirement with certainty,
# but without incurring disutility from searching; technical assumption
# to facilitate coding, but without consequences for results
policy_effort_searching[:, :, :, -1] = 1.0
policy_effort_searching_loss[:, :, :, -1] = 1.0
value_employed[:, :, :, -1] = value_retired
value_unemployed[:, :, :, -1] = value_retired
value_unemployed_loss[:, :, :, -1] = interpolate_n_h_a_ordered_to_unordered(
hc_grid_reduced_n_h_a,
assets_grid_n_h_a,
value_retired,
_hc_after_loss_n_agents(
hc_grid_reduced_n_h_a,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx + 1,
),
assets_grid_n_h_a,
method=interpolation_method,
)
value_searching[:, :, :, -1] = value_retired
value_searching_loss[:, :, :, -1] = interpolate_n_h_a_ordered_to_unordered(
hc_grid_reduced_n_h_a,
assets_grid_n_h_a,
value_retired,
_hc_after_loss_n_agents(
hc_grid_reduced_n_h_a,
wage_loss_factor_vector,
wage_loss_reference_vector,
period_idx + 1,
),
assets_grid_n_h_a,
method=interpolation_method,
)
# government cost functions
cost_ui_employed[:, :, :, -1] = 0.0
cost_ui_unemployed[:, :, :, -1] = 0.0
cost_ui_unemployed_loss[:, :, :, -1] = 0.0
# cost of pensions and transfers do not need to be computed
# recursively (constant streams), but PVs of cost of pensions
# and transfers are computed at the end of the solution
# government revenue functions
revenue_ss_employed[:, :, :, -1] = 0.0
revenue_ss_unemployed[:, :, :, -1] = 0.0
revenue_ss_unemployed_loss[:, :, :, -1] = 0.0
revenue_ui_employed[:, :, :, -1] = 0.0
revenue_ui_unemployed[:, :, :, -1] = 0.0
revenue_ui_unemployed_loss[:, :, :, -1] = 0.0
revenue_lumpsum_employed[:, :, :, -1] = revenue_lumpsum_retirement
revenue_lumpsum_unemployed[:, :, :, -1] = revenue_lumpsum_retirement
revenue_lumpsum_unemployed_loss[:, :, :, -1] = revenue_lumpsum_retirement
# WORKING PERIOD (solving backwards for each t):
while period_idx >= 0:
for type_idx in range(n_types):
# load policy, value, and cost functions for the next period
policy_consumption_employed_next = policy_consumption_employed[
type_idx, :, :, period_idx + 1
]
policy_consumption_unemployed_next = policy_consumption_unemployed[
type_idx, :, :, period_idx + 1
]
policy_consumption_unemployed_loss_next = (
policy_consumption_unemployed_loss[type_idx, :, :, period_idx + 1]
)
policy_effort_searching_next = policy_effort_searching[
type_idx, :, :, period_idx + 1
]
policy_effort_searching_loss_next = policy_effort_searching_loss[
type_idx, :, :, period_idx + 1
]
value_employed_next = value_employed[type_idx, :, :, period_idx + 1]
value_searching_next = value_searching[type_idx, :, :, period_idx + 1]
value_searching_loss_next = value_searching_loss[
type_idx, :, :, period_idx + 1
]
cost_ui_employed_next = cost_ui_employed[type_idx, :, :, period_idx + 1]
cost_ui_unemployed_next = cost_ui_unemployed[
type_idx, :, :, period_idx + 1
]
cost_ui_unemployed_loss_next = cost_ui_unemployed_loss[
type_idx, :, :, period_idx + 1
]
revenue_ss_employed_next = revenue_ss_employed[
type_idx, :, :, period_idx + 1
]
revenue_ss_unemployed_next = revenue_ss_unemployed[
type_idx, :, :, period_idx + 1
]
revenue_ss_unemployed_loss_next = revenue_ss_unemployed_loss[
type_idx, :, :, period_idx + 1
]
revenue_ui_employed_next = revenue_ui_employed[
type_idx, :, :, period_idx + 1
]
revenue_ui_unemployed_next = revenue_ui_unemployed[
type_idx, :, :, period_idx + 1
]
revenue_ui_unemployed_loss_next = revenue_ui_unemployed_loss[
type_idx, :, :, period_idx + 1
]
revenue_lumpsum_employed_next = revenue_lumpsum_employed[
type_idx, :, :, period_idx + 1
]
revenue_lumpsum_unemployed_next = revenue_lumpsum_unemployed[
type_idx, :, :, period_idx + 1
]
revenue_lumpsum_unemployed_loss_next = revenue_lumpsum_unemployed_loss[
type_idx, :, :, period_idx + 1
]
# solve period
(
policy_consumption_employed_now,
policy_consumption_unemployed_now,
policy_consumption_unemployed_loss_now,
policy_effort_searching_now,
policy_effort_searching_loss_now,
value_employed_now,
value_unemployed_now,
value_unemployed_loss_now,
value_searching_now,
value_searching_loss_now,
cost_ui_employed_now,
cost_ui_unemployed_now,
cost_ui_unemployed_loss_now,
revenue_ss_employed_now,
revenue_ss_unemployed_now,
revenue_ss_unemployed_loss_now,
revenue_ui_employed_now,
revenue_ui_unemployed_now,
revenue_ui_unemployed_loss_now,
revenue_lumpsum_employed_now,
revenue_lumpsum_unemployed_now,
revenue_lumpsum_unemployed_loss_now,
) = _solve_one_period(
policy_consumption_employed_next,
policy_consumption_unemployed_next,
policy_consumption_unemployed_loss_next,
policy_effort_searching_next,
policy_effort_searching_loss_next,
value_employed_next,
value_searching_next,
value_searching_loss_next,
cost_ui_employed_next,
cost_ui_unemployed_next,
cost_ui_unemployed_loss_next,
hc_loss_probability[type_idx, ...],
revenue_ss_employed_next,
revenue_ss_unemployed_next,
revenue_ss_unemployed_loss_next,
revenue_ui_employed_next,
revenue_ui_unemployed_next,
revenue_ui_unemployed_loss_next,
revenue_lumpsum_employed_next,
revenue_lumpsum_unemployed_next,
revenue_lumpsum_unemployed_loss_next,
separation_rate_vector[type_idx, ...],
wage_hc_factor_grid[type_idx, ...],
wage_hc_factor_vector[type_idx, ...],
wage_loss_factor_vector[type_idx, ...],
wage_loss_reference_vector[type_idx, ...],
tax_ss[type_idx, ...],
tax_ui_vector[type_idx, ...],
tax_income[type_idx, ...],
transfers_lumpsum[type_idx, ...],
ui_replacement_rate_vector[type_idx, ...],
ui_floor,
ui_cap,
period_idx,
)
# store results
policy_consumption_employed[
type_idx, :, :, period_idx
] = policy_consumption_employed_now
policy_consumption_unemployed[
type_idx, :, :, period_idx
] = policy_consumption_unemployed_now
policy_consumption_unemployed_loss[
type_idx, :, :, period_idx
] = policy_consumption_unemployed_loss_now
policy_effort_searching[
type_idx, :, :, period_idx
] = policy_effort_searching_now
policy_effort_searching_loss[
type_idx, :, :, period_idx
] = policy_effort_searching_loss_now
value_employed[type_idx, :, :, period_idx] = value_employed_now
value_unemployed[type_idx, :, :, period_idx] = value_unemployed_now
value_unemployed_loss[
type_idx, :, :, period_idx
] = value_unemployed_loss_now
value_searching[type_idx, :, :, period_idx] = value_searching_now
value_searching_loss[
type_idx, :, :, period_idx
] = value_searching_loss_now
cost_ui_employed[type_idx, :, :, period_idx] = cost_ui_employed_now
cost_ui_unemployed[type_idx, :, :, period_idx] = cost_ui_unemployed_now
cost_ui_unemployed_loss[
type_idx, :, :, period_idx
] = cost_ui_unemployed_loss_now
revenue_ss_employed[
type_idx, :, :, period_idx
] = revenue_ss_employed_now
revenue_ss_unemployed[
type_idx, :, :, period_idx
] = revenue_ss_unemployed_now
revenue_ss_unemployed_loss[
type_idx, :, :, period_idx
] = revenue_ss_unemployed_loss_now
revenue_ui_employed[
type_idx, :, :, period_idx
] = revenue_ui_employed_now
revenue_ui_unemployed[
type_idx, :, :, period_idx
] = revenue_ui_unemployed_now
revenue_ui_unemployed_loss[
type_idx, :, :, period_idx
] = revenue_ui_unemployed_loss_now
revenue_lumpsum_employed[
type_idx, :, :, period_idx
] = revenue_lumpsum_employed_now
revenue_lumpsum_unemployed[
type_idx, :, :, period_idx
] = revenue_lumpsum_unemployed_now
revenue_lumpsum_unemployed_loss[
type_idx, :, :, period_idx
] = revenue_lumpsum_unemployed_loss_now
# initiate next iteration
period_idx -= 1
# obtain aggregate measures
pv_utility_computed = np.full(n_types, np.nan)
pv_cost_ui_computed = np.full(n_types, np.nan)
pv_revenue_ss_computed = np.full(n_types, np.nan)
pv_revenue_ui_computed = np.full(n_types, np.nan)
pv_revenue_lumpsum_computed = np.full(n_types, np.nan)
for type_idx in range(n_types):
# pv of utility of searcher with age=0, assets=0 and hc=0
pv_utility_computed[type_idx] = interpolate_1d(
assets_grid,
value_searching[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
# pv of net cost to government for searcher with age=0, assets=0 and hc=0
search_effort_at_entry = interpolate_1d(
assets_grid,
policy_effort_searching[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
pv_cost_ui_computed[type_idx] = (
1 - search_effort_at_entry
) * interpolate_1d(
assets_grid,
cost_ui_unemployed[type_idx, 0, :, 0],
0,
method=interpolation_method,
) + search_effort_at_entry * interpolate_1d(
assets_grid,
cost_ui_employed[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
pv_revenue_ss_computed[type_idx] = (
1 - search_effort_at_entry
) * interpolate_1d(
assets_grid,
revenue_ss_unemployed[type_idx, 0, :, 0],
0,
method=interpolation_method,
) + search_effort_at_entry * interpolate_1d(
assets_grid,
revenue_ss_employed[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
pv_revenue_ui_computed[type_idx] = (
1 - search_effort_at_entry
) * interpolate_1d(
assets_grid,
revenue_ui_unemployed[type_idx, 0, :, 0],
0,
method=interpolation_method,
) + search_effort_at_entry * interpolate_1d(
assets_grid,
revenue_ui_employed[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
pv_revenue_lumpsum_computed[type_idx] = (
1 - search_effort_at_entry
) * interpolate_1d(
assets_grid,
revenue_lumpsum_unemployed[type_idx, 0, :, 0],
0,
method=interpolation_method,
) + search_effort_at_entry * interpolate_1d(
assets_grid,
revenue_lumpsum_employed[type_idx, 0, :, 0],
0,
method=interpolation_method,
)
# cost functions
# cost to the government of paying pension benefits throughout retirement
# by hc level (reduced) [type x hc x assets]
pv_cost_factor_ss = ((1 + interest_rate_raw) ** n_periods_retired - 1) / (
interest_rate_raw
* (1 + interest_rate_raw) ** (n_periods_working + n_periods_retired)
)
pv_cost_ss_computed = transfers_pensions * pv_cost_factor_ss
# cost to the government of paying lump-sum transfers throughout retirement
# by hc level (reduced) [type x hc x assets]
pv_cost_factor_lumpsum = (
(1 + interest_rate_raw) ** (n_periods_working + n_periods_retired + 1) - 1
) / (
interest_rate_raw
* (1 + interest_rate_raw) ** (n_periods_working + n_periods_retired)
)
pv_cost_lumpsum_computed = transfers_lumpsum * pv_cost_factor_lumpsum
# computed balance for government programs
pv_balance_ss_computed = pv_revenue_ss_computed - pv_cost_ss_computed
pv_balance_ui_computed = pv_revenue_ui_computed - pv_cost_ui_computed
pv_balance_lumpsum_computed = (
pv_revenue_lumpsum_computed - pv_cost_lumpsum_computed
)
# correct for unbalanced government budget
pv_utility_corrected = pv_utility_computed + 0.55 * pv_balance_ui_computed
# average over types
average_pv_cost_ui_computed = (
np.average(pv_cost_ui_computed, weights=type_weights)
).reshape(
1,
)
average_pv_revenue_ss_computed = (
np.average(pv_revenue_ss_computed, weights=type_weights)
).reshape(
1,
)
# average_pv_revenue_ui_computed = (
# np.average(pv_revenue_ui_computed, weights=type_weights)
# ).reshape(1,)
average_pv_revenue_lumpsum_computed = (
np.average(pv_revenue_lumpsum_computed, weights=type_weights)
).reshape(
1,
)
average_pv_utility_computed = (
np.average(pv_utility_computed, weights=type_weights)
).reshape(
1,
)
average_pv_balance_ss_computed = (
np.average(pv_balance_ss_computed, weights=type_weights)
).reshape(
1,
)
average_pv_balance_ui_computed = (
np.average(pv_balance_ui_computed, weights=type_weights)
).reshape(
1,
)
average_pv_balance_lumpsum_computed = (
np.average(pv_balance_lumpsum_computed, weights=type_weights)
).reshape(
1,
)
average_pv_utility_computed_corrected = (
np.average(pv_utility_corrected, weights=type_weights)
).reshape(
1,
)
# find quantities for government budget constraint
if equilibrium_condition == "combined":
pv_lumpsum_net = average_pv_balance_lumpsum_computed
pv_ui_net = average_pv_balance_ui_computed
pv_ss_net = average_pv_balance_ss_computed
pv_revenue_lumpsum = average_pv_revenue_lumpsum_computed
pv_revenue_ss = average_pv_revenue_ss_computed
utility_at_entry = average_pv_utility_computed
utility_at_entry_corrected = average_pv_utility_computed_corrected
instrument_init = (
np.average(instrument_hist[0], weights=type_weights)
).reshape(
1,
)
instrument_now = (
np.average(instrument_hist[-1], weights=type_weights)
).reshape(
1,
)
elif equilibrium_condition == "fixed_budget":
pv_lumpsum_net = pv_balance_lumpsum_computed - pv_balance_lumpsum_target
pv_ss_net = pv_balance_ss_computed - pv_balance_ss_target
pv_ui_net = pv_balance_ui_computed - pv_balance_ui_target
pv_revenue_lumpsum = pv_revenue_lumpsum_computed - pv_balance_lumpsum_target
pv_revenue_ss = pv_revenue_ss_computed - pv_balance_ss_target
utility_at_entry = pv_utility_computed
utility_at_entry_corrected = pv_utility_corrected
instrument_init = instrument_hist[0]
instrument_now = instrument_hist[-1]
else:
raise ValueError(
"error in equilibrium condition; choose one of "
"['combined', 'fixed_budget']"
)
# print output summary
if show_summary:
print(
"\n###############################################"
"###############################################\n"
"MODEL SOLUTION: \n"
" iteration" + " " * (81 - len(f"{n_iter:4d}")) + f"{n_iter:4d}\n"
" equilibrium condition"
+ " " * (69 - len(f"{equilibrium_condition}"))
+ f"{equilibrium_condition}\n"
" balance unemployment insurance (pv)"
+ " "
* (55 - len("[" + ", ".join(f"{i:1.7f}" for i in pv_ui_net) + "]"))
+ "["
+ ", ".join(f"{i:1.7f}" for i in pv_ui_net)
+ "]\n"
" balance social security (pv)"
+ " "
* (62 - len("[" + ", ".join(f"{i:1.7f}" for i in pv_ss_net) + "]"))
+ "["
+ ", ".join(f"{i:1.7f}" for i in pv_ss_net)
+ "]\n"
" balance general tax and transfers (pv)"
+ " "
* (52 - len("[" + ", ".join(f"{i:1.7f}" for i in pv_lumpsum_net) + "]"))
+ "["
+ ", ".join(f"{i:1.7f}" for i in pv_lumpsum_net)
+ "]\n"
" welfare (pv utility at entry)"
+ " "
* (
61
- len("[" + ", ".join(f"{i:1.5f}" for i in utility_at_entry) + "]")
)
+ "["
+ ", ".join(f"{i:1.5f}" for i in utility_at_entry)
+ "]\n"
" welfare corrected"
+ " "
* (
73
- len(
"["
+ ", ".join(f"{i:1.5f}" for i in utility_at_entry_corrected)
+ "]"
)
)
+ "["
+ ", ".join(f"{i:1.5f}" for i in utility_at_entry_corrected)
+ "]\n"
f" initial instrument rate ({instrument})"
+ " "
* (
53
- len("[" + ", ".join(f"{i:1.7f}" for i in instrument_init) + "]")
)
+ "["
+ ", ".join(f"{i:1.7f}" for i in instrument_init)
+ "]\n"
f" current instrument rate ({instrument})"
+ " "
* (53 - len("[" + ", ".join(f"{i:1.7f}" for i in instrument_now) + "]"))
+ "["
+ ", ".join(f"{i:1.7f}" for i in instrument_now)
+ "]\n"
"################################################"
"##############################################\n"
)
# check government budget constraint
if (
all(
[
np.all(abs(pv_ss_net) <= tolerance_solve),
np.all(abs(pv_ui_net) <= tolerance_solve),
np.all(abs(pv_lumpsum_net) <= tolerance_solve),
]
)
or n_iter == n_iter_solve_max
):
break # don't update tax rate in output iteration
else: # and prepare next outer iteration of solution algorithm
# update iteration counter
n_iter += 1
# compute adjustment factor
adjustment_factor = 0.0079 * n_iter ** (-0.25)
# update instrument rate to balance UI budget
if instrument == "tax_ui_rate":
tax_ui -= adjustment_factor * pv_ui_net
tax_ui_vector = np.repeat(tax_ui, n_periods_working).reshape(
(n_types, n_periods_working)
)
elif instrument == "tax_ui_shift":
tax_ui_shift -= adjustment_factor * pv_ui_net
tax_ui_vector = tax_ui_vector + tax_ui_shift
elif instrument == "tax_consumption":
tax_consumption += adjustment_factor * pv_ui_net
# update transfers and pensions to balance budget of other gov't programs
adjustment_weight = 1 / (n_iter + 1)
transfers_pensions = (
adjustment_weight * transfers_pensions
+ (1 - adjustment_weight) * pv_revenue_ss / pv_cost_factor_ss
)
transfers_lumpsum = (
adjustment_weight * transfers_lumpsum
+ (1 - adjustment_weight) * pv_revenue_lumpsum / pv_cost_factor_lumpsum
)
if show_progress:
print("end solution")
########################################################
# SIMULATION
if controls["run_simulation"]:
# I: INITIALISATION
# set seed
np.random.seed(seed_simulation)
# (a) initiate objects for simulation
# booleans for worker status
searching_loss_simulated = np.zeros((n_types, n_simulations), dtype=bool)
searching_simulated = np.ones((n_types, n_simulations), dtype=bool)
employed_simulated = np.zeros((n_types, n_simulations), dtype=bool)
searching_all_simulated = (
searching_simulated + searching_loss_simulated
).astype(bool)
# assets tracker
assets_simulated = np.zeros((n_types, n_simulations))
# unemployment duration trackers
duration_unemployed_simulated = np.zeros((n_types, n_simulations))
duration_since_displacement_simulated = np.zeros((n_types, n_simulations))
# human capital tracker
hc_simulated = np.zeros((n_types, n_simulations))
hc_pre_displacement_simulated = np.zeros((n_types, n_simulations))
# value tracker
pv_utility_simulated = np.zeros((n_types, n_simulations))
# (c) initiate objects for statistics
# aggregate statistics
discount_factor_compounded = 1
pv_revenue_ss_simulated = np.zeros(n_types)
pv_revenue_ui_simulated = np.zeros(n_types)
pv_revenue_lumpsum_simulated = np.zeros(n_types)
pv_revenue_consumption_simulated = np.zeros(n_types)
pv_cost_ss_simulated = np.zeros(n_types)
pv_cost_ui_simulated = np.zeros(n_types)
pv_cost_lumpsum_simulated = np.zeros(n_types)
pv_cost_consumption_simulated = np.zeros(n_types)
pv_revenue_total_simulated = np.zeros(n_types)
pv_cost_total_simulated = np.zeros(n_types)
# government budget statistics
average_cost_ss_simulated = np.full((n_types, n_periods_working), np.nan)
average_cost_ui_simulated = np.full((n_types, n_periods_working), np.nan)
average_cost_lumpsum_simulated = np.full((n_types, n_periods_working), np.nan)
average_cost_consumption_simulated = np.full(
(n_types, n_periods_working), np.nan
)
average_cost_total_simulated = np.full((n_types, n_periods_working), np.nan)
average_revenue_ss_simulated = np.full((n_types, n_periods_working), np.nan)
average_revenue_ui_simulated = np.full((n_types, n_periods_working), np.nan)
average_revenue_lumpsum_simulated = np.full(
(n_types, n_periods_working), np.nan
)
average_revenue_consumption_simulated = np.full(
(n_types, n_periods_working), np.nan
)
average_revenue_total_simulated = np.full((n_types, n_periods_working), np.nan)
average_balance_total_simulated = np.full((n_types, n_periods_working), np.nan)
# hc statistics
hc_mean = np.full((n_types, n_periods_working), np.nan)
hc_employed_mean = np.full((n_types, n_periods_working), np.nan)
hc_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
# wage and income statistics
income_median = np.full((n_types, n_periods_working), np.nan)
wage_hc_factor_mean = np.full((n_types, n_periods_working), np.nan)
wage_hc_factor_employed_mean = np.full((n_types, n_periods_working), np.nan)
wage_hc_factor_unemployed_loss_mean = np.full(
(n_types, n_periods_working), np.nan
)
wage_hc_factor_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
wage_hc_factor_displaced_mean = np.full((n_types, 6, n_periods_working), np.nan)
wage_hc_factor_nondisplaced_mean = np.full(
(n_types, 6, n_periods_working), np.nan
)
wage_hc_factor_pre_displacement_mean = np.full(
(n_types, n_periods_working), np.nan
)
# ui benefit statistics
ui_benefits_mean = np.full((n_types, n_periods_working), np.nan)
ui_effective_replacement_rate = np.full((n_types, n_periods_working), np.nan)
ui_share_floor_binding = np.full((n_types, n_periods_working), np.nan)
ui_share_cap_binding = np.full((n_types, n_periods_working), np.nan)
# wealth statistics
assets_mean = np.full((n_types, n_periods_working), np.nan)
assets_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
assets_distribution = np.full(
(n_types, assets_grid_size, n_periods_working), np.nan
)
assets_over_income_mean = np.full((n_types, n_periods_working), np.nan)
log_assets_over_income_nonemployed_mean = np.full(
(n_types, n_periods_working), np.nan
)
# utility statistics
marginal_utility_nonemployed_mean = np.full(
(n_types, n_periods_working), np.nan
)
# labor force status statistics
share_employed = np.full((n_types, n_periods_working), np.nan)
share_unemployed = np.full((n_types, n_periods_working), np.nan)
share_unemployed_loss = np.full((n_types, n_periods_working), np.nan)
share_nonemployed = np.full((n_types, n_periods_working), np.nan)
share_searching = np.full((n_types, n_periods_working), np.nan)
# consumption statistics
consumption_employed_mean = np.full((n_types, n_periods_working), np.nan)
consumption_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
# consumption_nonemployed_stats = np.full((n_types, 4, n_periods_working), np.nan)
log_consumption_employed_mean = np.full((n_types, n_periods_working), np.nan)
log_consumption_nonemployed_mean = np.full((n_types, n_periods_working), np.nan)
pv_consumption_simulated = np.zeros((n_types, n_simulations))
# labor markets statistics
job_finding_probability_searching_mean = np.full(
(n_types, n_periods_working), np.nan
)
job_finding_probability_searching_loss_mean = np.full(
(n_types, n_periods_working), np.nan
)
job_finding_probability_searching_all_mean = np.full(
(n_types, n_periods_working), np.nan
)
job_finding_rate_searching_mean = np.full((n_types, n_periods_working), np.nan)
job_finding_rate_searching_loss_mean = np.full(
(n_types, n_periods_working), np.nan
)
job_finding_rate_searching_all_mean = np.full(
(n_types, n_periods_working), np.nan
)
duration_unemployed_weeks_mean = np.full((n_types, n_periods_working), np.nan)
duration_unemployed_median = np.full((n_types, n_periods_working), np.nan)
duration_unemployed_stdev = np.full((n_types, n_periods_working), np.nan)
wage_loss_median = np.full((n_types, n_periods_working), np.nan)
# cross sectional statistics
distribution_hc_assets_nonemployed = np.full(
(
n_types,
hc_grid_reduced_size,
assets_grid_size,
n_periods_working,
),
np.nan,
)
# II: SIMULATION
# (a) simulation from 1 to end of working life
for period_idx in range(n_periods_working):
# (i) search phase
# simulate search effort
effort_searching_simulated = np.full((n_types, n_simulations), np.nan)
effort_searching_loss_simulated = np.full((n_types, n_simulations), np.nan)
for type_idx in range(n_types):
effort_searching_simulated[
type_idx, :
] = interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_effort_searching[type_idx, :, :, period_idx],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
)
effort_searching_loss_simulated[
type_idx, :
] = interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_effort_searching_loss[type_idx, :, :, period_idx],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
)
effort_searching_simulated = np.minimum(
np.maximum(effort_searching_simulated, 0.0), 1.0
)
effort_searching_loss_simulated = np.minimum(
np.maximum(effort_searching_loss_simulated, 0.0), 1.0
)
job_finding_probability_searching_simulated = job_finding_probability(
effort_searching_simulated
)
job_finding_probability_searching_loss_simulated = job_finding_probability(
effort_searching_loss_simulated
)
# compute search phase statistics
share_searching[:, period_idx] = np.mean(searching_all_simulated, axis=1)
job_finding_probability_searching_all_mean[
:, period_idx
] = conditional_mean(
(
job_finding_probability_searching_simulated * searching_simulated
+ job_finding_probability_searching_loss_simulated
* searching_loss_simulated
),
searching_all_simulated,
axis=1,
)
job_finding_probability_searching_mean[:, period_idx] = conditional_mean(
job_finding_probability_searching_simulated, searching_simulated, axis=1
)
job_finding_probability_searching_loss_mean[
:, period_idx
] = conditional_mean(
job_finding_probability_searching_loss_simulated,
searching_loss_simulated,
axis=1,
)
# generate transition events
job_finding_event_searching_simulated = (
job_finding_probability_searching_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_finding_event_searching_loss_simulated = (
job_finding_probability_searching_loss_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# calculate average job finding rates
job_finding_rate_searching_all_mean[:, period_idx] = conditional_mean(
(
job_finding_event_searching_simulated * searching_simulated
+ job_finding_event_searching_loss_simulated
* searching_loss_simulated
),
searching_all_simulated,
axis=1,
)
job_finding_rate_searching_mean[:, period_idx] = conditional_mean(
job_finding_event_searching_simulated, searching_simulated, axis=1
)
job_finding_rate_searching_loss_mean[:, period_idx] = conditional_mean(
job_finding_event_searching_loss_simulated,
searching_loss_simulated,
axis=1,
)
# calculate unemployment duration statistics # todo: check timing
duration_unemployed_simulated_weeks = _get_duration_weeks(
(
job_finding_probability_searching_simulated * searching_simulated
+ job_finding_probability_searching_loss_simulated
* searching_loss_simulated
),
duration_unemployed_simulated,
)
duration_unemployed_weeks_mean[:, period_idx] = conditional_mean(
np.minimum(
duration_unemployed_simulated_weeks, 98
), # unemployment duration is capped at 98 weeks in the data
searching_all_simulated,
axis=1,
)
duration_unemployed_median[:, period_idx] = [
np.median(
duration_unemployed_simulated[i, searching_all_simulated[i, :]]
)
for i in range(n_types)
]
duration_unemployed_stdev[:, period_idx] = [
np.std(duration_unemployed_simulated[i, searching_all_simulated[i, :]])
for i in range(n_types)
]
# simulate transitions from search phase to consumption phase
# transitions of labor force status
employed_simulated = (
employed_simulated
+ searching_simulated * job_finding_event_searching_simulated
+ searching_loss_simulated * job_finding_event_searching_loss_simulated
).astype(bool)
unemployed_simulated = (
searching_simulated * (1 - job_finding_event_searching_simulated)
).astype(bool)
unemployed_loss_simulated = (
searching_loss_simulated
* (1 - job_finding_event_searching_loss_simulated)
).astype(bool)
nonemployed_simulated = (
unemployed_simulated + unemployed_loss_simulated
).astype(bool)
# simulate hc transition to consumption phase
hc_loss_simulated = np.full((n_types, n_simulations), np.nan)
pre_displacement_wage_simulated = np.full((n_types, n_simulations), np.nan)
new_wage_simulated = np.full((n_types, n_simulations), np.nan)
for type_idx in range(n_types):
hc_loss_simulated[type_idx, :] = (
(
hc_simulated[type_idx, :]
- _hc_after_loss_1_agent(
hc_simulated[type_idx, :],
wage_loss_factor_vector[type_idx, :],
wage_loss_reference_vector[type_idx, :],
period_idx,
)
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
pre_displacement_wage_simulated[type_idx, :] = (
wage_level
* wage_hc_factor_interpolated_1_agent(
hc_simulated[type_idx, :], wage_hc_factor_vector[type_idx, :]
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
new_wage_simulated[type_idx, :] = (
wage_level
* wage_hc_factor_interpolated_1_agent(
hc_simulated[type_idx, :] - hc_loss_simulated[type_idx, :],
wage_hc_factor_vector[type_idx, :],
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
# calculate wage loss statistics
wage_loss_simulated = new_wage_simulated - pre_displacement_wage_simulated
wage_loss_median[:, period_idx] = np.array(
[
np.median(
wage_loss_simulated[
type_idx,
searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :],
]
)
for type_idx in range(n_types)
]
)
# simulate hc loss upon reemployment
hc_simulated = hc_simulated - hc_loss_simulated
# transition of durations
duration_unemployed_simulated = (
duration_unemployed_simulated
+ searching_simulated * (1 - job_finding_event_searching_simulated)
+ searching_loss_simulated
* (1 - job_finding_event_searching_loss_simulated)
) # +1 for workers that remain unemployed
duration_unemployed_simulated = (
duration_unemployed_simulated * nonemployed_simulated
) # =0 for everyone else
# check for error in state simulation
if (
np.sum(
unemployed_simulated
+ unemployed_loss_simulated
+ employed_simulated
)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from search phase "
f"to consumption phase in period {period_idx}"
)
# (ii) consumption phase
# simulate consumption
consumption_simulated = _simulate_consumption(
policy_consumption_employed,
policy_consumption_unemployed,
policy_consumption_unemployed_loss,
employed_simulated,
unemployed_simulated,
unemployed_loss_simulated,
hc_simulated,
assets_simulated,
period_idx,
)
# update wages
wage_hc_factor_simulated = np.array(
[
wage_hc_factor_interpolated_1_agent(
hc_simulated[i, :], wage_hc_factor_vector[i, :]
)
for i in range(n_types)
]
)
wage_hc_factor_pre_displacement_simulated = np.array(
[
wage_hc_factor_interpolated_1_agent(
hc_pre_displacement_simulated[i, :], wage_hc_factor_vector[i, :]
)
for i in range(n_types)
]
)
# simulate savings
savings_simulated = _simulate_savings(
employed_simulated,
nonemployed_simulated,
consumption_simulated,
assets_simulated,
wage_hc_factor_simulated,
tax_ss,
tax_ui_vector,
tax_income,
transfers_lumpsum,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
# compute consumption phase statistics
# update pv of simulated consumption
pv_consumption_simulated += (
1 / (1 + interest_rate_raw) ** period_idx
) * consumption_simulated
# update aggregate variables
revenue_ss_simulated = (
np.repeat(tax_ss, n_simulations).reshape((n_types, n_simulations))
* wage_level
* wage_hc_factor_simulated
* employed_simulated
)
revenue_ui_simulated = (
np.repeat(tax_ui_vector[:, period_idx], n_simulations).reshape(
(n_types, n_simulations)
)
* wage_level
* wage_hc_factor_simulated
* employed_simulated
)
revenue_lumpsum_simulated = (
np.repeat(tax_income, n_simulations).reshape((n_types, n_simulations))
* wage_level
* wage_hc_factor_simulated
* employed_simulated
+ np.repeat(tax_income, n_simulations).reshape((n_types, n_simulations))
* interest_rate_raw
* assets_simulated
)
revenue_consumption_simulated = (
np.repeat(tax_consumption, n_simulations).reshape(
n_types, n_simulations
)
* consumption_simulated
)
cost_ui_simulated = (
simulate_ui_benefits(
wage_level * wage_hc_factor_pre_displacement_simulated,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
* nonemployed_simulated
)
cost_lumpsum_simulated = np.repeat(
transfers_lumpsum, n_simulations
).reshape((n_types, n_simulations))
average_revenue_ss_simulated[:, period_idx] = (
np.sum(revenue_ss_simulated, axis=1) / n_simulations
)
average_revenue_ui_simulated[:, period_idx] = (
np.sum(revenue_ui_simulated, axis=1) / n_simulations
)
average_revenue_lumpsum_simulated[:, period_idx] = (
np.sum(revenue_lumpsum_simulated, axis=1) / n_simulations
)
average_revenue_consumption_simulated[:, period_idx] = (
np.sum(revenue_consumption_simulated, axis=1) / n_simulations
)
average_cost_ss_simulated[:, period_idx] = np.zeros(n_types) / n_simulations
average_cost_ui_simulated[:, period_idx] = (
np.sum(cost_ui_simulated, axis=1) / n_simulations
)
average_cost_lumpsum_simulated[:, period_idx] = (
np.sum(cost_lumpsum_simulated, axis=1) / n_simulations
)
average_cost_consumption_simulated[:, period_idx] = (
np.zeros(n_types) / n_simulations
)
average_cost_total_simulated[:, period_idx] = (
np.zeros(n_types) # no cost of social security during working age
+ np.sum(cost_ui_simulated, axis=1)
+ np.sum(cost_lumpsum_simulated, axis=1)
+ np.zeros(n_types) # consumption tax not used to finance anything
) / n_simulations
average_revenue_total_simulated[:, period_idx] = (
np.sum(revenue_ss_simulated, axis=1)
+ np.sum(revenue_ui_simulated, axis=1)
+ np.sum(revenue_lumpsum_simulated, axis=1)
+ np.sum(revenue_consumption_simulated, axis=1)
) / n_simulations
average_balance_total_simulated[:, period_idx] = (
average_revenue_total_simulated[:, period_idx]
- average_cost_total_simulated[:, period_idx]
)
# get statistics
(
share_employed[:, period_idx],
share_unemployed[:, period_idx],
share_unemployed_loss[:, period_idx],
share_nonemployed[:, period_idx],
log_consumption_employed_mean[:, period_idx],
log_consumption_nonemployed_mean[:, period_idx],
consumption_employed_mean[:, period_idx],
consumption_nonemployed_mean[:, period_idx],
wage_hc_factor_mean[:, period_idx],
wage_hc_factor_employed_mean[:, period_idx],
wage_hc_factor_unemployed_loss_mean[:, period_idx],
wage_hc_factor_nonemployed_mean[:, period_idx],
wage_hc_factor_displaced_mean[:, :, period_idx],
wage_hc_factor_nondisplaced_mean[:, :, period_idx],
wage_hc_factor_pre_displacement_mean[:, period_idx],
marginal_utility_nonemployed_mean[:, period_idx],
income_median[:, period_idx],
hc_mean[:, period_idx],
hc_employed_mean[:, period_idx],
hc_nonemployed_mean[:, period_idx],
ui_benefits_mean[:, period_idx],
ui_effective_replacement_rate[:, period_idx],
ui_share_floor_binding[:, period_idx],
ui_share_cap_binding[:, period_idx],
assets_mean[:, period_idx],
assets_nonemployed_mean[:, period_idx],
assets_distribution[:, :, period_idx],
assets_over_income_mean[:, period_idx],
distribution_hc_assets_nonemployed[:, :, :, period_idx],
log_assets_over_income_nonemployed_mean[:, period_idx],
) = _get_statistics_consumption_phase(
employed_simulated,
unemployed_simulated,
unemployed_loss_simulated,
nonemployed_simulated,
consumption_simulated,
wage_hc_factor_simulated,
wage_hc_factor_pre_displacement_simulated,
duration_unemployed_simulated,
duration_since_displacement_simulated,
hc_simulated,
assets_simulated,
tax_ui_vector,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
# update simulated discounted value
pv_utility_simulated[
searching_all_simulated
] += discount_factor_compounded * leisure_utility_interpolated(
effort_searching_simulated[searching_all_simulated]
)
pv_utility_simulated[
employed_simulated
] += discount_factor_compounded * consumption_utility(
consumption_simulated[employed_simulated]
)
pv_utility_simulated[
nonemployed_simulated
] += discount_factor_compounded * consumption_utility(
np.maximum(
consumption_simulated[nonemployed_simulated],
consumption_min,
)
)
if np.any(np.isnan(pv_utility_simulated)):
warnings.warn("NaN in simulated discounted value at birth")
# simulate transition
# simulate transition events
hc_loss_event_simulated = (
np.repeat(hc_loss_probability, n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_loss_event_simulated = (
np.repeat(separation_rate_vector[:, period_idx], n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# simulate experience transition
hc_simulated = (
hc_simulated
+ np.full((n_types, n_simulations), 1.0) * employed_simulated
) # increase experience of employed workers by 1
hc_pre_displacement_simulated = (
hc_pre_displacement_simulated * nonemployed_simulated
+ hc_simulated * employed_simulated
)
# update duration tracker # todo: resolve this abomination
duration_unemployed_simulated = (
duration_unemployed_simulated
+ np.logical_and(
duration_unemployed_simulated >= 1,
duration_unemployed_simulated <= 5,
)
)
duration_unemployed_simulated = (
duration_unemployed_simulated
- 10
* (duration_unemployed_simulated > 0)
* employed_simulated
* job_loss_event_simulated
)
duration_unemployed_simulated = (
duration_unemployed_simulated
+ (duration_unemployed_simulated == 0)
* employed_simulated
* job_loss_event_simulated
)
duration_unemployed_simulated = duration_unemployed_simulated - 6 * (
duration_unemployed_simulated > 5
)
duration_unemployed_simulated = np.maximum(duration_unemployed_simulated, 0)
duration_since_displacement_simulated += (
np.full((n_types, n_simulations), 1.0)
* employed_simulated
* (1 - job_loss_event_simulated)
) # +1 for all workers that are still employed
duration_since_displacement_simulated *= (
np.full((n_types, n_simulations), 1.0)
* employed_simulated
* (1 - job_loss_event_simulated)
) # =0 for everyone else
duration_since_displacement_simulated = np.minimum(
duration_since_displacement_simulated, 5
) # capped at 5
# simulate transitions in employment status
searching_simulated = (
unemployed_simulated * (1 - hc_loss_event_simulated)
+ employed_simulated * job_loss_event_simulated
).astype(bool)
searching_loss_simulated = (
unemployed_loss_simulated
+ unemployed_simulated * hc_loss_event_simulated
).astype(bool)
searching_all_simulated = (
searching_simulated + searching_loss_simulated
).astype(bool)
employed_simulated = (
employed_simulated * (1 - job_loss_event_simulated)
).astype(bool)
# update assets
assets_simulated = savings_simulated
# compound discount factor
discount_factor_compounded = discount_factor_compounded * discount_factor
# check for error in state simulation
if (
np.sum(
searching_simulated + searching_loss_simulated + employed_simulated
)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from consumption phase "
f"in period {period_idx} to search phase in {period_idx + 1}"
)
# retirement period
# simulate one more transition to consumption phase
unemployed_simulated = searching_simulated
unemployed_loss_simulated = searching_loss_simulated
# hc loss materialises upon retirement
hc_loss_simulated = np.full((n_types, n_simulations), np.nan)
for type_idx in range(n_types):
hc_loss_simulated[type_idx, :] = (
hc_simulated[type_idx, :]
- _hc_after_loss_1_agent(
hc_simulated[type_idx, :],
wage_loss_factor_vector[type_idx, :],
wage_loss_reference_vector[type_idx, :],
period_idx + 1,
)
) * searching_loss_simulated[type_idx, :]
hc_simulated = (
hc_simulated - hc_loss_simulated
) # hc loss materializes upon reemployment
# interpolate consumption policies in first period of retirement
consumption_employed_retired_simulated = np.full(
(n_types, n_simulations), np.nan
)
consumption_unemployed_retired_simulated = np.full(
(n_types, n_simulations), np.nan
)
consumption_unemployed_loss_retired_simulated = np.full(
(n_types, n_simulations), np.nan
)
for type_idx in range(n_types):
consumption_employed_retired_simulated[type_idx, :] = (
interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_consumption_employed[type_idx, :, :, period_idx + 1],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
method=interpolation_method,
)
* employed_simulated[type_idx, :]
)
consumption_unemployed_retired_simulated[type_idx, :] = (
interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_consumption_unemployed[type_idx, :, :, period_idx + 1],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
method=interpolation_method,
)
* unemployed_simulated[type_idx, :]
)
consumption_unemployed_loss_retired_simulated[type_idx, :] = (
interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_consumption_unemployed_loss[type_idx, :, :, period_idx + 1],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
method=interpolation_method,
)
* unemployed_loss_simulated[type_idx, :]
)
consumption_retired_simulated = (
consumption_employed_retired_simulated * employed_simulated
+ consumption_unemployed_retired_simulated * unemployed_simulated
+ consumption_unemployed_loss_retired_simulated * unemployed_loss_simulated
)
# compute discounted simulated quantities for retirement period
discount_factor_retirement = (
(1 + interest_rate_raw) ** n_periods_retired - 1
) / (interest_rate_raw * (1 + interest_rate_raw) ** (n_periods_retired - 1))
pv_consumption_simulated += (
discount_factor_compounded
* discount_factor_retirement
* consumption_retired_simulated
)
pv_consumption_simulated = np.mean(pv_consumption_simulated, axis=1)
cost_ss_retired_simulated = discount_factor_retirement * np.repeat(
transfers_pensions, n_simulations
).reshape((n_types, n_simulations))
cost_lumpsum_retired_simulated = discount_factor_retirement * np.repeat(
transfers_lumpsum, n_simulations
).reshape((n_types, n_simulations))
revenue_consumption_retired_simulated = (
discount_factor_retirement
* np.repeat(tax_consumption, n_simulations).reshape(
(n_types, n_simulations)
)
* consumption_retired_simulated
)
revenue_lumpsum_retired_simulated = (
np.repeat(revenue_factor_lumpsum_retirement, n_simulations).reshape(
(n_types, n_simulations)
)
* assets_simulated
)
value_retired_simulated = discount_factor_retirement * consumption_utility(
consumption_retired_simulated
)
# compute PVs of government programs
# compute PVs of streams from working age
for period_idx in range(n_periods_working):
discount_factor_tmp = 1 / (1 + interest_rate_raw) ** period_idx
pv_cost_ss_simulated += (
discount_factor_tmp * average_cost_ss_simulated[:, period_idx]
)
pv_cost_ui_simulated += (
discount_factor_tmp * average_cost_ui_simulated[:, period_idx]
)
pv_cost_lumpsum_simulated += (
discount_factor_tmp * average_cost_lumpsum_simulated[:, period_idx]
)
pv_cost_consumption_simulated += (
discount_factor_tmp * average_cost_consumption_simulated[:, period_idx]
)
pv_cost_total_simulated += discount_factor_tmp * (
average_cost_ss_simulated[:, period_idx]
+ average_cost_ui_simulated[:, period_idx]
+ average_cost_lumpsum_simulated[:, period_idx]
+ average_cost_consumption_simulated[:, period_idx]
)
pv_revenue_ss_simulated += (
discount_factor_tmp * average_revenue_ss_simulated[:, period_idx]
)
pv_revenue_ui_simulated += (
discount_factor_tmp * average_revenue_ui_simulated[:, period_idx]
)
pv_revenue_lumpsum_simulated += (
discount_factor_tmp * average_revenue_lumpsum_simulated[:, period_idx]
)
pv_revenue_consumption_simulated += (
discount_factor_tmp
* average_revenue_consumption_simulated[:, period_idx]
)
pv_revenue_total_simulated += discount_factor_tmp * (
average_revenue_ss_simulated[:, period_idx]
+ average_revenue_ui_simulated[:, period_idx]
+ average_revenue_lumpsum_simulated[:, period_idx]
+ average_revenue_consumption_simulated[:, period_idx]
)
total_benefits = np.sum(average_cost_ui_simulated, axis=1)
# add discounted simulated retirement value to
# simulated expected discounted value at birth
# and expected discounted government cost
pv_utility_simulated += discount_factor_compounded * value_retired_simulated
pv_cost_ss_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* np.sum(cost_ss_retired_simulated, axis=1)
/ n_simulations
)
pv_cost_lumpsum_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* np.sum(cost_lumpsum_retired_simulated, axis=1)
/ n_simulations
)
pv_cost_total_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* (
np.sum(cost_ss_retired_simulated, axis=1)
+ np.sum(cost_lumpsum_retired_simulated, axis=1)
)
/ n_simulations
)
pv_revenue_lumpsum_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* np.sum(revenue_lumpsum_retired_simulated, axis=1)
/ n_simulations
)
pv_revenue_consumption_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* np.sum(revenue_consumption_retired_simulated, axis=1)
/ n_simulations
)
pv_revenue_total_simulated += (
1
/ (1 + interest_rate_raw) ** n_periods_working
* (
np.sum(revenue_lumpsum_retired_simulated, axis=1)
+ np.sum(revenue_consumption_retired_simulated, axis=1)
)
/ n_simulations
)
# compute average PV of utility over simulations
pv_utility_simulated = np.mean(pv_utility_simulated, axis=1)
# compute PVs of balances
pv_balance_ss_simulated = pv_revenue_ss_simulated - pv_cost_ss_simulated
pv_balance_ui_simulated = pv_revenue_ui_simulated - pv_cost_ui_simulated
pv_balance_lumpsum_simulated = (
pv_revenue_lumpsum_simulated - pv_cost_lumpsum_simulated
)
# pv_balance_consumption_simulated = (
# pv_revenue_consumption_simulated - pv_cost_consumption_simulated
# )
pv_balance_total_simulated = (
pv_revenue_total_simulated - pv_cost_total_simulated
)
# average over types
average_pv_utility_simulated = np.average(
pv_utility_simulated, weights=type_weights
).reshape(
1,
)
average_pv_balance_ss_simulated = np.average(
pv_balance_ss_simulated, weights=type_weights
).reshape(
1,
)
average_pv_balance_ui_simulated = np.average(
pv_balance_ui_simulated, weights=type_weights
).reshape(
1,
)
average_pv_balance_lumpsum_simulated = np.average(
pv_balance_lumpsum_simulated, weights=type_weights
).reshape(
1,
)
# average_pv_balance_consumption_simulated = np.average(
# pv_balance_consumption_simulated, weights=type_weights
# ).reshape(1,)
average_pv_balance_total_simulated = np.average(
pv_balance_total_simulated, weights=type_weights
).reshape(
1,
)
average_pv_utility_simulated_corrected = (
average_pv_utility_computed + 0 * average_pv_balance_total_simulated
)
# print output
if show_progress:
print("end simulation")
#########################################################
# (b) compute outcomes and store results
# (i) compute outcomes
net_government_spending_all = np.append(
average_balance_total_simulated,
np.repeat(-transfers_pensions * n_simulations, n_periods_retired).reshape(
(n_types, n_periods_retired)
),
axis=1,
)
# find quantities for government budget constraint
if equilibrium_condition == "combined":
welfare_simulated = average_pv_utility_simulated
diff_pv_utility = average_pv_utility_simulated - average_pv_utility_computed
balance_ss = average_pv_balance_ss_simulated
balance_ui = average_pv_balance_ui_simulated
balance_lumpsum = average_pv_balance_lumpsum_simulated
elif equilibrium_condition == "fixed_budget":
welfare_simulated = pv_utility_simulated
diff_pv_utility = pv_utility_simulated - pv_utility_computed
balance_ss = pv_balance_ss_simulated
balance_ui = pv_balance_ui_simulated
balance_lumpsum = pv_balance_lumpsum_simulated
else:
raise ValueError(
"error in equilibrium condition; choose one of "
"['combined', 'fixed_budget']"
)
if show_summary:
print(
"\n###############################################"
"###############################################\n"
"MODEL SIMULATION: \n"
" number of simulations"
+ " " * (69 - len(f"{n_simulations}"))
+ f"{n_simulations}\n"
" value at entry (mean simulated)"
+ " "
* (
59
- len("[" + ", ".join(f"{i:1.5f}" for i in welfare_simulated) + "]")
)
+ "["
+ ", ".join(f"{i:1.5f}" for i in welfare_simulated)
+ "]\n"
" difference in value at entry"
+ " "
* (
62
- len("[" + ", ".join(f"{i:1.5f}" for i in diff_pv_utility) + "]")
)
+ "["
+ ", ".join(f"{i:1.5f}" for i in diff_pv_utility)
+ "]\n"
" balance social security (pv)"
+ " "
* (62 - len("[" + ", ".join(f"{i:1.2f}" for i in balance_ss) + "]"))
+ "["
+ ", ".join(f"{i:1.2f}" for i in balance_ss)
+ "]\n"
" balance unemployment insurance (pv)"
+ " "
* (55 - len("[" + ", ".join(f"{i:1.2f}" for i in balance_ui) + "]"))
+ "["
+ ", ".join(f"{i:1.2f}" for i in balance_ui)
+ "]\n"
" balance general tax and transfers (pv)"
+ " "
* (
52
- len("[" + ", ".join(f"{i:1.2f}" for i in balance_lumpsum) + "]")
)
+ "["
+ ", ".join(f"{i:1.2f}" for i in balance_lumpsum)
+ "]\n"
"################################################"
"##############################################\n"
)
# (ii) store some results
average_pv_cost_computed = average_pv_balance_ui_computed.item()
average_pv_balance_total_simulated = average_pv_balance_total_simulated.item()
average_pv_utility_computed = average_pv_utility_computed.item()
average_pv_utility_computed_corrected = (
average_pv_utility_computed_corrected.item()
)
average_pv_utility_simulated = average_pv_utility_simulated.item()
average_pv_utility_simulated_corrected = (
average_pv_utility_simulated_corrected.item()
)
out = {
"assets_mean": assets_mean,
"assets_distribution": assets_distribution,
"assets_nonemployed_mean": assets_nonemployed_mean,
"assets_over_income_mean": assets_over_income_mean,
"average_pv_cost_computed": average_pv_cost_computed,
"average_pv_balance_total_simulated": average_pv_balance_total_simulated,
"average_pv_utility_computed": average_pv_utility_computed,
"average_pv_utility_computed_corrected": average_pv_utility_computed_corrected,
"average_pv_utility_simulated": average_pv_utility_simulated,
"average_pv_utility_simulated_corrected": average_pv_utility_simulated_corrected,
"consumption_employed_mean": consumption_employed_mean,
"consumption_nonemployed_mean": consumption_nonemployed_mean,
"distribution_assets_hc_nonemployed": distribution_hc_assets_nonemployed,
"duration_unemployed_weeks_mean": duration_unemployed_weeks_mean,
"equilibrium_instrument_rate": instrument_hist[-1],
"equilibrium_transfers_lumpsum": transfers_lumpsum,
"equilibrium_transfers_pensions": transfers_pensions,
"hc_mean": hc_mean,
"hc_employed_mean": hc_employed_mean,
"hc_nonemployed_mean": hc_nonemployed_mean,
"wage_hc_factor_displaced_mean": wage_hc_factor_displaced_mean,
"wage_hc_factor_nondisplaced_mean": wage_hc_factor_nondisplaced_mean,
"job_finding_probability_searching_mean": job_finding_probability_searching_mean, # noqa:B950
"job_finding_probability_searching_all_mean": job_finding_probability_searching_all_mean, # noqa:B950
"job_finding_probability_searching_loss_mean": job_finding_probability_searching_loss_mean, # noqa:B950
"job_finding_rate_searching_mean": job_finding_rate_searching_mean,
"job_finding_rate_searching_all_mean": job_finding_rate_searching_all_mean,
"job_finding_rate_searching_loss_mean": job_finding_rate_searching_loss_mean,
"log_consumption_employed_mean": log_consumption_employed_mean,
"log_consumption_nonemployed_mean": log_consumption_nonemployed_mean,
"marginal_utility_nonemployed_mean": marginal_utility_nonemployed_mean,
"average_balance_total_simulated": average_balance_total_simulated,
"net_government_spending_all": net_government_spending_all,
"policy_consumption_employed": policy_consumption_employed,
"policy_consumption_unemployed": policy_consumption_unemployed,
"policy_consumption_unemployed_loss": policy_consumption_unemployed_loss,
"policy_effort_searching": policy_effort_searching,
"policy_effort_searching_loss": policy_effort_searching_loss,
"pv_balance_total_simulated": pv_balance_total_simulated,
"pv_consumption_simulated": pv_consumption_simulated,
"pv_cost_ss_computed": pv_cost_ss_computed,
"pv_cost_ui_computed": pv_cost_ui_computed,
"pv_cost_lumpsum_computed": pv_cost_lumpsum_computed,
"pv_cost_ss_simulated": pv_cost_ss_simulated,
"pv_cost_ui_simulated": pv_cost_ui_simulated,
"pv_cost_lumpsum_simulated": pv_cost_lumpsum_simulated,
"pv_cost_consumption_simulated": pv_cost_consumption_simulated,
"pv_revenue_ss_computed": pv_revenue_ss_computed,
"pv_revenue_ui_computed": pv_revenue_ui_computed,
"pv_revenue_lumpsum_computed": pv_revenue_lumpsum_computed,
"pv_revenue_ss_simulated": pv_revenue_ss_simulated,
"pv_revenue_ui_simulated": pv_revenue_ui_simulated,
"pv_revenue_lumpsum_simulated": pv_revenue_lumpsum_simulated,
"pv_revenue_consumption_simulated": pv_revenue_consumption_simulated,
"share_employed": share_employed,
"share_nonemployed": share_nonemployed,
"share_searching": share_searching,
"share_unemployed": share_unemployed,
"share_unemployed_loss": share_unemployed_loss,
"total_benefits": total_benefits,
"ui_benefits_mean": ui_benefits_mean,
"ui_effective_replacement_rate_mean": ui_effective_replacement_rate,
"ui_share_floor_binding": ui_share_floor_binding,
"ui_share_cap_binding": ui_share_cap_binding,
"wage_hc_factor_employed_mean": wage_hc_factor_employed_mean,
"wage_hc_factor_nonemployed_mean": wage_hc_factor_nonemployed_mean,
"wage_hc_factor_pre_displacement_mean": wage_hc_factor_pre_displacement_mean,
"wage_loss_median": wage_loss_median,
"welfare": pv_utility_corrected,
}
else:
average_pv_cost_computed = average_pv_cost_ui_computed.item()
average_pv_utility_computed = average_pv_utility_computed.item()
average_pv_utility_computed_corrected = (
average_pv_utility_computed_corrected.item()
)
out = {
"average_pv_cost_computed": average_pv_cost_computed,
"average_pv_utility_computed": average_pv_utility_computed,
"average_pv_utility_computed_corrected": average_pv_utility_computed_corrected,
"equilibrium_instrument_rate": instrument_hist[-1],
"equilibrium_transfers_lumpsum": transfers_lumpsum,
"equilibrium_transfers_pensions": transfers_pensions,
"policy_consumption_employed": policy_consumption_employed,
"policy_consumption_unemployed": policy_consumption_unemployed,
"policy_consumption_unemployed_loss": policy_consumption_unemployed_loss,
"policy_effort_searching": policy_effort_searching,
"policy_effort_searching_loss": policy_effort_searching_loss,
"welfare": pv_utility_corrected,
}
for item in out:
try:
out[item] = out[item].tolist()
except AttributeError:
pass
if show_progress:
print("end main")
return out
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # Entry point: solve and simulate the model for one calibration setup and
    # interpolation method, then store the results as JSON.
    #
    # Optional command line arguments:
    #   1: name of the calibration setup to load
    #   2: interpolation method passed through to the solver
    try:
        setup_name = sys.argv[1]
        method = sys.argv[2]
    except IndexError:
        setup_name = "opt_rate_both_combined"
        method = "linear"
    # load calibration and set some variables; use a context manager so the
    # file handle is closed deterministically (previously the handle from
    # json.load(open(...)) was never closed)
    with open(
        ppj("IN_MODEL_SPECS", "analytics_calibration_" + setup_name + ".json")
    ) as calibration_file:
        calibration = json.load(calibration_file)
    # set controls for the solver/simulator
    controls = {
        "interpolation_method": method,
        "n_iterations_solve_max": 20,
        "n_simulations": int(1e5),
        "run_simulation": True,
        "seed_simulation": 3405,
        "show_progress_solve": True,
        "show_summary": True,
        "tolerance_solve": 1e-5,
    }
    # solve and simulate
    results = _solve_and_simulate(controls, calibration)
    # store results
    with open(
        ppj(
            "OUT_RESULTS",
            "analytics",
            "analytics_" + setup_name + "_results_" + method + ".json",
        ),
        "w",
    ) as outfile:
        json.dump(results, outfile, ensure_ascii=False, indent=2)
import copy
import json # noqa:F401
import src.utilities.istarmap_3_8 # noqa, noreorder
import multiprocessing
import sys # noqa:F401
import tqdm # noqa:F401
import warnings # noqa:F401
import numba as nb # noqa:F401
import numpy as np # noqa:F401
import pandas as pd # noqa:F401
from scipy import interpolate # noqa:F401
from bld.project_paths import project_paths_join as ppj # noqa:F401
from src.model_analysis.run_utils import _solve_run
from src.utilities.optimization_utils import get_step_size
#####################################################
# PARAMETERS
#####################################################
# Age bins used to aggregate simulated life-cycle profiles to CPS-style age
# groups; bins are half-open on the right ([lower, upper)) because
# _average_by_age_group calls pd.cut(..., right=False).
age_thresholds_urate = [-np.inf, 21, 25, 30, 35, 40, 45, 50, 55, 60, np.inf]
# Labels for the bins above (one fewer than thresholds, as pd.cut requires).
age_groups_urate = [
    "20",
    "21 to 24",
    "25 to 29",
    "30 to 34",
    "35 to 39",
    "40 to 44",
    "45 to 49",
    "50 to 54",
    "55 to 59",
    "60 and older",
]
#####################################################
# FUNCTIONS
#####################################################
def _average_by_age_group(array_in, age_min, thresholds, labels):
array_in.loc[:, "age"] = array_in.index // 4 + age_min
array_in.loc[:, "age_group"] = pd.cut(
array_in.age, thresholds, right=False, labels=labels
)
array_out = array_in.groupby("age_group").mean()
array_out = array_out.drop(columns="age")
return array_out
def _eval_fit(coefficients, controls, calibration):
    """
    Evaluate the calibration fit for a leisure-utility coefficient vector.

    NOTE(review): the original docstring described *coefficients* as UI
    replacement rates; the code actually treats them as values of the
    marginal utility of leisure at the nodes ``calibration["leisure_base"]``
    (a PCHIP spline is fitted through them below).

    Relies on the module-level global ``target_unemployment`` (set in the
    ``__main__`` block) as the calibration target. Mutates *calibration* in
    place with the new leisure-utility arrays.

    :parameter:
        coefficients : array
            Marginal leisure utility at the spline nodes.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in
            *solve_model*)
    :returns:
        objective : float
            Negative fit (root of summed squared deviations of simulated from
            targeted unemployment rates, averaged over types with
            ``type_weights``); negated because the caller MAXIMIZES.
        equilibrium_instrument_rate : float
            Value of instrument rate that ensures balanced budget at *coefficients*
    """
    # load calibration
    age_min = calibration["age_min"]
    leisure_base = np.array(calibration["leisure_base"])
    leisure_grid = np.array(calibration["leisure_grid"])
    type_weights = np.array(calibration["type_weights"])
    # get derivative of leisure utility from coefficients
    # (PCHIP through the node values; level and curvature follow by
    # antidifferentiation / differentiation of the same spline)
    leisure_utility_new = interpolate.PchipInterpolator(
        leisure_base, coefficients
    ).antiderivative()(leisure_grid)
    leisure_utility_dx_new = interpolate.PchipInterpolator(leisure_base, coefficients)(
        leisure_grid
    )
    leisure_utility_dxdx_new = interpolate.PchipInterpolator(
        leisure_base, coefficients
    ).derivative()(leisure_grid)
    # update calibration
    calibration["leisure_utility"] = leisure_utility_new.tolist()
    calibration["leisure_utility_dx"] = leisure_utility_dx_new.tolist()
    calibration["leisure_utility_dxdx"] = leisure_utility_dxdx_new.tolist()
    # solve model_analysis
    results = _solve_run({}, controls, calibration)
    # extract outputs
    share_nonemployed_mean = pd.DataFrame(
        np.array(results["share_nonemployed"]).T, columns=["high", "medium", "low"]
    )
    share_searching_mean = pd.DataFrame(
        np.array(results["share_searching"]).T, columns=["high", "medium", "low"]
    )
    unemployment_rate = (share_searching_mean + share_nonemployed_mean) / 2
    job_finding_probability_mean = pd.DataFrame(
        np.array(results["job_finding_probability_searching_all_mean"]).T,
        columns=["high", "medium", "low"],
    )
    job_finding_rate_mean = pd.DataFrame(
        np.array(results["job_finding_rate_searching_all_mean"]).T,
        columns=["high", "medium", "low"],
    )
    equilibrium_instrument_rate = results["equilibrium_instrument_rate"]
    unemployment_rate_by_age_group = _average_by_age_group(
        unemployment_rate, age_min, age_thresholds_urate, age_groups_urate
    )
    # NOTE(review): the job-finding aggregates below are computed and trimmed
    # but never enter the objective -- confirm whether they should.
    job_finding_probability_by_age_group = _average_by_age_group(
        job_finding_probability_mean, age_min, age_thresholds_urate, age_groups_urate
    )
    job_finding_rate_by_age_group = _average_by_age_group(
        job_finding_rate_mean, age_min, age_thresholds_urate, age_groups_urate
    )
    # drop the boundary age groups that are not targeted
    unemployment_rate_by_age_group = unemployment_rate_by_age_group.drop(
        ["20", "60 and older"]
    )
    job_finding_probability_by_age_group = job_finding_probability_by_age_group.drop(
        ["20", "60 and older"]
    )
    job_finding_rate_by_age_group = job_finding_rate_by_age_group.drop(
        ["20", "60 and older"]
    )
    # compute objective for MAXIMIZATION
    fit = np.average(
        np.sqrt(
            np.sum(np.square(unemployment_rate_by_age_group - target_unemployment))
        ),
        weights=type_weights,
    )
    objective = -fit
    return objective, equilibrium_instrument_rate
def _jacobian_fit(coefficients, controls, calibration):
    """
    Two-sided finite-difference gradient of the calibration fit (see
    *_eval_fit*) with respect to the leisure-utility coefficients.

    Each coefficient is perturbed up and down; the 2k model solutions are
    computed in parallel via ``multiprocessing.Pool`` (``pool.istarmap`` is
    monkey-patched in by ``src.utilities.istarmap_3_8``).

    NOTE(review): the original summary referred to "expected average value at
    model entry" and the "unemployment insurance rate"; the code actually
    differentiates the negative unemployment-rate fit w.r.t. the
    leisure-utility spline nodes. Relies on the module-level global
    ``target_unemployment``.

    :parameter:
        coefficients : array
            Coordinates at which to compute gradient.
        controls : dict
            Collection of control variables for computation (details see
            description of *qnewton*)
        calibration : dict
            Collection of model_analysis parameters (details see description in *solve_model*)
    :returns:
        jacobian : array
            Gradient of the (negative-fit) objective at *coefficients*,
            computed as central differences.
    """
    # load controls
    show_progress = controls["show_progress"]
    n_workers = controls["n_workers"]
    step_size_init = controls["step_size_jacobian"]
    # load calibration
    age_min = calibration["age_min"]
    leisure_base = np.array(calibration["leisure_base"])
    leisure_grid = np.array(calibration["leisure_grid"])
    type_weights = np.array(calibration["type_weights"])
    # calculate control variables
    n_coefficients = coefficients.shape[0]
    n_runs = n_coefficients * 2
    # prepare computation of Jacobian
    # (relative step size, floored at step_size_init in absolute terms)
    step_size_diff = step_size_init * np.maximum(abs(coefficients), 1)
    delta = np.full(n_coefficients, np.nan)
    fx = np.full(n_runs, np.nan)
    # column layout: runs 0..k-1 perturb coefficient idx upward,
    # runs k..2k-1 perturb it downward
    coefficients_all = np.repeat(coefficients, n_runs).reshape(-1, n_runs)
    for idx in range(n_coefficients):
        coefficients_all[idx, idx] += step_size_diff[idx]
        coefficients_all[idx, idx + n_coefficients] += -step_size_diff[idx]
        delta[idx] = (
            coefficients_all[idx, idx] - coefficients_all[idx, idx + n_coefficients]
        )
    leisure_utility_dx_all = np.full((len(leisure_grid), n_runs), np.nan)
    for run_idx in range(n_runs):
        leisure_utility_dx_tmp = interpolate.PchipInterpolator(
            leisure_base, coefficients_all[:, run_idx]
        )(leisure_grid)
        # clip marginal utility of leisure at zero
        # NOTE(review): _eval_fit does NOT clip -- confirm this asymmetry is
        # intended.
        leisure_utility_dx_tmp = np.minimum(leisure_utility_dx_tmp, 0.0)
        leisure_utility_dx_all[:, run_idx] = leisure_utility_dx_tmp
    inputs = []
    for run_idx in range(n_runs):
        inputs += [
            (
                {"leisure_utility_dx": leisure_utility_dx_all[:, run_idx]},
                copy.deepcopy(controls),
                copy.deepcopy(calibration),
            )
        ]
    # solve for all runs of the program (in parallel)
    with multiprocessing.Pool(n_workers) as pool:
        if show_progress:
            out = tuple(
                tqdm.tqdm(
                    pool.istarmap(_solve_run, inputs),
                    total=n_runs,
                    desc="Jacobian",
                    ascii=True,
                    ncols=94,
                )
            )
        else:
            out = pool.starmap(_solve_run, inputs)
    # extract results (same fit measure as _eval_fit)
    for run_idx in range(n_runs):
        share_nonemployed_mean = pd.DataFrame(
            np.array(out[run_idx]["share_nonemployed"]).T,
            columns=["high", "medium", "low"],
        )
        share_searching_mean = pd.DataFrame(
            np.array(out[run_idx]["share_searching"]).T,
            columns=["high", "medium", "low"],
        )
        unemployment_rate = (share_searching_mean + share_nonemployed_mean) / 2
        unemployment_rate_by_age_group = _average_by_age_group(
            unemployment_rate, age_min, age_thresholds_urate, age_groups_urate
        )
        unemployment_rate_by_age_group = unemployment_rate_by_age_group.drop(
            ["20", "60 and older"]
        )
        fit = np.average(
            np.sqrt(
                np.sum(np.square(unemployment_rate_by_age_group - target_unemployment))
            ),
            weights=type_weights,
        )
        fx[run_idx] = -fit
    # reshape to (n_coefficients, 2): column 0 = up-step, column 1 = down-step
    fx = np.moveaxis(np.stack((fx[:n_coefficients], fx[n_coefficients:])), 0, -1)
    jacobian = np.full(n_coefficients, np.nan)
    for idx in range(n_coefficients):
        jacobian[idx] = (fx[idx, 0] - fx[idx, 1]) / delta[idx]
    return jacobian
def qnewton(func, jac, x_ini, controls, *args):
    """
    Solve unconstrained maximization problem using quasi-Newton methods.
    :parameter:
        func : functional
            Objective function to maximize.
        jac : functional
            Function that returns function value and Jacobian of
            objective function.
        x_ini : array
            Initial guess for coefficients of local maximum.
        controls : dict
            Dictionary of function controls (details see below)
        *args : tuple
            Additional arguments for objective function.
    :returns:
        x : float
            Coefficients of local maximum of objective function.
        fx : float
            Value of objective function at x
        g : array [len(x) x 1]
            Gradient of objective function at x
        hessian : array [len(x) x len(x)]
            Approximation of the inverse Hessian of the objective function at x.
    :raises:
        ValueError : NaNs or INFs in coefficients.
    The user defined functions FUNC and JAC must have the following syntax
        fx, instr_eq = f(x, controls, *args)
        g = jac(x, controls, *args)
    where, in either case, the additional variables are the ones passed to QNEWTON
    :controls:
        interpolation_method : string,
            Interpolation method for 1D interpolation ("linear" or "cubic")
        n_iterations_jacobian_max : int,
            Maximum number of model_analysis solution iterations for computation of
            jacobian
        n_iterations_opt_max : int,
            Maximum number of iterations of the optimization algorithm
        n_iterations_solve_max : int,
            Maximum number of model_analysis solution iterations for computation of
            value of objective function
        n_iterations_step_max : int,
            Maximum number of iterations of the step search algorithm
        n_simulations : int,
            Number of simulations for model_analysis simulation
        n_workers : int,
            Number of cores used for parallel processing
        run_simulation : bool,
            Flag to activate / deactivate model_analysis simulation
        show_progress : bool,
            Flag to activate / deactivate output of progress bar for gradient
            computation
        show_progress_solve : bool,
            Flag to activate / deactivate output of status updates for model_analysis
            solution
        show_summary : bool,
            Flag to activate / deactivate output of summary statistics for model_analysis
            solution iterations
        step_method : string,
            Step search method ("bt" or "gold") # todo: adjust after implementation of bhhh
        step_size_jacobian : float,
            Size of disturbance for finite difference calculation in gradient
            computation
        tolerance_solve : float,
            Tolerance for government budget balance in model_analysis solution algorithm
        eps0 : float
            zero factor (used in convergence criteria) (default = 1)
        n_iterations_opt_max : int
            Maximum major iterations (default = 250)
        n_iterations_step_max : int
            Maximum step search iterations (default = 50)
        step_method : str
            Method to calculate optimal step length. Available options
            - "full" : step length is set to 1
            - "bhhh" : BHHH STEP (currently not implemented)
                       # todo: adjust after implementation
            - "bt"   : BT STEP (default)
            - "gold" : GOLD STEP (called when the others fail)
        tol : float
            convergence tolerance (default = sqrt(eps))
    Modified from the corresponding file by Paul L. Fackler & Mario J.Miranda
    paul_fackler@ncsu.edu, miranda.4@osu.edu

    NOTE(review): this function also returns ``instr_eq`` (the equilibrium
    instrument rate from the last successful evaluation) as a fifth return
    value, and it reads the name ``calibration`` below even though
    ``calibration`` is NOT a parameter -- it resolves to the module-level
    global set in the ``__main__`` block. Confirm this aliasing with the
    deep copy passed through *args is intended.
    """
    # load controls
    n_iterations_opt_max = controls["n_iterations_opt_max"]
    interpolation_method = controls["interpolation_method"]
    tolerance_bfgs_update = controls["tolerance_bfgs_update"]
    tolerance_convergence_gradient = controls["tolerance_convergence_gradient"]
    tolerance_convergence_marquardt = controls["tolerance_convergence_marquardt"]
    tolerance_slope_min = controls["tolerance_slope_min"]
    zero_factor_convergence_marquardt = controls["zero_factor_convergence_marquardt"]
    # load calibration
    # NOTE(review): module-level global ``calibration`` (see docstring note)
    instrument = calibration["instrument"]
    bounds_lower = calibration["bounds_lower"]
    bounds_upper = calibration["bounds_upper"]
    ####################
    # initiate algorithm
    iteration_opt = 0
    k = x_ini.shape[0]
    reset = True
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: start \n"
        "################################################"
        "##############################################\n"
    )
    print("compute initial function value")
    fx0, instr_eq = func(x_ini, controls, *args)
    # update equilibrium instrument rate
    if instrument == "consumption_tax":
        calibration["consumption_tax_rate_init"][interpolation_method] = instr_eq
    elif instrument == "income_tax_rate":
        calibration["income_tax_rate_init"][interpolation_method] = instr_eq
    print("compute initial Jacobian")
    g0 = jac(x_ini, controls, *args)
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: initialization \n"
        " iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        " starting coefficient vector"
        + " " * (63 - len("[" + ", ".join(f"{i:1.5f}" for i in x_ini) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x_ini)
        + "]\n"
        " starting value of objective function"
        + " " * (54 - len(f"{fx0:1.5f}"))
        + f"{fx0:1.5f}\n"
        " starting gradient norm"
        + " " * (68 - len(f"{np.linalg.norm(g0):9.4f}"))
        + f"{np.linalg.norm(g0):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    # get approximate hessian
    # (scaled negative identity: start from steepest ascent)
    hessian = -np.identity(k) / max(abs(fx0), 1)
    if np.all(abs(g0) < tolerance_convergence_gradient):
        print("Gradient tolerance reached at starting value")
        return x_ini, fx0, g0, hessian, instr_eq
    ####################
    # start iteration
    x = x_ini
    fx = fx0
    g = g0
    d = 0
    while iteration_opt <= n_iterations_opt_max:
        iteration_opt += 1
        d = -np.dot(hessian, g0) # search direction
        # if increase in objective in the direction of search is too low,
        # revert to steepest ascent (B = I)
        if np.dot(d, g0) / np.dot(d, d) < tolerance_slope_min:
            hessian = -np.identity(k) / max(abs(fx0), 1)
            d = g0 / max(abs(fx0), 1)
            # NOTE(review): integer 1 where True is used elsewhere for
            # ``reset`` -- truthiness is equivalent, but inconsistent.
            reset = 1
        print("compute optimal step length")
        s, fx, instr_eq, iterations, err = get_step_size(
            func, x, fx0, g0, d, controls, *args
        )
        # check for step search failure
        if fx <= fx0:
            if reset: # if already using steepest ascent, break
                warnings.warn("Iterations stuck in qnewton")
                return x, fx0, g0, hessian, instr_eq
            else: # else, try again with steepest ascent
                hessian = -np.identity(k) / max(abs(fx0), 1)
                d = g0 / max(abs(fx0), 1)
                s, fx, instr_eq, iterations, err = get_step_size(
                    func, x, fx0, g0, d, controls, *args
                )
                if err:
                    warnings.warn("Cannot find suitable step in qnewton")
                    return x, fx0, g0, hessian, instr_eq
        # run some checks, then update step and current coefficient vector
        if np.logical_or(np.any(np.isnan(x + (s * d))), np.any(np.isinf(x + (s * d)))):
            raise ValueError("NaNs or INFs in coefficients.")
        elif np.logical_or(
            np.any(x + (s * d) < bounds_lower), np.any(x + (s * d) > bounds_upper)
        ):
            warnings.warn("Coefficient values out of bounds")
            break
        else:
            d = s * d
            x = x + d
        # update equilibrium instrument rate
        if instrument == "consumption_tax":
            calibration["consumption_tax_rate_init"][interpolation_method] = instr_eq
        elif instrument == "income_tax_rate":
            calibration["income_tax_rate_init"][interpolation_method] = instr_eq
        # compute Jacobian
        print("compute jacobian after step")
        g = jac(x, controls, *args)
        print(
            "\n###############################################"
            "###############################################\n"
            "QNEWTON: optimization \n"
            " iteration"
            + " " * (81 - len(f"{iteration_opt:4d}"))
            + f"{iteration_opt:4d}\n"
            " current coefficient vector"
            + " " * (64 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
            + "["
            + ", ".join(f"{i:1.5f}" for i in x)
            + "]\n"
            " current value of objective function"
            + " " * (55 - len(f"{fx:1.5f}"))
            + f"{fx:1.5f}\n"
            " current step norm"
            + " " * (73 - len(f"{np.linalg.norm(d):9.4f}"))
            + f"{np.linalg.norm(d):9.4f}\n"
            " current gradient norm"
            + " " * (69 - len(f"{np.linalg.norm(g):9.4f}"))
            + f"{np.linalg.norm(g):9.4f}\n"
            "################################################"
            "##############################################\n"
        )
        # test convergence using Marquardt's criterion and gradient test
        if np.logical_or(
            np.logical_and(
                (fx - fx0) / (abs(fx) + zero_factor_convergence_marquardt)
                < tolerance_convergence_marquardt,
                np.all(
                    abs(d) / (abs(x) + zero_factor_convergence_marquardt)
                    < tolerance_convergence_marquardt
                ),
            ),
            np.all(abs(g) < tolerance_convergence_gradient),
        ):
            print("converged")
            break
        # update inverse Hessian approximation
        u = g - g0
        ud = np.dot(u, d)
        # if update could be numerically inaccurate, revert to steepest ascent,
        # otherwise use BFGS update
        if (abs(ud) / (np.linalg.norm(d) * np.linalg.norm(u))) < tolerance_bfgs_update:
            hessian = -np.identity(k) / max(abs(fx), 1)
            reset = True
        else:
            w = d - np.dot(hessian, u)
            wd = np.outer(w, d)
            hessian = (
                hessian + ((wd + wd.T) - (np.dot(u, w) * np.outer(d, d)) / ud) / ud
            )
            reset = False
        # update objects for iteration
        fx0 = fx
        g0 = g
    ####################
    # iteration complete
    if iteration_opt == n_iterations_opt_max:
        warnings.warn("Maximum iterations exceeded in qnewton")
    print(
        "\n###############################################"
        "###############################################\n"
        "QNEWTON: complete \n"
        " iteration"
        + " " * (81 - len(f"{iteration_opt:4d}"))
        + f"{iteration_opt:4d}\n"
        " final coefficient vector"
        + " " * (66 - len("[" + ", ".join(f"{i:1.5f}" for i in x) + "]"))
        + "["
        + ", ".join(f"{i:1.5f}" for i in x)
        + "]\n"
        " final value of objective function"
        + " " * (57 - len(f"{fx:1.5f}"))
        + f"{fx:1.5f}\n"
        " final step norm"
        + " " * (75 - len(f"{np.linalg.norm(d):9.4f}"))
        + f"{np.linalg.norm(d):9.4f}\n"
        " final gradient norm"
        + " " * (71 - len(f"{np.linalg.norm(g):9.4f}"))
        + f"{np.linalg.norm(g):9.4f}\n"
        "################################################"
        "##############################################\n"
    )
    return x, fx, g, hessian, instr_eq
#####################################################
# SCRIPT
#####################################################
if __name__ == "__main__":
    # load calibration and set variables
    # (context manager closes the handle; the original json.load(open(...))
    # leaked it)
    with open(
        ppj(
            "IN_MODEL_SPECS",
            "analytics_calibration_base_combined_inctax.json",
        )
    ) as calibration_file:
        calibration = json.load(calibration_file)
    # set controls
    controls = {
        "interpolation_method": "linear",
        "n_iterations_jacobian_max": 10,
        "n_iterations_opt_max": 50,
        "n_iterations_solve_max": 20,
        "n_iterations_step_max": 20,
        "n_simulations": int(1e5),
        "n_workers": 15,
        "run_simulation": True,
        "seed_simulation": 3405,
        "show_progress": True,
        "show_progress_solve": False,
        "show_summary": False,
        "step_method": "bt",
        "step_size_jacobian": 0.025,
        "tolerance_bfgs_update": 1e-9,
        "tolerance_convergence_gradient": 1e-6,
        "tolerance_convergence_marquardt": 1e-4,
        "tolerance_solve": 1e-5,
        "tolerance_slope_min": 1e-6,
        "zero_factor_convergence_marquardt": 1,
    }
    # load calibration target
    targets_transitions = pd.read_csv(
        ppj("OUT_RESULTS", "empirics", "cps_transition_probabilities.csv"),
        index_col=["age_group", "education_reduced"],
    )
    targets_unemployment = pd.read_csv(
        ppj("OUT_RESULTS", "empirics", "cps_unemployment_probabilities.csv"),
        index_col=["age_group", "education_reduced"],
    )
    # NOTE: target_unemployment is read as a module-level global by
    # _eval_fit and _jacobian_fit
    target_unemployment = targets_unemployment.loc[:, "estimate"].unstack(level=1)
    target_finding = targets_transitions.loc[:, "p_ue_3m_computed"].unstack(level=1)
    target_unemployment = target_unemployment.drop("60 to 64")
    target_finding = target_finding.drop("60 to 64")
    # set starting point for optimization (marginal leisure utility at the
    # spline nodes; superseded candidate vectors were removed -- see VCS
    # history if an alternative start is needed)
    x_ini = np.array(
        [
            -0.38058201782797624,
            -0.4363001134658238,
            -2.779983793878501,
            -11.064102042328775,
            -349.55882,
        ]
    )
    # adjust calibration
    leisure_grid = np.linspace(0, 1, 1001)
    leisure_base = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
    leisure_utility_dx_new = interpolate.PchipInterpolator(
        leisure_base, x_ini, extrapolate=True
    )(leisure_grid)
    calibration["leisure_utility_dx"] = leisure_utility_dx_new.tolist()
    calibration["leisure_base"] = leisure_base.tolist()
    calibration["leisure_grid"] = leisure_grid.tolist()
    calibration["bounds_lower"] = [-np.inf] * len(leisure_base)
    calibration["bounds_upper"] = [0.0] * len(leisure_base)
    # run optimization
    x_opt, fx_opt, g_opt, hessian, instr_eq = qnewton(
        _eval_fit, _jacobian_fit, x_ini, controls, calibration
    )
    # evaluate the fitted leisure-utility spline and its derivatives on the grid
    leisure_utility_dx_interpolator = interpolate.PchipInterpolator(
        leisure_base, x_opt, extrapolate=True
    )
    leisure_utility_new = leisure_utility_dx_interpolator.antiderivative()(leisure_grid)
    leisure_utility_dx_new = leisure_utility_dx_interpolator(leisure_grid)
    leisure_utility_dxdx_new = leisure_utility_dx_interpolator.derivative()(
        leisure_grid
    )
    # sandbox script: pause here (e.g. under a debugger) to inspect results
    print("pause")
| {"/src/model_analysis/elasticity_1_step.py": ["/src/model_analysis/run_utils.py"], "/src/model_analysis/elasticity_exact.py": ["/src/model_analysis/run_utils.py"], "/src/model_calibration/adjust_calibration.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/model_analysis/run_utils.py": ["/src/model_analysis/solve_model.py"], "/src/model_analysis/optimization.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"], "/src/utilities/sandbox.py": ["/src/model_analysis/run_utils.py", "/src/utilities/optimization_utils.py"]} |
59,979 | MrG0d/GooglePhoto-SynologyMoments | refs/heads/master | /translate/ru.py | NAME_KEY_METADATA = 'метадата'
# Russian-locale counterparts of the keys in translate/en.py; the values are
# matched against Takeout file and folder names in run.py, so they must stay
# in Russian. NOTE(review): presumably copied from a Russian-locale Google
# Takeout export -- confirm against an actual export.
NAME_KEY_CHANGED = 'измененный'
NAME_KEY_ARCHIVE = 'Архивы'
59,980 | MrG0d/GooglePhoto-SynologyMoments | refs/heads/master | /logger.py | import logging
# ANSI color scaffolding. NOTE(review): none of these constants are used by
# PlainFormatter or Logger in this module -- presumably kept for a future
# colored formatter; confirm before removing.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, GRAY, WHITE = range(9)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# Level name -> color index mapping for that (unused) colored output.
COLORS = {
    'WARNING': YELLOW,
    'INFO': WHITE,
    'DEBUG': BLUE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}
class PlainFormatter(logging.Formatter):
    """Formatter emitting '[time] [logger] [LEVEL] message (file:line)'."""

    # Layout shared by the console and file handlers.
    FORMAT = '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s (%(filename)s:%(lineno)d)'

    def __init__(self):
        # Only the format string is customized; the date format stays the
        # logging default. The original also overrode format() with a pure
        # delegation to the base class -- removed as a no-op.
        logging.Formatter.__init__(self, self.FORMAT)
class Logger(logging.Logger):
    """Logger that mirrors all records to the console and, from WARNING up,
    to ``error.log``.

    Instantiating this class opens/creates ``error.log`` relative to the
    current working directory.
    """

    def __init__(self, name):
        super().__init__(name)
        formatter = PlainFormatter()
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        self.addHandler(console_handler)
        file_handler = logging.FileHandler('error.log')
        file_handler.setLevel(logging.WARNING)
        file_handler.setFormatter(formatter)
        self.addHandler(file_handler)
| {"/run.py": ["/bootstrap.py", "/logger.py", "/translate/ru.py"]} |
59,981 | MrG0d/GooglePhoto-SynologyMoments | refs/heads/master | /run.py | from datetime import datetime
import json
import os
import re
from bootstrap import get_file_dir_and_filename, hash_file, path
from logger import Logger
from translate.ru import *
# SHA-1 digest -> {'path': first file seen with that digest}; used by
# processed_files() to detect duplicate media across albums and year folders.
hash_processed_photos = {}
logger = Logger('Transform')
# Root of the extracted Google Takeout photo dump, relative to the project dir.
PATH_TO_GOOGLE_PHOTO_DIR = path('google_photo')
def get_meta_file(path_to_media_file):
    """Locate the Takeout JSON metafile for a media file.

    Takeout names metafiles inconsistently, so several candidate names are
    tried in order. The first one found is renamed to a hidden name
    ('.name.ext.json') so later passes skip it.

    :parameter: path_to_media_file : str -- path to the media file.
    :returns: path to the (hidden) metafile, or None if none was found.
    """
    directory, name = get_file_dir_and_filename(path_to_media_file)
    # strip the "-changed" marker so edited copies map to the original's metafile
    basic_name = (name if '-%s.' % NAME_KEY_CHANGED not in name
                  else name.replace('-%s' % NAME_KEY_CHANGED, ''))
    metafile_path = '%s/.%s.json' % (directory, basic_name)
    if os.path.isfile(metafile_path):
        # already processed and hidden earlier
        return metafile_path
    stem = os.path.splitext(basic_name)[0]
    candidates = [
        '%s/%s.json' % (directory, basic_name),       # name.ext.json
        '%s/%s.json' % (directory, basic_name[:-1]),  # last char truncated
        '%s/%s.json' % (directory, stem),             # name.json
        '%s/%s.json' % (directory, stem[:-1]),        # name truncated by one
    ]
    matches = re.search(r"(\(\d+\))\.", path_to_media_file)
    if matches:
        # duplicate counter moved: name(1).mp4 -> name.mp4(1).json
        candidates.append(
            '%s/%s%s.json'
            % (directory, basic_name.replace(matches[1], ''), matches[1])
        )
    for candidate in candidates:
        if os.path.isfile(candidate):
            os.rename(candidate, metafile_path)
            return metafile_path
    return None
def get_meta_data(path_to_media_file):
    """Return the parsed JSON metadata for a media file, or None.

    Also hides the metafile as a side effect of get_meta_file().
    Returns None when no metafile exists or it cannot be read/parsed.
    """
    path_to_metadata = get_meta_file(path_to_media_file)
    if not path_to_metadata:
        return None
    try:
        # Takeout metadata is UTF-8 encoded JSON
        with open(path_to_metadata, encoding='utf-8') as json_file:
            return json.load(json_file)
    except Exception:
        # best effort: log the traceback (replaces the original debug
        # print(e.args)) and carry on without metadata
        logger.exception('Error read meta-file for photo [%s]: %s'
                         % (path_to_media_file, path_to_metadata))
        return None
def get_time_from_name(path_to_media_file):
    """Extract a capture datetime encoded in the file name.

    Recognized patterns (years 2010-2029):
      * <YYYY><MM><DD><sep><HH><MM><SS> with optional '-' separators,
        e.g. '2019-05-06_07-08-09' or '20190506_070809'
      * <DD><MM><YYYY>_<HH><MM><SS>, e.g. '06052019_070809'

    :returns: POSIX timestamp (float, local time), or None if no pattern
        matches.
    """
    name = os.path.splitext(path_to_media_file)[0]
    # Year class is [12]; the original '[1,2]' also matched a literal comma,
    # which could crash int() on pathological names.
    matches = re.search(
        r'(20[12]\d)-?(\d{2})-?(\d{2})[_-](\d{2})-?(\d{2})-?(\d{2})', name
    )
    if matches:
        date = datetime(int(matches[1]), int(matches[2]), int(matches[3]),
                        int(matches[4]), int(matches[5]), int(matches[6]))
        return datetime.timestamp(date)
    # fall back to <Day><Month><Year> ordering (original comment said
    # <Day><Year><Month>, but the groups are day, month, year)
    matches = re.search(r'(\d{2})(\d{2})(20[12]\d)_(\d{2})(\d{2})(\d{2})', name)
    if matches:
        date = datetime(int(matches[3]), int(matches[2]), int(matches[1]),
                        int(matches[4]), int(matches[5]), int(matches[6]))
        return datetime.timestamp(date)
    return None
def processed_files(file_list, ignore_duplicate_message=False):
    """Restore each media file's timestamp from its name or Takeout metadata.

    Side effects: hides duplicate files (rename to '.name'), hides metafiles
    (via get_meta_data), records digests in the module-level
    hash_processed_photos, and sets each file's atime/mtime via os.utime.

    :parameter:
        file_list : list of str -- absolute/relative media file paths.
        ignore_duplicate_message : bool -- suppress the duplicate warning
            (used for the second, non-album pass).
    """
    media_extensions = {'mp4', 'MP4', 'jpg', 'JPG', '3gp', 'png', 'gif',
                        'CR2', 'JPEG', 'jpeg', 'MOV', 'mov'}
    silent_extensions = {'json', 'html'}  # expected non-media, no error logged
    for file in file_list:
        directory, name = get_file_dir_and_filename(file)
        if name.startswith('.'):
            continue  # already hidden (duplicate or metafile)
        ext = os.path.splitext(file)[1][1:]
        if ext not in media_extensions:
            if ext not in silent_extensions:
                logger.error('Unsupported file: %s', file)
            continue
        # renamed from 'hash' so the builtin is not shadowed
        file_hash = hash_file(file)
        if file_hash in hash_processed_photos:
            if not ignore_duplicate_message:
                logger.warning('The file has already been processed: %s %r',
                               file, hash_processed_photos[file_hash])
            os.rename(file, '%s/.%s' % (directory, name))
            continue
        hash_processed_photos[file_hash] = {'path': file}
        timestamp = get_time_from_name(file)
        # Always call get_meta_data: it also hides the metafile as a side
        # effect, even when the timestamp already came from the filename.
        metadata = get_meta_data(file)
        if metadata and not timestamp:
            creation_at = int(metadata['creationTime']['timestamp'])
            taken_at = int(metadata['photoTakenTime']['timestamp'])
            timestamp = min(taken_at, creation_at)
        if not timestamp:
            logger.error('Failed to get time of create a file: %s', file)
            continue
        formatted = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
        print('-- File: %s [%s]' % (file, formatted))
        os.utime(file, (timestamp, timestamp))
def processed(root, dirs, files, ignore_duplicate_message=False):
    """Process one directory from os.walk: restore the album timestamp from
    its metadata.json (if present), then fix the timestamps of its files.

    *dirs* is accepted (os.walk interface) but unused.
    """
    files = ['%s/%s' % (root, file) for file in files]
    print('Directory: %s' % root)
    metadata_path = '%s/%s.json' % (root, NAME_KEY_METADATA)
    if os.path.isfile(metadata_path):
        # Takeout album metadata is UTF-8 JSON
        with open(metadata_path, encoding='utf-8') as json_file:
            metadata = json.load(json_file)
        album_timestamp = int(metadata['albumData']['date']['timestamp'])
        os.utime(root, (album_timestamp, album_timestamp))
    if files:
        processed_files(files, ignore_duplicate_message)
def dir_is_album(path_to_dir):
    """Return True unless the directory is a year dump ('Photos from <year>')
    or the (localized) archive folder."""
    # re.escape guards against regex metacharacters in the translated label
    pattern = r'^(Photos from \d{4}|%s)$' % re.escape(NAME_KEY_ARCHIVE)
    return not re.match(pattern, os.path.basename(path_to_dir))
# First pass: albums only, so a photo present both in an album and in a
# "Photos from <year>" dump keeps its album copy (duplicates found later are
# hidden by processed_files).
for root, dirs, files in os.walk(PATH_TO_GOOGLE_PHOTO_DIR):
    if dir_is_album(root):
        processed(root, dirs, files)
# Second pass: year dumps and the archive; duplicates of album files are
# expected here, so their warnings are suppressed (last argument True).
for root, dirs, files in os.walk(PATH_TO_GOOGLE_PHOTO_DIR):
    if not dir_is_album(root):
        processed(root, dirs, files, True)
print("Success")
| {"/run.py": ["/bootstrap.py", "/logger.py", "/translate/ru.py"]} |
59,982 | MrG0d/GooglePhoto-SynologyMoments | refs/heads/master | /translate/en.py | NAME_KEY_METADATA = 'metadata'
# Label fragment appended to edited copies of a photo; matched against file
# names in run.py, so the value must stay exactly as exported.
# NOTE(review): presumably from an English-locale Takeout export -- confirm.
NAME_KEY_CHANGED = 'changed'
# Name of the archive folder in English-locale exports (see translate/ru.py
# for the Russian counterpart).
NAME_KEY_ARCHIVE = 'Archive'
| {"/run.py": ["/bootstrap.py", "/logger.py", "/translate/ru.py"]} |
59,983 | MrG0d/GooglePhoto-SynologyMoments | refs/heads/master | /bootstrap.py | import hashlib
import ntpath
import os
from typing import Iterator, Tuple, AnyStr, List, Optional
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
def path(rel):
    """Resolve *rel* (which may start with '/') against the project root."""
    relative = rel.lstrip('/')
    return os.path.join(ROOT_PATH, relative)
def walk_on_path(path) -> Iterator[Tuple[AnyStr, List[AnyStr], List[AnyStr]]]:
    """Yield (dirpath, dirnames, filenames) triples; thin wrapper over os.walk.

    NOTE(review): the parameter shadows the module-level ``path()`` helper,
    so resolve relative paths before calling this function.
    """
    return os.walk(path)
def get_file_dir_and_filename(path_to_file):
    """Split a path into (directory, filename), Windows-separator aware."""
    head, tail = ntpath.split(path_to_file)
    return head, tail
def hash_file(filename) -> Optional[str]:
    """Return the SHA-1 hex digest of *filename*, or None if it is missing.

    The file is streamed in fixed-size chunks, so arbitrarily large files
    are hashed with constant memory.
    """
    if not os.path.exists(filename):
        return None
    h = hashlib.sha1()
    with open(filename, 'rb') as file:
        # iter() with a b'' sentinel replaces the original's awkward
        # "chunk = 0; while chunk != b''" loop; larger chunks are faster
        for chunk in iter(lambda: file.read(65536), b''):
            h.update(chunk)
    # hex representation of the digest
    return h.hexdigest()
| {"/run.py": ["/bootstrap.py", "/logger.py", "/translate/ru.py"]} |
59,985 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/formations/squad.py | from ..geometric_mean import geometric_mean
from .formation import Formation
from ..units.solider import Solider
class Squad(Formation):
    """A formation of individual units that fights and takes damage as a group."""

    def __init__(self, units: list) -> None:
        super().__init__(units)

    @property
    def attack_success(self) -> float:
        """Geometric mean of the attack success of all active units."""
        return geometric_mean([unit.attack_success
                               for unit in self.active_subformations])

    @property
    def charged_units(self) -> list:
        """Active units whose weapons are charged and ready to fire."""
        return [unit for unit in self.active_subformations if unit.is_charged]

    @property
    def charged_units_count(self) -> int:
        """Number of active units ready to fire."""
        return len(self.charged_units)

    @property
    def damage(self) -> float:
        """Damage the squad can inflict right now (charged units only)."""
        return sum(subformation.damage for subformation in self.charged_units)

    @property
    def total_damage(self) -> float:
        """Potential damage of all active units, charged or not."""
        return sum(subformation.damage
                   for subformation in self.active_subformations)

    @property
    def power(self) -> float:
        """Squad strength: total potential damage, zeroed when inactive."""
        return self.is_active * self.total_damage

    def get_damage(self, damage: float) -> None:
        """Distribute *damage* evenly across all active units."""
        active_units = self.active_subformations
        if not active_units:
            # A fully defeated squad cannot absorb damage; the original
            # raised ZeroDivisionError here.
            return
        damage_per_unit = damage / len(active_units)
        for unit in active_units:
            unit.get_damage(damage_per_unit)

    def attack(self, enemy_squad: object) -> None:
        """Attack *enemy_squad* if this squad's success chance is higher.

        On success: inflict the charged damage, then recharge exactly the
        units that fired; soldiers additionally gain experience.
        """
        if self.attack_success <= enemy_squad.attack_success:
            print('Attack failed!')
            return
        # Snapshot damage and the firing units BEFORE resolving the hit so
        # recharge/experience applies to the units that actually fired.
        damage = self.damage
        charged_units = self.charged_units
        enemy_squad.get_damage(damage)
        print(f'\tSquad is attacking enemy squad. '
              f'Inflicted damage: {round(damage, 2)}')
        for unit in charged_units:
            unit.do_recharge()
            if isinstance(unit, Solider):
                unit.experience_gain()
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,986 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/rand.py | from random import Random
# Single shared RNG instance so tests can make runs deterministic by calling
# Rand.seed(...) (see tests/test_solider.py).
Rand = Random()
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,987 | igorbeor/py_lesson_battlefield | refs/heads/master | /tests/test_solider.py | from unittest import TestCase
from classes.units.solider import Solider
from classes.rand import Rand
class SoliderTest(TestCase):
    """Unit tests for Solider; the shared RNG is seeded for reproducibility."""

    def setUp(self):
        Rand.seed(123)
        self.solider = Solider()

    def test_experience(self):
        s = self.solider
        s.experience = 60
        self.assertEqual(s.experience, 50)  # clamped to the 0-50 cap
        s.experience = -10
        self.assertEqual(s.experience, 0)   # clamped at the floor

    def test_attack_success(self):
        s = self.solider
        # Probability must stay in [0, 1] at both extremes of health/experience.
        for experience, health in ((50, 100), (0, 0)):
            s.experience = experience
            s.health = health
            chance = s.attack_success
            self.assertGreaterEqual(chance, 0)
            self.assertLessEqual(chance, 1)

    def test_damage(self):
        s = self.solider
        s.experience = 0
        self.assertEqual(s.damage, 0.05)
        s.experience = 50
        self.assertEqual(s.damage, 0.55)

    def test_is_active(self):
        s = self.solider
        s.health = 10
        self.assertEqual(s.is_active, True)
        s.health = 0
        self.assertEqual(s.is_active, False)

    def test_get_damage(self):
        s = self.solider
        s.health = 100
        s.get_damage(120)
        self.assertEqual(s.health, 0)  # over-damage clamps at zero

    def test_experience_gain(self):
        s = self.solider
        s.experience = 50
        s.experience_gain()
        self.assertEqual(s.experience, 50)  # cannot grow past the cap
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,988 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/units/unit.py | from time import monotonic
class Unit:
    """Base battle unit: clamped health (0-100), clamped recharge (100-2000),
    and a millisecond-based recharge timer."""

    def __init__(self, health: float, recharge: int) -> None:
        self.health = health
        self.recharge = recharge
        self.recharge_timer = 0  # monotonic ms timestamp until which the unit is busy

    @property
    def health(self) -> float:
        return self._health

    @health.setter
    def health(self, value: int) -> None:
        # Clamp into [0, 100].
        self._health = min(max(value, 0), 100)

    @property
    def recharge(self) -> int:
        return self._recharge

    @recharge.setter
    def recharge(self, value: int) -> None:
        # Clamp into [100, 2000].
        self._recharge = min(max(value, 100), 2000)

    def do_recharge(self) -> None:
        """Start a recharge: busy until now + recharge/100 (scaled) ms."""
        now_ms = round(monotonic() * 1000)
        self.recharge_timer = now_ms + self.recharge / 100

    @property
    def is_charged(self) -> bool:
        """True once the recharge timer has elapsed."""
        return round(monotonic() * 1000) > self.recharge_timer
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,989 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/strategy.py | from .rand import Rand
from operator import attrgetter
# Registry of target-selection strategies, keyed by class name. Classes are
# auto-registered at definition time via the MetaRegistry metaclass.
STRATEGIES = {}


def register_class(target_class):
    """Record *target_class* in STRATEGIES under its class name."""
    STRATEGIES[target_class.__name__] = target_class


class MetaRegistry(type):
    """Metaclass that registers every strategy class on creation."""

    def __new__(meta, name, bases, class_dict):
        cls = type.__new__(meta, name, bases, class_dict)
        if name not in STRATEGIES:
            register_class(cls)
        return cls


class Random(metaclass=MetaRegistry):
    """Attack a uniformly random enemy squad."""

    @classmethod
    def choose(cls, targets: list) -> object:
        return Rand.choice(targets)


class Weekest(metaclass=MetaRegistry):
    """Attack the enemy squad with the LOWEST power.

    BUG FIX: previously used max(), which selected the *strongest* squad —
    the semantics of Weekest and Strongest were swapped.
    (Misspelled class name kept: it is the registry key used by configs.)
    """

    @classmethod
    def choose(cls, targets: list) -> object:
        return min(targets, key=attrgetter('power'))


class Strongest(metaclass=MetaRegistry):
    """Attack the enemy squad with the HIGHEST power.

    BUG FIX: previously used min(), which selected the *weakest* squad.
    """

    @classmethod
    def choose(cls, targets: list) -> object:
        return max(targets, key=attrgetter('power'))
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,990 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/units/vehicle.py | from ..rand import Rand
from .unit import Unit
from ..geometric_mean import geometric_mean
class Vehicle(Unit):
    """A vehicle crewed by 1-3 soldier operators.

    Health, attack chance and damage combine the hull state with the crew's
    state; incoming damage is split between hull (60%) and crew (40%).
    """

    def __init__(self, operators: list, health: float = 100,
                 recharge: int = 2000) -> None:
        super().__init__(health, recharge)
        self.operators = operators

    @property
    def operators(self) -> list:
        return self._operators

    @operators.setter
    def operators(self, value: list) -> None:
        # Enforce the 1-3 crew rule at assignment time.
        if not isinstance(value, list):
            raise ValueError('operators must be list')
        if not 1 <= len(value) <= 3:
            raise ValueError('operators count must be in range 1-3')
        self._operators = value

    @property
    def total_health(self) -> float:
        # Hull health plus the average health of the surviving crew.
        # NOTE(review): divides by len(active_operators) — raises
        # ZeroDivisionError with no active crew; confirm callers check
        # is_active first.
        return sum(operator.health for operator in self.active_operators) \
            / len(self.active_operators) + self.health

    @property
    def attack_success(self) -> float:
        # Scaled by hull health and the geometric mean of the crew's
        # individual success chances.
        return .5 * (1 + self.health / 100) * \
            geometric_mean([operator.attack_success
                            for operator in self.active_operators])

    @property
    def damage(self) -> float:
        # Base 0.1 plus 0.01 per point of crew experience.
        return .1 + sum(operator.experience / 100
                        for operator in self.active_operators)

    @property
    def is_active(self) -> bool:
        # Fighting requires an intact hull AND at least one live operator.
        return bool(self.health) and bool(self.active_operators)

    @property
    def active_operators(self) -> list:
        return [operator for operator in self.operators
                if operator.is_active]

    def get_damage(self, damage: float) -> None:
        """Apply incoming damage: 60% to the hull, 40% to the crew.

        If the hull is destroyed, the whole crew dies. Otherwise one random
        operator takes 20% and the remaining 20% is split evenly among the
        others; a lone operator takes the full 40%.

        BUG FIX: the operator list is snapshotted *before* hurting the
        random "loser". Previously active_operators was re-evaluated after
        the loser took damage, so if the loser died with exactly two crew
        members the split divided by zero (ZeroDivisionError), and with
        three it over-damaged the survivors.
        """
        self.health = max(0, self.health - damage * .6)
        if self.health == 0:
            for operator in self.active_operators:
                operator.health = 0
        if len(self.active_operators) > 1:
            loser = Rand.choice(self.active_operators)
            others = [operator for operator in self.active_operators
                      if operator is not loser]
            loser.get_damage(damage * .2)
            other_operator_damage = damage * .2 / len(others)
            for operator in others:
                operator.get_damage(other_operator_damage)
        elif len(self.active_operators) == 1:
            operator = self.active_operators[0]
            operator.get_damage(damage * .4)
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,991 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/battlefield.py | from .units.solider import Solider
from .units.vehicle import Vehicle
from .formations.squad import Squad
from .formations.army import Army
from random import randint, choice, shuffle
from .rand import Rand
from .strategy import STRATEGIES
from json import load
from typing import Optional
# Flavour-name pool used when generating random armies.
ARMY_NAMES = [
    'The Ruthless Ravagers',
    'The Cluster',
    'The Hell Hosts',
    'The Death Pack',
    'The Myriad',
    'The Eclipse',
    'The Gexamp',
    'The Maron',
    'The Akharid',
    'The Aflobax'
]
class Battlefield:
    """Owns the armies, runs the battle loop, and acts as the factory for
    all game objects (soldiers, vehicles, squads, armies)."""

    def __init__(self, armies: list) -> None:
        self.armies = armies

    def battle(self):
        """Run attack rounds until at most one army is still active.

        Bug fixes: banner typo "Stratrgy" -> "Strategy"; army.strategy is
        the strategy *name string*, so printing its __class__.__name__
        always showed 'str' — print the name itself.
        """
        print('The following armies take part in the battle:')
        for army in self.armies:
            print(f'\t{army.name} (Strategy: {army.strategy}).')
        move_counter = 0
        while len(self.active_armies) > 1:
            move_counter += 1
            print('Move: ', move_counter)
            for attacking_army in self.active_armies:
                # Each army strikes one uniformly chosen enemy army.
                enemy_army = Rand.choice([army for army in self.active_armies
                                          if army is not attacking_army])
                attacking_army.attack(enemy_army)
        print('Number of moves: ', move_counter)
        print('Winner: ', self.active_armies[0].name)

    @classmethod
    def create_new_buttlefield(cls) -> object:
        """Create a battlefield of 3 random armies with a fresh seed.

        (Misspelled name kept: existing callers depend on it.)
        Bug fix: shuffle a *copy* of ARMY_NAMES instead of shuffling and
        popping the module-level list, which permanently consumed the
        shared name pool across calls.
        """
        seed = randint(0, 1000)
        Rand.seed(seed)
        names = list(ARMY_NAMES)
        shuffle(names)
        armies = list()
        for i in range(3):
            strategy = choice(list(STRATEGIES.keys()))
            armies.append(cls.create_army(strategy, names[i]))
        return cls(armies)

    @classmethod
    def create_battlefield_from_config(cls) -> object:
        """Rebuild the last battlefield (armies + RNG seed) from
        config/config.json; raises FileNotFoundError if none was saved."""
        try:
            with open('config/config.json', 'r') as json_file:
                config = load(json_file)
        except FileNotFoundError:
            raise FileNotFoundError('Last game not found!')
        Rand.seed(config['seed'])
        armies = list()
        for army in config['armies']:
            armies.append(cls.create_army(army['strategy'],
                                          army['name'], army['squads']))
        return cls(armies)

    @property
    def active_armies(self) -> list:
        """Armies that still have at least one active unit."""
        return [army for army in self.armies if army.is_active]

    @classmethod
    def create_solider(cls) -> Solider:
        """Factory for a soldier with default stats."""
        return Solider()

    @classmethod
    def create_vehicle(cls, operator_count: Optional[int] = None) -> Vehicle:
        """Factory for a vehicle crewed by ``operator_count`` fresh soldiers
        (random crew of 1-3 when not given)."""
        if not operator_count:
            operator_count = randint(1, 3)
        operators = [cls.create_solider() for _ in range(operator_count)]
        return Vehicle(operators)

    @classmethod
    def create_squad(cls, units_data: Optional[dict] = None) -> Squad:
        """Factory for a squad.

        ``units_data`` is either falsy (generate 5-10 random units) or a
        config dict shaped {'units': [{'type': ..., ...}, ...]} — the
        previous ``Optional[list]`` annotation was wrong: the config branch
        indexes it with the 'units' key.
        """
        units = list()
        if not units_data:
            units_data = [{'type': choice(['Solider', 'Vehicle'])}
                          for _ in range(randint(5, 10))]
        else:
            units_data = units_data['units']
        for unit in units_data:
            if unit['type'] == 'Solider':
                units.append(cls.create_solider())
            elif unit['type'] == 'Vehicle':
                # Honor a configured crew size when present.
                operator_count = len(unit['operators']) if 'operators' in unit else None
                units.append(cls.create_vehicle(operator_count))
        return Squad(units)

    @classmethod
    def create_army(cls, strategy: str, name: str,
                    squads_data: Optional[list] = None) -> object:
        """Factory for an army from config squad data, or 2-5 random squads."""
        if not squads_data:
            squads_data = [None for i in range(randint(2, 5))]
        squads = [cls.create_squad(squad) for squad in squads_data]
        return Army(squads, strategy, name)
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,992 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/units/solider.py | from ..rand import Rand
from .unit import Unit
class Solider(Unit):
    """Infantry unit whose effectiveness grows with experience (0-50)."""

    def __init__(self, health: float = 100,
                 recharge: int = 500, experience: int = 0):
        super().__init__(health, recharge)
        self.experience = experience

    @property
    def experience(self) -> int:
        return self._experience

    @experience.setter
    def experience(self, value: int) -> None:
        # Clamp into [0, 50].
        self._experience = min(max(value, 0), 50)

    @property
    def attack_success(self) -> float:
        """Chance in [0, 1]; scales with health, experience, and a die roll."""
        roll = Rand.randint(50 + self.experience, 100) / 100
        return .5 * (1 + self.health / 100) * roll

    @property
    def damage(self) -> float:
        """Base 0.05 plus 0.01 per experience point."""
        return self.experience / 100 + .05

    @property
    def is_active(self) -> bool:
        return self.health > 0

    def get_damage(self, damage: float) -> None:
        """Reduce health by *damage*, never below zero."""
        remaining = self.health - damage
        self.health = remaining if remaining > 0 else 0

    def experience_gain(self) -> None:
        """Gain one experience point, capped at 50 by the setter."""
        self.experience = self.experience + 1
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
class Formation:
    """Base class for a group of sub-formations (an army of squads, a
    squad of units)."""

    def __init__(self, subformations: list) -> None:
        self.subformations = subformations

    @property
    def is_active(self) -> bool:
        """True while at least one sub-formation can still fight."""
        for member in self.subformations:
            if member.is_active:
                return True
        return False

    @property
    def active_subformations(self) -> list:
        """The sub-formations that are still able to fight."""
        return [member for member in self.subformations if member.is_active]

    def attack(self, enemy: object) -> None:
        """Concrete formations must implement their own attack logic."""
        raise NotImplementedError
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,994 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/formations/army.py | from .formation import Formation
from ..strategy import STRATEGIES
class Army(Formation):
    """A named collection of squads that attacks via a pluggable strategy."""

    def __init__(self, squads: list, strategy: str, name: str) -> None:
        super().__init__(squads)
        self.strategy = strategy  # strategy name: key into STRATEGIES
        self.name = name

    def attack(self, enemy: object) -> None:
        """Send the squad with the most charged units against the enemy
        squad picked by this army's strategy; prints and returns early if
        nothing can attack."""
        if not self.is_active:
            print(f"{self.name} cannot attack, because it has no active"
                  f" units")
            return
        best_squad = max(self.active_subformations,
                         key=lambda s: s.charged_units_count)
        if best_squad.charged_units_count == 0:
            print(f"{self.name} cannot attack, because it has no charged"
                  f" units.")
            return
        print(f"{self.name} is attacking {enemy.name}:")
        target = STRATEGIES[self.strategy].choose(enemy.active_subformations)
        best_squad.attack(target)
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,995 | igorbeor/py_lesson_battlefield | refs/heads/master | /classes/geometric_mean.py | from math import exp, fsum, log
def geometric_mean(xs: list) -> float:
    """Geometric mean of the positive numbers in *xs*: exp(mean(log x))."""
    log_sum = fsum(map(log, xs))
    return exp(log_sum / len(xs))
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
59,996 | igorbeor/py_lesson_battlefield | refs/heads/master | /app.py | from classes.battlefield import Battlefield
def main():
    """Entry point: restore the last saved battlefield and run the battle."""
    battlefield = Battlefield.create_battlefield_from_config()
    battlefield.battle()
# Run the game only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {"/classes/formations/squad.py": ["/classes/geometric_mean.py", "/classes/formations/formation.py", "/classes/units/solider.py"], "/tests/test_solider.py": ["/classes/units/solider.py", "/classes/rand.py"], "/classes/strategy.py": ["/classes/rand.py"], "/classes/units/vehicle.py": ["/classes/rand.py", "/classes/units/unit.py", "/classes/geometric_mean.py"], "/classes/battlefield.py": ["/classes/units/solider.py", "/classes/units/vehicle.py", "/classes/formations/squad.py", "/classes/formations/army.py", "/classes/rand.py", "/classes/strategy.py"], "/classes/units/solider.py": ["/classes/rand.py", "/classes/units/unit.py"], "/classes/formations/army.py": ["/classes/formations/formation.py", "/classes/strategy.py"], "/app.py": ["/classes/battlefield.py"]} |
60,021 | chomman/sensefarm | refs/heads/master | /src/application/contact.py | """
contact.py
"""
from google.appengine.api import users
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from flask import request, render_template, flash, url_for, redirect
from flask_cache import Cache
from application import app
from decorators import login_required, admin_required
from forms import ContactForm
from models import ContactModel
# Flask-Cache instance bound to the app (configured to use the
# App Engine Memcache API).
cache = Cache(app)
def new_contacts():
    """Render the new-contact page; on a valid POST persist the contact
    and redirect to the contact list."""
    contacts = ContactModel.query()
    form = ContactForm()
    if form.validate_on_submit():
        data = form.data
        contact = ContactModel(
            contact_title=data.get('contact_title'),
            contact_description=data.get('contact_description'),
            contact_company=data.get('contact_company'),
            contact_department=data.get('contact_department'),
            contact_position=data.get('contact_position'),
            contact_name=data.get('contact_name'),
            contact_address=data.get('contact_address'),
            contact_phone=data.get('contact_phone'),
            contact_mobile=data.get('contact_mobile'),
            contact_email=data.get('contact_email'),
            contact_call_time1=data.get('contact_call_time1'),
            contact_call_time2=data.get('contact_call_time2')
        )
        try:
            contact.put()
            contact_id = contact.key.id()
            flash(u'Contact %s successfully saved.' % contact_id, 'success')
            return redirect(url_for('list_contacts'))
        except CapabilityDisabledError:
            # Datastore is temporarily read-only (App Engine maintenance).
            flash(u'App Engine Datastore is currently in read-only mode.', 'info')
            return redirect(url_for('list_contacts'))
    return render_template('contact/new_contact.html', contacts=contacts, form=form)
def list_contacts():
    """List all contacts; also handles inline creation via ContactForm.

    BUG FIX: the post-save redirects targeted the endpoint 'list_contact'
    (singular), while new_contacts and edit_contact both redirect to
    'list_contacts' — the singular endpoint name would raise a BuildError
    at runtime (confirm against the URL map, which is not in this file).
    """
    contacts = ContactModel.query()
    form = ContactForm()
    if form.validate_on_submit():
        contact = ContactModel(
            contact_title = form.contact_title.data,
            contact_description = form.contact_description.data,
            contact_company = form.contact_company.data,
            contact_department = form.contact_department.data,
            contact_position = form.contact_position.data,
            contact_name = form.contact_name.data,
            contact_address = form.contact_address.data,
            contact_phone = form.contact_phone.data,
            contact_mobile = form.contact_mobile.data,
            contact_email = form.contact_email.data,
            contact_call_time1 = form.contact_call_time1.data,
            contact_call_time2 = form.contact_call_time2.data
        )
        try:
            contact.put()
            contact_id = contact.key.id()
            flash(u'Contact %s successfully saved.' % contact_id, 'success')
            return redirect(url_for('list_contacts'))
        except CapabilityDisabledError:
            # Datastore is temporarily read-only (App Engine maintenance).
            flash(u'App Engine Datastore is currently in read-only mode.', 'info')
            return redirect(url_for('list_contacts'))
    return render_template('contact/list_contact.html', contacts=contacts, form=form)
def edit_contact(contact_id):
    """Edit an existing contact; on a valid POST copy every form field
    onto the entity, save it, and redirect to the contact list."""
    contact = ContactModel.get_by_id(contact_id)
    form = ContactForm(obj=contact)
    if request.method == "POST":
        if form.validate_on_submit():
            field_names = (
                'contact_title', 'contact_description', 'contact_company',
                'contact_department', 'contact_position', 'contact_name',
                'contact_address', 'contact_phone', 'contact_mobile',
                'contact_email', 'contact_call_time1', 'contact_call_time2',
            )
            for field_name in field_names:
                setattr(contact, field_name, form.data.get(field_name))
            contact.put()
            flash(u'Contact %s successfully saved.' % contact_id, 'success')
            return redirect(url_for('list_contacts'))
    return render_template('edit_contact.html', contact=contact, form=form)
60,022 | chomman/sensefarm | refs/heads/master | /src/application/forms.py | """
forms.py
Web forms based on Flask-WTForms
See: http://flask.pocoo.org/docs/patterns/wtforms/
http://wtforms.simplecodes.com/
"""
from flaskext import wtf
from flaskext.wtf import validators
from wtforms.ext.appengine.ndb import model_form
from .models import ExampleModel, ContactModel
class ClassicExampleForm(wtf.Form):
    """Hand-written WTForms form mirroring ExampleModel's required fields."""
    example_name = wtf.TextField('Name', validators=[validators.Required()])
    example_description = wtf.TextAreaField('Description', validators=[validators.Required()])
# App Engine ndb model form example
# Form auto-generated from ExampleModel; both fields made required.
ExampleForm = model_form(ExampleModel, wtf.Form, field_args={
    'example_name': dict(validators=[validators.Required()]),
    'example_description': dict(validators=[validators.Required()]),
})
# Form auto-generated from ContactModel.
# NOTE(review): several referenced fields (contact_company,
# contact_department, contact_call_time1/2) are declared without call
# parentheses in ContactModel, so model_form may not see them as real
# datastore properties — confirm against models.py.
ContactForm = model_form(ContactModel, wtf.Form, field_args={
    'contact_title': dict(validators=[validators.Required()]),
    'contact_description': dict(validators=[validators.Required()]),
    'contact_company': dict(validators=[]),
    'contact_department': dict(validators=[]),
    'contact_position': dict(validators=[validators.Required()]),
    'contact_name': dict(validators=[validators.Required()]),
    'contact_address': dict(validators=[validators.Required()]),
    'contact_phone': dict(validators=[validators.Required()]),
    'contact_mobile': dict(validators=[validators.Required()]),
    'contact_email': dict(validators=[validators.Required()]),
    'contact_call_time1': dict(validators=[validators.Required()]),
    'contact_call_time2': dict(validators=[validators.Required()]),
})
| {"/src/application/forms.py": ["/src/application/models.py"]} |
60,023 | chomman/sensefarm | refs/heads/master | /src/application/secret_keys.py | # CSRF- and Session keys
# NOTE(review): hard-coded secrets committed to source control — these should
# be rotated and loaded from the environment or a secrets manager; confirm
# with the team before deploying.
CSRF_SECRET_KEY = 'KVxw41wZtncUM1T2cBMohWcX'
SESSION_KEY = 'b9xnbrv3A9ifjGF1aCtw284I'
| {"/src/application/forms.py": ["/src/application/models.py"]} |
60,024 | chomman/sensefarm | refs/heads/master | /src/application/page.py | """
company.py
URL route handlers
Note that any handler params must match the URL route params.
For example the *say_hello* handler, handling the URL route '/hello/<username>',
must be passed *username* as the argument.
"""
from google.appengine.api import users
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from flask import request, render_template, flash, url_for, redirect
from flask_cache import Cache
from application import app
from decorators import login_required, admin_required
from forms import ExampleForm
from models import ExampleModel
# Flask-Cache instance bound to the app (configured to use the
# App Engine Memcache API).
cache = Cache(app)
def home():
    """Redirect the site root to the default company page."""
    return redirect(url_for('company'))
    #return render_template('index.html', category='company', action='company/ceo', title="Sensefarm")
def company(pagename = 'ceo'):
    """Render a company sub-page by template name.

    NOTE(review): pagename is interpolated directly into the template path —
    if it arrives from the URL unvalidated it could reach unintended
    templates; confirm the route constrains the allowed values.
    """
    return render_template('company/'+pagename+'.html', category='company', action='company/'+pagename)
def service(pagename = 'naming'):
    """Render a service sub-page by template name.

    NOTE(review): pagename flows into the template path unvalidated —
    confirm the route constrains the allowed values.
    """
    return render_template('service/'+pagename+'.html', category='service', action='service/'+pagename)
def process(pagename = 'step'):
    """Render a process sub-page by template name.

    NOTE(review): pagename flows into the template path unvalidated —
    confirm the route constrains the allowed values.
    """
    return render_template('process/'+pagename+'.html', category='process', action='process/'+pagename)
| {"/src/application/forms.py": ["/src/application/models.py"]} |
60,025 | chomman/sensefarm | refs/heads/master | /src/application/models.py | """
models.py
App Engine datastore models
"""
from google.appengine.ext import ndb
class ExampleModel(ndb.Model):
    """Example Model"""
    example_name = ndb.StringProperty(required=True)
    example_description = ndb.TextProperty(required=True)
    added_by = ndb.UserProperty()  # App Engine user who created the entity
    timestamp = ndb.DateTimeProperty(auto_now_add=True)  # set once on first put()
class ContactModel(ndb.Model):
    """Contact Model.

    Datastore entity for a contact request submitted through ContactForm.

    BUG FIX: contact_company, contact_department, contact_call_time1 and
    contact_call_time2 previously assigned the property *class* itself
    (missing call parentheses), so they were never real datastore
    properties and could not be stored or picked up by model_form.
    """
    contact_title = ndb.StringProperty(required=True)
    contact_description = ndb.TextProperty(required=True)
    contact_company = ndb.StringProperty()
    contact_department = ndb.StringProperty()
    contact_position = ndb.StringProperty(required=True)
    contact_name = ndb.StringProperty(required=True)
    contact_address = ndb.StringProperty(required=True)
    contact_phone = ndb.StringProperty(required=True)
    contact_mobile = ndb.StringProperty(required=True)
    contact_email = ndb.StringProperty(required=True)
    contact_call_time1 = ndb.DateTimeProperty()
    contact_call_time2 = ndb.DateTimeProperty()
    timestamp = ndb.DateTimeProperty(auto_now_add=True)  # set once on first put()
| {"/src/application/forms.py": ["/src/application/models.py"]} |
60,035 | nhhai196/Ticket-Reassignment | refs/heads/main | /ticket_reassign.py | import sys
import datastructure
import csv
import numpy as np
import cardinalpivot as cp
import ordinalpivot as op
import scarfpivot as sp
import time
import numpy as np
import iterativerounding as ir
import random
#argv[1]: csv file name in the following format
#row 1: number of families, number of games
#row f+1: budget, then decreasing preference order over bundles, of family f
#row #family + 2: capacity of each game
#the quantity in the bundle is the alpha (unscaled)
#argv[2]: epsilon for price
# Parse the problem instance; see the argv comments above for the CSV format.
numF, numG, bundle2rank, bundlelist, fb2col, budget, capacity, numcol, A = datastructure.init(sys.argv[1])
#numF: number of family
#numG: number of games
#bundle2rank: bundle maps to the rank, each family has one dictionary
#bundlelist: preference list over bundles, each family has one list
#fb2col: map (family,bundle) to the column index of matrix A
#budget: budget[f-1] is the budget of family f
#capacity: capacity[g-1] is the capacity of game g
#numcol: number of columns for matrix A
#A: the Scarf matrix of size (numF+numG) x numcol, columns are in alphabetic order
print('numF: ' + str(numF))
print('numG: ' + str(numG))
print('bundle2rank:\n' + str(bundle2rank))
print('bundlelist:\n' + str(bundlelist))
print('fb2col:\n' + str(fb2col))
print('budget: ' + str(budget))
print('capacity: ' + str(capacity))
print('numcol: ' + str(numcol))
print('matrix A:\n' + str(A))
# Seed the contract list with one slack contract per family and per game;
# negative identifiers distinguish slacks from real (family, bundle) pairs.
clist = [] #contract list
for i in range(numF):
    clist.append((-1*(i+1),(),[]))
for i in range(numG):
    clist.append((-1*(i+1+numF),(),[]))
#print("clist = ")
#print(clist)
# Test cardinal pivot
#c = (1, bundlelist[1][1], [0,0])
#fbc = (c[0], c[1])
#print(fbc)
# Right-hand side: a random demand of 1-3 per family, then the fixed
# game capacities.
b = [random.randint(1,3) for i in range(numF)]
#b = [1 for i in range(numF)]
b = b + capacity
print("b =" + str(b))
#newCB, oldc, newA, newb = cp.cardinalpivot(clist, c, A, b, fb2col)
#print(newCB)
#print(oldc)
#print(newA)
#print(newb)
#a = np.zeros([5 * 10**3, 10**6])
# Test ordinal pivot
print("Init ordinal basis:")
c, initOB = op.initordinalbasis(A, numF, numG, fb2col)
#print(initOB)
rmins = op.getallrowmins(initOB, numF, bundle2rank)
#for i in range(len(rmins)):
#    print(rmins[i])
ordlist = datastructure.genordlist(A, numF, bundle2rank, bundlelist, fb2col)
print("matrix A:")
#print(A)
print("ordlist:")
#print(ordlist)
# ordlist in the form (f,b)
col2fb = {value : key for (key, value) in fb2col.items()}
#print(col2fb)
# Translate each column-index preference list into (family, bundle) pairs.
newordlist = []
for l in ordlist:
    temp = list(map(lambda x: col2fb[x], l))
    newordlist.append(temp)
#print("new")
#print(newordlist)
#clist, newc, newrmins = op.ordinalpivot(initOB, oldc, rmins, numF, numG, bundle2rank, newordlist, fb2col)
#print(clist)
#print(datastructure.weaklyprefer((1,(2,0),[0,0]), (1,(2,0),[0.5,0]), 1, numF, bundle2rank))
# Run (and time) Scarf's algorithm to obtain a fractional dominating solution.
start = time.time()
eps = 0.1
x = sp.scarfpivot(eps, clist, initOB, A, b, c, rmins, numF, numG, bundle2rank, newordlist, fb2col, budget, bundlelist)
end = time.time()
print(end - start)
## Iterative Rounding
# remove the slack variable
start = time.time()
print("+++++++++++++ Iterative Rounding +++++++++++++++++")
numrow = numF + numG
A = A[:, numrow:]
print("A= " + str(A))
print("b = "+ str(b))
realb = ir.mul(A, x)
#print(realb)
tol = 10**(-6)
xBar = ir.iterativerounding(A, x, b, tol, numF, numG)
print("xBar = " + str(xBar))
end = time.time()
print("Elapsed time = " + str(end - start))
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,036 | nhhai196/Ticket-Reassignment | refs/heads/main | /scarfpivot.py | ######################## Scarf Pivot ############################
import sys
import datastructure as ds
import csv
import numpy as np
import cardinalpivot as cp
import ordinalpivot as op
import time
import correctness as cor
def scarfpivot(eps, CB, OB, A, b, c, rmins, numf, numg, fp, ordlist, fb2col, budget, budlist):
    """Scarf's algorithm: alternate cardinal and ordinal pivots until the
    slack contract of column 0 leaves a basis, then read off the dominating
    fractional solution and the competitive-equilibrium prices.

    As used below: CB/OB are the cardinal/ordinal bases, A the Scarf
    matrix, b the right-hand side, c the entering contract, rmins the
    per-row ordinal minima, fp bundle->rank maps, ordlist per-row
    preference orders, fb2col the (family, bundle) -> column map, and
    budget/budlist family budgets and bundle lists. Returns the solution
    vector x (slack columns stripped by gotdomsol).
    """
    print("+++++++++++++++++++++++++++++++++ Scarf Pivoting ++++++++++++++++++++++++++++++++++")
    count = 0   # total pivot rounds performed
    fcount = 0  # ordinal pivots whose leaving row belonged to a family
    while True:
        #if (count%50 == 0):
        #    print("============================= Round " + str(count +1) + " =============================")
        #start = time.time()
        CB, newc, A, b = cp.cardinalpivot(CB, c, A, b, fb2col)
        count = count + 1
        #end = time.time()
        #print("card time : " + str(end - start))
        if count % 100 ==99:
            print("round "+str(count+1))
        # Terminate when the cardinal pivot brings column 0 (the
        # artificial slack) back in.
        if (fb2col[ds.contract2fb(newc)] == 0):
            #x = np.linalg.solve(A,b)
            #print("!!!!!!!! x = " + str(x))
            #print(CB)
            #print(b)
            #print(OB)
            #print(A)
            #print("Card: done")
            #if cor.isordbasis(eps, OB, numf, numg, fp, ordlist, fb2col, budget):
            #    print("@@@@@@@@@@@@@@@@@@@@ Sanity check passed")
            #else:
            #    print("@@@@@@@@@@@@@@@@@@@@ Sanity check failed")
            break
        #start = time.time()
        OB, c, rmins, istar = op.ordinalpivot(eps, OB, newc, rmins, numf, numg, fp, ordlist, fb2col, budget)
        #end = time.time()
        #print("ord time: " + str(end - start))
        if (istar <numf):
            fcount += 1
        # Symmetric termination check for the ordinal pivot.
        if (fb2col[ds.contract2fb(c)] == 0):
            #x = np.linalg.solve(A,b)
            #print("!!!!!!!! x = " + str(x))
            #print("Ord: done")
            break
        #if count == 5:
        #    break
    #print("count = " + str(count))
    #print("fcount = " + str(fcount))
    x = gotdomsol(CB, b, fb2col)
    #print(OB)
    CEprice = getCEprice(OB, numg)
    print("Found a dominating solution:")
    print(roundint(x))
    print("CE price = " + str(CEprice))
    # Sanity check
    #print(cor.ispseudoCE(x, CEprice, eps, fb2col, ordlist, budlist, numf, numg, budget))
    #print("Length of x = " + str(len(x)))
    return x
# Extract the dominating solution computed by scarfpivot.
def gotdomsol(basis, b, fb2col):
    """Scatter the basic values b into a full-length vector and drop slacks.

    basis[i] carries value b[i]; fb2col maps each contract to its column.
    The first len(basis) entries (the slack columns) are stripped.
    """
    slack_count = len(basis)
    full = [0] * len(fb2col)
    for contract, value in zip(basis, b):
        full[fb2col[ds.contract2fb(contract)]] = value
    return full[slack_count:]
# Recover the competitive-equilibrium price vector from the ordinal basis.
def getCEprice(OB, numg):
    """For each game g, the CE price is the minimum price quoted by a
    non-slack basis contract demanding a positive number of seats at g;
    games with no such contract keep price 0."""
    price = [0] * numg
    for g in range(numg):
        quoted = [c[2][g] for c in OB if (not ds.isslack(c)) and c[1][g] > 0]
        if quoted:
            price[g] = min(quoted)
    return price
def roundint(x):
    """Round every entry of x to two decimal places (display helper)."""
    return [round(entry, 2) for entry in x]
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,037 | nhhai196/Ticket-Reassignment | refs/heads/main | /datastructure.py | import collections
import csv
import numpy
import sys
import openpyxl as op
#################### MAIN ###########################
#numF = 2;
#s1 = list(1, 2, 4)
#s2 = list(3, 5, 6)
#p1 = list(1, 1, 1)
#p2 = list(0, 0, 0)
#c111 = tuple(1, s1, p1)
#c211 = tuple(2, s1, p1)
#c121 = tuple(1, s2, p1)
#c221 = tuple(2, s2, p1)
#####################################################
def strictlyprefer(a, b, row, numf, fp):
    """Return True iff `row` strictly prefers contract a over contract b.

    Rows 0..numf-1 are family rows and use the family preference table fp;
    the remaining rows are game rows.
    """
    if row >= numf:
        return gstrictlyprefer(a, b, row, numf)
    return fstrictlyprefer(a, b, row, numf, fp)
# Preference of a game row (the row index g is offset by +numf).
def gstrictlyprefer(a, b, g, numf):
    """Return True iff game row g strictly prefers contract a to contract b.

    Ordering (best to worst): other rows' slacks beat non-slacks; among
    non-slacks, zero-coefficient contracts beat positive-coefficient ones;
    positive ones are compared by the price quoted at this game (higher
    price preferred); exact ties fall to breaktie().  The row's own
    (active) slack is always least preferred.
    """
    tol = 10**(-6)
    sa = isslack(a)
    sb = isslack(b)
    # Identical contracts: not strictly preferred.
    if (a == b):
        return False
    if (a[0] == -g-1): # a is this row's own (active) slack: least preferred
        return False
    elif (b[0] == -g-1): # b is this row's own slack, so a beats it
        return True
    # neither is this row's own slack
    if (sa and sb):
        return (a[0] > b[0])   # between foreign slacks, larger id wins
    elif (sa and (not sb)):
        return True
    elif ((not sa) and sb):
        return False
    else: # both are non-slack contracts
        za = iszerocoeff(a, g, numf)
        zb = iszerocoeff(b, g, numf)
        if (za and zb):
            return breaktie(a,b)
        elif (za and (not zb)):
            return True
        elif ((not za) and zb):
            return False
        else: # both demand seats at this game: compare prices
            g = g - numf   # switch to the 0-based game index into the price vector
            if not isequal(a[2][g], b[2][g]):
                # prices differ by more than tol, so exactly one branch fires
                if (a[2][g] + tol > b[2][g]): # a quotes the higher price
                    return True
                elif (a[2][g] < b[2][g] + tol):
                    return False
            else: # prices equal within tolerance: break tie deterministically
                return breaktie(a,b)
# Preference of a family row f.
def fstrictlyprefer(a, b, f, numf, fp):
    """Return True iff family row f strictly prefers contract a to contract b.

    Same slack / zero-coefficient ordering as the game rows; among the
    family's own contracts, a better bundle rank (smaller fp value) wins,
    equal ranks are compared by money spent (cheaper preferred), and exact
    ties fall to breaktie().
    """
    tol = 10**(-6)
    sa = isslack(a)
    sb = isslack(b)
    # Identical contracts: not strictly preferred.
    if (a == b):
        return False
    if (a[0] == -f-1): # a is this row's own (active) slack: least preferred
        return False
    elif (b[0] == -f-1): # b is this row's own slack, so a beats it
        return True
    # neither is this row's own slack
    if (sa and sb):
        return (a[0] > b[0])   # between foreign slacks, larger id wins
    elif (sa and (not sb)):
        return True
    elif ((not sa) and sb):
        return False
    else: # both are non-slack contracts
        za = iszerocoeff(a, f, numf)
        zb = iszerocoeff(b, f, numf)
        if (za and zb):
            return breaktie(a,b)
        elif (za and (not zb)):
            return True
        elif ((not za) and zb):
            return False
        else: # both belong to family f: compare bundle ranks (lower = better)
            if (fp[f][a[1]] < fp[f][b[1]]):
                return True
            elif (fp[f][a[1]] > fp[f][b[1]]):
                return False
            else: # same rank: the contract costing less money is preferred
                msa = dotproduct(a[1], a[2]) # money spent
                msb = dotproduct(b[1], b[2])
                if not isequal(msa, msb):
                    # costs differ by more than tol, so exactly one branch fires
                    if (msa + tol > msb):
                        return False
                    elif (msa < msb + tol):
                        return True
                else:
                    return breaktie(a,b)
# Weak preference relation.
def weaklyprefer(a, b, row, numf, fp):
    """Equal contracts weakly prefer each other; otherwise defer to
    the strict preference of `row`."""
    return isequalcon(a, b) or strictlyprefer(a, b, row, numf, fp)
# dot product of two vectors with the same length
def dotproduct(x, y):
    """Return the inner product of x and y.

    The inputs are assumed to have the same length (zip stops at the
    shorter one instead of raising, which is the only behavioral change
    from the old index-based loop).
    """
    return sum(u * v for u, v in zip(x, y))
# Deterministic tie-break between two contracts a and b.
def breaktie(a, b):
    """Order first by row id, then lexicographically by bundle,
    then (for identical bundles) by price vector."""
    if a[0] != b[0]:
        return a[0] < b[0]
    if a[1] == b[1]:
        return breaktievector(a[2], b[2])
    return breaktievector(a[1], b[1])
# Lexicographic tie-break between two equal-length vectors.
# @a: the first bundle (or price vector)
# @b: the second bundle (or price vector)
def breaktievector(a, b):
    """Return True iff a precedes b lexicographically, treating entries
    that differ by at most tol as equal; fully equal vectors yield False."""
    tol = 10**(-6)
    n = len(a)
    for i in range(n):
        if not isequal(a[i], b[i]):
            # entries differ by more than tol, so exactly one branch fires
            if (a[i] < b[i] + tol):
                return True
            elif (a[i] + tol > b[i]):
                return False
    # vectors equal within tolerance everywhere
    return False
# Slack contracts are marked by a negative row id in the first field.
def isslack(c):
    return not c[0] >= 0
# For non-slack contracts only.
def iszerocoeff(a, row, numf):
    """True iff contract a has a zero coefficient in constraint `row`.

    Family rows (row < numf): zero unless the contract belongs to that
    family.  Game rows: zero iff the bundle requests no seats there.
    """
    if row < numf:
        return a[0] != row
    return a[1][row - numf] == 0
# Floating-point equality within a fixed absolute tolerance.
def isequal(a, b):
    tol = 10**(-6)
    diff = a - b
    return -tol <= diff <= tol
# Element-wise price-vector equality within tolerance.
def isequalprice(a, b):
    """True iff every entry of a matches the corresponding entry of b
    (entries of b beyond len(a) are ignored, as before)."""
    return all(isequal(a[i], b[i]) for i in range(len(a)))
# Contract equality: same row id, same bundle, same price within tolerance.
def isequalcon(c, d):
    same_row = c[0] == d[0]
    same_bundle = c[1] == d[1]
    return same_row and same_bundle and isequalprice(c[2], d[2])
# Parse a CSV problem instance.
def init(str):
    """Parse a CSV instance (format written by gen_rand_csv.py / m2py.py).

    Line 1: "numF,numG".  Lines 2..numF+1: one family per line -- the
    first field seeds the family budget, the remaining fields are bundles
    given as comma-separated seat counts, in preference order.  Last
    line: game capacities.

    NOTE(review): the parameter shadows the builtin `str`; it is the CSV
    file path.

    Returns (numF, numG, bundle2rank, bundlelist, fb2col, budget,
    capacity, numcol, A) where A is the Scarf matrix with the
    numF+numG slack columns first and contract columns in sorted-bundle
    order.
    """
    bundle2rank = [] #bundle maps to the rank, each family has one dictionary
    bundlelist = [] #preference list over bundles, each family has one list
    sortedbundle = [] #bundles of interest in increasing alphabetic order, one list per family
    fb2col = {} #map (family,bundle) to the column index of matrix A
    numcol = 0
    #initialization and create slack contracts
    with open(str) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # header row: numF, numG
                item_count = 0
                for item in row:
                    if item_count == 0:
                        numF = int(float(item)) #get number of families
                        budget = [0]*numF #create budget list
                        item_count = 1
                    elif item_count == 1:
                        numG = int(float(item)) #get number of games
                        break
                line_count += 1
            elif line_count < numF+1:
                # family row: budget seed, then the family's bundles in preference order
                item_count = 0
                for item in row:
                    if item_count == 0:
                        budget[line_count-1] = float(item)/(2 * (float(item) + 3)) #budget[f-1] denotes the budget of family f
                        item_count = 1
                        bundle2rank.append({})
                        bundlelist.append([])
                        unsortedlist = []
                    elif item_count > 0:
                        if item.strip(): #filter empty string
                            intlist = [int(float(i)) for i in item.split(',')] #convert string to int
                            unsortedlist.append(intlist)
                            inttuple = tuple(intlist)
                            bundle2rank[line_count-1][inttuple] = item_count #bundle2rank[f-1] maps from a tuple bundle to its (1-based) rank for family f
                            bundlelist[line_count-1].append(inttuple)
                            item_count += 1
                # contract columns of A are laid out in sorted-bundle order
                unsortedlist.sort()
                sortedbundle.append(unsortedlist)
                for item in unsortedlist:
                    inttuple = tuple(item)
                    fb2col[(line_count-1,inttuple)] = numF + numG + numcol
                    numcol += 1
                line_count += 1
            else:
                capacity = [int(float(i)) for i in row if i.strip()] #capacity[g-1] is the capacity of game g
    #now we have bundle2rank, bundlelist, fb2col, capacity, and budget
    A = numpy.zeros((numF+numG,numF+numG+numcol))
    #create slack columns; in fb2col a slack key is (negative id, ())
    for i in range(numF):
        fb2col[(-1*(i+1), ())] = i
        A[i,i] = 1
    for i in range(numG):
        fb2col[(-1*(i+1+numF), ())] = i+numF
        A[i+numF,i+numF] = 1
    col_count = numF + numG
    for i in range(numF):
        for j in sortedbundle[i]:
            A[i, col_count] = 1    # family i buys bundle j in this column
            game_count = 0
            for k in j:
                A[numF + game_count, col_count] = k    # seats demanded per game
                game_count += 1
            col_count += 1
    return numF, numG, bundle2rank, bundlelist, fb2col, budget, capacity, numcol, A
#print(A)
def init_v2(filename,sbud,extra,cap,ub):
    """Build an instance directly from an xlsx workbook ("Sheet2"),
    enumerating every bundle of at most `ub` games for each family.

    filename -- xlsx path
    sbud     -- budget per senior member (non-seniors count as 1)
    extra    -- extra seats per chosen game (alpha offset)
    cap      -- common game capacity (appended to b once per game)
    ub       -- maximum number of games in a bundle

    Returns (fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol,
    A, b, pglist, famsize) where pglist[f][j] is family f's j-th most
    favorite game and blist[f] is family f's budget.
    """
    book = op.load_workbook(filename)
    sheet = book.get_sheet_by_name("Sheet2")
    row_num = sheet.max_row
    col_num = sheet.max_column
    gnum = 0
    fnum = row_num-1
    # count games: row 2 is filled up to the last game column
    for j in range(1,col_num):
        if sheet.cell(row=2,column=j).value:
            gnum += 1
        else:
            break
    bundle2rank = [] #bundle maps to the rank, each family has one dictionary
    bundlelist = [] #preference list over bundles, each family has one list
    sortedbundle = [] #bundles of interest in increasing alphabetic order, one list per family
    fb2col = {} #map (family,bundle) to the column index of matrix A
    pglist = [] #pglist[f][j] denotes family f's j-th most favorite game
    blist = [] #blist[f] denotes the budget of family f
    famsize = [] #famsize[f] denotes the size of family f
    numcol = 0
    b = []
    for i in range(2,row_num+1): #family i-2
        rank = []
        bundle2rank.append({})
        bundlelist.append([])
        unsortedlist = []
        for j in range(1,gnum+1):
            rank.append(sheet.cell(row=i,column=j).value)
        pglist.append(rank)
        fsize = sheet.cell(row=i,column=gnum+2).value   # family size
        snum = sheet.cell(row=i,column=gnum+4).value    # number of seniors
        gsize = sheet.cell(row=i,column=gnum+6).value   # group size (family-row RHS)
        b.append(gsize)
        famsize.append(fsize)
        item_count = 0
        # enumerate every subset of games via bitmask j, most preferred first
        for j in range(pow(2,gnum)-1, 0, -1):
            bundle = [0]*gnum
            bilist = [int(k) for k in list('{0:0b}'.format(j))]
            if sum(bilist) <= ub:   # keep bundles with at most ub games
                bisize = len(bilist)
                for k in range(0,gnum-bisize):
                    bilist.insert(0,0)   # left-pad the bitmask to gnum digits
                for k in range(0,gnum):
                    if bilist[k]==1:
                        bundle[rank[k]-1] = fsize + extra
                unsortedlist.append(bundle)
                inttuple = tuple(bundle)
                bundle2rank[i-2][inttuple] = item_count+1 #bundle2rank[f-1] maps from a tuple bundle to its (1-based) rank for family f
                bundlelist[i-2].append(inttuple)
                item_count += 1
        blist.append(fsize-snum + snum*sbud)
        # contract columns of A are laid out in sorted-bundle order
        unsortedlist.sort()
        sortedbundle.append(unsortedlist)
        for item in unsortedlist:
            inttuple = tuple(item)
            fb2col[(i-2,inttuple)] = fnum + gnum + numcol
            numcol += 1
    b = b + [cap]*gnum
    #initialization and create slack contracts
    numF = fnum
    numG = gnum
    A = numpy.zeros((numF+numG,numF+numG+numcol))
    #slack columns; in fb2col a slack key is (negative id, ())
    for i in range(numF):
        fb2col[(-1*(i+1), ())] = i
        A[i,i] = 1
    for i in range(numG):
        fb2col[(-1*(i+1+numF), ())] = i+numF
        A[i+numF,i+numF] = 1
    col_count = numF + numG
    for i in range(numF):
        for j in sortedbundle[i]:
            A[i, col_count] = 1    # family i buys bundle j in this column
            game_count = 0
            for k in j:
                A[numF + game_count, col_count] = k    # seats demanded per game
                game_count += 1
            col_count += 1
    return fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol, A, b, pglist, famsize
def init_v3(filename,sbud,cap):
    """Build an instance from an xlsx workbook where the bundle lists are
    precomputed in "Sheet2" (columns gnum+8 onward, one comma-separated
    bundle per cell).

    filename -- xlsx path
    sbud     -- budget per senior (currently unused: blist is fsize**2,
                see the commented alternatives below)
    cap      -- common game capacity

    Returns (fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol,
    A, b, pglist, famsize, id2fam); pglist[f] ranks the games for family
    f (best first) and id2fam[i] lists the Sheet1 families with group id
    i+1.
    """
    book = op.load_workbook(filename)
    sheet = book.get_sheet_by_name("Sheet2")
    row_num = sheet.max_row
    col_num = sheet.max_column
    gnum = 0
    fnum = row_num-1
    # count games: row 2 is filled up to the last game column
    for j in range(1,col_num):
        if sheet.cell(row=2,column=j).value:
            gnum += 1
        else:
            break
    bundle2rank = [] #bundle maps to the rank, each family has one dictionary
    bundlelist = [] #preference list over bundles, each family has one list
    sortedbundle = [] #bundles of interest in increasing alphabetic order, one list per family
    fb2col = {} #map (family,bundle) to the column index of matrix A
    pglist = [] #pglist[f] lists the games in family f's preference order
    blist = [] #blist[f] denotes the budget of family f
    famsize = [] #famsize[f] denotes the size of family f
    numcol = 0
    b = []
    for i in range(2,row_num+1): #family i-2
        rank = []
        bundle2rank.append({})
        bundlelist.append([])
        unsortedlist = []
        rank1 = []
        # sheet stores a score per game; sort games by score, best first
        for j in range(1,gnum+1):
            rank.append([j,sheet.cell(row=i,column=j).value])
        rank.sort(key = lambda x: x[1], reverse=True)
        for j in range(1,gnum+1):
            rank1.append(rank[j-1][0])
        pglist.append(rank1)
        fsize = sheet.cell(row=i,column=gnum+2).value   # family size
        snum = sheet.cell(row=i,column=gnum+4).value    # number of seniors
        gsize = sheet.cell(row=i,column=gnum+6).value   # group size (family-row RHS)
        b.append(gsize)
        famsize.append(fsize)
        item_count = 0
        # bundles are precomputed in columns gnum+8..col_num, best first
        for j in range(gnum+8,col_num+1):
            item = sheet.cell(row=i,column=j).value
            # comprehension variable i is scoped to the comprehension and
            # does not clobber the outer row index i
            intlist = [int(float(i)) for i in item.split(',')] #convert string to int
            unsortedlist.append(intlist)
            inttuple = tuple(intlist)
            bundle2rank[i-2][inttuple] = item_count #bundle2rank[f-1] maps from a tuple bundle to its (0-based) rank for family f
            bundlelist[i-2].append(inttuple)
            item_count += 1
        # earlier budget formulas, kept for reference:
        #blist.append((fsize-snum + snum*sbud)/(2 * (fsize + 3)))
        #blist.append(fsize * fsize /(2 * (fsize + 3)))
        #blist.append(fsize * (2 *(fsize +3)))
        blist.append(fsize * fsize)
        # contract columns of A are laid out in sorted-bundle order
        unsortedlist.sort()
        sortedbundle.append(unsortedlist)
        for item in unsortedlist:
            inttuple = tuple(item)
            fb2col[(i-2,inttuple)] = fnum + gnum + numcol
            numcol += 1
    b = b + [cap]*gnum
    #initialization and create slack contracts
    numF = fnum
    numG = gnum
    A = numpy.zeros((numF+numG,numF+numG+numcol))
    #slack columns; in fb2col a slack key is (negative id, ())
    for i in range(numF):
        fb2col[(-1*(i+1), ())] = i
        A[i,i] = 1
    for i in range(numG):
        fb2col[(-1*(i+1+numF), ())] = i+numF
        A[i+numF,i+numF] = 1
    col_count = numF + numG
    for i in range(numF):
        for j in sortedbundle[i]:
            A[i, col_count] = 1    # family i buys bundle j in this column
            game_count = 0
            for k in j:
                A[numF + game_count, col_count] = k    # seats demanded per game
                game_count += 1
            col_count += 1
    # group id -> list of 0-based Sheet1 family indices
    id2fam = [ [] for _ in range(row_num-1) ]
    sheet = book.get_sheet_by_name("Sheet1")
    fam_row_num = sheet.max_row
    for j in range(2,fam_row_num+1):
        id2fam[sheet.cell(row=j,column=gnum+6).value-1].append(int(float(j))-1)
    return fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol, A, b, pglist, famsize, id2fam
# Build ordlist, where ordlist[i] ranks the columns of A for row i
# (most preferred first; i is 0-based).
def genordlist(A, numf, fp, bundlelist, fb2col):
    """Each row's ranking has four segments, in order:
    1. the other rows' slack columns,
    2. contract columns with a zero coefficient in this row,
    3. this row's own contracts (a family's bundles in preference order;
       for a game, every contract demanding seats at it),
    4. the row's own slack column, least preferred.
    """
    nrows = len(A)
    ordlist = []
    for i in range(nrows):
        row = A[i]
        ranking = [j for j in range(nrows) if j != i]                 # type 1
        ranking += [j for j in range(nrows, len(row)) if row[j] == 0] # type 2
        if i < numf:   # family row: its bundles, already in preference order
            ranking += [fb2col[(i, bundle)] for bundle in bundlelist[i]]
        else:          # game row: contracts with positive seat demand here
            ranking += [j for j in range(nrows, len(row)) if row[j] > 0]
        ranking.append(i)                                             # type 4
        ordlist.append(ranking)
    return ordlist
# Key used to look a contract up in fb2col: (row id, bundle).
def contract2fb(c):
    row_id, bundle = c[0], c[1]
    return (row_id, bundle)
# Debug helper: print each basis contract next to its column index.
def printbasis(basis, fb2col):
    for contract in basis:
        print(f"{contract} : {fb2col[(contract[0], contract[1])]}")
## Construct constraint matrix A and b for families (without slack variables)
def consmatrix(A, b, fID2gID, IDlist, bundlelist, nG, gcapacity):
    """Expand A so every family in a group gets a copy of its group's columns.

    NOTE(review): this helper appears unfinished/unused --
      * fID2gID is measured with len() but then called like a function;
        presumably it maps family index -> group id.  Confirm before use.
      * the slack columns copied into columns 0..nrows-1 are immediately
        overwritten below, because newcurrcol restarts at 0 instead of nrows.
      * parameter b is never used; the gID-1 offset ("starting index 0 or 1?")
        is unresolved.
    """
    # Compute matrix size
    nF = len(fID2gID)
    nrows = nF + nG
    ncols = 0
    for i in range(len(IDlist)):
        ncols = ncols + len(IDlist[i]) * len(bundlelist[i])
    # Initialize
    newA = numpy.zeros((nrows, ncols))
    newb = [1] * nF
    ## Construct
    # Slack cols (see NOTE above: these get clobbered by the loop below)
    for i in range(nrows):
        newA[:, i] = A[:,i]
    newcurrcol = 0
    for f in range(nF):
        gID = fID2gID(f) # Check starting index 0 or 1?
        currcol = 0
        for k in range(gID-1): # Verify this
            currcol = currcol + len(bundlelist[k])
        for s in range(len(bundlelist[gID])):
            newA[:, newcurrcol] = A[:, currcol]
            currcol = currcol + 1
            newcurrcol = newcurrcol + 1
    newb = newb + gcapacity
    return newA, newb
def init_family(filename,sbud,cap):
    """Like init_v3 but reads "Sheet1" (one row per individual family,
    not per group): every family-row RHS is 1 and blist uses the
    senior-weighted budget fsize - snum + snum*sbud.

    Returns the same tuple shape as init_v3:
    (fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol, A, b,
    pglist, famsize, id2fam).
    """
    book = op.load_workbook(filename)
    sheet = book.get_sheet_by_name("Sheet1")
    row_num = sheet.max_row
    col_num = sheet.max_column
    gnum = 0
    fnum = row_num-1
    # count games: row 2 is filled up to the last game column
    for j in range(1,col_num):
        if sheet.cell(row=2,column=j).value:
            gnum += 1
        else:
            break
    bundle2rank = [] #bundle maps to the rank, each family has one dictionary
    bundlelist = [] #preference list over bundles, each family has one list
    sortedbundle = [] #bundles of interest in increasing alphabetic order, one list per family
    fb2col = {} #map (family,bundle) to the column index of matrix A
    pglist = [] #pglist[f] lists the games in family f's preference order
    blist = [] #blist[f] denotes the budget of family f
    famsize = [] #famsize[f] denotes the size of family f
    numcol = 0
    b = []
    for i in range(2,row_num+1): #family i-2
        rank = []
        bundle2rank.append({})
        bundlelist.append([])
        unsortedlist = []
        rank1 = []
        # sheet stores a score per game; sort games by score, best first
        for j in range(1,gnum+1):
            rank.append([j,sheet.cell(row=i,column=j).value])
        rank.sort(key = lambda x: x[1], reverse=True)
        for j in range(1,gnum+1):
            rank1.append(rank[j-1][0])
        pglist.append(rank1)
        fsize = sheet.cell(row=i,column=gnum+2).value   # family size
        snum = sheet.cell(row=i,column=gnum+4).value    # number of seniors
        #gsize = sheet.cell(row=i,column=gnum+6).value
        b.append(1)   # each individual family buys at most one bundle
        famsize.append(fsize)
        item_count = 0
        # bundles are precomputed in columns gnum+8..col_num, best first
        for j in range(gnum+8,col_num+1):
            item = sheet.cell(row=i,column=j).value
            # comprehension variable i is scoped to the comprehension and
            # does not clobber the outer row index i
            intlist = [int(float(i)) for i in item.split(',')] #convert string to int
            unsortedlist.append(intlist)
            inttuple = tuple(intlist)
            bundle2rank[i-2][inttuple] = item_count #bundle2rank[f-1] maps from a tuple bundle to its (0-based) rank for family f
            bundlelist[i-2].append(inttuple)
            item_count += 1
        blist.append(fsize-snum + snum*sbud)
        # contract columns of A are laid out in sorted-bundle order
        unsortedlist.sort()
        sortedbundle.append(unsortedlist)
        for item in unsortedlist:
            inttuple = tuple(item)
            fb2col[(i-2,inttuple)] = fnum + gnum + numcol
            numcol += 1
    b = b + [cap]*gnum
    #initialization and create slack contracts
    numF = fnum
    numG = gnum
    A = numpy.zeros((numF+numG,numF+numG+numcol))
    #slack columns; in fb2col a slack key is (negative id, ())
    for i in range(numF):
        fb2col[(-1*(i+1), ())] = i
        A[i,i] = 1
    for i in range(numG):
        fb2col[(-1*(i+1+numF), ())] = i+numF
        A[i+numF,i+numF] = 1
    col_count = numF + numG
    for i in range(numF):
        for j in sortedbundle[i]:
            A[i, col_count] = 1    # family i buys bundle j in this column
            game_count = 0
            for k in j:
                A[numF + game_count, col_count] = k    # seats demanded per game
                game_count += 1
            col_count += 1
    # group id -> list of 0-based Sheet1 family indices
    id2fam = [ [] for _ in range(row_num-1) ]
    sheet = book.get_sheet_by_name("Sheet1")
    fam_row_num = sheet.max_row
    for j in range(2,fam_row_num+1):
        id2fam[sheet.cell(row=j,column=gnum+6).value-1].append(int(float(j))-1)
    return fnum, gnum, bundle2rank, bundlelist, fb2col, blist, numcol, A, b, pglist, famsize, id2fam
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,038 | nhhai196/Ticket-Reassignment | refs/heads/main | /gen_rand_csv.py | import csv
import sys
import random
# Random instance generator; writes a CSV readable by datastructure.init.
#argv[1]: output CSV filename
#argv[2]: #families
#argv[3]: #games
#argv[4]: lower bound for family size/budget
#argv[5]: upper bound for family size/budget
#argv[6]: lower bound length of each family's preference list
#argv[7]: upper bound length of each family's preference list
#argv[8]: increase for the alpha function; e.g. 2 means every non-zero
#         bundle entry asks for 2 extra seats
#argv[9]: capacity of every game
flb = int(float(sys.argv[4]))
fub = int(float(sys.argv[5]))
llb = int(float(sys.argv[6]))
lub = int(float(sys.argv[7]))
with open(sys.argv[1], 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    numf = int(float(sys.argv[2]))
    numg = int(float(sys.argv[3]))
    inc = int(float(sys.argv[8]))
    csvwriter.writerow([numf,numg])
    for f in range(numf):
        fsize = random.randint(flb,fub)
        n = random.randint(llb,lub)
        row = [fsize]   # first field doubles as the family's budget seed
        i = 0
        # draw n distinct, non-empty random bundles
        # NOTE(review): if n exceeds the number of distinct bundles this loop never terminates
        while i < n:
            bundle = []
            next = 0   # NOTE(review): shadows the builtin next()
            for j in range(numg):
                numseats = random.randint(0,fsize)
                if numseats>0:
                    numseats = numseats+inc
                bundle.append(numseats)
            # reject duplicates of earlier bundles and the all-zero bundle
            for item in row:
                if item==",".join(map(str, bundle)) or all([v==0 for v in bundle]):
                    next=1
                    break
            if next==1:
                continue
            i = i+1
            row.append(",".join(map(str, bundle)))
        csvwriter.writerow(row)
    cap = [int(float(sys.argv[9]))] * numg
    csvwriter.writerow(cap)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,039 | nhhai196/Ticket-Reassignment | refs/heads/main | /ticket_reassign_v3.py | import sys
import datastructure
import csv
import numpy as np
import cardinalpivot as cp
import ordinalpivot as op
import scarfpivot as sp
import time
import numpy as np
import iterativerounding as ir
import random
import statistics as stat
# Driver: load an xlsx instance, run Scarf pivoting, iteratively round the
# fractional solution per family, and emit statistics.
#argv[1]: xlsx file name in the following format
#  row 1: column labels: family preference / family size / num seniors /
#         group size / family bundle preference (with alpha and bundle
#         size upper bound)
#  row f+1: info of family f
#argv[2]: budget of each senior; each non-senior has budget 1
#argv[3]: game capacity
#argv[4]: epsilon for price
#below: seatoffset = 7 and numscore = 4
#ex: python ticket_reassign_v3.py data-cardinal1.xlsx 1.2 500 0.2 // 7300 rounds
#ex: python ticket_reassign_v3.py data-cardinal1.xlsx 1.2 500 0.05 // > 2M rounds!
#data-cardinal1.xlsx does not have sheet 3
#ex: python ticket_reassign_v3.py data-cardinal2.xlsx 1.2 125 0.2 // 800 rounds
#ex: python ticket_reassign_v3.py data-cardinal2.xlsx 1.2 125 0.15 // > 800K rounds!
#below: seatoffset = 6 and numscore = 3
#ex: python ticket_reassign_v3.py data-cardinal3.xlsx 1.2 500 0.1 // 1800 rounds
#ex: python ticket_reassign_v3.py data-cardinal3.xlsx 1.2 500 0.05 // > 200K rounds!
#below: seatoffset = 6, numscore = 5, fewscores = 4
#ex: python ticket_reassign_v3.py data-cardinal5.xlsx 1.2 500 0.05 // 3600 rounds
#ex: python ticket_reassign_v3.py data-cardinal6.xlsx 1.2 500 0.04 // 7200 rounds, this one has max violation 24.6%, may want to check
numF, numG, bundle2rank, bundlelist, fb2col, budget, numcol, A, b, plist, famsize, idtofam = datastructure.init_v3(sys.argv[1],float(sys.argv[2]),int(float(sys.argv[3])))
#numF: number of families
#numG: number of games
#bundle2rank: bundle maps to the rank, each family has one dictionary
#bundlelist: preference list over bundles, each family has one list
#fb2col: map (family,bundle) to the column index of matrix A
#budget: budget[f-1] is the budget of family f
#numcol: number of contract columns of matrix A
#A: the Scarf matrix of size (numF+numG) x (numF+numG+numcol), slack columns first
#b: the capacity vector on the RHS
#plist: plist[f][j] denotes family f's j-th most favorite game
#famsize: famsize[f] denotes the size of family f
#idtofam: idtofam[i-1] lists the families with group id i
print("++++++++++++++++++++++++++++++++++++++ Data +++++++++++++++++++++++++++++++++++++")
print('numF = ' + str(numF))
print('numG = ' + str(numG))
print('bundle2rank:\n' + str(bundle2rank))
print(len(bundle2rank[1]))
print('numcol = ' + str(numcol))
numrow = numF + numG
print('numrow = ' + str(numrow))
print('budget: ' + str(budget))
print('matrix A:\n' + str(A))
print('vector b:\n' + str(b))
# initial cardinal basis: all slack contracts
clist = [] #contract list
for i in range(numF):
    clist.append((-1*(i+1),(),[]))
for i in range(numG):
    clist.append((-1*(i+1+numF),(),[]))
# initial ordinal basis and its row minimizers
print("Init ordinal basis:")
c, initOB = op.initordinalbasis(A, numF, numG, fb2col)
rmins = op.getallrowmins(initOB, numF, bundle2rank)
ordlist = datastructure.genordlist(A, numF, bundle2rank, bundlelist, fb2col)
# re-express ordlist in (family, bundle) form rather than column indices
col2fb = {value : key for (key, value) in fb2col.items()}
newordlist = []
for l in ordlist:
    temp = list(map(lambda x: col2fb[x], l))
    newordlist.append(temp)
start = time.time()
eps = float(sys.argv[4])
x = sp.scarfpivot(eps, clist, initOB, A, b, c, rmins, numF, numG, bundle2rank, newordlist, fb2col, budget, bundlelist)
end = time.time()
print('Scarf elapsed time =' + str(end - start))
## Iterative Rounding
# drop the slack columns before rounding
start = time.time()
A = A[:, numrow:]
# rebuild the instance at per-family granularity (Sheet1)
numF2, numG2, bundle2rank2, bundlelist2, fb2col2, budget2, numcol2, A2, b2, plist2, famsize2, idtofam2 = datastructure.init_family(sys.argv[1],float(sys.argv[2]),int(float(sys.argv[3])))
numrow2 = numF2 + numG2
print('numcol2 = ' + str(numcol2))
A2 = A2[:, numrow2:]
print('id2fam =' + str(idtofam2))
# Redistribute the group-level solution x over individual families
x2 = ir.redistribute(x, numcol2, idtofam2, numF, numG, fb2col, fb2col2, numrow2)
print(x2)
tol = 10**(-6)
xBar2 = ir.iterativerounding(A2, x2, b2, tol, numF2, numG2)
print("xBar2 = " + str(xBar2))
end = time.time()
print("Rounding elapsed time = " + str(end - start))
print((plist))
## Statistics
filename = 'outputs-' + str(numF2) + '-families-prune-top-15-swap-2-offset-4-score-40-bf2-v-1.xlsx'
print(b)
stat.statistics(filename, A2, xBar2, b2, numF2, numG2, fb2col2, plist2, famsize2, bundle2rank2)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,040 | nhhai196/Ticket-Reassignment | refs/heads/main | /m2py.py | import openpyxl as op
import sys
import csv
# Convert an xlsx instance ("Sheet2") into the CSV format read by
# datastructure.init, enumerating all bundles of at most argv[6] games.
#argv[1]: input file name
#argv[2]: output file name
#argv[3]: budget for seniors; each non-senior has 1 dollar
#argv[4]: offset for alpha, i.e. extra number of seats needed
#argv[5]: capacity of a game
#argv[6]: upper bound of bundle size
book = op.load_workbook(sys.argv[1])
sheet = book.get_sheet_by_name("Sheet2")
row_num = sheet.max_row
col_num = sheet.max_column
# count games: row 2 is filled up to the last game column
gnum = 0
for j in range(1,col_num):
    if sheet.cell(row=2,column=j).value:
        gnum += 1
    else:
        break
plist = []
#plist[f][i] denotes the i-th favorite bundle of family f
blist = []
#blist[f] denotes the budget of family f
extra = int(float(sys.argv[4]))
sbud = float(sys.argv[3])
ub = int(float(sys.argv[6]))
for i in range(2,row_num+1):
    inplist = []
    rank = []
    for j in range(1,gnum+1):
        rank.append(sheet.cell(row=i,column=j).value)
    fsize = sheet.cell(row=i,column=gnum+2).value   # family size
    snum = sheet.cell(row=i,column=gnum+4).value    # number of seniors
    # enumerate every subset of games via bitmask j, most preferred first
    for j in range(pow(2,gnum)-1, 0, -1):
        bundle = [0]*gnum
        # comprehension variable i is scoped to the comprehension and
        # does not clobber the outer row index i
        bilist = [int(i) for i in list('{0:0b}'.format(j))]
        if sum(bilist) <= ub:   # keep bundles with at most ub games
            bisize = len(bilist)
            for k in range(0,gnum-bisize):
                bilist.insert(0,0)   # left-pad the bitmask to gnum digits
            for k in range(0,gnum):
                if bilist[k]==1:
                    bundle[rank[k]-1] = fsize + extra
            inplist.append(bundle)
    plist.append(inplist)
    blist.append(fsize-snum + snum*sbud)
with open(sys.argv[2], 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow([row_num-1, gnum])
    for i in range(0,row_num-1):
        row = []
        row.append(blist[i])
        for j in range(0,len(plist[i])):
            row.append(','.join(map(str,plist[i][j])))
        csvwriter.writerow(row)
    csvwriter.writerow([int(float(sys.argv[5]))]*gnum)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,041 | nhhai196/Ticket-Reassignment | refs/heads/main | /ordinalpivot.py | ######################## Ordinal Pivot ############################
import datastructure as ds
import copy
import functools as func
import math
import correctness as cor
import time
# @clist : an ordinal basis
# @c     : the column leaving the basis
# @rmins : row minimizers of the basis
# @fp    : family preferences
def ordinalpivot(eps, clist, c, rmins, numf, numg, fp, ordlist, fb2col, budget):
    """One ordinal pivot step: remove column c from the basis, locate the
    row istar whose old minimizer now shares a column with another row's
    minimizer, and bring in the column that row istar ranks highest.

    Mutates clist in place; returns (clist, newc, newrmins, istar).
    """
    numrows = len(clist)
    # Remove column c from the basis
    removecon(clist, c)
    # Recompute row minimizers; only one row (newrm) can change.
    newrmins, newrm = getnewrowmins(clist, c, rmins, numf, fp)
    # Find the column that now holds two row minimizers,
    col2mins = getcoltwomins(rmins, newrmins)
    # and the row whose OLD minimizer sits in that column.
    istar = findoldminimizer(col2mins, rmins)
    # Find the entering column: the one row istar ranks highest subject to
    # the ordinal-basis conditions (work buffers passed to findcolmax).
    newc = []
    minprice = [0] * numg
    maxtms = [0] * numf
    # NOTE(review): [[]] * numrows aliases ONE shared inner list across all
    # entries -- confirm findcolmax never mutates fclist sublists in place.
    fclist = [[]]* numrows
    newc = findcolmax(eps, rmins, istar, newrmins, ordlist, numf, minprice, maxtms, fclist, fb2col, budget)
    # Update the basis
    clist.append(newc)
    # Row istar's minimizer is now the entering column.
    newrmins[istar] = newc
    return clist, newc, newrmins, istar
def initordinalbasis(A, numf, numg, fb2col):
    """Build the initial ordinal basis.

    Scans the first row of the constraint matrix `A` for the first
    non-slack column (index >= numf + numg) with a zero entry, maps
    that column index back to its (family, bundle) key via `fb2col`,
    and pairs it with an all-zero price vector.  The remaining
    numf + numg - 1 basis slots are filled with slack columns encoded
    as (-(row + 1), (), []).

    Returns (c, initOB): the chosen contract and the initial basis
    list.  (A leftover debug print of the column index was removed.)
    """
    numcol = len(A[0, :])
    # First non-slack column whose first-row coefficient is zero.
    for colindex in range(numf + numg, numcol):
        if A[0, colindex] == 0:
            break
    # Recover the (family, bundle) pair that owns this column.
    for (f, b) in fb2col:
        if fb2col[(f, b)] == colindex:
            break
    p = [0] * numg  # all-zero price vector
    c = (f, b, p)
    initOB = [c]
    # Slack columns for every remaining row.
    for i in range(1, numf + numg):
        initOB.append((-1 * (i + 1), (), []))
    return c, initOB
#
def removecon(clist, c):
    """Remove the first contract in `clist` equal to `c`, in place.

    Returns `clist` when a match was removed; returns None when no
    contract matched (same fall-through as before).
    """
    for position, contract in enumerate(clist):
        if ds.isequalcon(contract, c):
            del clist[position]
            return clist
    return None
#
def getrowmin(clist, row, numf, fp):
    """Return the row-minimizing column of basis `clist` for `row`.

    Scans left to right, replacing the running minimizer whenever
    ds.strictlyprefer reports the candidate beats it for this row.
    """
    best = clist[0]
    for candidate in clist[1:]:
        if ds.strictlyprefer(best, candidate, row, numf, fp):
            best = candidate
    return best
# When removing one col from the basis, only one row minimizer is changed
def getnewrowmins(clist, c, rmins, numf, fp):
    """Recompute row minimizers after column `c` left the basis.

    Only the single row whose minimizer was `c` can change; every
    other entry is copied unchanged.  Returns (newrmins, row) where
    `row` is the index whose minimizer was recomputed.
    """
    changed = None
    newrmins = copy.deepcopy(rmins)
    for idx, oldmin in enumerate(rmins):
        if ds.isequalcon(c, oldmin):
            changed = idx
            break
    # Only the affected row needs a fresh scan of the basis.
    newrmins[changed] = getrowmin(clist, changed, numf, fp)
    return newrmins, changed
# This fucntion is called only once at the begin
def getallrowmins(clist, numf, fp):
    """Compute the row minimizer of basis `clist` for every row.

    Called once at start-up; later pivots update minimizers
    incrementally.  The previous version evaluated getrowmin twice per
    row (once into a debug-only temporary); the duplicate call is
    removed.
    """
    return [getrowmin(clist, row, numf, fp) for row in range(len(clist))]
#
def getcoltwomins(rmins, newrmins):
    """Return the column that just became a second row minimizer.

    Compares old and new minimizer lists position by position and
    returns the first new entry that differs.  Falls through (None)
    with a diagnostic when the lists are identical, which would mean a
    broken pivot.
    """
    for old, new in zip(rmins, newrmins):
        if not ds.isequalcon(old, new):
            return new
    print("getcoltwomins: Something went wrong !!!!")
# Find old minimizer
def findoldminimizer(col2mins, rmins):
    """Return the index of the row whose minimizer equals `col2mins`.

    Falls through (None) with a diagnostic when no row matches.
    """
    for idx, rowmin in enumerate(rmins):
        if ds.isequalcon(col2mins, rowmin):
            return idx
    print("find old min: Something went wrong!!!!!")
# This is the most challenging function to write
def findcolmax(eps, newrm, istar, rmins, ordlist, numf, minprice, maxtms, fclist, fb2col, budget):
    """Pick the entering column (and its price) that row `istar` likes best.

    Collects the columns feasible for every other row, walks them in
    `istar`'s decreasing preference order, and returns the first
    column for which an admissible price exists.  For a game row
    (istar >= numf) all type-3 candidates are compared and the one
    paying the highest price for the game wins; the active slack
    column is the fallback.

    NOTE(review): the caller passes the OLD minimizers as `newrm` and
    the NEW minimizers as `rmins` — the parameter names are swapped
    relative to the call site.  Verify before refactoring.
    """
    # TODO
    start = time.time()
    fc, minprice, maxtms = getfeasiblecols(newrm, istar, rmins, ordlist, numf, minprice, maxtms, fclist, fb2col)
    #fc, minprice, maxtms = getfeasiblecolsOPT(newrm, istar, rmins, ordlist, numf, minprice, maxtms, fclist, fb2col)
    end = time.time()
    #print(end - start)
    # sort feasible columns in deceasing order of preferences
    fc = sortorder(ordlist[istar], fc)
    #print("--- Feasible cols = " + str(list(map(lambda x: fb2col[x], fc))))
    #print(minprice)
    #print(maxtms)
    #print("fesible cols len = " + str(len(fc)))
    # Price-stripped minimizers, used for "already in basis" detection.
    fbmins = list(map(lambda x: (x[0], x[1]), rmins))
    # Assuming fc is sorted in decreasing order
    # Linear search here, binary search might be better
    mpvlist = [] # Store the highest price vectors of feasible cols of type 3
    mpglist =[] # Store the highest price of the game of feasible cols of type 3
    t3clist = [] # Store all feasible cols of type 3
    # pop out the slack col of type 4 in the feasible cols list
    scol = fc.pop(len(fc)-1)
    for c in fc:
        type = getcoltype(c, istar, numf)
        if ((type == 1)):
            # Non-active slack: no price needed, take it immediately.
            return (c[0], c[1], [])
        elif (type == 2):
            temp = findbestprice(eps, c, istar, rmins, numf, minprice, maxtms, budget[c[0]], fbmins)
            #print("temp = " + str(temp))
            if not not temp:
                #temp = roundprice(temp)
                return (c[0], c[1],temp)
        if (type == 3):
            if istar < numf: # family case
                temp = findbestprice(eps, c, istar, rmins, numf, minprice, maxtms, budget[c[0]], fbmins)
                if not not temp:
                    #temp = roundprice(temp)
                    return (c[0], c[1],temp)
            else: # game case
                # In this case need to loop through all feasible cols of type 3
                # and find the col that can pay the highest price
                #print("@@@@@@ c = " +str(c))
                t3clist.append(c)
                temp = findbestprice(eps, c, istar, rmins, numf, minprice, maxtms, budget[c[0]], fbmins)
                #print(temp)
                mpvlist.append(temp)
                if temp == []:
                    # Sentinel: no admissible price for this candidate.
                    mpglist.append(-1)
                else:
                    #temp = roundprice(temp)
                    mpglist.append(round(temp[istar-numf], 3))
    # If not return yet, must be type 3 and istar >= numf
    if (istar >= numf):
        #print("@@@@ Game case: looking for the best price")
        maxval = max(mpglist)
        index = mpglist.index(maxval)
        fb = t3clist[index]
        price = mpvlist[index]
        #print(maxval)
        #print(mpglist)
        #print(mpvlist)
        if not not price:
            #price = roundprice(price)
            return (fb[0], fb[1], price)
    # If not return yet, the best col must the the type 4
    return (scol[0], scol[1], [])
# always returns the best price or []
def findbestprice(eps, c, istar, rmins, numf, minprice, maxtms, budget, fbmins):
    """Find the best admissible price vector for candidate column `c`.

    Builds the per-game lower-bound price vector implied by the other
    rows' minimizers (type-3 vs type-3 comparisons, bumped by `eps`
    when the tie-break fails), then delegates to bestprice().  When
    the same (family, bundle) pair already sits in the basis at row
    `index`, the new price must additionally tie-break against — and
    differ from — the existing one, which produces the long case
    analysis below.  Always returns a price list or [] when none
    exists.
    """
    numg = len(rmins) - numf
    fbtprice = [0] * numg # family break tie price
    gbtprice = [0] * numg # game break tie price
    diff = [] # the bestprice must be different from diff
    currminprice = [0] * (len(rmins) - numf) #copy.deepcopy(minprice)
    currmaxtms = [0] * numf #copy.deepcopy(maxtms)
    # cc shares currminprice, so later writes to currminprice are seen
    # through cc in the getcoltype/breaktie calls below.
    cc = (c[0], c[1], currminprice)
    if c not in fbmins:
        # Simple case: (family, bundle) not already in the basis.
        for row in range(len(rmins)):
            #if (row < numf) and (row != istar): # family case
            #    print("Do nothing 1")
            if (row >= numf) and (row != istar): # game case
                g = row - numf
                ctype = getcoltype(cc, row, numf)
                mtype = getcoltype(rmins[row], row, numf)
                if (ctype == 3) and (mtype == 3):
                    if not ds.breaktie(cc, rmins[row]):
                        currminprice[g] = rmins[row][2][g] + eps # must be eps higher
                        #print("---- TODO")
                    else:
                        currminprice[g] = rmins[row][2][g]
        return bestprice(eps, istar, c[1], currminprice, [], budget+1, budget, numf, diff)
    # For a row that already have a contract without price (f,b) in the basis
    else:
        #print("-------------------------- Interesting Case")
        index = fbmins.index(c)
        if (index == istar):
            # Prefer the OTHER occurrence of the pair, not the pivot row.
            for i in range(len(fbmins)):
                if (i != istar) and (fbmins[i] == c):
                    index = i
        #print("###################index = " + str(index))
        #print("###################istar = " + str(istar))
        # best price must be differrent from the existing one
        diff = rmins[index][2]
        for row in range(len(rmins)):
            #if (row < numf) and (row != istar) and (row != index): # family case
            #    print("---- Family case")
            if (row >= numf) and (row != istar) and (row != index): # game case
                #print("---- Game case")
                g = row - numf
                ctype = getcoltype(cc, row, numf)
                mtype = getcoltype(rmins[row], row, numf)
                if (ctype == 3) and (mtype == 3):
                    #print("--- Check point")
                    if not ds.breaktie(cc, rmins[row]):
                        currminprice[g] = rmins[row][2][g] + eps # must be eps higher
                    else:
                        currminprice[g] = rmins[row][2][g]
        # same family and same bundle, need to break tie carefuly here
        ctype = getcoltype(cc, index, numf) # verify cc
        mtype = getcoltype(rmins[index], index, numf)
        if (index < numf): # family case
            #print("---------------- Family case")
            if (ctype == 3) and (mtype == 3): # non-zero coefficient
                fbtprice = rmins[index][2]
                currmaxtms[index] = ds.dotproduct(c[1], rmins[index][2])
                temp = bestprice(eps, istar, c[1], currminprice, fbtprice, currmaxtms[index], budget, numf, diff)
                if not temp:
                    # Retry with the spending cap tightened by eps and
                    # the tie-break requirement dropped.
                    currmaxtms[index] = ds.dotproduct(c[1], rmins[index][2]) - eps
                    fbtprice = []
                    return bestprice(eps, istar, c[1], currminprice, fbtprice, currmaxtms[index], budget, numf, diff)
                else:
                    return temp
            elif (ctype == 2) and (mtype == 2):
                fbtprice = rmins[index][2]
                return bestprice(eps, istar, c[1], currminprice, fbtprice, budget+1, budget, numf, diff)
            else:
                return bestprice(eps, istar, c[1], currminprice, [], budget+1, budget, numf, diff)
        else: # game case
            #print("---------------- Finally touched game case")
            # break tie based on price
            g = index - numf
            if (istar >= numf):
                currminprice[istar - numf] = 0
            if (ctype == 3) and (mtype == 3): # non-zero coefficient
                #print("*********** Degbug Here")
                #print("price = " + str(currminprice))
                if (istar <numf):
                    gbtprice = rmins[index][2]
                    currminprice[g] = rmins[index][2][g]
                    temp = bestprice(eps, istar, c[1], currminprice, gbtprice, budget + 1, budget, numf, diff)
                    if not temp:
                        # Retry: drop tie-break, force price eps above.
                        gbtprice = []
                        currminprice[g] = rmins[index][2][g] + eps
                        return bestprice(eps, istar, c[1], currminprice, gbtprice, budget + 1, budget, numf, diff)
                    else:
                        return temp
                else:
                    #print("&&&&&&&&&&&&&&&&&&&&&&&& Check")
                    # Try both variants and keep the one with the
                    # higher price for game istar.
                    gbtprice = rmins[index][2]
                    currminprice[g] = rmins[index][2][g]
                    temp1 = bestprice(eps, istar, c[1], currminprice, gbtprice, budget + 1, budget, numf, diff)
                    gbtprice = []
                    currminprice[g] = rmins[index][2][g] + eps
                    temp2 = bestprice(eps, istar, c[1], currminprice, gbtprice, budget + 1, budget, numf, diff)
                    #print("temp1 =" + str(temp1))
                    #print("temp2 =" +str(temp2))
                    if (temp1 == []) and (temp2 == []):
                        return []
                    elif (temp1 == []) and (temp2 != []):
                        return temp2
                    elif (temp1 != []) and (temp2 == []):
                        return temp1
                    else:
                        tol = 10**(-6)
                        if not ds.isequalprice(temp1, temp2):
                            if (temp1[istar - numf] + tol >= temp2[istar-numf]):
                                return temp1
                            else:
                                return temp2
                        else:
                            return temp1
            elif (ctype == 2) and (mtype == 2):
                gbtprice = rmins[index][2]
                return bestprice(eps, istar, c[1], currminprice, gbtprice, budget+1, budget, numf, diff)
            else:
                return bestprice(eps, istar, c[1], currminprice, [], budget+1, budget, numf, diff)
#
def bestprice(eps, istar, alpha, minprice, btprice, maxtot, budget, numf, diff):
    """Dispatch to the family or game best-price routine based on istar."""
    if istar >= numf:
        # istar addresses a game row.
        return gbestprice(eps, istar, alpha, numf, minprice, btprice, maxtot, budget, diff)
    # istar addresses a family row.
    return fbestprice(istar, alpha, minprice, btprice, maxtot, budget)
# Check if there exists a price that is coordinate-wise
# greater than the minprice and is tie-break larger than btprice.
def fbestprice(istar, alpha, minprice, btprice, maxtot, budget):
    """Family-row best price: `minprice` itself, when it is admissible.

    Admissible means affordable under `budget`, total spend within
    `maxtot` (with tolerance), and — when `btprice` is non-empty —
    tie-break larger than `btprice`.  Returns [] when no admissible
    price exists.
    """
    tol = 10**(-6)
    spend = ds.dotproduct(alpha, minprice)
    # Guard clauses replace the original nested if/else ladder.
    if not isfeasibleprice(alpha, minprice, budget):
        return []
    if spend > maxtot + tol:
        return []
    if btprice and not ds.breaktievector(minprice, btprice):
        return []
    return minprice
# Game
def gbestprice(eps, istar, alpha, numf, minprice, btprice, maxtot, budget, diff):
    """Game-row best price: `minprice` with game istar's entry raised.

    Starts from `minprice`; if it is affordable and within `maxtot`,
    sets the entry of game g = istar - numf as high as the budget
    allows (or just under `btprice` when the tie-break pins it).
    Returns [] when no admissible price exists or the result would
    equal `diff` (the price already in the basis).
    """
    ms = ds.dotproduct(alpha, minprice)
    tol = 10**(-6)
    bestprice = copy.deepcopy(minprice)
    g = istar - numf
    #print("minprice =" + str(minprice))
    #print("btprice =" + str(btprice))
    #print(maxtot)
    if isfeasibleprice(alpha, minprice, budget) and (ms <= maxtot + tol):
        if not btprice: # empty
            #print("++++++++++++ gbest price: TODO 1")
            bestprice[g] = gfindmaxprice(eps, g, alpha, minprice, maxtot, budget)
        else: # non-empty
            #print("++++++++++++ gbest price: TODO 2")
            if ds.breaktievector(minprice, btprice):
                # jstar: first coordinate where minprice < btprice.
                jstar = breaktieindex(minprice, btprice)
                #print("jstar = " + str(jstar))
                if (g < jstar):
                    bestprice[g] = btprice[g] # TODO
                elif (g == jstar):
                    #print("THIS CASE")
                    # Must stay strictly below btprice at the tie-break
                    # coordinate; step down by one eps if possible.
                    if (btprice[g] >= eps - 10**(-8)):
                        bestprice[g] = btprice[g] - eps
                    else:
                        bestprice = []
                else: # set the price as high as possible
                    bestprice[g] = gfindmaxprice(eps, g, alpha, minprice, maxtot, budget)
            else: # No
                bestprice = []
    else: # No
        bestprice = []
    if (bestprice == diff):
        bestprice = []
    #print("Set bestprice = " + str(bestprice))
    return bestprice
#
def gfindmaxprice(eps, g, alpha, minprice, maxtot, budget):
    """Return the highest eps-multiple price for game `g`.

    Measures the slack between the cost of `minprice` and the tighter
    of `maxtot`/`budget`, converts it to whole eps-steps, and raises
    game g's entry by that many steps.  Warns (but still proceeds)
    when the slack is negative.
    """
    tol = 10**(-6)
    slack = min(maxtot, budget) - ds.dotproduct(alpha, minprice) + tol
    steps = math.floor(slack / (alpha[g] * eps))
    if steps < 0:
        print("++++++++ gfindmaxprice: no feasible price")
    # TODO: check no rounding error
    return minprice[g] + steps * eps
# Takes two price vector x and y (of the same length), returns the smallest index s.t x[i] < y[i]
# If does not exist, returns -1.
def breaktieindex(x, y):
    """Return the smallest index i with x[i] < y[i] beyond tolerance, else -1.

    `x` and `y` are price vectors of the same length.
    """
    tol = 10**(-6)
    idx = 0
    for xi in x:
        if xi < y[idx] - tol:
            return idx
        idx += 1
    return -1
# Check if a price is feasible given the budget and alpha
def isfeasibleprice(alpha, price, budget):
    """True iff the bundle `alpha` priced at `price` fits in `budget`.

    A small tolerance absorbs floating-point noise in the dot product.
    (Idiom fix: the comparison is returned directly instead of via an
    if/else returning True/False.)
    """
    tol = 10**(-6)
    return ds.dotproduct(alpha, price) <= budget + tol
# takes a column, a row, and numf
# returns the type of the column with respect to the row
# 1:non-active slack variable, 2: non-slack with zero coefficient,
# 3: non-slack with non-zero coefficient, 4: active slack variable
def getcoltype(c, row, numf):
    """Classify column `c` relative to `row`.

    1: non-active slack, 2: non-slack with zero coefficient,
    3: non-slack with non-zero coefficient, 4: active slack.
    """
    if not ds.isslack(c):
        # Real contract column: type depends on its coefficient in `row`.
        return 2 if ds.iszerocoeff(c, row, numf) else 3
    # Slack column: active iff it encodes this very row.
    return 4 if c[0] == -row - 1 else 1
#
def getfeasiblecols(newrm, istar, rmins, ordlist, numf, minprice, maxtms, fclist, fb2col):
    """Intersect the per-row feasible column lists (skipping row istar).

    Fills fclist[row] for every row except istar via
    getfeasiblecolsone, drops istar's slot, and returns the
    intersection plus the accumulated minprice/maxtms constraints.

    NOTE(review): fclist.remove(fclist[istar]) removes the FIRST entry
    equal to fclist[istar], not necessarily index istar — with the
    caller's `[[]] * numrows` aliasing this can delete a different
    empty slot; confirm intended.
    """
    # TODO: Store values
    # Update for 2 rows only, that is the newrm and istar, others stay the same
    #fclist[newrm], minprice, maxtms = getfeasiblecolsone(newrm, rmins[newrm], ordlist[newrm], numf, minprice, maxtms, fclist[newrm])
    #fclist[istar], minprice, maxtms = getfeasiblecolsone(istar, rmins[istar], ordlist[istar], numf, minprice, maxtms, fclist[istar])
    for row in range(len(rmins)):
        if (row != istar):
            fclist[row], minprice, maxtms = getfeasiblecolsone(row, rmins[row], ordlist[row], numf, minprice, maxtms, fb2col)
    #print("List of feasible cols for each row = "+ str(fclist))
    fclist.remove(fclist[istar])
    #print("List of feasible cols for each row = "+ str(fclist))
    # Get the list of feasible columns by intersecting all the list in fclist
    start = time.time()
    #fcols = func.reduce(intersection, fclist)
    #fcols = set.intersection(*map(set,fclist))
    fcols = set(fclist[0]).intersection(*fclist[1:])
    #print(fclist)
    #fcols = myintersection(fclist)
    #print(fcols)
    #end = time.time()
    #print("time = " + str(end - start))
    #time.sleep(0.1)
    return fcols, minprice, maxtms
#
def getfeasiblecolsone(row, rmin, order, numf, minprice, maxtms, fb2col):
    """Columns row `row` allows into the basis, given its minimizer `rmin`.

    `order` is the row's preference list of bare (family, bundle)
    pairs, best first.  Returns (fc, minprice, maxtms): the feasible
    prefix of `order`, plus side constraints — maxtms[row] caps a
    family's spend at its current minimizer's cost, minprice[g] floors
    a game's price at the minimizer's.
    """
    #print("order = " + str(list(map(lambda x: fb2col[x], order))))
    # Ignore the price of rmin
    rm = (rmin[0], rmin[1])
    #print(rm)
    # Find the index of rm in the order
    index = order.index(rm)
    fc = []
    type = getcoltype(rmin, row, numf)
    #print ("type = " + str(type) + " ", end ='')
    if (type == 1): # rmin can't be a non-active slack variable
        print("getfeasiblecolsone: Something wrong!!!!!")
    elif (type == 2): # non-slack with zero coefficient
        # Remove everying after the index, any price is OK
        fc = order[0:index+1]
    elif (type == 3): # non-slack with non-zero coefficient
        #print("type 3")
        if (row < numf): # family case
            #print("family case")
            # Remove all the cols that is less preferred than rmin
            fc = order[0:index+1]
            #print("index = " + str(index))
            #print(order)
            # the total money is at most ...
            maxtms[row] = ds.dotproduct(rmin[1], rmin[2])
        else: # game case, price matters
            #print("game case")
            g = row - numf
            # Everything except the last (slack) entry stays feasible;
            # the price floor below does the real filtering.
            fc = order[:len(order) - 1]
            # The price of game g is at least as expensive as the price of game g at rmin
            minprice[g] = rmin[2][g]
    else: # active slack variable
        #print("type 4")
        fc = order[:len(order)-1] # any col, any price is OK
    #print("row = " + str(row) + ": " + "fc =" + str(list(map(lambda x: fb2col[x], fc))))
    return fc, minprice, maxtms
# Optimization
def getfeasiblecolsOPT(newrm, istar, rmins, ordlist, numf, minprice, maxtms, fclist, fb2col):
    """Complement-based variant of getfeasiblecols (currently unused).

    Collects each row's INFEASIBLE columns, unions them, and takes the
    symmetric difference with all columns — equivalent in intent to
    intersecting the feasible lists, but cheaper when infeasible sets
    are small.

    NOTE(review): `newrm` is indexed per-row here (newrm[row][0]),
    i.e. treated as a list of columns, unlike findcolmax's scalar use
    of its own `newrm` parameter — confirm the intended argument.
    """
    temp =[]
    for row in range(len(rmins)):
        if (row != istar):
            fclist[row], minprice, maxtms = getinfeasiblecolsone(row, rmins[row], ordlist[row], numf, minprice, maxtms, fb2col)
            if newrm[row][0] < 0: # slack
                temp.append((newrm[row][0], newrm[row][1]))
    #print("List of feasible cols for each row = "+ str(fclist))
    fclist.remove(fclist[istar])
    #print("List of feasible cols for each row = "+ str(fclist))
    # Get the list of feasible columns by intersecting all the list in fclist
    start = time.time()
    #fcols = func.reduce(intersection, fclist)
    #fcols = set.intersection(*map(set,fclist))
    fclist.append(temp)
    infcols = set(fclist[0]).union(*fclist[1:])
    #print("infcols = " + str(infcols))
    fcols = set(fb2col.keys()).symmetric_difference(infcols)
    #print(fclist)
    #fcols = myintersection(fclist)
    #print(set(fb2col.keys()))
    #print((fcols))
    end = time.time()
    #print("time = " + str(end - start))
    #time.sleep(0.1)
    return fcols, minprice, maxtms
def getinfeasiblecolsone(row, rmin, order, numf, minprice, maxtms, fb2col):
    """Complement of getfeasiblecolsone: the columns `row` rules OUT.

    Mirrors getfeasiblecolsone but returns the columns that row's
    current minimizer `rmin` makes infeasible, so the caller can union
    complements instead of intersecting feasible lists.  Updates the
    running minprice/maxtms side constraints exactly like the feasible
    variant.
    """
    # Strip the price component; `order` holds bare (family, bundle) pairs.
    rm = (rmin[0], rmin[1])
    # Position of the minimizer in this row's preference order.
    index = order.index(rm)
    fc = []
    type = getcoltype(rmin, row, numf)
    if (type == 1): # rmin can't be a non-active slack variable
        # BUG FIX: diagnostic previously named getfeasiblecolsone.
        print("getinfeasiblecolsone: Something wrong!!!!!")
    elif (type == 2): # non-slack with zero coefficient
        # Everything strictly less preferred than rmin is infeasible.
        fc = order[index+1:]
    elif (type == 3): # non-slack with non-zero coefficient
        if (row < numf): # family case
            fc = order[index+1:]
            # the total money is at most the minimizer's cost
            maxtms[row] = ds.dotproduct(rmin[1], rmin[2])
        else: # game case, price matters
            g = row - numf
            # BUG FIX: was `order[len(order) - 1]`, a bare (f, b) tuple.
            # Downstream set unions expect a list of columns, so the
            # tuple's own elements would have been treated as columns.
            # Slice so the single excluded column stays wrapped in a list.
            fc = order[len(order) - 1:]
            # The price of game g is at least as expensive as at rmin.
            minprice[g] = rmin[2][g]
    else: # active slack variable
        fc = [] # any col, any price is OK: nothing is infeasible
    return fc, minprice, maxtms
####
# list intersection
def intersection(x, y):
    """Return the elements of `y` that also occur in `x`, keeping y's order."""
    lookup = set(x)
    return [item for item in y if item in lookup]
# sort a list
def sortorder(sortedlist, sublist):
    """Return sublist's elements in the order they appear in `sortedlist`."""
    members = set(sublist)
    return [item for item in sortedlist if item in members]
#
def roundprice(p):
    """Round every entry of price vector `p` to 3 decimals, in place.

    Also returns `p`: several (currently commented-out) call sites are
    written as `temp = roundprice(temp)`, and the old version returned
    None, which would have wiped the price there.
    """
    for i in range(len(p)):
        p[i] = round(p[i], 3)
    return p
# my list intersection
def myintersection(lists):
    """Intersect a list of element lists by divide and conquer.

    Returns [] for no lists, the single list unchanged for one, and a
    set of the common elements otherwise.

    The previous version was broken in two ways: the middle sublist
    (`lists[mid]`) fell into neither recursive half, and the right
    half's *elements* were star-unpacked as separate arguments to
    set.intersection, so results were wrong (usually empty).  It also
    shadowed the builtin `list`; the parameter is renamed.
    """
    n = len(lists)
    if n == 0:
        return []
    if n == 1:
        return lists[0]
    mid = n // 2
    # Both halves cover every sublist exactly once.
    return set(myintersection(lists[:mid])) & set(myintersection(lists[mid:]))
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,042 | nhhai196/Ticket-Reassignment | refs/heads/main | /cardinalpivot.py | ######################## Cardinal Pivot ############################
import numpy
import datastructure as ds
# @clist : a list of contracts that is a cardinal basis. Note that
# the order of contracts is important.
# @c : a new column to add to the basis
# @A : the constraint matrix. Note that this matrix will be updated
# after each cardinal basis step
# @b : the right hand side
# @fb2col : (family, bundle) to column index (of the constraint matrix)
def cardinalpivot(clist, c, A, b, fb2col):
    """Standard simplex pivot bringing column `c` into the cardinal basis.

    If the (family, bundle) pair of `c` is already basic, only its
    price is swapped and A/b are untouched.  Otherwise a ratio test
    picks the leaving row, the pivot row is normalized, and every
    other row of A and b is updated by Gaussian elimination.

    Returns (clist, leaving_column, newA, newb).
    """
    #print("++++++++ Cardinal pivot:")
    #ds.printbasis(clist, fb2col)
    #print("----- Push in : " +str(c))
    numf = len(clist)
    # First check that if the contract (ignore the price vector) to add
    # already in the basis. If Yes, just add the new contract and remove
    # the old one.
    for i in range(len(clist)):
        tempc = clist[i]
        if (tempc[0] == c[0] and tempc[1] == c[1]):
            clist[i] = c
            #print("----- Kick out: " + str(tempc))
            #print(roundmatrix(A))
            #print(roundvector(b))
            return clist, tempc, A, b
    numrows = len(A)
    numcols = len(A[0])
    #print(numrows)
    #print(numcols)
    #print(b)
    # Index of the entering basic variable (added column)
    # TODO: need a mapping from (family, bundle) to index
    fbc = (c[0], c[1])
    cindex = fb2col[fbc]
    #print(cindex)
    # Perform ratio test to find the leaving basic variable (revomed column)
    minval = 10**10 # some large value
    pivotrow = -1 # this will be the index of the leaving basic variable
    for row in range(numrows):
        # Only rows with a (numerically) positive pivot coefficient
        # are eligible for the ratio test.
        if (A[row, cindex] > 0.00001):
            temp = b[row]/ A[row, cindex]
            if (temp < minval):
                minval = temp
                pivotrow = row
    ### Update variables
    oldc = clist[pivotrow]
    clist[pivotrow] = c
    # Initialize to appropriate size
    newb = [0] * len(b)
    newA = numpy.zeros([numrows, numcols])
    ## Update the pivotrow
    # Copy pivotrow and normalizing to 1
    newA[pivotrow, :] = A[pivotrow, :] / A[pivotrow, cindex]
    # Update pivotrow of right hand side
    newb[pivotrow] = b[pivotrow]/A[pivotrow, cindex]
    ## Update all other rows
    for k in range(numrows):
        if (not (k == pivotrow)):
            # Set it equal to the original value minus a multiple
            # of normalized pivotrow
            newA[k, :] = A[k,:] - A[k,cindex] * newA[pivotrow, :]
            newb[k] = b[k] - A[k,cindex] * newb[pivotrow]
    #print("----- Kick out: " + str(oldc))
    #print(roundmatrix(newA))
    #print("newb = " + str(roundvector(newb)))
    return clist, oldc, newA, newb
def roundmatrix(A):
    """Return a copy of matrix `A` with every entry rounded to 2 decimals.

    Used only for debug printing.  Vectorized with numpy.round in
    place of the previous elementwise double loop.
    """
    return numpy.round(A, 2)
def roundvector(b):
    """Return a numpy copy of vector `b` with entries rounded to 2 decimals."""
    values = list(b)
    newb = numpy.zeros(len(values))
    for idx, val in enumerate(values):
        newb[idx] = round(val, 2)
    return newb
60,043 | nhhai196/Ticket-Reassignment | refs/heads/main | /correctness.py | ######################## Tetsing #########################################
# This module contains all functions that are needed to test the correctness
# of ordinal pivot and cardinal pivot
import ordinalpivot as op
import math
import datastructure as ds
def isordbasis(eps, basis, numf, numg, fp, ordlist, fb2col, budget):
    """Brute-force check that `basis` is an ordinal basis.

    For every real (family, bundle) column, enumerates every eps-grid
    price within the family's budget and verifies some row minimizer
    dominates the resulting contract.  Prints each violation and
    returns False when any column escapes domination.  Exponential —
    testing only.
    """
    # Get all the row mins
    rmins = op.getallrowmins(basis, numf, fp)
    flag = True
    for c in fb2col:
        # Negative family IDs encode slack columns; skip them.
        if (c[0] >=0):
            prices = enumprice(eps, c[1], numg, budget[c[0]])
            for p in prices:
                contract = (c[0], c[1], p)
                if not dominated(contract, rmins, numf, fp):
                    print("+++++++++++ ERROR: col below not dominated")
                    print(contract)
                    print(rmins)
                    flag = False
                    #return False
    return flag
# def
def dominated(contract, rmins, numf, fp):
    """True iff some row minimizer weakly dominates `contract`."""
    return any(
        ds.weaklyprefer(rmins[row], contract, row, numf, fp)
        for row in range(len(rmins))
    )
# enumerate all possible prices
def enumprice(eps, alpha, numg, budget):
    """Enumerate every eps-grid price vector affordable within `budget`.

    Recurses over the games: a zero-demand game is pinned to price 0,
    while a demanded game ranges over multiples of eps until the
    remaining budget is exhausted.  Returns a list of price vectors,
    each a list of length `numg`.
    """
    if numg == 0:
        return [[]]
    if alpha[0] == 0:
        # No demand for this game: its price contributes nothing.
        tails = enumprice(eps, alpha[1:], numg - 1, budget)
        return [[0] + tail for tail in tails]
    result = []
    steps = math.floor(budget / (eps * alpha[0])) + 1
    for step in range(steps):
        price = step * eps
        tails = enumprice(eps, alpha[1:], numg - 1, budget - price * alpha[0])
        result.extend([price] + tail for tail in tails)
    return result
def enumpriceall(eps, alpha, numg, budget):
    """Enumerate the affordable price vectors for every family.

    Returns a list parallel to `budget`, one enumprice() result per
    family.  The previous version assigned `allprices[i]` into an
    empty list and raised IndexError on first use; entries are now
    appended.
    """
    allprices = []
    for i in range(len(budget)):
        allprices.append(enumprice(eps, alpha[i], numg, budget[i]))
    return allprices
## Functions for testing aproximate pseudo CE
def ispseudoCE(x, p, eps, fb2col, ordlist, budlist, numf, numg, budget):
    """Check that (x, p) is an (approximate) pseudo competitive equilibrium.

    Every positive entry of the allocation `x` must correspond to a
    bundle that is optimal for its family at prices `p` (per
    isfoptimal, with an eps-per-seat budget relaxation).
    """
    tol = 10**(-6)
    for i in range(len(x)):
        xi = x[i]
        if not abs(xi) <= tol: # positive value
            # get the bundle
            # NOTE(review): this relies on (f, s) leaking out of the
            # for-loop after `break`; if no key maps to colindex, the
            # last (f, s) visited is used silently — confirm fb2col
            # always covers every positive column.
            for (f, s) in fb2col:
                #print("key = " + str(key))
                #print(value)
                # Solution indices are offset by the slack columns.
                colindex = i + numf + numg
                if fb2col[(f, s)] == colindex:
                    break
            if not isfoptimal(f, s, p, eps, ordlist[f], budlist[f], budget[f]):
                return False
    return True
def isfoptimal(f, s, p, eps, order, budlist, bf):
    """Check that bundle `s` is family f's favorite affordable bundle.

    The family's budget `bf` is relaxed by eps per seat in `s`; any
    strictly preferred bundle that remains affordable at prices `p`
    disqualifies `s`.
    """
    tol = 10**(-6)
    relaxed_budget = bf - eps * sum(s)
    rank = order.index((f, s))
    for bundle in budlist:
        if not isaffordable(bundle, p, relaxed_budget):
            continue
        if order.index((f, bundle)) < rank: # prefer some affordable bundle
            return False
    return True
def isaffordable(c, p, budget):
    """True iff bundle `c` at prices `p` costs at most `budget` (with tolerance).

    The old code special-cased "exactly on budget" before the <= test,
    but that branch was already implied by the tolerant comparison, so
    it is folded into a single expression.
    """
    tol = 10**(-6)
    return ds.dotproduct(c, p) <= budget + tol
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,044 | nhhai196/Ticket-Reassignment | refs/heads/main | /iterativerounding.py | from scipy.optimize import linprog
import datastructure as ds
import numpy as np
import math
import copy
import threading
# A : the constraint matrix
# v: the objective coefficients
# x : a dominating extreme point
# tol: tolerance for double-floating error
def iterativerounding(A, x, b, tol, numf, numg):
    """Iteratively round the dominating extreme point `x` to integers.

    Classic iterative-rounding loop: fix the (near-)integral entries
    of the current LP solution, shrink the LP to the fractional
    columns, and re-solve; when no entry is integral, greedily delete
    the game inequality with the most slack at the rounded-up point.
    Terminates when the LP solution is fully integral.

    Parameters: A/b constraint matrix and rhs, x the fractional start
    point, tol the integrality tolerance, numf/numg family and game
    counts.  Returns the integral solution as a list of ints.
    """
    print("+++++++++++++++++++++++++++++++++ Iterative Rounding +++++++++++++++++++++++++++++++")
    A = list(map(list, A))
    # Get the dimension
    numcol = len(x)
    # Initialize xBar
    xBar = [0] * numcol
    # Find family binding constraints
    A_eq, A_ineq, numfeq, k, b_eq, b_ineq = fbindconstraints(A, x, b, tol, numf, numg)
    #print(len(A_eq))
    #print(len(A_ineq))
    #print(b_eq)
    #print(b_ineq)
    #print(numfeq)
    #print("init k = " + str(k))
    # Objective function: minimize
    v = [-1] * numcol
    # initialize a list for bookkeeping of x variables
    xremainind = [i for i in range(numcol)]
    # NOTE: `round` here shadows the builtin; only `roundint`/`roundup`
    # are used for numeric rounding inside this function.
    round = 1
    elim = 0;
    while True:
        print("++++++++++++ Round = " + str(round))
        #t = printint()
        round += 1
        iind, xint, find, xfrac = seperateintfrac(x, tol)
        if not find: # integer solution
            #print("Integral sol")
            #stop_threads = True
            #t.cancel()
            break
        if len(x) == len(xfrac): # no integral entry
            #print("No integral entry, eliminating a constraint")
            ## looking for a game constraint to delete
            xup = roundup(x, tol)
            #print("k=" + str(k))
            #inds, A_eq_g = gbindconstraints(A_ineq[k:], xfrac, b_ineq[k:], tol)
            #print(A_eq_g)
            #print(xup)
            #print(xup)
            #btemp = subtract(mul(A_eq_g, xup), b_ineq[k:])
            # Slack of each game inequality at the rounded-up point;
            # rows before index k are family inequalities and are kept.
            btemp = subtract(mul(A_ineq[k:], xup), b_ineq[k:])
            # greedy choice
            elimind = btemp.index(min(btemp))
            elimind += k
            elim += 1
            # Delete the constraint
            A_ineq.remove(A_ineq[elimind])
            #print(b_ineq[elimind])
            b_ineq.remove(b_ineq[elimind])
        else: # mixed integer and fractional sol
            #print("mixed soltion, fixing integral entries")
            ## Update the linear program
            # The objective coefficients
            vint, [v] = partitionmatrix([v], iind)
            # For equality
            A_eq_int, A_eq = partitionmatrix(A_eq, iind)
            b_eq = subtract(b_eq, mul(A_eq_int, xint))
            # For inequality
            A_ineq_int, A_ineq = partitionmatrix(A_ineq, iind)
            b_ineq = subtract(b_ineq, mul(A_ineq_int, xint))
            # Update remaining variables
            for i in iind:
                xBar[xremainind[i]] = x[i]
            temp = []
            for i in find:
                temp.append(xremainind[i])
            xremainind = temp
            #print("remain indices:" + str(xremainind))
        # Clean up useless constraints
        A_eq, b_eq = cleanupeq(A_eq, b_eq)
        A_ineq, b_ineq, k = cleanupineq(A_ineq, b_ineq, k)
        #print("Check k = " + str(k))
        #print(v)
        #print("A_eq = " + str(len(A_eq)))
        #print(b_eq)
        #print("A_ineq = " + str(len(A_ineq)))
        #print(b_ineq)
        # Resolve the updated linear program
        if not A_eq and not A_ineq:
            #print("This case")
            x = [0] * len(xremainind)
        elif not A_eq and not b_eq:
            res = linprog(v, A_ub=A_ineq, b_ub=b_ineq, A_eq=None, b_eq=None, method='revised simplex')
            x = res['x']
        elif not A_ineq and not b_ineq:
            res = linprog(v, A_ub=None, b_ub=None, A_eq=A_eq, b_eq=b_eq, method='revised simplex')
            x = res['x']
        else:
            res = linprog(v, A_ub=A_ineq, b_ub=b_ineq, A_eq=A_eq, b_eq=b_eq, method='revised simplex')
            x = res['x']
        #print("x = " + str(x))
    # Update the integral solution xBar
    for i in iind:
        xBar[xremainind[i]] = x[i]
    print("eliminated " + str(elim) + " constraints")
    return roundint(xBar)
# Print some statement
def printint():
    """Start a timer that reports IR progress every 5 seconds.

    Returns the Timer object so the caller can cancel it.  The old
    code did `t = threading.Timer(...).start()`: Timer.start() returns
    None, so `t` was always None and the caller's `t.cancel()` would
    raise AttributeError.
    """
    t = threading.Timer(5.0, printint)
    t.start()
    print('IR: running')
    return t
# Takes a solution x, and returns list of indicies i suct that x[i] is integal
def seperateintfrac(x, tol):
    """Split solution `x` into (near-)integral and fractional parts.

    Returns (iind, xint, find, xfrac): indices and values of entries
    within `tol` of an integer, then indices and values of the rest.
    """
    iind, xint, find, xfrac = [], [], [], []
    for i, val in enumerate(x):
        if abs(val - round(val)) <= tol:
            iind.append(i)
            xint.append(val)
        else:
            find.append(i)
            xfrac.append(val)
    return iind, xint, find, xfrac
# Round up a vector x
def roundup(x, tol):
    """Ceil every entry of `x` that exceeds `tol`; others become 0."""
    return [math.ceil(v) if v > tol else 0 for v in x]
# Check if a solution is an integer solution
def isintsol(x, tol):
    """True iff every entry of `x` lies within `tol` of an integer."""
    return all(abs(v - round(v)) <= tol for v in x)
# Multiply a matrix with a vector
def mul(A, x):
    """Matrix-vector product, one ds.dotproduct per row of A."""
    products = []
    for row in A:
        products.append(ds.dotproduct(row, x))
    return products
#
def partitionmatrix(A, iind):
    """Split the columns of `A` into (columns in iind, remaining columns).

    Returns a pair of matrices preserving row order; ([], []) for an
    empty A.  `iind` is converted to a set up front: the previous
    `j in iind` list scan made the split O(rows * cols * len(iind)).
    """
    if len(A) == 0:
        return [], []
    keep = set(iind)
    Aint, Afrac = [], []
    for row in A:
        aint, afrac = [], []
        for j, val in enumerate(row):
            if j in keep:
                aint.append(val)
            else:
                afrac.append(val)
        Aint.append(aint)
        Afrac.append(afrac)
    return Aint, Afrac
# subtract two vector coordinatewise
def subtract(x, y):
    """Elementwise difference x - y; y must be at least as long as x."""
    return [xi - y[i] for i, xi in enumerate(x)]
# find family binding constraints
def fbindconstraints(A, x, b, tol, numf, numg):
    """Partition constraints into family-binding equalities and the rest.

    A family row (index < numf) that `x` satisfies with equality (to
    `tol`) becomes an equality constraint; everything else stays an
    inequality.  Returns (A_eq, A_ineq, numfeq, numfineq, b_eq,
    b_ineq), the counters giving how many family rows landed in each
    part.
    """
    lhs = mul(A, x)
    A_eq, b_eq = [], []
    A_ineq, b_ineq = [], []
    numfeq = 0
    numfineq = 0
    for i in range(len(b)):
        is_family = i < numf
        if is_family and abs(lhs[i] - b[i]) <= tol:
            A_eq.append(A[i])
            b_eq.append(b[i])
            numfeq += 1
        else:
            A_ineq.append(A[i])
            b_ineq.append(b[i])
            if is_family:
                numfineq += 1
    return A_eq, A_ineq, numfeq, numfineq, b_eq, b_ineq
# find game binding constraints
def gbindconstraints(A, x, b, tol):
    """Return (indices, rows) of constraints that `x` satisfies tightly."""
    lhs = mul(A, x)
    inds = []
    A_eqg = []
    for i, rhs in enumerate(b):
        if abs(lhs[i] - rhs) <= tol:
            inds.append(i)
            A_eqg.append(A[i])
    return inds, A_eqg
# clean up useless constraints
def cleanupeq(A, b):
    """Drop equality rows whose coefficients are all (near) zero."""
    newA = []
    newb = []
    for idx, row in enumerate(A):
        if allzeros(row):
            continue
        newA.append(row)
        newb.append(b[idx])
    return newA, newb
def cleanupineq(A, b, k):
    """Drop all-zero inequality rows and shrink the family-row count.

    `k` counts the leading family rows; it is decremented whenever an
    all-zero row before that boundary is removed.  Returns
    (newA, newb, newk).
    """
    newA = []
    newb = []
    newk = k
    for idx, row in enumerate(A):
        if not allzeros(row):
            newA.append(row)
            newb.append(b[idx])
        elif idx < k:
            newk -= 1
    return newA, newb, newk
def allzeros(x):
    """True iff every entry of `x` is within 1e-6 of zero."""
    tol = 10**(-6)
    return all(abs(v) <= tol for v in x)
def roundint(x):
    """Round every entry of `x` to the nearest integer (Python half-to-even)."""
    return [round(v) for v in x]
#print(subtract([1,2], [-1, -2]))
#print(roundup([0.1, 0, 0.2], 10**(-6)))
### Redistribute evenly the group dominating solution to a family dominating solution
def redistribute(x, numcol2, IDlist, numf, numg, fb2col, fb2col2, numrow2):
    # Spread each positive group-level entry of x equally over the member
    # families of that group, producing a family-level vector of length numcol2.
    newx = [0] * numcol2
    for i, val in enumerate(x):
        if val > 0:
            gID, bundle = ind2fb(i, fb2col, numf + numg)
            newx = redistributeone(val, gID, bundle, IDlist, newx, fb2col2, numrow2)
    return newx
def redistributeone(val, gID, b, IDlist, newx, fb2col2, numrow2):
    """Give every family in group *gID* an equal share of *val* for bundle *b*.

    Column indices come from fb2col2 keyed by (family-1, bundle), shifted
    left by numrow2. Mutates and returns *newx*.
    """
    members = IDlist[gID]
    share = val / len(members)
    for fam in members:
        newx[fb2col2[(fam - 1, b)] - numrow2] = share
    return newx
def ind2fb(ind, fb2col, numrows):
    """Inverse lookup in fb2col: the (family, bundle) key whose column equals
    ind + numrows (columns are offset by the number of matrix rows).
    Returns None when no column matches."""
    target = ind + numrows
    for key, col in fb2col.items():
        if col == target:
            return key
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,045 | nhhai196/Ticket-Reassignment | refs/heads/main | /gendata_v3.py | import random
from scipy import stats
import numpy as np
import copy
import xlsxwriter
def gendata(filename, numg, numf, fdist, numscore, minsize, numswaps, seatoffset, maxbsize):
numscore, scorelist = genscorelist(numscore, numg, numswaps)
prefcdf = genprefcdf(numscore)
# print(scorelist)
# print(prefcdf)
famdict, tupletoID = genfam(numf, fdist, minsize, scorelist, prefcdf)
# famdict[family][0]=size, famdict[family][1]=#seniors, famdict[family][2]=bundle preference
# tupletoID[tuple([family][i])]= family's ID
group = groupfamily(famdict)
# print(group)
workbook = xlsxwriter.Workbook(filename)
wb = workbook.add_worksheet()
wb.write(0, 0, 'Family Preference')
wb.write(0, numg + 1, 'Family Size')
wb.write(0, numg+ 3, 'Num Seniors')
wb.write(0, numg + 5, 'Group ID')
wb.write(0, numg + 7, 'Family Bundle Preference')
row = 1
for value in famdict.values():
# print(value)
temp = value[2]
# print(temp)
for col in range(numg):
#print(temp[col])
wb.write(row, col, temp[col])
wb.write(row, numg + 1, value[0])
wb.write(row, numg + 3, value[1])
wb.write(row, numg + 5, tupletoID[tuple(value)])
sblist = genblist(value, maxbsize, seatoffset)
for i in range(len(sblist)):
wb.write(row, numg + 7 + i, ",".join(map(str, sblist[i][1])))
row += 1
wb = workbook.add_worksheet()
wb.write(0, 0, 'Family Preference')
wb.write(0, numg + 1, 'Family Size')
wb.write(0, numg + 3, 'Num Seniors')
wb.write(0, numg + 5, 'Group Size')
wb.write(0, numg + 7, 'Family Bundle Preference')
row = 1
for key, value in group.items():
wb.write_row(row, 0, key[2])
wb.write(row, numg + 1, key[0])
wb.write(row, numg + 3, key[1])
wb.write(row, numg + 5, len(value))
sblist = genblist(key, maxbsize, seatoffset)
for i in range(len(sblist)):
wb.write(row, numg + 7 + i, ",".join(map(str, sblist[i][1])))
row += 1
wb = workbook.add_worksheet()
wb.write(0, 0, 'Capacity')
wb.write(0, 1, 'Alpha')
wb.write(1, 0, round(numf * 2.5)) # Hardcode here for capacity
for i in range(len(fdist)):
wb.write(1, i+1, i+1+seatoffset)
# wb = workbook.add_worksheet()
# wb.write(0, 0, 'Family Preference')
# row = 1
# for value in famdict.values():
# sblist = genblist(value, maxbsize, seatoffset)
# for i in range(len(sblist)):
# wb.write(row, i, ",".join(map(str, sblist[i][1])))
# row += 1
workbook.close()
#print(np.random.permutation([i for i in range(1,6)]))
return famdict, group
def genblist(value, ub, extra):
    """Enumerate all non-empty game bundles of at most *ub* games for a family.

    value = [size, #seniors, score-tuple]. Each selected game is assigned
    (family size + extra) seats; a bundle's score is the sum of the family's
    scores over its games. Returns [[score, bundle], ...] sorted best-first.
    """
    n = len(value[2])
    seats = value[0] + extra
    sblist = []
    for mask in range(2 ** n - 1, 0, -1):
        bits = [(mask >> (n - 1 - j)) & 1 for j in range(n)]
        if sum(bits) > ub:
            continue
        bundle = [seats if bits[j] else 0 for j in range(n)]
        score = sum(value[2][j] for j in range(n) if bits[j])
        sblist.append([score, bundle])
    sblist.sort(key=lambda entry: entry[0], reverse=True)
    return sblist
def groupfamily(famdict):
    """Group family IDs by identical (size, seniors, prefs) records,
    preserving first-seen order of the record tuples."""
    group = {}
    for famID, record in famdict.items():
        group.setdefault(tuple(record), []).append(famID)
    return group
def genfam(numf, dist, minsize, preflist, prefcdf):
    """Create numf random families.

    Sizes follow *dist* (offset by minsize); senior counts and preference
    vectors are then drawn at random. Returns (famdict, tupletoID) where
    tupletoID maps each distinct family record to a compact ID.
    """
    famdict = {}
    counts = distmul(dist, numf)
    f = 0
    for i, c in enumerate(counts):
        for _ in range(c):
            f += 1
            famdict[f] = [i + minsize, (), ()]
    famdict = gensenior(famdict)
    famdict = genpref(famdict, preflist, prefcdf)
    tupletoID = genID(famdict)
    return famdict, tupletoID
def genID(famdict):
    """Assign consecutive IDs (starting at 1) to distinct family records,
    in family-number order."""
    tupletoID = {}
    nextID = 1
    for f in range(1, len(famdict) + 1):
        key = tuple(famdict[f])
        if key not in tupletoID:
            tupletoID[key] = nextID
            nextID += 1
    return tupletoID
def gensenior(famdict):
    """Fill slot 1 (senior count) of every family record in place,
    drawing from the family's size; returns the same dict."""
    for f in range(1, len(famdict) + 1):
        famdict[f][1] = randomsenior(famdict[f][0])
    return famdict
# random score over games
def randomscore(numg):
    """numg independent scores, each uniform on [0.2, 1] rounded to 4 dp."""
    return [round(random.uniform(0.2, 1), 4) for _ in range(numg)]
def genscorelist(numscore, numg, numswaps):
    """Derive up to numscore score vectors by randomly swapping entries of a
    handful of base vectors; duplicates are removed, so the returned count
    may be smaller than requested. Returns (count, list of tuples)."""
    fewscore = 5
    bases = [randomscore(numg) for _ in range(fewscore)]
    scorelist = []
    for _ in range(numscore):
        temp = copy.copy(bases[random.randint(0, fewscore - 1)])
        for _ in range(numswaps):
            p = random.randint(0, numg - 1)
            q = random.randint(0, numg - 1)
            temp[p], temp[q] = temp[q], temp[p]
        scorelist.append(tuple(temp))
    scorelist = list(set(scorelist))
    return len(scorelist), scorelist
def genpreflistv2(numpref, numg, numswaps):
    """Derive up to numpref preference orders over games 1..numg: one random
    base permutation, each variant obtained by numswaps random swaps, then
    deduplicated. Returns (count, list of tuples)."""
    fixpref = np.random.permutation([g for g in range(1, numg + 1)])
    preflist = []
    for _ in range(numpref):
        temp = copy.copy(fixpref)
        for _ in range(numswaps):
            p = random.randint(0, numg - 1)
            q = random.randint(0, numg - 1)
            temp[p], temp[q] = temp[q], temp[p]
        preflist.append(tuple(temp))
    preflist = list(set(preflist))
    return len(preflist), preflist
# Swap the elements at pos1 and pos2 in place and return the mutated sequence.
# NOTE(review): the parameter named `list` shadows the builtin `list`;
# harmless as written, but rename it if this helper ever grows.
def swapPositions(list, pos1, pos2):
    list[pos1], list[pos2] = list[pos2], list[pos1]
    return list
# Uniform distribution
def genprefcdf(numpref):
    """CDF of U[0, numpref] evaluated at 0..numpref, i.e. the numpy array
    [0, 1/numpref, 2/numpref, ..., 1]."""
    grid = list(range(numpref + 1))
    return stats.uniform.cdf(grid, loc=0, scale=numpref)
def genpref(famdict, plist, prefcdf):
    """Draw a preference vector (slot 2) for every family according to
    prefcdf, in place; returns the same dict."""
    for f in range(1, len(famdict) + 1):
        famdict[f][2] = plist[randompref(prefcdf)]
    return famdict
# gen a random pref based in preference cdf
def randompref(pcdf):
    """Sample a bucket index 0..len(pcdf)-2 according to the CDF *pcdf*.

    Draws u ~ U[0, 1] and returns the ind with pcdf[ind] <= u < pcdf[ind+1].
    Fix: random.uniform(0, 1) can return exactly 1.0, in which case no
    half-open interval matched and the original fell through returning None
    (crashing the caller's plist[index] lookup); we now fall back to the
    last bucket.
    """
    rnum = random.uniform(0, 1)
    for ind in range(len(pcdf) - 1):
        if pcdf[ind] <= rnum < pcdf[ind + 1]:
            return ind
    return len(pcdf) - 2
def randomsenior(anum):
    # Count "seniors" among anum members: a member counts when the draw is 1.
    # NOTE(review): randint(2, 4) can never return 1, so this ALWAYS returns 0
    # (the v2 generator draws randint(1, 4), i.e. p = 1/4 per member). Confirm
    # whether "no seniors" is intentional for this dataset before relying on
    # senior counts downstream.
    count = 0
    for i in range(anum):
        t = random.randint(2,4)
        if t == 1:
            count += 1
    return count
def distmul(dist, num):
    """Scale the probability vector *dist* to integer counts summing to *num*.

    Each entry is rounded to the nearest integer; any rounding surplus or
    deficit is absorbed by the first entry.

    Bug fix: the original handled s > num correctly (ans[0] += num - s) but,
    when s < num, it computed ans[0] = ans[0] - num + s, i.e. SUBTRACTED the
    deficit, so the counts summed to 2*s - num instead of num. Both cases
    are the single correction ans[0] += num - s.
    """
    ans = [round(num * p) for p in dist]
    s = sum(ans)
    if s != num:
        ans[0] += num - s
    return ans
#filename = 'data-swap-1.xlsx'
#numg = 3
#numf = 3
#fdist = [0.5, 0.5]
#numpref = 1000
#minsize = 2
#numswaps = 1
################# Testing
# Generation parameters for the 400-family cardinal-preferences instance.
filename = 'data-cardinal-ID-100.xlsx'
numg = 6
numf = 400
fdist = [0.15, 0.35, 0.3, 0.15, 0.05]
numscore = 20
minsize = 1
numswaps = 2
seatoffset = 6
maxbsize = 2
# This rebinding makes the first `filename` assignment above dead code.
filename = 'data-cardinal-ID-' + str(numf) + '.xlsx'
gendata(filename, numg, numf, fdist, numscore, minsize, numswaps, seatoffset, maxbsize)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,046 | nhhai196/Ticket-Reassignment | refs/heads/main | /ticket_reassign_v2.py | import sys
import datastructure
import csv
import numpy as np
import cardinalpivot as cp
import ordinalpivot as op
import scarfpivot as sp
import time
import numpy as np
import iterativerounding as ir
import random
import statistics as stat
#argv[1]: xlsx file name in the following format
#row 1: column labels: family preference / family size / num seniors / group size
#row f+1: info of family f
#argv[2]: budget of each senior, each non-senior has budget 1
#argv[3]: seat offset for alpha
#argv[4]: game capacity
#argv[5]: upper bound for bundle size
#argv[6]: epsilon for price
#ex: python ticket_reassign_v2.py data3-swap.xlsx 1.2 4 150 3 0.1 // with duplicates, 3X00 rounds?
#ex: python ticket_reassign_v2.py data3-swap.xlsx 1.2 2 150 3 0.05 // with duplicates, 5500 rounds
#ex: python ticket_reassign_v2.py data4-swap.xlsx 1.2 3 2000 3 0.1 // with duplicates, 8300 rounds
numF, numG, bundle2rank, bundlelist, fb2col, budget, numcol, A, b, plist, famsize = datastructure.init_v2(sys.argv[1],float(sys.argv[2]),int(float(sys.argv[3])),int(float(sys.argv[4])),int(float(sys.argv[5])))
#numF: number of family
#numG: number of games
#bundle2rank: bundle maps to the rank, each family has one dictionary
#bundlelist: preference list over bundles, each family has one list
#fb2col: map (family,bundle) to the column index of matrix A
#budget: budget[f-1] is the budget of family f
#numcol: number of columns for matrix A
#A: the Scarf matrix of size (numF+numG) x numcol, columns are in alphabetic order
#b: the capacity vector on RHS
#plist: plist[f][j] denotes family f's j-th most favorite game
#famsize: famsize[f] denotes the size of family f
print("++++++++++++++++++++++++++++++++++++++ Data +++++++++++++++++++++++++++++++++++++")
print('numF = ' + str(numF))
print('numG = ' + str(numG))
print('bundle2rank:\n' + str(bundle2rank))
#print('bundlelist:\n' + str(bundlelist))
#print('fb2col:\n' + str(fb2col))
print('numcol = ' + str(numcol))
numrow = numF + numG
print('numrow = ' + str(numrow))
print('budget: ' + str(budget))
print('matrix A:\n' + str(A))
print('vector b:\n' + str(b))
# Seed the contract list with one dummy contract per matrix row: negative IDs
# -(1..numF) for the family rows and -(numF+1..numF+numG) for the game rows,
# each with an empty bundle and an empty price list. NOTE(review): presumably
# the initial slack contracts for the Scarf pivoting below - confirm against
# scarfpivot's expectations.
clist = [] #contract list
for i in range(numF):
    clist.append((-1*(i+1),(),[]))
for i in range(numG):
    clist.append((-1*(i+1+numF),(),[]))
#print("clist = ")
#print(clist)
# Test cardinal pivot
#c = (1, bundlelist[1][1], [0,0])
#fbc = (c[0], c[1])
#print(fbc)
#b = [random.randint(1,3) for i in range(numF)]
#b = [1 for i in range(numF)]
#b = b + capacity
#print("b =" + str(b))
#newCB, oldc, newA, newb = cp.cardinalpivot(clist, c, A, b, fb2col)
#print(newCB)
#print(oldc)
#print(newA)
#print(newb)
#a = np.zeros([5 * 10**3, 10**6])
# Test ordinal pivot
print("Init ordinal basis:")
c, initOB = op.initordinalbasis(A, numF, numG, fb2col)
#print(initOB)
rmins = op.getallrowmins(initOB, numF, bundle2rank)
#for i in range(len(rmins)):
# print(rmins[i])
ordlist = datastructure.genordlist(A, numF, bundle2rank, bundlelist, fb2col)
#print("matrix A:")
#print(A)
#print("ordlist:")
#print(ordlist)
# ordlist in the form (f,b)
col2fb = {value : key for (key, value) in fb2col.items()}
#print(col2fb)
newordlist = []
for l in ordlist:
temp = list(map(lambda x: col2fb[x], l))
newordlist.append(temp)
#print("new")
#print(newordlist)
#clist, newc, newrmins = op.ordinalpivot(initOB, oldc, rmins, numF, numG, bundle2rank, newordlist, fb2col)
#print(clist)
#print(datastructure.weaklyprefer((1,(2,0),[0,0]), (1,(2,0),[0.5,0]), 1, numF, bundle2rank))
start = time.time()
eps = float(sys.argv[6])
x = sp.scarfpivot(eps, clist, initOB, A, b, c, rmins, numF, numG, bundle2rank, newordlist, fb2col, budget, bundlelist)
end = time.time()
print('Scarf elapsed time =' + str(end - start))
## Iterative Rounding
# remove the slack variable
start = time.time()
A = A[:, numrow:]
#print("A= " + str(A))
#print("b = "+ str(b))
#realb = ir.mul(A, x)
#print(realb)
tol = 10**(-6)
xBar = ir.iterativerounding(A, x, b, tol, numF, numG)
print("xBar = " + str(xBar))
end = time.time()
print("Rounding elapsed time = " + str(end - start))
#print(len(xBar))
#print((plist))
## Statistics
filename = 'outputs-' + str(numF) + '-families-' + str(numG) + '-games.xlsx'
print(b)
stat.statistics(filename, A, xBar, b, numF, numG, fb2col, plist, famsize, bundle2rank)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,047 | nhhai196/Ticket-Reassignment | refs/heads/main | /testing.py | import correctness as cor
# Smoke checks for correctness.enumprice on tiny price vectors, eps = 0.5.
# NOTE(review): argument meanings are inferred from these call sites only
# (eps, price vector, index, numG?) - confirm against enumprice's definition.
print("test 1:")
print(cor.enumprice(0.5, [1], 1, 2))
print("test 2:")
print(cor.enumprice(0.5, [1,2], 2, 2))
print("test 3:")
print(cor.enumprice(0.5, [0,2], 2, 2))
print("test 4:")
print(cor.enumprice(0.5, [2,2], 2, 2))
60,048 | nhhai196/Ticket-Reassignment | refs/heads/main | /statistics.py | from openpyxl import *
import copy
from numpy import *
import datastructure as ds
import numpy as np
#
def statistics(filename, A, x, b, numf, numg, fb2col, FP, famsize, bundle2rank):
print("+++++++++++++++++++++++++++++++++++ Statistics +++++++++++++++++++++++++++++++++++++")
print(FP)
nmf, nmp, bysize, fbyng, pbyng, fbypref, pbypref = stathelper(x, numf, numg, fb2col, FP, famsize)
print("nmf = " + str(nmf))
print("nmp = " + str(nmp))
print(bysize)
print(fbyng)
print(pbyng)
print(fbypref)
print(pbypref)
avgnmf = mean(nmf)
avgnmp = mean(nmp)
avgbysize = [0] * 5
# bundle rank
brank, bwrank, avg, avgwrank, count, countbyp, match = matchbundlerank(x, numf, numg, fb2col, bundle2rank, famsize)
print('brank =' + str(brank))
print('count =' + str(count))
print(match)
rbysize = rankbysize(brank, famsize)
# Violations
V, P = violations(A,x,b)
maxv = max(P)
print('V = ' + str(V))
print('P = ' + str(P))
print('max violation =' + str(maxv))
#
for i in range(5):
avgbysize[i] = mean(bysize[i])
# Envy
envy, numenvy, wenvy = countenvy(brank, famsize)
print('envy =' + str(envy))
print(numenvy)
print('wenvy = ' + str(wenvy))
aenvybysize, anumenvybysize, awenvybysize = countenvybysize(envy, numenvy, wenvy, famsize)
avgenvy = sum(envy)/len(envy)
avgnumenvy = sum(numenvy)/len(numenvy)
avgwenvy = sum(wenvy)/len(wenvy)
print('aenvybysize = ' + str(aenvybysize))
print('Average envy = ' + str(avgenvy))
print('Average number of envy families = ' + str(avgnumenvy))
print('Average weighted envy = ' + str(avgwenvy))
## Save to a file
wb=load_workbook(filename)
# save the matching
ws=wb["Sheet1"]
for i in range(numf):
for j in range(numg):
wcell = ws.cell(3+i, 39+j)
wcell.value = round(match[i,j])
ws=wb["Sheet3"]
for i in range(numg):
wcell = ws.cell(3, 35+i)
wcell.value = nmf[i]
for i in range(numg):
wcell = ws.cell(4, 35+i)
wcell.value = nmp[i]
for i in range(5):
for j in range(numg):
wcell = ws.cell(6+i, 35+j)
wcell.value = bysize[i][j]
wcell = ws.cell(13, 10)
wcell.value = 'Scarf'
wcell = ws.cell(14, 10)
wcell.value = avgnmf
wcell = ws.cell(15, 10)
wcell.value = avgnmp
for i in range(5):
wcell = ws.cell(16+i, 10)
wcell.value = avgbysize[i]
for i in range(numg):
wcell = ws.cell(23+i, 10)
wcell.value = fbyng[i]
for i in range(numg):
wcell = ws.cell(32+i, 10)
wcell.value = pbyng[i]
ws=wb["Sheet4"]
for i in range(numg):
wcell = ws.cell(19+i, 10)
wcell.value = fbypref[i]
for i in range(numg):
wcell = ws.cell(28+i, 10)
wcell.value = pbypref[i]
# Save bundlerank
ws=wb["Sheet5"]
numb = len(bundle2rank[0])
print('numb = ' + str(numb))
for i in range(numb+1):
wcell = ws.cell(2+i, 7)
wcell.value = count[i]
for i in range(numb+1):
wcell = ws.cell(2+i, 16)
wcell.value = countbyp[i]
ws = wb["Sheet6"]
for i in range(numf):
wcell = ws.cell(2+i, 7)
wcell.value = brank[i]
ws=wb["Sheet7"]
wcell = ws.cell(2, 7)
wcell.value = avg
wcell = ws.cell(5, 7)
wcell.value = round(std(brank),2)
wcell = ws.cell(8, 7)
wcell.value = avgwrank
wcell = ws.cell(11, 7)
wcell.value = round(std(bwrank),2)
for i in range(5):
wcell = ws.cell(14+i, 7)
wcell.value = round(rbysize[i],2)
# Save envy
ws=wb["Sheet8"]
wcell = ws.cell(2, 7)
wcell.value = avgenvy
wcell = ws.cell(5, 7)
wcell.value = avgnumenvy
wcell = ws.cell(8, 7)
wcell.value = avgwenvy
for i in range(5):
wcell = ws.cell(11+i, 7)
wcell.value = round(aenvybysize[i],2)
for i in range(5):
wcell = ws.cell(11+i, 15)
wcell.value = round(anumenvybysize[i],2)
for i in range(5):
wcell = ws.cell(11+i, 23)
wcell.value = round(awenvybysize[i],2)
wb.save(filename)
return
# Helper function
def stathelper(x, numf, numg, fb2col, FP, famsize):
    """Aggregate the assignment vector x into per-game / per-rank tallies.

    Only columns with x[i] >= 1 contribute. Returns:
      nmf[g]   - families matched to game g
      nmp[g]   - people matched to game g
      bysize   - 5 x numg matrix of families per (family size, game)
      fbyng[k] - families whose bundle covers k+1 games
      pbyng[k] - people in bundles covering k+1 games
      fbypref[r] / pbypref[r] - families / people matched at preference slot r
    NOTE(review): FP[f][j] is used here as a 1-based rank that family f gives
    game j - confirm against datastructure.init_v2's plist layout.
    """
    # Initialize accumulators
    nmf = [0] * numg
    nmp = [0] * numg
    fbyng = [0] * numg
    pbyng = [0] * numg
    fbypref = [0] * numg
    pbypref = [0] * numg
    bysize = zeros((5,numg), dtype=int32)
    # Compute
    for i in range(len(x)):
        if x[i] >= 1:
            (f, b) = ind2fb(i, fb2col, numf + numg)
            size = getbundlesize(b)
            fbyng[size-1] += x[i]
            pbyng[size-1] += x[i] * famsize[f]
            for j in range(numg):
                if b[j] >= 1:
                    nmf[j] += x[i]
                    nmp[j] += x[i] * famsize[f]
                    bysize[famsize[f]-1][j] += x[i]
                    fbypref[FP[f][j] - 1] += x[i]
                    pbypref[FP[f][j] - 1] += x[i] * famsize[f]
    return nmf, nmp, bysize, fbyng, pbyng, fbypref, pbypref
# Compute violations
def violations(A, x, b):
    """Per-constraint overshoot of A.x over b: V is the absolute excess,
    P the excess as a percentage of b (1 dp); both 0 where satisfied.
    Also prints the attained left-hand sides (kept from the original)."""
    realb = mul(A, x)
    V = [0] * len(b)
    P = [0] * len(b)
    for i, attained in enumerate(realb):
        excess = attained - b[i]
        if excess > 0:
            V[i] = excess
            P[i] = round(excess / b[i] * 100, 1)
    print(realb)
    return V, P
def ind2fb(ind, fb2col, numrows):
    """Reverse-map a solution-vector index to its (family, bundle) key:
    fb2col values are offset by the number of matrix rows, so we search for
    the entry equal to ind + numrows. Returns None when nothing matches."""
    wanted = ind + numrows
    for fb, col in fb2col.items():
        if col == wanted:
            return fb
def getbundlesize(bundle):
    """Number of games in a bundle, i.e. entries with a positive seat count."""
    return sum(1 for seats in bundle if seats > 0)
# Arithmetic mean rounded to the nearest integer via built-in round
# (banker's rounding). Raises ZeroDivisionError on an empty list.
def mean(x):
    total = sum(x)
    return round(total / len(x))
# Matrix-vector product: apply the shared dot-product helper to each row.
def mul(A, x):
    products = []
    for row in A:
        products.append(ds.dotproduct(row, x))
    return products
# Get bundle rank from the match
def matchbundlerank(x, numf, numg, fb2col, bundlerank, famsize):
    """Translate the (rounded) assignment vector x into per-family bundle ranks.

    Returns:
      brank[f]  - 1-based rank of the bundle family f received (numb+1 = unmatched)
      bwrank    - brank repeated once per person (weighted by family size)
      avg       - average rank over matched families, 1 decimal place
      avgbysize - person-weighted average rank, rounded to an integer
      count[r]  - families receiving their rank-(r+1) bundle (slot numb = unmatched)
      countbyp  - the same tally weighted by family size
      match     - numf x numg matrix of assigned seat counts

    Raises ZeroDivisionError when no column is selected (num stays 0).
    """
    numb = len(bundlerank[0])
    brank = [numb+1] * numf
    bwrank = [numb+1] * sum(famsize)
    count = [0] * (numb+1)
    countbyp = [0] *(numb+1)
    s = 0
    sbysize = 0
    num = 0
    match = np.zeros((numf, numg))
    ind = 0
    for i in range(len(x)):
        # any x[i] within 1e-6 of 1 (or above) counts as a selected column
        if x[i] >= 1- 10**(-6):
            print(x[i])
            (f, b) = ind2fb(i, fb2col, numf + numg)
            for key, value in bundlerank[f].items():
                if key == b:
                    brank[f] = value + 1 # offset by 1
                    match[f, :] = list(b)
                    # one bwrank slot per person in the family
                    for j in range(famsize[f]):
                        bwrank[ind] = brank[f]
                        ind += 1
                    # count the number of families/people get i-th bundle
                    count[value] += x[i]
                    countbyp[value] += x[i] * famsize[f]
                    s += (value + 1) * x[i]
                    sbysize += (value + 1) * x[i] * famsize[f]
                    num += x[i]
    # count number of unmatched families
    # count[numb] = numf - sum(count[0:numb])
    # average rank of matched families
    avg = round(s/num, 1)
    avgbysize = round(sbysize/sum(famsize))
    return brank, bwrank, avg, avgbysize, count, countbyp, match
## Count envy statistics
def countenvy(brank, S):
    """Per-family envy statistics derived from bundle ranks.

    Family f envies h when both have the same size (S[f] == S[h]) and h
    received a strictly better (smaller) rank. Returns:
      envy[f]    - largest rank gap to any envied family
      numenvy[f] - number of families f envies
      wenvy[f]   - total rank gap divided by the number of OTHER same-size
                   families (0 when f is the only family of its size)

    Fix: renamed the local accumulator that shadowed the builtin `sum`
    (an idiom hazard - any later use of sum() inside the loop would have
    broken). Behavior is unchanged.
    """
    nF = len(S)
    envy = [0] * nF
    numenvy = [0] * nF
    wenvy = [0] * nF
    for f in range(nF):
        gap_total = 0
        same_size = 0   # includes f itself
        maxdiff = 0
        for h in range(nF):
            if (f != h) and (S[f] == S[h]) and (brank[f] > brank[h]):  # envy
                currdiff = brank[f] - brank[h]
                gap_total += currdiff
                numenvy[f] += 1
                if maxdiff < currdiff:
                    maxdiff = currdiff
            if S[f] == S[h]:
                same_size += 1
        envy[f] = maxdiff
        if same_size > 1:
            wenvy[f] = gap_total / (same_size - 1)
    return envy, numenvy, wenvy
def rankbysize(brank, S):
    """Average bundle rank for each family size 1..5.

    NOTE(review): the slicing relies on families in brank being grouped by
    size in the same order as S counts them - confirm with the generator.
    """
    numfbysize = [0] * 5
    for size in S:
        numfbysize[size - 1] += 1
    result = [0] * 5
    start = 0
    for i in range(5):
        end = start + numfbysize[i]
        result[i] = average(brank[start:end])
        start = end
    return result
def countenvybysizehelper(envy, numfbysize):
    # Average the per-family statistic over each consecutive size-group slice.
    # NOTE(review): relies on `envy` being grouped by family size.
    out = [0] * 5
    lo = 0
    for i in range(5):
        hi = lo + numfbysize[i]
        out[i] = average(envy[lo:hi])
        lo = hi
    return out
def countenvybysize(envy, numenvy, wenvy, S):
    """Average each of the three envy statistics within each family-size class."""
    counts = [0] * 5
    for size in S:
        counts[size - 1] += 1
    aenvybysize = countenvybysizehelper(envy, counts)
    anumenvybysize = countenvybysizehelper(numenvy, counts)
    awenvybysize = countenvybysizehelper(wenvy, counts)
    return aenvybysize, anumenvybysize, awenvybysize
# compute average (mean) of a list of numbers
def average(alist):
    """Mean of alist; 0 for an empty list."""
    if not alist:
        return 0
    return sum(alist) / len(alist)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,049 | nhhai196/Ticket-Reassignment | refs/heads/main | /gendata_v2.py | import random
from scipy import stats
import numpy as np
import copy
import xlsxwriter
def gendata(filename, numg, numf, fdist, numscore, minsize, numswaps, seatoffset):
numscore, scorelist = genscorelist(numscore, numg, numswaps)
prefcdf = genprefcdf(numscore)
print(scorelist)
print(prefcdf)
famdict = genfam(numf, fdist, minsize, scorelist, prefcdf)
group = groupfamily(famdict)
print(group)
workbook = xlsxwriter.Workbook(filename)
wb = workbook.add_worksheet()
wb.write(0, 0, 'Family Preference')
wb.write(0, numg + 1, 'Family Size')
wb.write(0, numg+ 3, 'Num Seniors')
row = 1
for value in famdict.values():
print(value)
temp = value[2]
print(temp)
for col in range(numg):
#print(temp[col])
wb.write(row, col, temp[col])
wb.write(row, numg + 1, value[0])
wb.write(row, numg + 3, value[1])
row += 1
wb = workbook.add_worksheet()
wb.write(0, 0, 'Family Preference')
wb.write(0, numg + 1, 'Family Size')
wb.write(0, numg + 3, 'Num Seniors')
wb.write(0, numg + 5, 'Group Size')
row = 1
for key, value in group.items():
wb.write_row(row, 0, key[2])
wb.write(row, numg + 1, key[0])
wb.write(row, numg + 3, key[1])
wb.write(row, numg + 5, len(value))
row += 1
wb = workbook.add_worksheet()
wb.write(0, 0, 'Capacity')
wb.write(0, 1, 'Alpha')
wb.write(1, 0, round(numf * 2.5)) # Hardcode here
for i in range(numg):
wb.write(1, i+1, i+1+seatoffset)
workbook.close()
#print(np.random.permutation([i for i in range(1,6)]))
return famdict, group
def groupfamily(famdict):
    """Map each distinct family record (size, seniors, prefs) to the list of
    family IDs sharing it, in first-seen order."""
    groups = {}
    for famID, rec in famdict.items():
        key = tuple(rec)
        if key in groups:
            groups[key].append(famID)
        else:
            groups[key] = [famID]
    return groups
def genfam(numf, dist, minsize, preflist, prefcdf):
    """Create numf random families (v2: returns the famdict only, no ID map).

    Sizes follow *dist* (offset by minsize); senior counts and preference
    vectors are then drawn at random.
    """
    famdict = {}
    counts = distmul(dist, numf)
    f = 0
    for i, c in enumerate(counts):
        for _ in range(c):
            f += 1
            famdict[f] = [i + minsize, (), ()]
    famdict = gensenior(famdict)
    famdict = genpref(famdict, preflist, prefcdf)
    return famdict
def gensenior(famdict):
    """Draw slot 1 (senior count) for every family from its size, in place."""
    for f in range(1, len(famdict) + 1):
        famdict[f][1] = randomsenior(famdict[f][0])
    return famdict
# random score over games
def randomscore(numg):
    """numg independent scores, each uniform on [0.2, 1] rounded to 4 dp."""
    return [round(random.uniform(0.2, 1), 4) for _ in range(numg)]
def genscorelist(numscore, numg, numswaps):
    """Derive up to numscore score vectors by random swaps over a few base
    vectors (v2 uses 3 bases); duplicates removed. Returns (count, tuples)."""
    fewscore = 3
    bases = [randomscore(numg) for _ in range(fewscore)]
    scorelist = []
    for _ in range(numscore):
        temp = copy.copy(bases[random.randint(0, fewscore - 1)])
        for _ in range(numswaps):
            p = random.randint(0, numg - 1)
            q = random.randint(0, numg - 1)
            temp[p], temp[q] = temp[q], temp[p]
        scorelist.append(tuple(temp))
    scorelist = list(set(scorelist))
    return len(scorelist), scorelist
def genpreflistv2(numpref, numg, numswaps):
    """Derive up to numpref preference orders over games 1..numg from one
    random base permutation via random swaps; deduplicated."""
    fixpref = np.random.permutation([g for g in range(1, numg + 1)])
    preflist = []
    for _ in range(numpref):
        temp = copy.copy(fixpref)
        for _ in range(numswaps):
            p = random.randint(0, numg - 1)
            q = random.randint(0, numg - 1)
            temp[p], temp[q] = temp[q], temp[p]
        preflist.append(tuple(temp))
    preflist = list(set(preflist))
    return len(preflist), preflist
# Swap the elements at pos1 and pos2 in place and return the mutated sequence.
# NOTE(review): the parameter named `list` shadows the builtin `list`;
# harmless as written, but rename it if this helper ever grows.
def swapPositions(list, pos1, pos2):
    list[pos1], list[pos2] = list[pos2], list[pos1]
    return list
# Uniform distribution
def genprefcdf(numpref):
    """CDF of U[0, numpref] at integer points 0..numpref:
    the numpy array [0, 1/numpref, ..., 1]."""
    points = list(range(numpref + 1))
    return stats.uniform.cdf(points, loc=0, scale=numpref)
def genpref(famdict, plist, prefcdf):
    """Draw a preference vector (slot 2) for every family per prefcdf, in place."""
    for f in range(1, len(famdict) + 1):
        famdict[f][2] = plist[randompref(prefcdf)]
    return famdict
# gen a random pref based in preference cdf
def randompref(pcdf):
    """Sample a bucket index 0..len(pcdf)-2 according to the CDF *pcdf*.

    Draws u ~ U[0, 1] and returns the ind with pcdf[ind] <= u < pcdf[ind+1].
    Fix: random.uniform(0, 1) can return exactly 1.0, where the original fell
    through and implicitly returned None (crashing plist[index] in genpref);
    we now fall back to the last bucket.
    """
    rnum = random.uniform(0, 1)
    for ind in range(len(pcdf) - 1):
        if pcdf[ind] <= rnum < pcdf[ind + 1]:
            return ind
    return len(pcdf) - 2
def randomsenior(anum):
    """Number of seniors in a family of anum members: each member is
    independently senior with probability 1/4 (draw of 1 on a 4-sided die)."""
    count = 0
    for _ in range(anum):
        if random.randint(1, 4) == 1:
            count += 1
    return count
def distmul(dist, num):
    """Scale the probability vector *dist* to integer counts summing to *num*.

    Entries are rounded to the nearest integer; any rounding surplus or
    deficit is absorbed by the first entry.

    Bug fix: when the rounded sum s fell short of num, the original computed
    ans[0] = ans[0] - num + s, SUBTRACTING the deficit (total 2*s - num)
    instead of adding it. Both directions collapse to ans[0] += num - s.
    """
    ans = [round(num * p) for p in dist]
    s = sum(ans)
    if s != num:
        ans[0] += num - s
    return ans
#filename = 'data-swap-1.xlsx'
#numg = 3
#numf = 3
#fdist = [0.5, 0.5]
#numpref = 1000
#minsize = 2
#numswaps = 1
################# Testing
# Generation parameters for the 200-family cardinal instance (v2 format,
# no bundle-size cap / group-ID sheet).
filename = 'data-cardinal1.xlsx'
numg = 6
numf = 200
fdist = [0.15, 0.35, 0.3, 0.15, 0.05]
numscore = 5
minsize = 1
numswaps = 1
seatoffset = 7
gendata(filename, numg, numf, fdist, numscore, minsize, numswaps, seatoffset)
| {"/ticket_reassign.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py"], "/scarfpivot.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/correctness.py"], "/ticket_reassign_v3.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/ordinalpivot.py": ["/datastructure.py", "/correctness.py"], "/cardinalpivot.py": ["/datastructure.py"], "/correctness.py": ["/ordinalpivot.py", "/datastructure.py"], "/iterativerounding.py": ["/datastructure.py"], "/ticket_reassign_v2.py": ["/datastructure.py", "/cardinalpivot.py", "/ordinalpivot.py", "/scarfpivot.py", "/iterativerounding.py", "/statistics.py"], "/testing.py": ["/correctness.py"], "/statistics.py": ["/datastructure.py"]} |
60,050 | soudabemhashemi/Yektanet | refs/heads/master | /advertiser_mangement/admin.py | from django.contrib import admin
from .models import Ad, Advertiser, Click, View, summaryShit
admin.site.register(Advertiser)
@admin.register(Ad)
class AdAdmin(admin.ModelAdmin):
    # Ad list view: show title/link/owner, filter on approval status,
    # and allow searching by title.
    list_display = ["title", "link", "advertiser"]
    list_filter=["approve"]
    search_fields = ["title"]
@admin.register(Click)
class ClickAdmin(admin.ModelAdmin):
    # Click log entries: which ad, when, and from which IP.
    list_display = ["adID", "date", "ip"]
@admin.register(View)
class ViewAdmin(admin.ModelAdmin):
    # View (impression) log entries.
    list_display = ['viewID', 'date', 'ip']
@admin.register(summaryShit)
class summaryShitAdmin(admin.ModelAdmin):
    # Daily aggregated counts per ad; view_or_click distinguishes the metric
    # (see migration 0021: 0 = Click, 1 = View).
    list_display = ['adID', 'date', 'count', 'view_or_click']
| {"/advertiser_mangement/admin.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/migrations/0019_alter_ad_imgurl.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/serializers.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tasks.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tests.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/views.py": ["/advertiser_mangement/models.py", "/advertiser_mangement/serializers.py"]} |
60,051 | soudabemhashemi/Yektanet | refs/heads/master | /advertiser_mangement/migrations/0021_summaryshit_view_or_click.py | # Generated by Django 4.0.dev20210715050039 on 2021-07-25 02:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds summaryShit.view_or_click, an integer flag
    # distinguishing click aggregates (0) from view aggregates (1, default).
    dependencies = [
        ('advertiser_mangement', '0020_summaryshit'),
    ]
    operations = [
        migrations.AddField(
            model_name='summaryshit',
            name='view_or_click',
            field=models.IntegerField(choices=[(0, 'Click'), (1, 'View')], default=1),
        ),
    ]
| {"/advertiser_mangement/admin.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/migrations/0019_alter_ad_imgurl.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/serializers.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tasks.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tests.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/views.py": ["/advertiser_mangement/models.py", "/advertiser_mangement/serializers.py"]} |
60,052 | soudabemhashemi/Yektanet | refs/heads/master | /advertiser_mangement/migrations/0019_alter_ad_imgurl.py | # Generated by Django 4.0.dev20210715050039 on 2021-07-21 11:21
import advertiser_mangement.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: switches Ad.imgUrl to an ImageField whose upload path
    # is computed by advertiser_mangement.models.upload_to.
    dependencies = [
        ('advertiser_mangement', '0018_alter_ad_imgurl_alter_click_date_alter_view_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ad',
            name='imgUrl',
            field=models.ImageField(upload_to=advertiser_mangement.models.upload_to, verbose_name='Image'),
        ),
    ]
| {"/advertiser_mangement/admin.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/migrations/0019_alter_ad_imgurl.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/serializers.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tasks.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tests.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/views.py": ["/advertiser_mangement/models.py", "/advertiser_mangement/serializers.py"]} |
60,053 | soudabemhashemi/Yektanet | refs/heads/master | /advertiser_mangement/urls.py | from django.urls import path
from . import views
app_name = 'advertiser_mangement'
# Ad-creation endpoints for the advertiser_mangement app; the commented-out
# routes are retired views kept for reference.
urlpatterns = [
    # path('', views.AdList.as_view(), name='home'),
    path('createAd/', views.createAd.as_view(), name='create_new'),
    # NOTE(review): the route name 'createSdvertiser' looks like a typo for
    # 'createAdvertiser'; templates may reverse() it, so it is left unchanged.
    path('createAdvertiser/', views.createAdvertiser, name='createSdvertiser'),
    # path('create_new/create_new/', views.createAd.as_view(), name='create_new'),
    # path('countClicks/<int:pk>/', views.countClicks.as_view(), name='countClicks'),
    # path('report/', views.report, name='report')
]
| {"/advertiser_mangement/admin.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/migrations/0019_alter_ad_imgurl.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/serializers.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tasks.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/tests.py": ["/advertiser_mangement/models.py"], "/advertiser_mangement/views.py": ["/advertiser_mangement/models.py", "/advertiser_mangement/serializers.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.