Dorothydu commited on
Commit
eb6215d
·
verified ·
1 Parent(s): cf9729f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/pytz/zoneinfo/Europe/Zurich +0 -0
  2. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/rest_framework/authtoken/serializers.py +34 -0
  3. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/build_py.py +222 -0
  4. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/egg_info.py +482 -0
  5. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/install_egg_info.py +138 -0
  6. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/install_scripts.py +60 -0
  7. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/py31compat.py +52 -0
  8. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/sqlparse/filters/__init__.py +41 -0
  9. 1001-sala-de-aula-master/env/lib64/python3.5/site-packages/sqlparse/lexer.py +82 -0
  10. 1001-sala-de-aula-master/professor/tests.py +3 -0
  11. 1001-sala-de-aula-master/sala/migrations/0003_auto_20190909_1849.py +24 -0
  12. 1001-sala-de-aula-master/sala/models.py +29 -0
  13. 1001-sala-de-aula-master/sala/serializers.py +35 -0
  14. 1001-sala-de-aula-master/sala_de_aula/wsgi.py +16 -0
  15. 1076LAB-master/1225test/Debug/1225test.tlog/1225test.lastbuildstate +2 -0
  16. 1076LAB-master/Adafruit_Python_DHT/LICENSE +21 -0
  17. 1076LAB-master/Adafruit_Python_DHT/MANIFEST.in +2 -0
  18. 1076LAB-master/Adafruit_Python_DHT/setup.py +110 -0
  19. 1076LAB-master/Adafruit_Python_DHT/source/Raspberry_Pi_2/pi_2_dht_read.c +158 -0
  20. 1076LAB-master/README.md +3 -0
  21. 1076LAB-master/launch/turtlebot3_app.launch +8 -0
  22. 1076LAB-master/launch/turtlebot3_teleop_key.launch +8 -0
  23. 1076LAB-master/nodes/turtlebot3_app +217 -0
  24. 1076LAB-master/nodes/turtlebot3_teleop_key +197 -0
  25. 11777-Group11-master/attention_weight_vis/attention_0.npy +3 -0
  26. 11777-Group11-master/attention_weight_vis/attention_1.npy +3 -0
  27. 11777-Group11-master/attention_weight_vis/attention_10.npy +3 -0
  28. 11777-Group11-master/attention_weight_vis/attention_12.npy +3 -0
  29. 11777-Group11-master/attention_weight_vis/attention_14.npy +3 -0
  30. 11777-Group11-master/attention_weight_vis/attention_3.npy +3 -0
  31. 11777-Group11-master/attention_weight_vis/bertviz/neuron_view.js +959 -0
  32. 11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/file_utils.py +262 -0
  33. 11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_bert.py +1254 -0
  34. 11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_gpt2.py +747 -0
  35. 11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_roberta.py +349 -0
  36. 11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_transfo_xl_utilities.py +332 -0
  37. 11777-Group11-master/attention_weight_vis/bertviz/util.py +15 -0
  38. 11777-Group11-master/example_data/example.feature.lineidx +8 -0
  39. 11777-Group11-master/example_data/example.feature.tsv +0 -0
  40. 11777-Group11-master/example_data/example_caption.json +1 -0
  41. 11777-Group11-master/idea1OscarSetup.sh +135 -0
  42. 11777-Group11-master/oscar/.DS_Store +0 -0
  43. 11777-Group11-master/oscar/distillation/distiller.py +471 -0
  44. 11777-Group11-master/oscar/distillation/lm_seqs_dataset.py +166 -0
  45. 11777-Group11-master/oscar/distillation/scripts/binarized_data 3.py +96 -0
  46. 11777-Group11-master/oscar/distillation/scripts/binarized_data.py +96 -0
  47. 11777-Group11-master/oscar/distillation/scripts/extract.py +102 -0
  48. 11777-Group11-master/oscar/distillation/scripts/extract_distilbert 2.py +100 -0
  49. 11777-Group11-master/oscar/distillation/train.py +322 -0
  50. 11777-Group11-master/oscar/distillation/training_configs/distiloscar 3.json +20 -0
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/pytz/zoneinfo/Europe/Zurich ADDED
Binary file (1.91 kB). View file
 
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/rest_framework/authtoken/serializers.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from django.contrib.auth import authenticate
2
+ from django.utils.translation import gettext_lazy as _
3
+
4
+ from rest_framework import serializers
5
+
6
+
7
+ class AuthTokenSerializer(serializers.Serializer):
8
+ username = serializers.CharField(label=_("Username"))
9
+ password = serializers.CharField(
10
+ label=_("Password"),
11
+ style={'input_type': 'password'},
12
+ trim_whitespace=False
13
+ )
14
+
15
+ def validate(self, attrs):
16
+ username = attrs.get('username')
17
+ password = attrs.get('password')
18
+
19
+ if username and password:
20
+ user = authenticate(request=self.context.get('request'),
21
+ username=username, password=password)
22
+
23
+ # The authenticate call simply returns None for is_active=False
24
+ # users. (Assuming the default ModelBackend authentication
25
+ # backend.)
26
+ if not user:
27
+ msg = _('Unable to log in with provided credentials.')
28
+ raise serializers.ValidationError(msg, code='authorization')
29
+ else:
30
+ msg = _('Must include "username" and "password".')
31
+ raise serializers.ValidationError(msg, code='authorization')
32
+
33
+ attrs['user'] = user
34
+ return attrs
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/build_py.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from glob import glob
2
+ from distutils.util import convert_path
3
+ import distutils.command.build_py as orig
4
+ import os
5
+ import fnmatch
6
+ import textwrap
7
+ import io
8
+ import distutils.errors
9
+ import collections
10
+ import itertools
11
+
12
+ from setuptools.extern.six.moves import map
13
+
14
+ try:
15
+ from setuptools.lib2to3_ex import Mixin2to3
16
+ except ImportError:
17
+ class Mixin2to3:
18
+ def run_2to3(self, files, doctests=True):
19
+ "do nothing"
20
+
21
+
22
+ class build_py(orig.build_py, Mixin2to3):
23
+ """Enhanced 'build_py' command that includes data files with packages
24
+
25
+ The data files are specified via a 'package_data' argument to 'setup()'.
26
+ See 'setuptools.dist.Distribution' for more details.
27
+
28
+ Also, this version of the 'build_py' command allows you to specify both
29
+ 'py_modules' and 'packages' in the same setup operation.
30
+ """
31
+
32
+ def finalize_options(self):
33
+ orig.build_py.finalize_options(self)
34
+ self.package_data = self.distribution.package_data
35
+ self.exclude_package_data = (self.distribution.exclude_package_data or
36
+ {})
37
+ if 'data_files' in self.__dict__:
38
+ del self.__dict__['data_files']
39
+ self.__updated_files = []
40
+ self.__doctests_2to3 = []
41
+
42
+ def run(self):
43
+ """Build modules, packages, and copy data files to build directory"""
44
+ if not self.py_modules and not self.packages:
45
+ return
46
+
47
+ if self.py_modules:
48
+ self.build_modules()
49
+
50
+ if self.packages:
51
+ self.build_packages()
52
+ self.build_package_data()
53
+
54
+ self.run_2to3(self.__updated_files, False)
55
+ self.run_2to3(self.__updated_files, True)
56
+ self.run_2to3(self.__doctests_2to3, True)
57
+
58
+ # Only compile actual .py files, using our base class' idea of what our
59
+ # output files are.
60
+ self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
61
+
62
+ def __getattr__(self, attr):
63
+ "lazily compute data files"
64
+ if attr == 'data_files':
65
+ self.data_files = self._get_data_files()
66
+ return self.data_files
67
+ return orig.build_py.__getattr__(self, attr)
68
+
69
+ def build_module(self, module, module_file, package):
70
+ outfile, copied = orig.build_py.build_module(self, module, module_file,
71
+ package)
72
+ if copied:
73
+ self.__updated_files.append(outfile)
74
+ return outfile, copied
75
+
76
+ def _get_data_files(self):
77
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
78
+ self.analyze_manifest()
79
+ return list(map(self._get_pkg_data_files, self.packages or ()))
80
+
81
+ def _get_pkg_data_files(self, package):
82
+ # Locate package source directory
83
+ src_dir = self.get_package_dir(package)
84
+
85
+ # Compute package build directory
86
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
87
+
88
+ # Strip directory from globbed filenames
89
+ filenames = [
90
+ os.path.relpath(file, src_dir)
91
+ for file in self.find_data_files(package, src_dir)
92
+ ]
93
+ return package, src_dir, build_dir, filenames
94
+
95
+ def find_data_files(self, package, src_dir):
96
+ """Return filenames for package's data files in 'src_dir'"""
97
+ globs = (self.package_data.get('', [])
98
+ + self.package_data.get(package, []))
99
+ files = self.manifest_files.get(package, [])[:]
100
+ for pattern in globs:
101
+ # Each pattern has to be converted to a platform-specific path
102
+ files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
103
+ return self.exclude_data_files(package, src_dir, files)
104
+
105
+ def build_package_data(self):
106
+ """Copy data files into build directory"""
107
+ for package, src_dir, build_dir, filenames in self.data_files:
108
+ for filename in filenames:
109
+ target = os.path.join(build_dir, filename)
110
+ self.mkpath(os.path.dirname(target))
111
+ srcfile = os.path.join(src_dir, filename)
112
+ outf, copied = self.copy_file(srcfile, target)
113
+ srcfile = os.path.abspath(srcfile)
114
+ if (copied and
115
+ srcfile in self.distribution.convert_2to3_doctests):
116
+ self.__doctests_2to3.append(outf)
117
+
118
+ def analyze_manifest(self):
119
+ self.manifest_files = mf = {}
120
+ if not self.distribution.include_package_data:
121
+ return
122
+ src_dirs = {}
123
+ for package in self.packages or ():
124
+ # Locate package source directory
125
+ src_dirs[assert_relative(self.get_package_dir(package))] = package
126
+
127
+ self.run_command('egg_info')
128
+ ei_cmd = self.get_finalized_command('egg_info')
129
+ for path in ei_cmd.filelist.files:
130
+ d, f = os.path.split(assert_relative(path))
131
+ prev = None
132
+ oldf = f
133
+ while d and d != prev and d not in src_dirs:
134
+ prev = d
135
+ d, df = os.path.split(d)
136
+ f = os.path.join(df, f)
137
+ if d in src_dirs:
138
+ if path.endswith('.py') and f == oldf:
139
+ continue # it's a module, not data
140
+ mf.setdefault(src_dirs[d], []).append(path)
141
+
142
+ def get_data_files(self):
143
+ pass # Lazily compute data files in _get_data_files() function.
144
+
145
+ def check_package(self, package, package_dir):
146
+ """Check namespace packages' __init__ for declare_namespace"""
147
+ try:
148
+ return self.packages_checked[package]
149
+ except KeyError:
150
+ pass
151
+
152
+ init_py = orig.build_py.check_package(self, package, package_dir)
153
+ self.packages_checked[package] = init_py
154
+
155
+ if not init_py or not self.distribution.namespace_packages:
156
+ return init_py
157
+
158
+ for pkg in self.distribution.namespace_packages:
159
+ if pkg == package or pkg.startswith(package + '.'):
160
+ break
161
+ else:
162
+ return init_py
163
+
164
+ with io.open(init_py, 'rb') as f:
165
+ contents = f.read()
166
+ if b'declare_namespace' not in contents:
167
+ raise distutils.errors.DistutilsError(
168
+ "Namespace package problem: %s is a namespace package, but "
169
+ "its\n__init__.py does not call declare_namespace()! Please "
170
+ 'fix it.\n(See the setuptools manual under '
171
+ '"Namespace Packages" for details.)\n"' % (package,)
172
+ )
173
+ return init_py
174
+
175
+ def initialize_options(self):
176
+ self.packages_checked = {}
177
+ orig.build_py.initialize_options(self)
178
+
179
+ def get_package_dir(self, package):
180
+ res = orig.build_py.get_package_dir(self, package)
181
+ if self.distribution.src_root is not None:
182
+ return os.path.join(self.distribution.src_root, res)
183
+ return res
184
+
185
+ def exclude_data_files(self, package, src_dir, files):
186
+ """Filter filenames for package's data files in 'src_dir'"""
187
+ globs = (
188
+ self.exclude_package_data.get('', [])
189
+ + self.exclude_package_data.get(package, [])
190
+ )
191
+ bad = set(
192
+ item
193
+ for pattern in globs
194
+ for item in fnmatch.filter(
195
+ files,
196
+ os.path.join(src_dir, convert_path(pattern)),
197
+ )
198
+ )
199
+ seen = collections.defaultdict(itertools.count)
200
+ return [
201
+ fn
202
+ for fn in files
203
+ if fn not in bad
204
+ # ditch dupes
205
+ and not next(seen[fn])
206
+ ]
207
+
208
+
209
+ def assert_relative(path):
210
+ if not os.path.isabs(path):
211
+ return path
212
+ from distutils.errors import DistutilsSetupError
213
+
214
+ msg = textwrap.dedent("""
215
+ Error: setup script specifies an absolute path:
216
+
217
+ %s
218
+
219
+ setup() arguments must *always* be /-separated paths relative to the
220
+ setup.py directory, *never* absolute paths.
221
+ """).lstrip() % path
222
+ raise DistutilsSetupError(msg)
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/egg_info.py ADDED
@@ -0,0 +1,482 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """setuptools.command.egg_info
2
+
3
+ Create a distribution's .egg-info directory and contents"""
4
+
5
+ from distutils.filelist import FileList as _FileList
6
+ from distutils.util import convert_path
7
+ from distutils import log
8
+ import distutils.errors
9
+ import distutils.filelist
10
+ import os
11
+ import re
12
+ import sys
13
+ import io
14
+ import warnings
15
+ import time
16
+
17
+ from setuptools.extern import six
18
+ from setuptools.extern.six.moves import map
19
+
20
+ from setuptools import Command
21
+ from setuptools.command.sdist import sdist
22
+ from setuptools.command.sdist import walk_revctrl
23
+ from setuptools.command.setopt import edit_config
24
+ from setuptools.command import bdist_egg
25
+ from pkg_resources import (
26
+ parse_requirements, safe_name, parse_version,
27
+ safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
28
+ import setuptools.unicode_utils as unicode_utils
29
+
30
+ from pkg_resources.extern import packaging
31
+
32
+ try:
33
+ from setuptools_svn import svn_utils
34
+ except ImportError:
35
+ pass
36
+
37
+
38
+ class egg_info(Command):
39
+ description = "create a distribution's .egg-info directory"
40
+
41
+ user_options = [
42
+ ('egg-base=', 'e', "directory containing .egg-info directories"
43
+ " (default: top of the source tree)"),
44
+ ('tag-svn-revision', 'r',
45
+ "Add subversion revision ID to version number"),
46
+ ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
47
+ ('tag-build=', 'b', "Specify explicit tag to add to version number"),
48
+ ('no-svn-revision', 'R',
49
+ "Don't add subversion revision ID [default]"),
50
+ ('no-date', 'D', "Don't include date stamp [default]"),
51
+ ]
52
+
53
+ boolean_options = ['tag-date', 'tag-svn-revision']
54
+ negative_opt = {'no-svn-revision': 'tag-svn-revision',
55
+ 'no-date': 'tag-date'}
56
+
57
+ def initialize_options(self):
58
+ self.egg_name = None
59
+ self.egg_version = None
60
+ self.egg_base = None
61
+ self.egg_info = None
62
+ self.tag_build = None
63
+ self.tag_svn_revision = 0
64
+ self.tag_date = 0
65
+ self.broken_egg_info = False
66
+ self.vtags = None
67
+
68
+ def save_version_info(self, filename):
69
+ values = dict(
70
+ egg_info=dict(
71
+ tag_svn_revision=0,
72
+ tag_date=0,
73
+ tag_build=self.tags(),
74
+ )
75
+ )
76
+ edit_config(filename, values)
77
+
78
+ def finalize_options(self):
79
+ self.egg_name = safe_name(self.distribution.get_name())
80
+ self.vtags = self.tags()
81
+ self.egg_version = self.tagged_version()
82
+
83
+ parsed_version = parse_version(self.egg_version)
84
+
85
+ try:
86
+ is_version = isinstance(parsed_version, packaging.version.Version)
87
+ spec = (
88
+ "%s==%s" if is_version else "%s===%s"
89
+ )
90
+ list(
91
+ parse_requirements(spec % (self.egg_name, self.egg_version))
92
+ )
93
+ except ValueError:
94
+ raise distutils.errors.DistutilsOptionError(
95
+ "Invalid distribution name or version syntax: %s-%s" %
96
+ (self.egg_name, self.egg_version)
97
+ )
98
+
99
+ if self.egg_base is None:
100
+ dirs = self.distribution.package_dir
101
+ self.egg_base = (dirs or {}).get('', os.curdir)
102
+
103
+ self.ensure_dirname('egg_base')
104
+ self.egg_info = to_filename(self.egg_name) + '.egg-info'
105
+ if self.egg_base != os.curdir:
106
+ self.egg_info = os.path.join(self.egg_base, self.egg_info)
107
+ if '-' in self.egg_name:
108
+ self.check_broken_egg_info()
109
+
110
+ # Set package version for the benefit of dumber commands
111
+ # (e.g. sdist, bdist_wininst, etc.)
112
+ #
113
+ self.distribution.metadata.version = self.egg_version
114
+
115
+ # If we bootstrapped around the lack of a PKG-INFO, as might be the
116
+ # case in a fresh checkout, make sure that any special tags get added
117
+ # to the version info
118
+ #
119
+ pd = self.distribution._patched_dist
120
+ if pd is not None and pd.key == self.egg_name.lower():
121
+ pd._version = self.egg_version
122
+ pd._parsed_version = parse_version(self.egg_version)
123
+ self.distribution._patched_dist = None
124
+
125
+ def write_or_delete_file(self, what, filename, data, force=False):
126
+ """Write `data` to `filename` or delete if empty
127
+
128
+ If `data` is non-empty, this routine is the same as ``write_file()``.
129
+ If `data` is empty but not ``None``, this is the same as calling
130
+ ``delete_file(filename)`. If `data` is ``None``, then this is a no-op
131
+ unless `filename` exists, in which case a warning is issued about the
132
+ orphaned file (if `force` is false), or deleted (if `force` is true).
133
+ """
134
+ if data:
135
+ self.write_file(what, filename, data)
136
+ elif os.path.exists(filename):
137
+ if data is None and not force:
138
+ log.warn(
139
+ "%s not set in setup(), but %s exists", what, filename
140
+ )
141
+ return
142
+ else:
143
+ self.delete_file(filename)
144
+
145
+ def write_file(self, what, filename, data):
146
+ """Write `data` to `filename` (if not a dry run) after announcing it
147
+
148
+ `what` is used in a log message to identify what is being written
149
+ to the file.
150
+ """
151
+ log.info("writing %s to %s", what, filename)
152
+ if six.PY3:
153
+ data = data.encode("utf-8")
154
+ if not self.dry_run:
155
+ f = open(filename, 'wb')
156
+ f.write(data)
157
+ f.close()
158
+
159
+ def delete_file(self, filename):
160
+ """Delete `filename` (if not a dry run) after announcing it"""
161
+ log.info("deleting %s", filename)
162
+ if not self.dry_run:
163
+ os.unlink(filename)
164
+
165
+ def tagged_version(self):
166
+ version = self.distribution.get_version()
167
+ # egg_info may be called more than once for a distribution,
168
+ # in which case the version string already contains all tags.
169
+ if self.vtags and version.endswith(self.vtags):
170
+ return safe_version(version)
171
+ return safe_version(version + self.vtags)
172
+
173
+ def run(self):
174
+ self.mkpath(self.egg_info)
175
+ installer = self.distribution.fetch_build_egg
176
+ for ep in iter_entry_points('egg_info.writers'):
177
+ ep.require(installer=installer)
178
+ writer = ep.resolve()
179
+ writer(self, ep.name, os.path.join(self.egg_info, ep.name))
180
+
181
+ # Get rid of native_libs.txt if it was put there by older bdist_egg
182
+ nl = os.path.join(self.egg_info, "native_libs.txt")
183
+ if os.path.exists(nl):
184
+ self.delete_file(nl)
185
+
186
+ self.find_sources()
187
+
188
+ def tags(self):
189
+ version = ''
190
+ if self.tag_build:
191
+ version += self.tag_build
192
+ if self.tag_svn_revision:
193
+ version += '-r%s' % self.get_svn_revision()
194
+ if self.tag_date:
195
+ version += time.strftime("-%Y%m%d")
196
+ return version
197
+
198
+ @staticmethod
199
+ def get_svn_revision():
200
+ if 'svn_utils' not in globals():
201
+ return "0"
202
+ return str(svn_utils.SvnInfo.load(os.curdir).get_revision())
203
+
204
+ def find_sources(self):
205
+ """Generate SOURCES.txt manifest file"""
206
+ manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
207
+ mm = manifest_maker(self.distribution)
208
+ mm.manifest = manifest_filename
209
+ mm.run()
210
+ self.filelist = mm.filelist
211
+
212
+ def check_broken_egg_info(self):
213
+ bei = self.egg_name + '.egg-info'
214
+ if self.egg_base != os.curdir:
215
+ bei = os.path.join(self.egg_base, bei)
216
+ if os.path.exists(bei):
217
+ log.warn(
218
+ "-" * 78 + '\n'
219
+ "Note: Your current .egg-info directory has a '-' in its name;"
220
+ '\nthis will not work correctly with "setup.py develop".\n\n'
221
+ 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
222
+ bei, self.egg_info
223
+ )
224
+ self.broken_egg_info = self.egg_info
225
+ self.egg_info = bei # make it work for now
226
+
227
+
228
+ class FileList(_FileList):
229
+ """File list that accepts only existing, platform-independent paths"""
230
+
231
+ def append(self, item):
232
+ if item.endswith('\r'): # Fix older sdists built on Windows
233
+ item = item[:-1]
234
+ path = convert_path(item)
235
+
236
+ if self._safe_path(path):
237
+ self.files.append(path)
238
+
239
+ def extend(self, paths):
240
+ self.files.extend(filter(self._safe_path, paths))
241
+
242
+ def _repair(self):
243
+ """
244
+ Replace self.files with only safe paths
245
+
246
+ Because some owners of FileList manipulate the underlying
247
+ ``files`` attribute directly, this method must be called to
248
+ repair those paths.
249
+ """
250
+ self.files = list(filter(self._safe_path, self.files))
251
+
252
+ def _safe_path(self, path):
253
+ enc_warn = "'%s' not %s encodable -- skipping"
254
+
255
+ # To avoid accidental trans-codings errors, first to unicode
256
+ u_path = unicode_utils.filesys_decode(path)
257
+ if u_path is None:
258
+ log.warn("'%s' in unexpected encoding -- skipping" % path)
259
+ return False
260
+
261
+ # Must ensure utf-8 encodability
262
+ utf8_path = unicode_utils.try_encode(u_path, "utf-8")
263
+ if utf8_path is None:
264
+ log.warn(enc_warn, path, 'utf-8')
265
+ return False
266
+
267
+ try:
268
+ # accept is either way checks out
269
+ if os.path.exists(u_path) or os.path.exists(utf8_path):
270
+ return True
271
+ # this will catch any encode errors decoding u_path
272
+ except UnicodeEncodeError:
273
+ log.warn(enc_warn, path, sys.getfilesystemencoding())
274
+
275
+
276
+ class manifest_maker(sdist):
277
+ template = "MANIFEST.in"
278
+
279
+ def initialize_options(self):
280
+ self.use_defaults = 1
281
+ self.prune = 1
282
+ self.manifest_only = 1
283
+ self.force_manifest = 1
284
+
285
+ def finalize_options(self):
286
+ pass
287
+
288
+ def run(self):
289
+ self.filelist = FileList()
290
+ if not os.path.exists(self.manifest):
291
+ self.write_manifest() # it must exist so it'll get in the list
292
+ self.filelist.findall()
293
+ self.add_defaults()
294
+ if os.path.exists(self.template):
295
+ self.read_template()
296
+ self.prune_file_list()
297
+ self.filelist.sort()
298
+ self.filelist.remove_duplicates()
299
+ self.write_manifest()
300
+
301
+ def _manifest_normalize(self, path):
302
+ path = unicode_utils.filesys_decode(path)
303
+ return path.replace(os.sep, '/')
304
+
305
+ def write_manifest(self):
306
+ """
307
+ Write the file list in 'self.filelist' to the manifest file
308
+ named by 'self.manifest'.
309
+ """
310
+ self.filelist._repair()
311
+
312
+ # Now _repairs should encodability, but not unicode
313
+ files = [self._manifest_normalize(f) for f in self.filelist.files]
314
+ msg = "writing manifest file '%s'" % self.manifest
315
+ self.execute(write_file, (self.manifest, files), msg)
316
+
317
+ def warn(self, msg): # suppress missing-file warnings from sdist
318
+ if not msg.startswith("standard file not found:"):
319
+ sdist.warn(self, msg)
320
+
321
+ def add_defaults(self):
322
+ sdist.add_defaults(self)
323
+ self.filelist.append(self.template)
324
+ self.filelist.append(self.manifest)
325
+ rcfiles = list(walk_revctrl())
326
+ if rcfiles:
327
+ self.filelist.extend(rcfiles)
328
+ elif os.path.exists(self.manifest):
329
+ self.read_manifest()
330
+ ei_cmd = self.get_finalized_command('egg_info')
331
+ self._add_egg_info(cmd=ei_cmd)
332
+ self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
333
+
334
+ def _add_egg_info(self, cmd):
335
+ """
336
+ Add paths for egg-info files for an external egg-base.
337
+
338
+ The egg-info files are written to egg-base. If egg-base is
339
+ outside the current working directory, this method
340
+ searchs the egg-base directory for files to include
341
+ in the manifest. Uses distutils.filelist.findall (which is
342
+ really the version monkeypatched in by setuptools/__init__.py)
343
+ to perform the search.
344
+
345
+ Since findall records relative paths, prefix the returned
346
+ paths with cmd.egg_base, so add_default's include_pattern call
347
+ (which is looking for the absolute cmd.egg_info) will match
348
+ them.
349
+ """
350
+ if cmd.egg_base == os.curdir:
351
+ # egg-info files were already added by something else
352
+ return
353
+
354
+ discovered = distutils.filelist.findall(cmd.egg_base)
355
+ resolved = (os.path.join(cmd.egg_base, path) for path in discovered)
356
+ self.filelist.allfiles.extend(resolved)
357
+
358
+ def prune_file_list(self):
359
+ build = self.get_finalized_command('build')
360
+ base_dir = self.distribution.get_fullname()
361
+ self.filelist.exclude_pattern(None, prefix=build.build_base)
362
+ self.filelist.exclude_pattern(None, prefix=base_dir)
363
+ sep = re.escape(os.sep)
364
+ self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
365
+ is_regex=1)
366
+
367
+
368
+ def write_file(filename, contents):
369
+ """Create a file with the specified name and write 'contents' (a
370
+ sequence of strings without line terminators) to it.
371
+ """
372
+ contents = "\n".join(contents)
373
+
374
+ # assuming the contents has been vetted for utf-8 encoding
375
+ contents = contents.encode("utf-8")
376
+
377
+ with open(filename, "wb") as f: # always write POSIX-style manifest
378
+ f.write(contents)
379
+
380
+
381
+ def write_pkg_info(cmd, basename, filename):
382
+ log.info("writing %s", filename)
383
+ if not cmd.dry_run:
384
+ metadata = cmd.distribution.metadata
385
+ metadata.version, oldver = cmd.egg_version, metadata.version
386
+ metadata.name, oldname = cmd.egg_name, metadata.name
387
+ try:
388
+ # write unescaped data to PKG-INFO, so older pkg_resources
389
+ # can still parse it
390
+ metadata.write_pkg_info(cmd.egg_info)
391
+ finally:
392
+ metadata.name, metadata.version = oldname, oldver
393
+
394
+ safe = getattr(cmd.distribution, 'zip_safe', None)
395
+
396
+ bdist_egg.write_safety_flag(cmd.egg_info, safe)
397
+
398
+
399
+ def warn_depends_obsolete(cmd, basename, filename):
400
+ if os.path.exists(filename):
401
+ log.warn(
402
+ "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
403
+ "Use the install_requires/extras_require setup() args instead."
404
+ )
405
+
406
+
407
+ def _write_requirements(stream, reqs):
408
+ lines = yield_lines(reqs or ())
409
+ append_cr = lambda line: line + '\n'
410
+ lines = map(append_cr, lines)
411
+ stream.writelines(lines)
412
+
413
+
414
+ def write_requirements(cmd, basename, filename):
415
+ dist = cmd.distribution
416
+ data = six.StringIO()
417
+ _write_requirements(data, dist.install_requires)
418
+ extras_require = dist.extras_require or {}
419
+ for extra in sorted(extras_require):
420
+ data.write('\n[{extra}]\n'.format(**vars()))
421
+ _write_requirements(data, extras_require[extra])
422
+ cmd.write_or_delete_file("requirements", filename, data.getvalue())
423
+
424
+
425
+ def write_setup_requirements(cmd, basename, filename):
426
+ data = StringIO()
427
+ _write_requirements(data, cmd.distribution.setup_requires)
428
+ cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
429
+
430
+
431
+ def write_toplevel_names(cmd, basename, filename):
432
+ pkgs = dict.fromkeys(
433
+ [
434
+ k.split('.', 1)[0]
435
+ for k in cmd.distribution.iter_distribution_names()
436
+ ]
437
+ )
438
+ cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
439
+
440
+
441
+ def overwrite_arg(cmd, basename, filename):
442
+ write_arg(cmd, basename, filename, True)
443
+
444
+
445
+ def write_arg(cmd, basename, filename, force=False):
446
+ argname = os.path.splitext(basename)[0]
447
+ value = getattr(cmd.distribution, argname, None)
448
+ if value is not None:
449
+ value = '\n'.join(value) + '\n'
450
+ cmd.write_or_delete_file(argname, filename, value, force)
451
+
452
+
453
+ def write_entries(cmd, basename, filename):
454
+ ep = cmd.distribution.entry_points
455
+
456
+ if isinstance(ep, six.string_types) or ep is None:
457
+ data = ep
458
+ elif ep is not None:
459
+ data = []
460
+ for section, contents in sorted(ep.items()):
461
+ if not isinstance(contents, six.string_types):
462
+ contents = EntryPoint.parse_group(section, contents)
463
+ contents = '\n'.join(sorted(map(str, contents.values())))
464
+ data.append('[%s]\n%s\n\n' % (section, contents))
465
+ data = ''.join(data)
466
+
467
+ cmd.write_or_delete_file('entry points', filename, data, True)
468
+
469
+
470
+ def get_pkg_info_revision():
471
+ """
472
+ Get a -r### off of PKG-INFO Version in case this is an sdist of
473
+ a subversion revision.
474
+ """
475
+ warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
476
+ if os.path.exists('PKG-INFO'):
477
+ with io.open('PKG-INFO') as f:
478
+ for line in f:
479
+ match = re.match(r"Version:.*-r(\d+)\s*$", line)
480
+ if match:
481
+ return int(match.group(1))
482
+ return 0
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/install_egg_info.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from distutils import log, dir_util
2
+ import os, sys
3
+
4
+ from setuptools.extern.six.moves import map
5
+
6
+ from setuptools import Command
7
+ from setuptools.archive_util import unpack_archive
8
+ import pkg_resources
9
+
10
+
11
class install_egg_info(Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Target directory; resolved from the install_lib command.
        self.install_dir = None
        # Optional alternate layout; only 'deb' is recognized.
        self.install_layout = None
        # Whether an explicit --prefix was given to the install command.
        self.prefix_option = None

    def finalize_options(self):
        # Fix: DistutilsOptionError was raised below but never imported
        # anywhere in this module, turning the intended error into a
        # NameError. Import it locally to keep the module surface unchanged.
        from distutils.errors import DistutilsOptionError

        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        self.set_undefined_options('install', ('install_layout', 'install_layout'))
        if sys.hexversion > 0x2060000:
            self.set_undefined_options('install', ('prefix_option', 'prefix_option'))
        ei_cmd = self.get_finalized_command("egg_info")
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'

        if self.install_layout:
            if not self.install_layout.lower() in ['deb']:
                raise DistutilsOptionError("unknown value for --install-layout")
            self.install_layout = self.install_layout.lower()
            # Debian layout drops the '-pyX' suffix from the directory name.
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
        elif self.prefix_option or 'real_prefix' in sys.__dict__:
            # don't modify for virtualenv
            pass
        else:
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')

        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = []

    def run(self):
        """Regenerate egg metadata, then copy it into the install dir."""
        self.run_command('egg_info')
        # Remove any pre-existing target (directory tree or plain file).
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        """Return the list of paths written by this command."""
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            if self.install_layout and self.install_layout in ['deb'] and src.startswith('SOURCES.txt'):
                log.info("Skipping SOURCES.txt")
                return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst

        unpack_archive(self.source, self.target, skimmer)

    def install_namespaces(self):
        """Write a -nspkg.pth file for any declared namespace packages."""
        nsp = self._get_all_ns_packages()
        if not nsp:
            return
        filename, ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'
        self.outputs.append(filename)
        log.info("Installing %s", filename)
        lines = map(self._gen_nspkg_line, nsp)

        if self.dry_run:
            # always generate the lines, even in dry run
            list(lines)
            return

        with open(filename, 'wt') as f:
            f.writelines(lines)

    # Template statements (one per namespace package) for the .pth file;
    # joined with ';' into a single line by _gen_nspkg_line.
    _nspkg_tmpl = (
        "import sys, types, os",
        "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
        "ie = os.path.exists(os.path.join(p,'__init__.py'))",
        "m = not ie and "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
        "(p not in mp) and mp.append(p)",
    )

    # Additional statement(s) appended when the package has a parent.
    _nspkg_tmpl_multi = (
        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )

    @classmethod
    def _gen_nspkg_line(cls, pkg):
        """Render one .pth line registering namespace package *pkg*."""
        # ensure pkg is not a unicode string under Python 2.7
        pkg = str(pkg)
        pth = tuple(pkg.split('.'))
        tmpl_lines = cls._nspkg_tmpl
        parent, sep, child = pkg.rpartition('.')
        if parent:
            tmpl_lines += cls._nspkg_tmpl_multi
        return ';'.join(tmpl_lines) % locals() + '\n'

    def _get_all_ns_packages(self):
        """Return sorted list of all package namespaces"""
        nsp = set()
        for pkg in self.distribution.namespace_packages or []:
            pkg = pkg.split('.')
            while pkg:
                nsp.add('.'.join(pkg))
                pkg.pop()
        return sorted(nsp)
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/command/install_scripts.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from distutils import log
2
+ import distutils.command.install_scripts as orig
3
+ import os
4
+
5
+ from pkg_resources import Distribution, PathMetadata, ensure_directory
6
+
7
+
8
class install_scripts(orig.install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""

    def initialize_options(self):
        orig.install_scripts.initialize_options(self)
        # When true, entry-point wrapper scripts are not generated
        # (only the distribution's plain scripts are installed).
        self.no_ep = False

    def run(self):
        # Imported lazily — presumably to avoid an import cycle at module
        # load time; TODO confirm.
        import setuptools.command.easy_install as ei

        self.run_command("egg_info")
        if self.distribution.scripts:
            orig.install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return

        # Describe this project's in-tree egg metadata so the script writer
        # can resolve its console/gui entry points.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        exec_param = getattr(bs_cmd, 'executable', None)
        bw_cmd = self.get_finalized_command("bdist_wininst")
        is_wininst = getattr(bw_cmd, '_is_running', False)
        writer = ei.ScriptWriter
        if is_wininst:
            # Windows installers use the generic interpreter name and the
            # Windows-specific script writer.
            exec_param = "python.exe"
            writer = ei.WindowsScriptWriter
        # resolve the writer to the environment
        writer = writer.best()
        cmd = writer.command_spec_class.best().from_param(exec_param)
        for args in writer.get_args(dist, cmd.as_header()):
            self.write_script(*args)

    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask

        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)

        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            f = open(target, "w" + mode)
            f.write(contents)
            f.close()
            # Make the script executable, honoring the caller's umask.
            chmod(target, 0o777 - mask)
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/setuptools/py31compat.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compatibility shims for older Pythons (notably 3.1 and 2.x variants):
# provides sysconfig accessors, TemporaryDirectory, and a unittest.main
# wrapper, falling back to local implementations where the stdlib lacks them.
import sys
import unittest

__all__ = ['get_config_vars', 'get_path']

try:
    # Python 2.7 or >=3.2
    from sysconfig import get_config_vars, get_path
except ImportError:
    from distutils.sysconfig import get_config_vars, get_python_lib

    def get_path(name):
        # Minimal stand-in for sysconfig.get_path covering only the two
        # locations this package needs.
        if name not in ('platlib', 'purelib'):
            raise ValueError("Name must be purelib or platlib")
        return get_python_lib(name == 'platlib')

try:
    # Python >=3.2
    from tempfile import TemporaryDirectory
except ImportError:
    import shutil
    import tempfile

    class TemporaryDirectory(object):
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """

        def __init__(self):
            self.name = None  # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp()

        def __enter__(self):
            return self.name

        def __exit__(self, exctype, excvalue, exctrace):
            try:
                shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only possible
                pass
            self.name = None


unittest_main = unittest.main

_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2)
if _PY31:
    # on Python 3.1, translate testRunner==None to TextTestRunner
    # for compatibility with Python 2.6, 2.7, and 3.2+
    def unittest_main(*args, **kwargs):
        if 'testRunner' in kwargs and kwargs['testRunner'] is None:
            kwargs['testRunner'] = unittest.TextTestRunner
        return unittest.main(*args, **kwargs)
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/sqlparse/filters/__init__.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2009-2018 the sqlparse authors and contributors
4
+ # <see AUTHORS file>
5
+ #
6
+ # This module is part of python-sqlparse and is released under
7
+ # the BSD License: https://opensource.org/licenses/BSD-3-Clause
8
+
9
+ from sqlparse.filters.others import SerializerUnicode
10
+ from sqlparse.filters.others import StripCommentsFilter
11
+ from sqlparse.filters.others import StripWhitespaceFilter
12
+ from sqlparse.filters.others import SpacesAroundOperatorsFilter
13
+
14
+ from sqlparse.filters.output import OutputPHPFilter
15
+ from sqlparse.filters.output import OutputPythonFilter
16
+
17
+ from sqlparse.filters.tokens import KeywordCaseFilter
18
+ from sqlparse.filters.tokens import IdentifierCaseFilter
19
+ from sqlparse.filters.tokens import TruncateStringFilter
20
+
21
+ from sqlparse.filters.reindent import ReindentFilter
22
+ from sqlparse.filters.right_margin import RightMarginFilter
23
+ from sqlparse.filters.aligned_indent import AlignedIndentFilter
24
+
25
+ __all__ = [
26
+ 'SerializerUnicode',
27
+ 'StripCommentsFilter',
28
+ 'StripWhitespaceFilter',
29
+ 'SpacesAroundOperatorsFilter',
30
+
31
+ 'OutputPHPFilter',
32
+ 'OutputPythonFilter',
33
+
34
+ 'KeywordCaseFilter',
35
+ 'IdentifierCaseFilter',
36
+ 'TruncateStringFilter',
37
+
38
+ 'ReindentFilter',
39
+ 'RightMarginFilter',
40
+ 'AlignedIndentFilter',
41
+ ]
1001-sala-de-aula-master/env/lib64/python3.5/site-packages/sqlparse/lexer.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2009-2018 the sqlparse authors and contributors
4
+ # <see AUTHORS file>
5
+ #
6
+ # This module is part of python-sqlparse and is released under
7
+ # the BSD License: https://opensource.org/licenses/BSD-3-Clause
8
+
9
+ """SQL Lexer"""
10
+
11
+ # This code is based on the SqlLexer in pygments.
12
+ # http://pygments.org/
13
+ # It's separated from the rest of pygments to increase performance
14
+ # and to allow some customizations.
15
+
16
+ from sqlparse import tokens
17
+ from sqlparse.keywords import SQL_REGEX
18
+ from sqlparse.compat import text_type, file_types
19
+ from sqlparse.utils import consume
20
+
21
+
22
class Lexer(object):
    """Lexer
    Empty class. Leaving for backwards-compatibility
    """

    @staticmethod
    def get_tokens(text, encoding=None):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.

        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        # Accept file-like objects by reading their contents up front.
        if isinstance(text, file_types):
            text = text.read()

        if isinstance(text, text_type):
            pass
        elif isinstance(text, bytes):
            # Decode byte input: honor an explicit encoding, otherwise try
            # UTF-8 and fall back to unicode-escape for undecodable data.
            if encoding:
                text = text.decode(encoding)
            else:
                try:
                    text = text.decode('utf-8')
                except UnicodeDecodeError:
                    text = text.decode('unicode-escape')
        else:
            raise TypeError(u"Expected text or file-like object, got {!r}".
                            format(type(text)))

        iterable = enumerate(text)
        for pos, char in iterable:
            # Try each (regex-matcher, action) rule at the current position;
            # the first rule that matches wins.
            for rexmatch, action in SQL_REGEX:
                m = rexmatch(text, pos)

                if not m:
                    continue
                elif isinstance(action, tokens._TokenType):
                    # Plain token type: emit it with the matched text.
                    yield action, m.group()
                elif callable(action):
                    # Callable action computes the (tokentype, value) pair.
                    yield action(m.group())

                # Advance the character iterator past the matched span so the
                # outer loop resumes at the end of the match.
                consume(iterable, m.end() - pos - 1)
                break
            else:
                # No rule matched: emit a one-character error token.
                yield tokens.Error, char
+
75
+
76
def tokenize(sql, encoding=None):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    lexer = Lexer()
    return lexer.get_tokens(sql, encoding)
1001-sala-de-aula-master/professor/tests.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from django.test import TestCase
2
+
3
+ # Create your tests here.
1001-sala-de-aula-master/sala/migrations/0003_auto_20190909_1849.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated by Django 2.2.5 on 2019-09-09 18:49
2
+
3
+ from django.db import migrations, models
4
+ import django.db.models.deletion
5
+
6
+
7
class Migration(migrations.Migration):
    # Drops the sala-local Professor model and links Aluno to the professor
    # app's Professor via a required FK (existing rows get pk=1 once,
    # since preserve_default=False).

    dependencies = [
        ('professor', '0001_initial'),
        ('sala', '0002_professor'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Professor',
        ),
        migrations.AddField(
            model_name='aluno',
            name='prof_favorito',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='alunos', to='professor.Professor'),
            preserve_default=False,
        ),
    ]
1001-sala-de-aula-master/sala/models.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from django.db import models
2
+
3
+ # Create your models here.
4
+ from professor.models import Professor
5
+
6
+
7
class Aluno(models.Model):
    """A student (aluno) with a required favorite-professor relationship."""

    nome = models.CharField(
        max_length=50,
        verbose_name='nome')

    # NOTE(review): age is stored as free text, not an integer — confirm
    # this is intentional before changing.
    idade = models.CharField(
        max_length=50,
        verbose_name='idade')

    # Unique per student; used as a natural identifier.
    email = models.CharField(
        max_length=255,
        verbose_name='E-mail',
        unique=True)

    # Deleting a Professor also deletes their related Aluno rows (CASCADE).
    prof_favorito = models.ForeignKey(
        Professor,
        on_delete=models.CASCADE,
        related_name='alunos'
    )

    def __str__(self):
        return self.nome
29
+
1001-sala-de-aula-master/sala/serializers.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from rest_framework import serializers
2
+ from sala.models import Aluno
3
+ from sala.models import Professor
4
+
5
class ProfessorDataSerializer(serializers.Serializer):
    """Minimal nested Professor representation: writable id, read-only nome."""
    id = serializers.IntegerField()
    nome = serializers.CharField(read_only=True)
8
+
9
class AlunoSerializer(serializers.Serializer):
    """Serializer for Aluno with a nested favorite-professor reference.

    On write, the nested ``prof_favorito.id`` selects an existing
    Professor (``nome`` in the nested serializer is read-only).
    """

    # Fix: was serializers.ImageField — the primary key is an integer,
    # as in AlunoLightSerializer and ProfessorDataSerializer.
    id = serializers.IntegerField(read_only=True)
    nome = serializers.CharField(max_length=255)
    idade = serializers.CharField()

    prof_favorito = ProfessorDataSerializer()

    def create(self, validated_data):
        """Create an Aluno linked to the Professor named by the nested id."""
        professor_data = validated_data.pop('prof_favorito')
        professor = Professor.objects.get(id=professor_data['id'])
        aluno = Aluno.objects.create(prof_favorito=professor, **validated_data)
        return aluno

    def update(self, instance, validated_data):
        """Update an existing Aluno in place and persist it."""
        instance.nome = validated_data.get('nome', instance.nome)
        instance.idade = validated_data.get('idade', instance.idade)
        # Fix: 'email' is not a declared serializer field, so it can never
        # appear in validated_data; the original .get('email') therefore
        # always reset the stored email to None. Keep the current value.
        instance.email = validated_data.get('email', instance.email)
        professor_data = validated_data.pop('prof_favorito')
        # Fix: looked up by professor_data['nome'] — but 'nome' is
        # read-only (never in validated data) and create() uses ['id'].
        professor = Professor.objects.get(id=professor_data['id'])
        instance.prof_favorito = professor
        instance.save()
        return instance
32
+
33
class AlunoLightSerializer(serializers.Serializer):
    """Compact Aluno representation (id + nome only)."""
    id = serializers.IntegerField()
    # Fix: nome is a text field on the model; it was declared IntegerField,
    # which would reject every real name.
    nome = serializers.CharField()
1001-sala-de-aula-master/sala_de_aula/wsgi.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ WSGI config for sala_de_aula project.
3
+
4
+ It exposes the WSGI callable as a module-level variable named ``application``.
5
+
6
+ For more information on this file, see
7
+ https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
8
+ """
9
+
10
+ import os
11
+
12
+ from django.core.wsgi import get_wsgi_application
13
+
14
# Default to this project's settings module; an externally-provided
# DJANGO_SETTINGS_MODULE still takes precedence (setdefault).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sala_de_aula.settings')

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()
1076LAB-master/1225test/Debug/1225test.tlog/1225test.lastbuildstate ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ #TargetFrameworkVersion=v4.0:PlatformToolSet=v141:EnableManagedIncrementalBuild=false:VCToolArchitecture=Native32Bit:WindowsTargetPlatformVersion=10.0.17763.0
2
+ Debug|Win32|C:\Users\isci\Desktop\1225test\1225test\|
1076LAB-master/Adafruit_Python_DHT/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2014 Adafruit Industries
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
1076LAB-master/Adafruit_Python_DHT/MANIFEST.in ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ include README.md
2
+ recursive-include source *
1076LAB-master/Adafruit_Python_DHT/setup.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from setuptools import setup, find_packages, Extension
2
+ import os
3
+ import sys
4
+
5
+ import Adafruit_DHT.platform_detect as platform_detect
6
+
7
+
8
+ BINARY_COMMANDS = [
9
+ 'build_ext',
10
+ 'build_clib',
11
+ 'bdist',
12
+ 'bdist_dumb',
13
+ 'bdist_rpm',
14
+ 'bdist_wininst',
15
+ 'bdist_wheel',
16
+ 'install'
17
+ ]
18
+
19
+
20
def is_binary_install():
    """Return True when any requested setup command builds/installs binaries."""
    return any(command in sys.argv for command in BINARY_COMMANDS)
23
+
24
+
25
def read(fname):
    """Return the contents of *fname*, resolved relative to this script."""
    path = os.path.join(os.path.dirname(__file__), fname)
    # Fix: use a context manager so the file handle is closed promptly
    # (the original left the handle open).
    with open(path) as f:
        return f.read()
27
+
28
# Check if an explicit platform was chosen with a command line parameter.
# Kind of hacky to manipulate the argument list before calling setup, but it's
# the best simple option for adding optional config to the setup.
platform = platform_detect.UNKNOWN
pi_version = None
if '--force-pi' in sys.argv:
    platform = platform_detect.RASPBERRY_PI
    pi_version = 1
    sys.argv.remove('--force-pi')
elif '--force-pi2' in sys.argv:
    platform = platform_detect.RASPBERRY_PI
    pi_version = 2
    sys.argv.remove('--force-pi2')
elif '--force-bbb' in sys.argv:
    platform = platform_detect.BEAGLEBONE_BLACK
    sys.argv.remove('--force-bbb')
elif '--force-test' in sys.argv:
    # Test platform builds a stub driver with no hardware access.
    platform = 'TEST'
    sys.argv.remove('--force-test')
else:
    # No explicit platform chosen, detect the current platform.
    platform = platform_detect.platform_detect()

# Pick the right extension to compile based on the platform.
extensions = []
if not is_binary_install():
    print('Skipped loading platform-specific extensions for Adafruit_DHT (we are generating a cross-platform source distribution).')
elif platform == platform_detect.RASPBERRY_PI:
    # Get the Pi version (1 or 2)
    if pi_version is None:
        pi_version = platform_detect.pi_version()
    # Build the right extension depending on the Pi version.
    if pi_version == 1:
        extensions.append(Extension("Adafruit_DHT.Raspberry_Pi_Driver",
                                    ["source/_Raspberry_Pi_Driver.c", "source/common_dht_read.c", "source/Raspberry_Pi/pi_dht_read.c", "source/Raspberry_Pi/pi_mmio.c"],
                                    libraries=['rt'],
                                    extra_compile_args=['-std=gnu99']))
    elif pi_version == 2:
        extensions.append(Extension("Adafruit_DHT.Raspberry_Pi_2_Driver",
                                    ["source/_Raspberry_Pi_2_Driver.c", "source/common_dht_read.c", "source/Raspberry_Pi_2/pi_2_dht_read.c", "source/Raspberry_Pi_2/pi_2_mmio.c"],
                                    libraries=['rt'],
                                    extra_compile_args=['-std=gnu99']))
    elif pi_version == 3:
        # Pi 3 reuses the Pi 2 driver sources (same peripheral layout here).
        extensions.append(Extension("Adafruit_DHT.Raspberry_Pi_2_Driver",
                                    ["source/_Raspberry_Pi_2_Driver.c", "source/common_dht_read.c", "source/Raspberry_Pi_2/pi_2_dht_read.c", "source/Raspberry_Pi_2/pi_2_mmio.c"],
                                    libraries=['rt'],
                                    extra_compile_args=['-std=gnu99']))
    else:
        raise RuntimeError('Detected Pi version that has no appropriate driver available.')
elif platform == platform_detect.BEAGLEBONE_BLACK:
    extensions.append(Extension("Adafruit_DHT.Beaglebone_Black_Driver",
                                ["source/_Beaglebone_Black_Driver.c", "source/common_dht_read.c", "source/Beaglebone_Black/bbb_dht_read.c", "source/Beaglebone_Black/bbb_mmio.c"],
                                libraries=['rt'],
                                extra_compile_args=['-std=gnu99']))
elif platform == 'TEST':
    extensions.append(Extension("Adafruit_DHT.Test_Driver",
                                ["source/_Test_Driver.c", "source/Test/test_dht_read.c"],
                                extra_compile_args=['-std=gnu99']))
else:
    print('Could not detect if running on the Raspberry Pi or Beaglebone Black. If this failure is unexpected, you can run again with --force-pi or --force-bbb parameter to force using the Raspberry Pi or Beaglebone Black respectively.')
    sys.exit(1)

classifiers = ['Development Status :: 4 - Beta',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: MIT License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: System :: Hardware']

# Call setuptools setup function to install package.
setup(name = 'Adafruit_DHT',
      version = '1.4.0',
      author = 'Tony DiCola',
      author_email = 'tdicola@adafruit.com',
      description = 'Library to get readings from the DHT11, DHT22, and AM2302 humidity and temperature sensors on a Raspberry Pi or Beaglebone Black.',
      long_description = read('README.md'),
      license = 'MIT',
      classifiers = classifiers,
      url = 'https://github.com/adafruit/Adafruit_Python_DHT/',
      packages = find_packages(),
      ext_modules = extensions)
1076LAB-master/Adafruit_Python_DHT/source/Raspberry_Pi_2/pi_2_dht_read.c ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) 2014 Adafruit Industries
2
+ // Author: Tony DiCola
3
+
4
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
5
+ // of this software and associated documentation files (the "Software"), to deal
6
+ // in the Software without restriction, including without limitation the rights
7
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
+ // copies of the Software, and to permit persons to whom the Software is
9
+ // furnished to do so, subject to the following conditions:
10
+
11
+ // The above copyright notice and this permission notice shall be included in all
12
+ // copies or substantial portions of the Software.
13
+
14
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20
+ // SOFTWARE.
21
+ #include <stdbool.h>
22
+ #include <stdlib.h>
23
+
24
+ #include "pi_2_dht_read.h"
25
+ #include "pi_2_mmio.h"
26
+
27
+ // This is the only processor specific magic value, the maximum amount of time to
28
+ // spin in a loop before bailing out and considering the read a timeout. This should
29
+ // be a high value, but if you're running on a much faster platform than a Raspberry
30
+ // Pi or Beaglebone Black then it might need to be increased.
31
+ #define DHT_MAXCOUNT 32000
32
+
33
+ // Number of bit pulses to expect from the DHT. Note that this is 41 because
34
+ // the first pulse is a constant 50 microsecond pulse, with 40 pulses to represent
35
+ // the data afterwards.
36
+ #define DHT_PULSES 41
37
+
38
// Read a DHT11/DHT22 sensor attached to GPIO `pin` and store the results
// through the out-parameters.  Returns DHT_SUCCESS or a DHT_ERROR_* code.
int pi_2_dht_read(int type, int pin, float* humidity, float* temperature) {
  // Validate humidity and temperature arguments and set them to zero.
  if (humidity == NULL || temperature == NULL) {
    return DHT_ERROR_ARGUMENT;
  }
  *temperature = 0.0f;
  *humidity = 0.0f;

  // Initialize GPIO library.
  if (pi_2_mmio_init() < 0) {
    return DHT_ERROR_GPIO;
  }

  // Store the count that each DHT bit pulse is low and high.
  // Make sure array is initialized to start at zero.
  int pulseCounts[DHT_PULSES*2] = {0};

  // Set pin to output.
  pi_2_mmio_set_output(pin);

  // Bump up process priority and change scheduler to try to make process more 'real time'.
  set_max_priority();

  // Set pin high for ~500 milliseconds.
  pi_2_mmio_set_high(pin);
  sleep_milliseconds(500);

  // The next calls are timing critical and care should be taken
  // to ensure no unnecessary work is done below.

  // Set pin low for ~20 milliseconds (the host "start" signal).
  pi_2_mmio_set_low(pin);
  busy_wait_milliseconds(20);

  // Set pin at input.
  pi_2_mmio_set_input(pin);
  // Need a very short delay before reading pins or else value is sometimes still low.
  for (volatile int i = 0; i < 50; ++i) {
  }

  // Wait for DHT to pull pin low.
  uint32_t count = 0;
  while (pi_2_mmio_input(pin)) {
    if (++count >= DHT_MAXCOUNT) {
      // Timeout waiting for response.
      set_default_priority();
      return DHT_ERROR_TIMEOUT;
    }
  }

  // Record pulse widths for the expected result bits.
  for (int i=0; i < DHT_PULSES*2; i+=2) {
    // Count how long pin is low and store in pulseCounts[i]
    while (!pi_2_mmio_input(pin)) {
      if (++pulseCounts[i] >= DHT_MAXCOUNT) {
        // Timeout waiting for response.
        set_default_priority();
        return DHT_ERROR_TIMEOUT;
      }
    }
    // Count how long pin is high and store in pulseCounts[i+1]
    while (pi_2_mmio_input(pin)) {
      if (++pulseCounts[i+1] >= DHT_MAXCOUNT) {
        // Timeout waiting for response.
        set_default_priority();
        return DHT_ERROR_TIMEOUT;
      }
    }
  }

  // Done with timing critical code, now interpret the results.

  // Drop back to normal priority.
  set_default_priority();

  // Compute the average low pulse width to use as a 50 microsecond reference threshold.
  // Ignore the first two readings because they are a constant 80 microsecond pulse.
  uint32_t threshold = 0;
  for (int i=2; i < DHT_PULSES*2; i+=2) {
    threshold += pulseCounts[i];
  }
  threshold /= DHT_PULSES-1;

  // Interpret each high pulse as a 0 or 1 by comparing it to the 50us reference.
  // If the count is less than 50us it must be a ~28us 0 pulse, and if it's higher
  // then it must be a ~70us 1 pulse.
  uint8_t data[5] = {0};
  for (int i=3; i < DHT_PULSES*2; i+=2) {
    int index = (i-3)/16;  // 8 high pulses per data byte (2 loop steps each).
    data[index] <<= 1;
    if (pulseCounts[i] >= threshold) {
      // One bit for long pulse.
      data[index] |= 1;
    }
    // Else zero bit for short pulse.
  }

  // Useful debug info:
  //printf("Data: 0x%x 0x%x 0x%x 0x%x 0x%x\n", data[0], data[1], data[2], data[3], data[4]);

  // Verify checksum of received data.
  if (data[4] == ((data[0] + data[1] + data[2] + data[3]) & 0xFF)) {
    if (type == DHT11) {
      // Get humidity and temp for DHT11 sensor.
      *humidity = (float)data[0];
      *temperature = (float)data[2];
    }
    else if (type == DHT22) {
      // Calculate humidity and temp for DHT22 sensor.
      *humidity = (data[0] * 256 + data[1]) / 10.0f;
      *temperature = ((data[2] & 0x7F) * 256 + data[3]) / 10.0f;
      // High bit of the temperature word is the sign flag.
      if (data[2] & 0x80) {
        *temperature *= -1.0f;
      }
    }
    return DHT_SUCCESS;
  }
  else {
    return DHT_ERROR_CHECKSUM;
  }
}
1076LAB-master/README.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # 1076LAB
2
+
3
+ 備份實驗課用code的地方
1076LAB-master/launch/turtlebot3_app.launch ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <launch>
2
+ <arg name="model" default="$(env TURTLEBOT3_MODEL)" doc="model type [burger, waffle, waffle_pi]"/>
3
+ <param name="model" value="$(arg model)"/>
4
+
5
+ <!-- launches the turtlebot3_app node (teleop already has its own built-in velocity smoother) -->
6
+ <node pkg="turtlebot3_teleop" type="turtlebot3_app" name="turtlebot3_teleop_keyboard" output="screen">
7
+ </node>
8
+ </launch>
1076LAB-master/launch/turtlebot3_teleop_key.launch ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <launch>
2
+ <arg name="model" default="$(env TURTLEBOT3_MODEL)" doc="model type [burger, waffle, waffle_pi]"/>
3
+ <param name="model" value="$(arg model)"/>
4
+
5
+ <!-- turtlebot3_teleop_key already has its own built in velocity smoother -->
6
+ <node pkg="turtlebot3_teleop" type="turtlebot3_bot" name="turtlebot3_teleop_keyboard" output="screen">
7
+ </node>
8
+ </launch>
1076LAB-master/nodes/turtlebot3_app ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) 2011, Willow Garage, Inc.
4
+ # All rights reserved.
5
+ #
6
+ # Redistribution and use in source and binary forms, with or without
7
+ # modification, are permitted provided that the following conditions are met:
8
+ #
9
+ # * Redistributions of source code must retain the above copyright
10
+ # notice, this list of conditions and the following disclaimer.
11
+ # * Redistributions in binary form must reproduce the above copyright
12
+ # notice, this list of conditions and the following disclaimer in the
13
+ # documentation and/or other materials provided with the distribution.
14
+ # * Neither the name of the Willow Garage, Inc. nor the names of its
15
+ # contributors may be used to endorse or promote products derived from
16
+ # this software without specific prior written permission.
17
+ #
18
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28
+ # POSSIBILITY OF SUCH DAMAGE.
29
+
30
+ import rospy
31
+ from geometry_msgs.msg import Twist
32
+ import sys, select, os
33
+ import socket
34
+ import time
35
+
36
+
37
#socket
# TCP server the remote controller connects to.  The address is a hard-coded
# LAN IP; it must match the machine this node runs on (TODO: confirm / make
# configurable).
HOST = '192.168.0.111'
PORT = 8001
try:
    # Create the listening socket at import time; abort the node if it fails.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:  # Python 2-only except syntax (file targets Python 2)
    # msg[1] is the human-readable part of the (errno, strerror) error tuple.
    sys.stderr.write("[ERROR] %s\n" % msg[1])
    sys.exit(1)

try:
    # Bind and start listening (backlog of 5 pending connections).
    sock.bind((HOST,PORT))
    sock.listen(5)
except socket.error, msg:
    sys.stderr.write("[ERROR] %s\n" % msg[1])
    exit(1)
52
+
53
+
54
+ if os.name == 'nt':
55
+ import msvcrt
56
+ else:
57
+ import tty, termios
58
+
59
+ #the max linear velocity of turtlebot
60
+ BURGER_MAX_LIN_VEL = 0.22
61
+ BURGER_MAX_ANG_VEL = 2.84
62
+
63
+ WAFFLE_MAX_LIN_VEL = 0.26
64
+ WAFFLE_MAX_ANG_VEL = 1.82
65
+
66
+ #the increased value of velocity when you press the bottom
67
+ LIN_VEL_STEP_SIZE = 0.01
68
+ ANG_VEL_STEP_SIZE = 0.1
69
+
70
+ msg = """
71
+ Control Your TurtleBot3!
72
+ ---------------------------
73
+ Moving around:
74
+ w
75
+ a s d
76
+ x
77
+
78
+ w/x : increase/decrease linear velocity (Burger : ~ 0.22, Waffle and Waffle Pi : ~ 0.26)
79
+ a/d : increase/decrease angular velocity (Burger : ~ 2.84, Waffle and Waffle Pi : ~ 1.82)
80
+
81
+ space key, s : force stop
82
+
83
+ CTRL-C to quit
84
+ """
85
+
86
+ e = """
87
+ Communications Failed
88
+ """
89
+
90
def getKey():
    """Poll the keyboard for one character, waiting at most 0.1 s.

    Returns the character read, or '' if no key was pressed.  On Windows
    this delegates to msvcrt; on POSIX the terminal is switched to raw
    mode for the read and restored from the module-level `settings`.
    """
    if os.name == 'nt':
        return msvcrt.getch()

    tty.setraw(sys.stdin.fileno())
    ready, _, _ = select.select([sys.stdin], [], [], 0.1)
    key = sys.stdin.read(1) if ready else ''

    # Restore the cooked terminal state saved in __main__.
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
103
+
104
def vels(target_linear_vel, target_angular_vel):
    """Return a one-line status string describing the current target velocities."""
    return "currently:\tlinear vel {0}\t angular vel {1} ".format(
        target_linear_vel, target_angular_vel)
106
+
107
def makeSimpleProfile(output, input, slop):
    """Move `output` toward `input` by at most `slop` per call (slew limiter)."""
    if input == output:
        return input
    if input > output:
        return min(input, output + slop)
    return max(input, output - slop)
116
+
117
def constrain(input, low, high):
    """Clamp `input` into the closed interval [low, high]."""
    return max(low, min(input, high))
126
+
127
def checkLinearLimitVelocity(vel):
    """Clamp a linear velocity to the limit of the active robot model.

    Unknown model strings fall back to the burger limit, matching the
    original behaviour.
    """
    if turtlebot3_model in ("waffle", "waffle_pi"):
        limit = WAFFLE_MAX_LIN_VEL
    else:
        limit = BURGER_MAX_LIN_VEL
    return constrain(vel, -limit, limit)
136
+
137
def checkAngularLimitVelocity(vel):
    """Clamp an angular velocity to the limit of the active robot model.

    Unknown model strings fall back to the burger limit, matching the
    original behaviour.
    """
    if turtlebot3_model in ("waffle", "waffle_pi"):
        limit = WAFFLE_MAX_ANG_VEL
    else:
        limit = BURGER_MAX_ANG_VEL
    return constrain(vel, -limit, limit)
146
+
147
if __name__ == "__main__":
    # Save the cooked terminal state so getKey() can restore it after raw reads.
    if os.name != 'nt':
        settings = termios.tcgetattr(sys.stdin)

    rospy.init_node('turtlebot3_teleop')
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)

    turtlebot3_model = rospy.get_param("model", "burger")

    status = 0
    target_linear_vel = 0.0
    target_angular_vel = 0.0
    control_linear_vel = 0.0
    control_angular_vel = 0.0

    try:
        print(msg)
        while(1):
            # Accept one TCP client per loop iteration and read a single
            # one-character command from it (Python 2: recv returns str).
            conn, addr = sock.accept()
            print(addr)
            msg1 = conn.recv(1024)
            print(msg1)
            conn.close()  # fix: release the per-request client socket (was leaked)
            # The local keyboard is still polled so Ctrl-C can quit the node.
            key = getKey()
            if msg1 == 'w':
                target_linear_vel = checkLinearLimitVelocity(target_linear_vel + LIN_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel, target_angular_vel))
            elif msg1 == 'x':
                target_linear_vel = checkLinearLimitVelocity(target_linear_vel - LIN_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel, target_angular_vel))
            elif msg1 == 'a':
                # Turn left: increase angular velocity.  Fix: this branch was
                # an empty `elif` with only a comment (a SyntaxError); the
                # implementation mirrors nodes/turtlebot3_teleop_key.
                target_angular_vel = checkAngularLimitVelocity(target_angular_vel + ANG_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel, target_angular_vel))
            elif msg1 == 'd':
                # Turn right: decrease angular velocity (same fix as above).
                target_angular_vel = checkAngularLimitVelocity(target_angular_vel - ANG_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel, target_angular_vel))
            elif msg1 == ' ' or msg1 == 's':
                # Force stop: zero both targets and the smoothed outputs.
                target_linear_vel = 0.0
                control_linear_vel = 0.0
                target_angular_vel = 0.0
                control_angular_vel = 0.0
                print(vels(target_linear_vel, target_angular_vel))
            else:
                if (key == '\x03'):
                    sock.close()
                    break

            # Echo the last command every 20 keystrokes.
            if status == 20:
                print(msg1)
                status = 0

            twist = Twist()

            # Ramp the published velocities toward the targets in small steps.
            control_linear_vel = makeSimpleProfile(control_linear_vel, target_linear_vel, (LIN_VEL_STEP_SIZE/2.0))
            twist.linear.x = control_linear_vel; twist.linear.y = 0.0; twist.linear.z = 0.0

            control_angular_vel = makeSimpleProfile(control_angular_vel, target_angular_vel, (ANG_VEL_STEP_SIZE/2.0))
            twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = control_angular_vel

            pub.publish(twist)

    except:
        # Deliberate catch-all: any failure ends teleoperation with a notice
        # and falls through to the stop/restore cleanup below.
        print(e)

    finally:
        # Always publish a zero twist so the robot stops.
        twist = Twist()
        twist.linear.x = 0.0; twist.linear.y = 0.0; twist.linear.z = 0.0
        twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = 0.0
        pub.publish(twist)

    # Restore the terminal to its saved state.
    if os.name != 'nt':
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
1076LAB-master/nodes/turtlebot3_teleop_key ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) 2011, Willow Garage, Inc.
4
+ # All rights reserved.
5
+ #
6
+ # Redistribution and use in source and binary forms, with or without
7
+ # modification, are permitted provided that the following conditions are met:
8
+ #
9
+ # * Redistributions of source code must retain the above copyright
10
+ # notice, this list of conditions and the following disclaimer.
11
+ # * Redistributions in binary form must reproduce the above copyright
12
+ # notice, this list of conditions and the following disclaimer in the
13
+ # documentation and/or other materials provided with the distribution.
14
+ # * Neither the name of the Willow Garage, Inc. nor the names of its
15
+ # contributors may be used to endorse or promote products derived from
16
+ # this software without specific prior written permission.
17
+ #
18
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28
+ # POSSIBILITY OF SUCH DAMAGE.
29
+
30
+ import rospy
31
+ from geometry_msgs.msg import Twist
32
+ import sys, select, os
33
+ if os.name == 'nt':
34
+ import msvcrt
35
+ else:
36
+ import tty, termios
37
+
38
+ BURGER_MAX_LIN_VEL = 0.22
39
+ BURGER_MAX_ANG_VEL = 2.84
40
+
41
+ WAFFLE_MAX_LIN_VEL = 0.26
42
+ WAFFLE_MAX_ANG_VEL = 1.82
43
+
44
+ LIN_VEL_STEP_SIZE = 0.01
45
+ ANG_VEL_STEP_SIZE = 0.1
46
+
47
+ msg = """
48
+ Control Your TurtleBot3!
49
+ ---------------------------
50
+ Moving around:
51
+ w
52
+ a s d
53
+ x
54
+
55
+ w/x : increase/decrease linear velocity (Burger : ~ 0.22, Waffle and Waffle Pi : ~ 0.26)
56
+ a/d : increase/decrease angular velocity (Burger : ~ 2.84, Waffle and Waffle Pi : ~ 1.82)
57
+
58
+ space key, s : force stop
59
+
60
+ CTRL-C to quit
61
+ """
62
+
63
+ e = """
64
+ Communications Failed
65
+ """
66
+
67
def getKey():
    """Return one keypress, or '' after a 0.1 s timeout.

    Windows uses msvcrt.getch(); POSIX reads stdin in raw mode and then
    restores the terminal from the module-level `settings`.
    """
    if os.name == 'nt':
        return msvcrt.getch()

    tty.setraw(sys.stdin.fileno())
    readable = select.select([sys.stdin], [], [], 0.1)[0]
    if readable:
        pressed = sys.stdin.read(1)
    else:
        pressed = ''

    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return pressed
80
+
81
def vels(target_linear_vel, target_angular_vel):
    """Build the velocity status line shown after each command."""
    parts = ("currently:", "linear vel %s" % target_linear_vel,
             " angular vel %s " % target_angular_vel)
    return "\t".join(parts)
83
+
84
def makeSimpleProfile(output, input, slop):
    """Return `output` advanced toward `input`, moving at most `slop`."""
    delta = input - output
    if delta > 0:
        return min(input, output + slop)
    elif delta < 0:
        return max(input, output - slop)
    return input
93
+
94
def constrain(input, low, high):
    """Clamp `input` to [low, high]."""
    if input < low:
        return low
    if input > high:
        return high
    return input
103
+
104
def checkLinearLimitVelocity(vel):
    """Clamp linear velocity to the active model's limit (burger default)."""
    limit = (WAFFLE_MAX_LIN_VEL
             if turtlebot3_model in ("waffle", "waffle_pi")
             else BURGER_MAX_LIN_VEL)
    return constrain(vel, -limit, limit)
113
+
114
def checkAngularLimitVelocity(vel):
    """Clamp angular velocity to the active model's limit (burger default)."""
    limit = (WAFFLE_MAX_ANG_VEL
             if turtlebot3_model in ("waffle", "waffle_pi")
             else BURGER_MAX_ANG_VEL)
    return constrain(vel, -limit, limit)
123
+
124
if __name__=="__main__":
    # Save the cooked terminal state; getKey() restores it after each raw read.
    if os.name != 'nt':
        settings = termios.tcgetattr(sys.stdin)

    rospy.init_node('turtlebot3_teleop')
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)

    # Robot model decides the velocity limits (burger is the default).
    turtlebot3_model = rospy.get_param("model", "burger")

    # status counts keystrokes; the help text is reprinted every 20 keys.
    status = 0
    target_linear_vel = 0.0
    target_angular_vel = 0.0
    control_linear_vel = 0.0
    control_angular_vel = 0.0

    try:
        print(msg)
        while(1):
            # Poll the keyboard (returns '' on the 0.1 s timeout, which still
            # falls through to publish a smoothed twist below).
            key = getKey()
            if key == 'w' :
                target_linear_vel = checkLinearLimitVelocity(target_linear_vel + LIN_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel,target_angular_vel))
            elif key == 'x' :
                target_linear_vel = checkLinearLimitVelocity(target_linear_vel - LIN_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel,target_angular_vel))
            elif key == 'a' :
                #add turn left action
                target_angular_vel = checkAngularLimitVelocity(target_angular_vel + ANG_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel,target_angular_vel))

            elif key == 'd' :
                #add turn right action
                target_angular_vel = checkAngularLimitVelocity(target_angular_vel - ANG_VEL_STEP_SIZE)
                status = status + 1
                print(vels(target_linear_vel,target_angular_vel))

            elif key == ' ' or key == 's' :
                # Force stop: zero both the targets and the smoothed outputs.
                target_linear_vel = 0.0
                control_linear_vel = 0.0
                target_angular_vel = 0.0
                control_angular_vel = 0.0
                print(vels(target_linear_vel, target_angular_vel))
            else:
                # Ctrl-C (ETX) exits the loop; any other key is ignored.
                if (key == '\x03'):
                    break

            # Reprint the instructions periodically.
            if status == 20 :
                print(msg)
                status = 0

            twist = Twist()

            # Ramp the published velocities toward the targets in small steps.
            control_linear_vel = makeSimpleProfile(control_linear_vel, target_linear_vel, (LIN_VEL_STEP_SIZE/2.0))
            twist.linear.x = control_linear_vel; twist.linear.y = 0.0; twist.linear.z = 0.0

            control_angular_vel = makeSimpleProfile(control_angular_vel, target_angular_vel, (ANG_VEL_STEP_SIZE/2.0))
            twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = control_angular_vel

            pub.publish(twist)

    except:
        # Catch-all by design: any failure prints the "Communications Failed"
        # banner and falls through to the stop/restore cleanup.
        print(e)

    finally:
        # Always publish a zero twist so the robot halts on exit.
        twist = Twist()
        twist.linear.x = 0.0; twist.linear.y = 0.0; twist.linear.z = 0.0
        twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = 0.0
        pub.publish(twist)

    # Restore the saved terminal state.
    if os.name != 'nt':
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
11777-Group11-master/attention_weight_vis/attention_0.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f768019a423b69bd1bd1164b68996f320618081f5d19bbeba2de475999d42e7
3
+ size 4665728
11777-Group11-master/attention_weight_vis/attention_1.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3725e3956c4252dbadf8ce457634b62ccaa086aa576ddb399aa852f289fd60f3
3
+ size 4665728
11777-Group11-master/attention_weight_vis/attention_10.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:644503201c2ecb44f1c16cf9b7b49dd23e086d707d83482467b71e5dd0dd949e
3
+ size 4665728
11777-Group11-master/attention_weight_vis/attention_12.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9385b292ac9f43a20a9f3b847dae161f67ce077234eb95a0788c0a9ac99083fd
3
+ size 4665728
11777-Group11-master/attention_weight_vis/attention_14.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3d32b3a2cd9a03121fb0224d3bbb312343b9635a4fe735cefa5847fed978804
3
+ size 4665728
11777-Group11-master/attention_weight_vis/attention_3.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc9746a3f6e0ef89a5bb232e822f6be3a32b81420197a08e10351068e9cb4efd
3
+ size 4665728
11777-Group11-master/attention_weight_vis/bertviz/neuron_view.js ADDED
@@ -0,0 +1,959 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @fileoverview Transformer Visualization D3 javascript code.
3
+ *
4
+ * Based on https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/visualization/attention.js
5
+ *
6
+ * Change log:
7
+ *
8
+ * 12/19/18 Jesse Vig Assorted cleanup. Changed orientation of attention matrices.
9
+ * 12/22/18 Jesse Vig Display attention details: query/key vectors
10
+ */
11
+
12
+ requirejs(['jquery', 'd3'],
13
+ function ($, d3) {
14
+
15
+ var params = window.params;
16
+ var config = {};
17
+ initialize();
18
+
19
+ const HEADING_TEXT_SIZE = 16;
20
+ const HEADING_HEIGHT = 42;
21
+ const HEADING_TEXT_COLOR = "#000000";
22
+ const TEXT_COLOR = "#202020"
23
+ const TEXT_SIZE = 15;
24
+ const MATRIX_WIDTH = 200;
25
+ const BOXWIDTH = TEXT_SIZE * 8;
26
+ const BOXHEIGHT = 26;
27
+ const WIDTH = 3000;
28
+ const HEIGHT_PADDING = 800;
29
+ const PADDING_WIDTH = 25;
30
+ const DOT_WIDTH = 70;
31
+ const SOFTMAX_WIDTH = 70;
32
+ const ATTENTION_WIDTH = 150;
33
+ const POS_COLOR = '#0c36d8';
34
+ const NEG_COLOR = '#ff6318';
35
+ const TEXT_HIGHLIGHT_COLOR_LEFT = "#e5e5e5";
36
+ const TEXT_HIGHLIGHT_COLOR_RIGHT = '#478be8';
37
+ const DOT_PRODUCT_BORDER_COLOR = "#5d5d5d";
38
+
39
// Rebuild the whole visualization for the current filter/layer/head selection,
// then show either the expanded (query/key detail) or collapsed (attention
// lines only) view.
function render() {

    var attnData = config.attention[config.filter];
    var leftText = attnData.left_text;
    var rightText = attnData.right_text;
    // var attentionHeads = attnData.attn[config.layer];
    var queries = attnData.queries[config.layer][config.head];
    var keys = attnData.keys[config.layer][config.head];
    var att = attnData.attn[config.layer][config.head];

    // Clear any previous SVG and size the new one to fit both token columns.
    $("#vis").empty();
    var height = config.initialTextLength * BOXHEIGHT * 2 + HEIGHT_PADDING;
    var svg = d3.select("#vis")
        .append('svg')
        .attr("width", WIDTH)
        .attr("height", height);

    renderVisExpanded(svg, leftText, rightText, queries, keys);
    // NOTE(review): renderVisCollapsed declares only 3 parameters, so the
    // extra `att` argument here is ignored; that function re-reads the
    // attention matrix from `config` itself.
    renderVisCollapsed(svg, leftText, rightText, att);
    if (config.expanded == true) {
        showExpanded();
    } else {
        showCollapsed();
    }
}
64
+
65
// Build the (initially hidden) collapsed view: left tokens, attention lines,
// right tokens, laid out left-to-right at fixed x offsets.
function renderVisCollapsed(svg, leftText, rightText) {

    var posLeftText = 0;
    var posAttention = posLeftText + BOXWIDTH;
    var posRightText = posAttention + ATTENTION_WIDTH + PADDING_WIDTH;

    // All collapsed-view elements live in one group so show/hide is a single
    // visibility toggle.
    svg = svg.append("g")
        .attr("id", "collapsed")
        .attr("visibility", "hidden");

    renderText(svg, leftText, "leftText", posLeftText, false);
    renderAttn(svg, posAttention, posRightText, false);
    renderText(svg, rightText, "rightText", posRightText, false);
}
79
+
80
// Build the (initially hidden) expanded view: left tokens, query vectors,
// key vectors, elementwise products, dot products, and right tokens, with
// connector lines between the stages.
function renderVisExpanded(svg, leftText, rightText, queries, keys) {

    // Column x-positions, computed left to right.
    var posLeftText = 0;
    var posQueries = posLeftText + BOXWIDTH + PADDING_WIDTH;
    var posKeys = posQueries + MATRIX_WIDTH + PADDING_WIDTH * 1.5;
    var posProduct = posKeys + MATRIX_WIDTH + PADDING_WIDTH;
    var posDotProduct = posProduct + MATRIX_WIDTH + PADDING_WIDTH;
    var posRightText = posDotProduct + BOXHEIGHT + PADDING_WIDTH;

    // One group so the whole expanded view toggles with a single visibility flip.
    svg = svg.append("g")
        .attr("id", "expanded")
        .attr("visibility", "hidden");

    renderHeadingsExpanded(svg, posQueries, posKeys, posProduct, posDotProduct, posRightText);
    renderText(svg, leftText, "leftText", posLeftText, true);
    renderTextQueryLines(svg, posQueries - PADDING_WIDTH, posQueries - 2);
    renderVectors(svg, "keys", keys, posKeys);
    renderQueryKeyLines(svg, posQueries + MATRIX_WIDTH + 1, posKeys - 3);
    renderVectors(svg, "queries", queries, posQueries);
    renderHorizLines(svg, "hlines1", posProduct - PADDING_WIDTH + 1, posProduct - 1);
    // NOTE(review): the "product" column is seeded with the key vectors here;
    // presumably actual q*k values are filled in on interaction — confirm.
    renderVectors(svg, "product", keys, posProduct);
    renderHorizLines(svg, "hlines2", posDotProduct - PADDING_WIDTH + 2, posDotProduct);
    // Dot products start as zeros, one per right-hand token.
    var dotProducts = new Array(rightText.length).fill(0);
    renderDotProducts(svg, dotProducts, posDotProduct);
    renderText(svg, rightText, "rightText", posRightText, true);
    renderHorizLines(svg, "hlines3", posRightText - PADDING_WIDTH - 2, posRightText);
    renderVectorHighlights(svg, "key-vector-highlights", posKeys);
    renderVectorHighlights(svg, "product-vector-highlights", posProduct)
}
109
+
110
// Draw the static column headings of the expanded view ("Query q", "Key k",
// "q × k (elementwise)", "q ∙ k", "Softmax") plus the "No token selected"
// placeholder text.  Pure layout: no data binding.
function renderHeadingsExpanded(svg, posQueries, posKeys, posProduct, posDotProduct, posSoftmax) {
    var headingContainer = svg.append("svg:g")
        .attr("id", "heading")

    var queryHeadingContainer = headingContainer.append("text")
        .attr("x", posQueries + 68)
        .attr("y", HEADING_HEIGHT - 12)
        .attr("height", BOXHEIGHT)
        .attr("width", MATRIX_WIDTH)
        .style('fill', HEADING_TEXT_COLOR);

    queryHeadingContainer.append('tspan')
        .text('Query ')
        .attr("y", HEADING_HEIGHT - 12)
        .attr("font-size", HEADING_TEXT_SIZE + "px");

    queryHeadingContainer.append('tspan')
        .text('q')
        .attr("y", HEADING_HEIGHT - 12)
        .attr("font-size", HEADING_TEXT_SIZE + "px");


    var keyHeadingContainer = headingContainer.append("text")
        .attr("x", posKeys + 73)
        .attr("y", HEADING_HEIGHT - 12)
        .attr("height", BOXHEIGHT)
        .attr("width", MATRIX_WIDTH)
        .attr("font-size", HEADING_TEXT_SIZE + "px")
        .style('fill', HEADING_TEXT_COLOR);

    keyHeadingContainer.append('tspan')
        .text('Key ')
        .style('font-size', HEADING_TEXT_SIZE + "px")
        .attr("y", HEADING_HEIGHT - 12);

    keyHeadingContainer.append('tspan')
        .text('k ')
        .style('font-size', HEADING_TEXT_SIZE + "px")
        .attr("y", HEADING_HEIGHT - 12);

    var productHeadingContainer = headingContainer.append("text")
        .attr("x", posProduct + 28)
        .attr("y", HEADING_HEIGHT - 12)
        .attr("height", BOXHEIGHT)
        .attr("width", MATRIX_WIDTH)
        .attr("font-size", HEADING_TEXT_SIZE + "px")
        .style('fill', HEADING_TEXT_COLOR);

    productHeadingContainer.append('tspan')
        .text('q \u00D7 k (elementwise)')    // \u00D7 is the multiplication sign
        .style('font-size', HEADING_TEXT_SIZE + "px")
        .attr("y", HEADING_HEIGHT - 12);

    var dotProductHeadingContainer = headingContainer.append("text")
        .attr("x", posDotProduct - 6)
        .attr("y", HEADING_HEIGHT - 12)
        .attr("height", BOXHEIGHT)
        .attr("width", MATRIX_WIDTH)
        .attr("font-size", HEADING_TEXT_SIZE + "px")
        .style('fill', HEADING_TEXT_COLOR);

    dotProductHeadingContainer.append('tspan')
        .text('q')
        .style('font-size', HEADING_TEXT_SIZE + "px")
        .attr("y", HEADING_HEIGHT - 12);

    dotProductHeadingContainer.append('tspan')
        .text(' \u2219 k')    // \u2219 is the bullet (dot-product) operator
        .style('font-size', HEADING_TEXT_SIZE + "px")
        .attr("y", HEADING_HEIGHT - 12);

    headingContainer.append("text")
        .attr("x", posSoftmax + 9)
        .attr("y", HEADING_HEIGHT - 12)
        .attr("height", BOXHEIGHT)
        .attr("width", SOFTMAX_WIDTH)
        .attr("font-size", HEADING_TEXT_SIZE + "px")
        .style("text-anchor", "start")
        .style('fill', HEADING_TEXT_COLOR)
        .text("Softmax");

    // Greyed-out placeholder shown until the user hovers/selects a token.
    headingContainer.append("text")
        .attr("id", "placeholder")
        .attr("x", posProduct + 55)
        .attr("y", HEADING_HEIGHT + 55)
        .attr("height", BOXHEIGHT)
        .attr("width", SOFTMAX_WIDTH + MATRIX_WIDTH + DOT_WIDTH)
        .attr("font-size", 20 + "px")
        .text("No token selected")
        .attr("fill", TEXT_HIGHLIGHT_COLOR_LEFT);
}
201
+
202
// Draw one group of horizontal connector lines per source token, one line per
// target token, between x = start_pos and x = end_pos.  Groups start fully
// transparent; line opacity encodes the attention weight.
function renderHorizLines(svg, id, start_pos, end_pos) {
    var attnMatrix = config.attention[config.filter].attn[config.layer][config.head];
    var linesContainer = svg.append("svg:g")
        .attr("id", id);
    linesContainer.selectAll("g")
        .data(attnMatrix)
        .enter()
        .append("g") // Add group for each source token
        .classed('horiz-line-group', true)
        .style("opacity", 0)
        .attr("source-index", function (d, i) { // Save index of source token
            return i;
        })
        .selectAll("line")
        .data(function (d) { // Loop over all target tokens
            return d;
        })
        .enter() // When entering
        .append("line")
        .attr("x1", start_pos)
        .attr("y1", function (d, targetIndex) {
            // Vertically centered on the target token's row.
            return targetIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("x2", end_pos)
        .attr("y2", function (d, targetIndex) {
            return targetIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("stroke-width", 2)
        .attr("stroke", "blue")
        .attr("stroke-opacity", function (d) {
            // Slight boost so small weights remain visible.
            return d * 1.1;
        });
}
235
+
236
// Draw per-(source, target) highlight rectangles around a vector column,
// grouped by source token.  Groups start transparent; the rectangle border
// opacity encodes the attention weight.
function renderVectorHighlights(svg, id, start_pos) {
    var attnMatrix = config.attention[config.filter].attn[config.layer][config.head];
    var vectorHighlightsContainer = svg.append("svg:g")
        .attr("id", id);
    vectorHighlightsContainer.selectAll("g")
        .data(attnMatrix)
        .enter()
        .append("g") // Add group for each source token
        .classed('vector-highlight-group', true)
        .style("opacity", 0)
        .attr("source-index", function (d, i) { // Save index of source token
            return i;
        })
        .selectAll("rect")
        .data(function (d) { // Loop over all target tokens
            return d;
        })
        .enter() // When entering
        .append("rect")
        .attr("x", start_pos - 1)
        .attr("y", function (d, targetIndex) {
            return targetIndex * BOXHEIGHT + HEADING_HEIGHT;
        })
        .attr("height", BOXHEIGHT - 5)
        .attr("width", MATRIX_WIDTH + 3)
        .style("fill-opacity", 0) // border only, no fill
        .attr("stroke-width", 1.9)
        .attr("stroke", "blue")
        .attr("stroke-opacity", function (d) {
            // Boost so small weights remain visible.
            return d * 1.6;
        });
}
268
+
269
// Draw the elbow-shaped connector paths from each query row (source token) to
// every key row (target token), grouped by source token.  Path opacity
// encodes the attention weight.
function renderQueryKeyLines(svg, start_pos, end_pos) {
    var attnMatrix = config.attention[config.filter].attn[config.layer][config.head];
    var linesContainer = svg.append("svg:g");
    // Piecewise-linear path generator over {x, y} points.
    var lineFunction = d3.line()
        .x(function (d) {
            return d.x;
        })
        .y(function (d) {
            return d.y;
        });

    linesContainer.selectAll("g")
        .data(attnMatrix)
        .enter()
        .append("g") // Add group for each source token
        .classed('qk-line-group', true)
        .style("opacity", 0)
        .attr("source-index", function (d, i) { // Save index of source token
            return i;
        })
        .selectAll("path")
        .data(function (d) { // Loop over all target tokens
            return d;
        })
        .enter() // When entering
        .append("path")
        .attr("d", function (d, targetIndex) {
            // Source row comes from the parent group's saved index.
            var sourceIndex = +this.parentNode.getAttribute("source-index");
            var y1 = sourceIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
            var y2 = targetIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
            var x1 = start_pos;
            var x2 = (start_pos + end_pos) / 2 + 1; // vertical segment midway
            var x3 = end_pos;

            return lineFunction([
                {'x': x1, 'y': y1},
                {'x': x2, 'y': y1},
                {'x': x2, 'y': y2},
                {'x': x3, 'y': y2},

            ])
        })
        .attr("fill", "none")
        .attr("stroke-width", 2)
        .attr("stroke", "blue")
        .attr("stroke-opacity", function (d) {
            return d * 1.1;
        });
}
318
+
319
// Draw one (initially invisible) horizontal line per left-hand token, linking
// the token text to its query vector.  Unlike the attention lines, opacity
// here is toggled elsewhere rather than weight-driven.
function renderTextQueryLines(svg, start_pos, end_pos) {
    var attnData = config.attention[config.filter];
    var leftText = attnData.left_text; // Use for shape not values
    var linesContainer = svg.append("svg:g");
    linesContainer.selectAll("line")
        .data(leftText)
        .enter()
        .append("line") // Add line
        .classed('text-query-line', true)
        .style("opacity", 0)
        .attr("x1", start_pos)
        .attr("y1", function (d, i) {
            return i * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("x2", end_pos)
        .attr("y2", function (d, i) {
            return i * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("stroke-width", 2)
        .attr("stroke", "blue")
}
340
+
341
// Draw the collapsed-view attention lines: one group per source (left) token,
// one line per target (right) token, with stroke opacity equal to the
// attention weight read from config.
//
// The `expanded` parameter is kept for interface compatibility but has no
// effect: the original body branched on it only to return `d` in both
// branches, so the dead conditional has been removed.
function renderAttn(svg, start_pos, end_pos, expanded) {
    var attnMatrix = config.attention[config.filter].attn[config.layer][config.head];
    var attnContainer = svg.append("svg:g");
    attnContainer.selectAll("g")
        .data(attnMatrix)
        .enter()
        .append("g") // Add group for each source token
        .classed('attn-line-group', true)
        .attr("source-index", function (d, i) { // Save index of source token
            return i;
        })
        .selectAll("line")
        .data(function (d) { // Loop over all target tokens
            return d;
        })
        .enter() // When entering
        .append("line")
        .attr("x1", start_pos)
        .attr("y1", function (d) {
            // Source row comes from the parent group's saved index.
            var sourceIndex = +this.parentNode.getAttribute("source-index");
            return sourceIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("x2", end_pos)
        .attr("y2", function (d, targetIndex) {
            return targetIndex * BOXHEIGHT + HEADING_HEIGHT + BOXHEIGHT / 2;
        })
        .attr("stroke-width", 2)
        .attr("stroke", "blue")
        .attr("stroke-opacity", function (d) {
            return d;
        });
}
377
+
378
function renderVectors(svg, id, vectors, leftPos) {
    // Render one column of vectors (queries / keys / elementwise product) as
    // rows of heat-mapped cells, each row framed by a border rect.
    var vectorContainer = svg.append("svg:g")
        .attr("id", id);

    // The product column only becomes visible once a query token is hovered.
    if (id == "product") {
        vectorContainer.style("opacity", 0);
    }

    var vector = vectorContainer.append("g")
        .selectAll("g")
        .data(vectors) // Loop over query/key vectors, one for each token
        .enter()
        .append("g") // Add (sub) group for each token
        .classed('vector', true)
        .attr("data-index", function (d, i) {
            return i;
        }) // make parent index available from DOM

    // The original's three branches duplicated the same rect chain and
    // differed only in these values (and in whether the stroke starts
    // hidden); factored into one parameterized helper.
    function appendBorder(height, strokeWidth, strokeColor, startHidden) {
        var border = vector.append("rect")
            .classed("vectorborder", true)
            .attr("x", leftPos - 1)
            .attr("y", function (d, i) {
                return i * BOXHEIGHT + HEADING_HEIGHT;
            })
            .attr("width", MATRIX_WIDTH + 2)
            .attr("height", height)
            .style("fill-opacity", 0)
            .style("stroke-width", strokeWidth)
            .style("stroke", strokeColor)
            .attr("rx", 1)
            .attr("ry", 1);
        if (startHidden) {
            border.style("stroke-opacity", 0);
        }
    }

    if (id == "queries") {
        appendBorder(BOXHEIGHT - 5, 1.9, "#5b83d5", true);
    } else if (id == "keys") {
        appendBorder(BOXHEIGHT - 6, 1, "#a2b4d5", true);
    } else {
        // Product column: border stroke visible from the start (the original
        // omitted the stroke-opacity 0 here).
        appendBorder(BOXHEIGHT - 6, 1, "#a2b4d5", false);
    }

    vector.selectAll(".element")
        .data(function (d) {
            return d;
        }) // loop over elements within each query vector
        .enter() // When entering
        .append("rect") // Add rect element for each token index (j), vector index (i)
        .classed('element', true)
        .attr("x", function (d, i) { // i is vector index, j is index of token
            return leftPos + i * MATRIX_WIDTH / config.vectorSize;
        })
        .attr("y", function (d, i) {
            var j = +this.parentNode.getAttribute("data-index");
            return j * BOXHEIGHT + HEADING_HEIGHT;
        })
        .attr("width", MATRIX_WIDTH / config.vectorSize)
        .attr("height", BOXHEIGHT - 6)
        .attr("rx", .7)
        .attr("ry", .7)
        .attr("data-value", function (d) {
            return d
        })
        .style("fill", function (d) {
            // Sign decides color, magnitude decides opacity (below).
            if (d >= 0) {
                return POS_COLOR;
            } else {
                return NEG_COLOR
            }
        })
        .style("opacity", function (d) {
            return Math.tanh(Math.abs(d) / 4);
        })
}
474
+
475
function renderText(svg, text, id, leftPos, expanded) {
    // Render one column of tokens. Left/right text columns get a hover
    // highlight rect; the left column also gets mouse handlers and a
    // per-row expand/collapse toggle icon.
    var tokenContainer = svg.append("svg:g")
        .attr("id", id)
        .selectAll("g")
        .data(text)
        .enter()
        .append("g");
    if (id == "leftText" || id == "rightText") {
        var fillColor;
        if (id == "rightText") {
            fillColor = TEXT_HIGHLIGHT_COLOR_RIGHT;
        }
        if (id == "leftText") {
            fillColor = TEXT_HIGHLIGHT_COLOR_LEFT;
        }

        tokenContainer.append("rect")
            .classed("highlight", true)
            .attr("fill", fillColor)
            .style("opacity", 0.0)
            .attr("height", BOXHEIGHT)
            .attr("width", BOXWIDTH)
            .attr("x", leftPos)
            .attr("y", function (d, i) {
                return i * BOXHEIGHT + HEADING_HEIGHT - 1;
            });
    }

    // Left column text is right-anchored, so nudge it the other way.
    var offset;
    if (id == "leftText") {
        offset = -8;
    } else {
        offset = 8;
    }

    var textContainer = tokenContainer.append("text")
        .classed("token", true)
        .text(function (d) {
            return d;
        })
        .attr("font-size", TEXT_SIZE + "px")
        .style("fill", TEXT_COLOR)
        .style("cursor", "default")
        .style("-webkit-user-select", "none")
        .attr("x", leftPos + offset)
        .attr("y", function (d, i) {
            return i * BOXHEIGHT + HEADING_HEIGHT;
        })
        .attr("height", BOXHEIGHT)
        .attr("width", BOXWIDTH)
        .attr("dy", TEXT_SIZE);

    if (id == "leftText") {
        textContainer.style("text-anchor", "end")
            .attr("dx", BOXWIDTH - 2);
        tokenContainer.on("mouseover", function (d, index) {
            config.index = index;
            highlightSelection(svg, index);
            showComputation(svg, index);
        });
        tokenContainer.on("mouseleave", function () {
            config.index = null;
            unhighlightSelection(svg);
            hideComputation(svg)
        });

        // The expanded/collapsed variants of the original duplicated this
        // whole section and differed only in the icon path, its class, and
        // the click handler; factored into one helper.
        function appendToggleIcon(pathData, className, onClick) {
            tokenContainer.append('path')
                .attr("d", pathData)
                .classed(className, true)
                .attr("fill", "#909090")
                .style('font-size', "17px")
                .style('font-weight', 900)
                .style('opacity', 0)
                .attr("dy", 17)
                .attr("transform", function (d, i) {
                    var x = leftPos + 5;
                    var y = i * BOXHEIGHT + HEADING_HEIGHT + 4;
                    return "translate(" + x + " " + y + ")" +
                        "scale(0.03 0.03) "
                });
            // Invisible rect over the icon acts as the click target.
            tokenContainer.append('rect')
                .attr("x", leftPos + 5)
                .attr("y", function (d, i) {
                    return i * BOXHEIGHT + HEADING_HEIGHT + 4;
                })
                .style('opacity', 0)
                .attr("dy", 17)
                .attr("height", 16)
                .attr("width", 16)
                .on("click", onClick)
                .on("mouseover", function (d, i) {
                    d3.select(this).style("cursor", "pointer");
                })
                .on("mouseout", function (d, i) {
                    d3.select(this).style("cursor", "default");
                })
        }

        if (expanded) {
            // Minus icon: collapse the detail view.
            appendToggleIcon(
                "M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zM124 296c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h264c6.6 0 12 5.4 12 12v56c0 6.6-5.4 12-12 12H124z",
                "minus-sign",
                function (d, i) {
                    config.expanded = false;
                    showCollapsed();
                });
        } else {
            // Plus icon: expand the detail view.
            appendToggleIcon(
                "M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm144 276c0 6.6-5.4 12-12 12h-92v92c0 6.6-5.4 12-12 12h-56c-6.6 0-12-5.4-12-12v-92h-92c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h92v-92c0-6.6 5.4-12 12-12h56c6.6 0 12 5.4 12 12v92h92c6.6 0 12 5.4 12 12v56z",
                "plus-sign",
                function (d, i) {
                    config.expanded = true;
                    showExpanded();
                });
        }
    }
}
614
+
615
function updateTextAttention(svg, attn) {
    // Map each attention weight directly onto the opacity of the
    // corresponding right-column highlight rect.
    svg.select('#rightText')
        .selectAll(".highlight")
        .data(attn)
        .style("opacity", function (weight) {
            return weight;
        });
}
623
+
624
function renderDotProducts(svg, dotProducts, leftPos) {
    // One bordered square per token; the fill is applied later by
    // updateDotProducts, so the whole group starts hidden.
    var group = svg.append("svg:g")
        .attr("id", "dotproducts")
        .style("opacity", 0);
    group.selectAll("rect")
        .data(dotProducts)
        .enter()
        .append("rect")
        .classed('dotproduct', true)
        .attr("x", leftPos + 1)
        .attr("y", function (d, rowIndex) {
            return rowIndex * BOXHEIGHT + HEADING_HEIGHT;
        })
        .attr("height", BOXHEIGHT - 4)
        .attr("width", BOXHEIGHT - 4)
        .style("stroke-width", 1)
        .style("stroke", DOT_PRODUCT_BORDER_COLOR)
        .style("stroke-opacity", 1)
        .style("fill-opacity", 0)
        .attr("rx", 2)
        .attr("ry", 2);
}
646
+
647
function updateDotProducts(svg, dotProducts) {
    // Recolour the dot-product squares for the hovered query token: sign
    // picks the color, magnitude (tanh-squashed) picks the opacity.
    var signColor = function (value) {
        return value >= 0 ? POS_COLOR : NEG_COLOR;
    };
    var container = svg.select('#dotproducts').style("opacity", 1);
    container.selectAll(".dotproduct")
        .data(dotProducts)
        .style("fill", signColor)
        .style("fill-opacity", function (value) {
            return Math.tanh(Math.abs(value) / 54);
        })
        .style("stroke", signColor)
        .style("stroke-opacity", function (value) {
            // Keep at least a faint stroke so the square stays visible.
            return Math.max(Math.tanh(Math.abs(value) / 24), .15);
        })
        .attr("data-value", function (value) {
            return value;
        });
}
675
+
676
function updateSoftmax(svg, softmax) {
    // Softmax bars: width proportional to the weight, with a 1px floor so
    // every bar stays visible.
    svg.select('#softmaxes')
        .style("opacity", 1)
        .selectAll(".softmax")
        .data(softmax)
        .attr("width", function (weight) {
            return Math.max(weight * SOFTMAX_WIDTH, 1);
        })
        .attr("data-value", function (weight) {
            return weight;
        });
}
688
+
689
function highlightSelection(svg, index) {
    // Emphasise everything related to the query token at `index`: its query
    // vector, its text row, its attention lines, and the key/product rows it
    // interacts with. unhighlightSelection reverses all of this.
    svg.select("#queries")
        .selectAll(".vector")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.4; // dim all other query rows
        });
    svg.select("#queries")
        .selectAll(".vectorborder")
        .style("stroke-opacity", function (d, i) {
            return i == index ? 1.0 : 0;
        });
    svg.select("#queries")
        .select(".matrixborder")
        .style("stroke-opacity", 0);
    svg.select("#leftText")
        .selectAll(".highlight")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    // Show the expand/collapse icon only on the hovered row; which icon
    // exists depends on the current view mode.
    if (config.expanded) {
        svg.select("#leftText")
            .selectAll(".minus-sign")
            .style("opacity", function (d, i) {
                return i == index ? 1.0 : 0.0;
            })
    } else {
        svg.select("#leftText")
            .selectAll(".plus-sign")
            .style("opacity", function (d, i) {
                return i == index ? 1.0 : 0.0;
            })
    }
    // Substitute the concrete row number into any "i" placeholders.
    svg.selectAll(".i-index")
        .text(index);
    svg.selectAll(".attn-line-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.selectAll(".qk-line-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    if (config.bidirectional) {
        svg.select("#keys")
            .selectAll(".vectorborder")
            .style("stroke-opacity", 1);
    } else {
        // Non-bidirectional model: rows past `index` are hidden — presumably
        // causal masking (token attends only to positions <= index); confirm
        // against the model side.
        svg.select("#keys")
            .selectAll(".vectorborder")
            .style("opacity", function (d, i) {
                return i <= index ? 1.0 : 0.0;
            });
        svg.select("#keys")
            .selectAll(".vector")
            .style("opacity", function (d, i) {
                return i <= index ? 1.0 : 0.0;
            });
        svg.select("#product")
            .selectAll(".vector")
            .style("opacity", function (d, i) {
                return i <= index ? 1.0 : 0.0;
            });
        svg.select("#dotproducts")
            .selectAll("rect")
            .style("opacity", function (d, i) {
                return i <= index ? 1.0 : 0.0;
            });
    }
    svg.select('#hlines1')
        .selectAll(".horiz-line-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.select('#hlines2')
        .selectAll(".horiz-line-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.select('#hlines3')
        .selectAll(".horiz-line-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.select('#key-vector-highlights')
        .selectAll(".vector-highlight-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.select('#product-vector-highlights')
        .selectAll(".vector-highlight-group")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        });
    svg.selectAll(".text-query-line")
        .style("opacity", function (d, i) {
            return i == index ? 1.0 : 0.0;
        })
}
787
+
788
function unhighlightSelection(svg) {
    // Reverse every change made by highlightSelection, restoring the
    // resting (no token hovered) state.
    svg.select("#queries")
        .selectAll(".vector")
        .style("opacity", 1.0);
    svg.select("#queries")
        .selectAll(".vectorborder")
        .style("stroke-opacity", 0);
    svg.select("#queries")
        .select(".matrixborder")
        .style("stroke-opacity", 1);
    svg.select("#leftText")
        .selectAll(".highlight")
        .style("opacity", 0.0);
    svg.select("#leftText")
        .selectAll(".minus-sign")
        .style("opacity", 0);
    svg.select("#leftText")
        .selectAll(".plus-sign")
        .style("opacity", 0);
    // Restore the symbolic "i" placeholder in column headings.
    svg.selectAll(".i-index")
        .text("i");
    // In the collapsed view all attention lines are shown at rest.
    if (!config.expanded) {
        svg.selectAll(".attn-line-group")
            .style("opacity", 1)
    }
    svg.selectAll(".qk-line-group")
        .style("opacity", 0);
    svg.select("#keys")
        .selectAll(".vectorborder")
        .style("stroke-opacity", 0);

    svg.selectAll(".horiz-line-group")
        .style("opacity", 0);
    svg.selectAll(".vector-highlight-group")
        .style("opacity", 0);
    svg.selectAll(".text-query-line")
        .style("opacity", 0);

    if (!config.bidirectional) {
        svg.select("#keys")
            .selectAll(".vector")
            .style("opacity", 1);
        // NOTE(review): every other selector in this file uses the camelCase
        // id "rightText" (see renderText/hideComputation); "#right_text"
        // matches no element created here, making this call a no-op —
        // confirm the intended id before changing.
        svg.select("#right_text")
            .selectAll("text")
            .style("opacity", 1);
    }
}
835
+
836
function showComputation(svg, query_index) {
    // Populate the elementwise-product, dot-product and softmax columns for
    // the query token under the cursor.
    var attnData = config.attention[config.filter];
    var query_vector = attnData.queries[config.layer][config.head][query_index];
    var keys = attnData.keys[config.layer][config.head];
    var att = attnData.attn[config.layer][config.head][query_index];

    var productVectors = [];
    var dotProducts = [];
    keys.forEach(function (key_vector) {
        // Elementwise product of query and key; its sum is the dot product.
        var productVector = [];
        var dotProduct = 0;
        for (var j = 0; j < config.vectorSize; j++) {
            var product = query_vector[j] * key_vector[j];
            productVector.push(product);
            dotProduct += product;
        }
        productVectors.push(productVector);
        dotProducts.push(dotProduct);
    });

    updateVectors(svg, 'product', productVectors);
    updateDotProducts(svg, dotProducts);
    updateSoftmax(svg, att);
    updateTextAttention(svg, att);
    svg.select("#placeholder").style("opacity", 0);
}
864
+
865
function hideComputation(svg) {
    // Hide every computation column and bring the placeholder back.
    ["#product", "#dotproducts", "#softmaxes"].forEach(function (selector) {
        svg.select(selector).style("opacity", 0);
    });
    svg.select('#rightText').selectAll("rect").style("opacity", 0);
    svg.select("#placeholder").style("opacity", 1);
}
872
+
873
function updateVectors(svg, id, data) {
    // Rebind values and restyle the heat-map cells of one vector column.
    var container = svg.select('#' + id).style("opacity", 1);
    container.selectAll(".vector")
        .data(data)
        .selectAll(".element") // loop over element rects within each vector
        .data(function (parentVector) {
            return parentVector; // bind each rect to its element value
        })
        .style("fill", function (value) {
            return value >= 0 ? POS_COLOR : NEG_COLOR;
        })
        .attr("data-value", function (value) {
            return value;
        })
        .style("opacity", function (value) {
            return Math.tanh(Math.abs(value) / 4);
        });
}
897
+
898
function showCollapsed() {
    // Re-apply highlighting for the current token (if any), then swap
    // which of the two pre-rendered views is visible.
    if (config.index != null) {
        highlightSelection(d3.select("#vis"), config.index);
    }
    d3.select("#expanded").attr("visibility", "hidden");
    d3.select("#collapsed").attr("visibility", "visible");
}
906
+
907
function showExpanded() {
    // Restore highlight and computation panes for the current token (if
    // any), then swap which of the two pre-rendered views is visible.
    if (config.index != null) {
        var vis = d3.select("#vis");
        highlightSelection(vis, config.index);
        showComputation(vis, config.index);
    }
    d3.select("#expanded").attr("visibility", "visible");
    d3.select("#collapsed").attr("visibility", "hidden")
}
916
+
917
function initialize() {
    // Derive model geometry (layer/head counts, vector size) from the
    // attention payload in `params` and reset the view state.
    config.attention = params['attention'];
    config.filter = params['default_filter'];
    var attentionFilter = config.attention[config.filter];
    config.nLayers = attentionFilter['attn'].length;
    config.nHeads = attentionFilter['attn'][0].length;
    config.vectorSize = attentionFilter['queries'][0][0][0].length; // Layer 0, head 0, position 0 length
    config.headVis = new Array(config.nHeads).fill(true); // all heads visible initially
    config.head = 0;
    config.layer = 0;
    config.initialTextLength = attentionFilter.right_text.length;
    config.expanded = false; // start in the collapsed view
    config.bidirectional = params['bidirectional']
}
931
+
932
+ $("#layer").empty();
933
+ for (var i = 0; i < config.nLayers; i++) {
934
+ $("#layer").append($("<option />").val(i).text(i));
935
+ }
936
+
937
+ $("#layer").on('change', function (e) {
938
+ config.layer = +e.currentTarget.value;
939
+ render();
940
+ });
941
+
942
+ $("#att_head").empty();
943
+ for (var i = 0; i < config.nHeads; i++) {
944
+ $("#att_head").append($("<option />").val(i).text(i));
945
+ }
946
+
947
+ $("#att_head").on('change', function (e) {
948
+ config.head = +e.currentTarget.value;
949
+ render();
950
+ });
951
+
952
+ $("#filter").on('change', function (e) {
953
+ config.filter = e.currentTarget.value;
954
+ render();
955
+ });
956
+
957
+ render();
958
+
959
+ });
11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/file_utils.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for working with the local dataset cache.
3
+ This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
4
+ Copyright by the AllenNLP authors.
5
+ """
6
+ from __future__ import (absolute_import, division, print_function, unicode_literals)
7
+
8
+ import sys
9
+ import json
10
+ import logging
11
+ import os
12
+ import shutil
13
+ import tempfile
14
+ import fnmatch
15
+ from functools import wraps
16
+ from hashlib import sha256
17
+ from io import open
18
+
19
+ import boto3
20
+ import requests
21
+ from botocore.exceptions import ClientError
22
+ from tqdm import tqdm
23
+
24
+ try:
25
+ from torch.hub import _get_torch_home
26
+ torch_cache_home = _get_torch_home()
27
+ except ImportError:
28
+ torch_cache_home = os.path.expanduser(
29
+ os.getenv('TORCH_HOME', os.path.join(
30
+ os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
31
+ default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
32
+
33
+ try:
34
+ from urllib.parse import urlparse
35
+ except ImportError:
36
+ from urlparse import urlparse
37
+
38
+ try:
39
+ from pathlib import Path
40
+ PYTORCH_PRETRAINED_BERT_CACHE = Path(
41
+ os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
42
+ except (AttributeError, ImportError):
43
+ PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
44
+ os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
45
+ default_cache_path))
46
+
47
+ PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
48
+
49
+ logger = logging.getLogger(__name__) # pylint: disable=invalid-name
50
+
51
+
52
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.

    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    # Hash the URL (and optionally the ETag) so cache filenames are stable
    # across runs and filesystem-safe.
    parts = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(parts)
68
+
69
+
70
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.

    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its sidecar metadata must be present.
    for required_path in (cache_path, meta_path):
        if not os.path.exists(required_path):
            raise EnvironmentError("file {} not found".format(required_path))

    with open(meta_path, encoding="utf-8") as fh:
        metadata = json.load(fh)
    return metadata['url'], metadata['etag']
94
+
95
+
96
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3:
        # Normalise Path objects to plain strings on Python 3.
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: fetch through the cache (downloading if necessary).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Local file, and it exists.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
124
+
125
+
126
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # urlparse keeps a single leading '/' on the key; strip exactly one.
    key = parsed.path[1:] if parsed.path.startswith("/") else parsed.path
    return parsed.netloc, key
137
+
138
+
139
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3 404s into the same EnvironmentError a missing
            # local file would raise; re-raise everything else untouched.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))

    return wrapper
156
+
157
+
158
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    # boto3 resolves AWS credentials from the environment; the s3_request
    # decorator converts S3 404s into EnvironmentError.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
165
+
166
+
167
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    # Streams the object body into the supplied open file handle; the
    # s3_request decorator converts S3 404s into EnvironmentError.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
173
+
174
+
175
def http_get(url, temp_file):
    """Stream `url` into the open file handle `temp_file`, with a tqdm progress bar."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    # Servers using chunked transfer send no Content-Length; tqdm then shows
    # an indeterminate bar (total=None).
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
185
+
186
+
187
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            response = requests.head(url, allow_redirects=True)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            # Offline or unreachable host: continue with etag=None so a
            # previously downloaded copy can still be reused below.
            etag = None

    if sys.version_info[0] == 2 and etag is not None:
        etag = etag.decode('utf-8')
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            # Record the source URL and ETag next to the cached file so
            # filename_to_url can recover them later.
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(output_string, 'utf-8')  # The beauty of python 2
                meta_file.write(output_string)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_bert.py ADDED
@@ -0,0 +1,1254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ #
18
+ # Change log
19
+ # 7/14/19 Jesse Vig Adapted for use in visualization
20
+
21
+
22
+ """PyTorch BERT model."""
23
+
24
+ from __future__ import absolute_import, division, print_function, unicode_literals
25
+
26
+ import json
27
+ import logging
28
+ import math
29
+ import os
30
+ import sys
31
+ from io import open
32
+
33
+ import torch
34
+ from torch import nn
35
+ from torch.nn import CrossEntropyLoss, MSELoss
36
+
37
+ from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
38
+ prune_linear_layer, add_start_docstrings)
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
# Canonical download URL of the pretrained weights file (pytorch_model.bin)
# for each published BERT checkpoint, keyed by its shortcut name.
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
57
+
58
# Download URLs for the JSON config matching each pretrained model above.
# Consumed by BertConfig via pretrained_config_archive_map.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
73
+
74
+
75
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated scope
    path onto attributes of `model`, and copies the numpy array into the
    matching parameter (transposing dense `kernel` matrices). Optimizer-only
    variables (adam_v / adam_m / global_step) are skipped. Returns `model`
    with its weights replaced in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Scope components such as "layer_11" split into ("layer", "11") so
            # the numeric part can index into a ModuleList below.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Translate TF variable names onto PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the inner loop over
                    # path components, not the outer loop over variables; this
                    # matches upstream pytorch-transformers behavior — confirm
                    # before "fixing".
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error so mismatches are diagnosable.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
140
+
141
+
142
def gelu(x):
    """Exact (erf-based) Gaussian Error Linear Unit activation.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    erf_term = torch.erf(x / math.sqrt(2.0))
    return x * 0.5 * (1.0 + erf_term)
149
+
150
+
151
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    gate = torch.sigmoid(x)
    return x * gate
153
+
154
+
155
+ ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
156
+
157
+
158
class BertConfig(PretrainedConfig):
    r"""
    :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
    `BertModel`.


    Arguments:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30522,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        # A str (or py2 `unicode`) first argument is a path to a JSON config
        # file whose keys are copied verbatim onto the instance; an int is the
        # vocabulary size, with the remaining keyword defaults applied.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            # Bug fix: the two adjacent string fragments previously concatenated
            # to "(int)or the path" — add the missing space.
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")
226
+
227
+
228
+
229
try:
    # Prefer NVIDIA apex's fused kernel when available.
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError):
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")

    class BertLayerNorm(nn.Module):
        """TF-style layer normalization (epsilon inside the square root)."""

        def __init__(self, hidden_size, eps=1e-12):
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last dimension, then apply learned scale/shift.
            mean = x.mean(-1, keepdim=True)
            centered = x - mean
            variance = centered.pow(2).mean(-1, keepdim=True)
            normalized = centered / torch.sqrt(variance + self.variance_epsilon)
            return self.weight * normalized + self.bias
247
+
248
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        # Default position ids are 0..seq_len-1, broadcast over the batch.
        if position_ids is None:
            seq_length = input_ids.size(1)
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        # Default token types are all zero (single-segment input).
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
278
+
279
+
280
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    When ``config.output_attentions`` is set, the forward pass also returns a
    dict with the attention probabilities plus the query and key layers (this
    extra data is what the visualization code consumes).
    """

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Raw attention scores, scaled by sqrt(head size) and shifted by the
        # additive attention mask (precomputed in BertModel.forward).
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask

        # Softmax over the key dimension; dropout drops whole tokens to attend
        # to, as in the original Transformer paper.
        attention_probs = self.dropout(nn.Softmax(dim=-1)(scores))

        # Zero out pruned/masked heads when requested.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context = torch.matmul(attention_probs, value_layer)
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)

        if self.output_attentions:
            attn_data = {
                'attn': attention_probs,
                'queries': query_layer,
                'keys': key_layer
            }
            return (context, attn_data)
        return (context,)
346
+
347
+
348
class BertSelfOutput(nn.Module):
    """Project the attention output, then add & normalize with the residual."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
360
+
361
+
362
class BertAttention(nn.Module):
    """Self-attention sub-layer: BertSelfAttention followed by BertSelfOutput."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        """Remove the given head indices from this attention layer in place."""
        if not heads:
            return
        # Build a flat keep-mask over all (head, head_size) slots.
        keep = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head_index in heads:
            keep[head_index] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()
        # Shrink the projection layers to the kept slots.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Keep head bookkeeping consistent with the new sizes.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        # Pass any attention data from the self-attention module through.
        return (attention_output,) + self_outputs[1:]
390
+
391
+
392
class BertIntermediate(nn.Module):
    """Feed-forward expansion: linear projection followed by the activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a name looked up in ACT2FN, or a callable.
        is_act_name = isinstance(config.hidden_act, str) or (
            sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode))
        self.intermediate_act_fn = ACT2FN[config.hidden_act] if is_act_name else config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
405
+
406
+
407
class BertOutput(nn.Module):
    """Feed-forward contraction plus residual add & LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
419
+
420
+
421
class BertLayer(nn.Module):
    """One full transformer block: attention, intermediate, output."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        layer_output = self.output(self.intermediate(attention_output), attention_output)
        # Pass attention data through when the attention module produced it.
        return (layer_output,) + attention_outputs[1:]
435
+
436
+
437
class BertEncoder(nn.Module):
    """Stack of BertLayer modules; optionally collects per-layer hidden
    states and attention data according to the config flags."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        # NOTE: head_mask is indexed per layer below, so callers must pass a
        # sequence with one entry per layer (BertModel.forward does).
        all_hidden_states = ()
        all_attentions = ()
        for layer_index, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[layer_index])
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Also record the final layer's output.
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # outputs, (hidden states), (attentions)
467
+
468
+
469
class BertPooler(nn.Module):
    """Pool the sequence: the first token's hidden state through Linear + tanh."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply taking the first token's hidden state.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
482
+
483
+
484
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be a name looked up in ACT2FN, or a callable.
        is_act_name = isinstance(config.hidden_act, str) or (
            sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode))
        self.transform_act_fn = ACT2FN[config.hidden_act] if is_act_name else config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
499
+
500
+
501
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then decode to vocab logits.

    The output weights are the same as the input embeddings, but there is an
    output-only bias for each token (kept separate from the bias-free decoder
    so the weight tying stays clean).
    """

    def __init__(self, config):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states)) + self.bias
518
+
519
+
520
class BertOnlyMLMHead(nn.Module):
    """Wrapper exposing just the masked-LM prediction head."""

    def __init__(self, config):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
528
+
529
+
530
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: binary classifier over the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
538
+
539
+
540
class BertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM scores and next-sentence scores."""

    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM logits come from the token-level sequence output; the NSP logits
        # come from the pooled ([CLS]) representation.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
550
+
551
+
552
class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    # Wiring consumed by PreTrainedModel.from_pretrained.
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.

        Applied per submodule via `self.apply(self.init_weights)`:
        Linear/Embedding weights get a normal init, LayerNorm gets
        (bias=0, weight=1), and Linear biases are zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Separate check (not elif): Linear modules fall through here so their
        # bias is zeroed in addition to the weight init above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
576
+
577
+
578
# Docstring fragment prepended to BERT model classes by the
# `add_start_docstrings` decorator (see the decorated classes below).
BERT_START_DOCSTRING = r"""    The BERT model was proposed in
    `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
    by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
    pre-trained using a combination of masked language modeling objective and next sentence prediction
    on a large corpus comprising the Toronto Book Corpus and Wikipedia.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matter related to general usage and behavior.

    .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
        https://arxiv.org/abs/1810.04805

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
"""
596
+
597
# Docstring fragment describing the shared forward() inputs, appended to BERT
# model classes by the `add_start_docstrings` decorator.
BERT_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:

            (a) For sequence pairs:

                ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``

                ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``

            (b) For single sequences:

                ``tokens: [CLS] the dog is hairy . [SEP]``

                ``token_type_ids: 0 0 0 0 0 0 0``

            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
635
+
636
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        # NOTE: forced on for the visualization adaptation of this file, so the
        # self-attention modules always return their attention data.
        config.output_attentions = True
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        # Swap the word-embedding table for a resized copy and return it.
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
        # Default to attending everywhere / single-segment inputs.
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            # BertEncoder indexes head_mask per layer, so provide one entry each.
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
735
+
736
+
737
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head, computed from the
            pooled output (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, seq_relationship_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # Scores first, then any hidden states / attentions BERT returned.
        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]

        # The combined pre-training loss is only computed when BOTH label sets are supplied.
        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)  # -1 marks tokens excluded from the MLM loss
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs

        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
811
+
812
+
813
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for the masked language modeling loss. Indices are in
        ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring); positions labelled
        ``-1`` are ignored, so the loss is computed only over positions whose labels lie in
        ``[0, ..., config.vocab_size]``.

    Outputs: `Tuple` whose content depends on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Vocabulary scores for every position (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (embedding output plus one per layer), each of shape
            ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one per layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``: post-softmax attention weights.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """Share the input embedding matrix with the LM-head output projection
        (cloned rather than shared for TorchScript export)."""
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                position_ids=None, head_mask=None):
        bert_outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                 attention_mask=attention_mask, head_mask=head_mask)

        token_states = bert_outputs[0]
        prediction_scores = self.cls(token_states)

        # Scores first, then whatever extras (hidden states / attentions) BERT produced.
        result = (prediction_scores,) + bert_outputs[2:]

        if masked_lm_labels is not None:
            criterion = CrossEntropyLoss(ignore_index=-1)  # -1 labels are excluded from the loss
            lm_loss = criterion(prediction_scores.view(-1, self.config.vocab_size),
                                masked_lm_labels.view(-1))
            result = (lm_loss,) + result

        return result  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
876
+
877
+
878
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
    r"""
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head, computed from the
            pooled output (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        seq_relationship_scores = outputs[0]

    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        # The NSP head classifies the pooled (sentence-level) representation.
        pooled_output = outputs[1]

        seq_relationship_score = self.cls(pooled_output)

        outputs = (seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = (next_sentence_loss,) + outputs

        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
933
+
934
+
935
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Targets for the sequence-level loss, in ``[0, ..., config.num_labels - 1]``.
        With ``config.num_labels == 1`` a regression (mean-square) loss is used;
        with ``config.num_labels > 1`` a cross-entropy classification loss is used.

    Outputs: `Tuple` whose content depends on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores, pre-SoftMax.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (embedding output plus one per layer), each of shape
            ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one per layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``: post-softmax attention weights.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]

    """
    def __init__(self, config):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        bert_outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                 attention_mask=attention_mask, head_mask=head_mask)

        # Classify from the pooled (sentence-level) representation, with dropout.
        cls_repr = self.dropout(bert_outputs[1])
        logits = self.classifier(cls_repr)

        result = (logits,) + bert_outputs[2:]

        if labels is not None:
            if self.num_labels == 1:
                # Single output unit -> treat the task as regression.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            result = (loss,) + result

        return result  # (loss), logits, (hidden_states), (attentions)
1001
+
1002
+
1003
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary; the second dimension
            (`num_choices`) is the number of candidate answers to score.
            To match pre-training, format sequences with [CLS] and [SEP]:

            (a) sequence pairs:

                ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``

                ``token_type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1``

            (b) single sequences:

                ``tokens:         [CLS] the dog is hairy . [SEP]``

                ``token_type_ids:   0   0   0   0  0     0   0``

            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`
            (:func:`pytorch_transformers.PreTrainedTokenizer.encode` /
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids`).
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment indices in ``[0, 1]``: ``0`` marks `sentence A` tokens, ``1`` marks
            `sentence B` tokens (see `BERT: Pre-training of Deep Bidirectional Transformers for
            Language Understanding`_ for details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Padding mask in ``[0, 1]``: ``1`` for real tokens, ``0`` for padding.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask in ``[0, 1]`` to nullify selected self-attention heads
            (``1`` = keep the head, ``0`` = mask it).
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Index of the correct choice per example, in ``[0, ..., num_choices]``.

    Outputs: `Tuple` whose content depends on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Per-choice classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (embedding output plus one per layer), each of shape
            ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one per layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``: post-softmax attention weights.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForMultipleChoice, self).__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        num_choices = input_ids.shape[1]

        # Fold the choice dimension into the batch so BERT sees 2-D inputs.
        def _fold(tensor):
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        bert_outputs = self.bert(_fold(input_ids),
                                 position_ids=_fold(position_ids),
                                 token_type_ids=_fold(token_type_ids),
                                 attention_mask=_fold(attention_mask),
                                 head_mask=head_mask)

        choice_repr = self.dropout(bert_outputs[1])
        # One score per (example, choice); unfold back to (batch, num_choices).
        reshaped_logits = self.classifier(choice_repr).view(-1, num_choices)

        result = (reshaped_logits,) + bert_outputs[2:]

        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)
            result = (loss,) + result

        return result  # (loss), reshaped_logits, (hidden_states), (attentions)
1107
+
1108
+
1109
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Per-token targets for the classification loss, in ``[0, ..., config.num_labels - 1]``.

    Outputs: `Tuple` whose content depends on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
            Per-token classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (embedding output plus one per layer), each of shape
            ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one per layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``: post-softmax attention weights.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForTokenClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        bert_outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                 attention_mask=attention_mask, head_mask=head_mask)

        token_states = self.dropout(bert_outputs[0])
        logits = self.classifier(token_states)

        result = (logits,) + bert_outputs[2:]

        if labels is not None:
            criterion = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is not None:
                # Restrict the loss to positions the attention mask marks as real tokens.
                keep = attention_mask.view(-1) == 1
                loss = criterion(flat_logits[keep], flat_labels[keep])
            else:
                loss = criterion(flat_logits, flat_labels)
            result = (loss,) + result

        return result  # (loss), scores, (hidden_states), (attentions)
1174
+
1175
+
1176
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
    r"""
    **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]

    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        # Projects each token state to (start, end) logits; num_labels is expected to be 2.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split the last dimension into separate start/end logit vectors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # NOTE(review): clamp_ mutates the caller's label tensors in place — confirm callers don't reuse them.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_gpt2.py ADDED
@@ -0,0 +1,747 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+ # Change log
18
+ # 7/14/19 Jesse Vig Adapted for use in visualization
19
+
20
+ """PyTorch OpenAI GPT-2 model."""
21
+
22
+ from __future__ import absolute_import, division, print_function, unicode_literals
23
+
24
+ import collections
25
+ import json
26
+ import logging
27
+ import math
28
+ import os
29
+ import sys
30
+ from io import open
31
+
32
+ import torch
33
+ import torch.nn as nn
34
+ from torch.nn import CrossEntropyLoss
35
+ from torch.nn.parameter import Parameter
36
+
37
+ from .modeling_utils import (Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig,
38
+ PreTrainedModel, prune_conv1d_layer, SequenceSummary,
39
+ add_start_docstrings)
40
+ from .modeling_bert import BertLayerNorm as LayerNorm
41
+
42
+ logger = logging.getLogger(__name__)
43
+
44
+ GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
45
+ "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"}
46
+ GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
47
+ "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"}
48
+
49
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load a TensorFlow GPT-2 checkpoint into a PyTorch ``model`` in place.

    Walks every variable in the TF checkpoint, maps its slash-separated name
    onto the corresponding PyTorch sub-module/parameter, checks shapes, and
    copies the weights. Returns the same ``model`` instance.

    Raises:
        ImportError: if TensorFlow is not installed.
        AssertionError: if a checkpoint tensor's shape does not match the
            PyTorch parameter it maps to (shapes are appended to the error).
    """
    try:
        import re
        import numpy as np  # NOTE(review): imported but unused here — kept for parity
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        # squeeze() drops the size-1 dims TF stores for Conv1D-style weights
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            # Names like "h0" encode a layer index; split into ("h", "0").
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming -> PyTorch attribute mapping:
            #   w/g -> weight, b -> bias, wpe/wte -> embedding's .weight
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Numeric suffix indexes into a ModuleList (e.g. h[3]).
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
101
+
102
+
103
def gelu(x):
    """Gaussian Error Linear Unit activation (GPT-2's tanh approximation)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
105
+
106
+
107
class GPT2Config(PretrainedConfig):
    """Configuration class to store the configuration of a `GPT2Model`.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model`,
            or the path to a configuration json file to load all fields from.
        n_positions: Number of positional embeddings.
        n_ctx: Size of the causal mask (usually same as n_positions).
        n_embd: Dimensionality of the embeddings and hidden states.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        layer_norm_epsilon: epsilon to use in the layer norm layers.
        resid_pdrop: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attn_pdrop: The dropout ratio for the attention probabilities.
        embd_pdrop: The dropout ratio for the embeddings.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        num_labels, summary_*: configuration of the optional sequence-summary
            head (used by the multiple-choice model).
    """
    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,

        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Construct a GPT2Config from explicit values or from a json file path."""
        super(GPT2Config, self).__init__(**kwargs)

        # The `unicode` check only runs under Python 2 (short-circuited on Py3),
        # so the undefined name is never evaluated on Python 3.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # Path given: load every field verbatim from the json config file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # Vocab size given: take all remaining hyper-parameters from the arguments.
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range

            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_first_dropout = summary_first_dropout
            self.summary_proj_to_labels = summary_proj_to_labels
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    # Aliases exposing GPT-2's names under the generic BERT-style attribute names.
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
218
+
219
+
220
+
221
class Attention(nn.Module):
    """Multi-head causal self-attention with a fused QKV projection (Conv1D).

    Adapted for visualization (see file header): when ``config.output_attentions``
    is set, ``forward`` also returns a dict with the attention probabilities and
    the query/key tensors.
    """

    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, shaped (1, 1, n_ctx, n_ctx) for broadcasting.
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.c_attn = Conv1D(n_state * 3, nx)  # fused projection to Q, K and V
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def prune_heads(self, heads):
        """Remove the given head indices and shrink the projections accordingly."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Q, K and V blocks sit side by side in c_attn's output dimension.
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)

    def _attn(self, q, k, v, head_mask=None):
        # q: (batch, head, seq, head_dim); k is pre-transposed by split_heads(k=True)
        # to (batch, head, head_dim, seq), so this matmul yields attention scores.
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal mask so previously cached (past) keys stay visible.
        b = self.bias[:, :, ns-nd:ns, :ns]
        w = w * b - 1e4 * (1 - b)  # large negative bias on future positions

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq, head_dim) -> (batch, seq, head*head_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq, n_embd) -> per-head layout; keys get an extra transpose
        # so _attn can matmul without transposing again.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Prepend cached keys/values from previous decoding steps.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        if self.output_attentions:
            attention_probs = attn_outputs[1]
            # Visualization payload: raw probabilities plus the Q/K tensors
            # (keys transposed back to (batch, head, seq, head_dim)).
            attn_data = {
                'attn': attention_probs,
                'queries': query,
                'keys': key.transpose(-1, -2)
            }
            outputs = [a, present, attn_data]
        else:
            outputs = [a, present]
        return outputs  # a, present, (attentions)
319
+
320
+
321
class MLP(nn.Module):
    """Position-wise feed-forward block: Conv1D expand -> gelu -> Conv1D project -> dropout."""

    def __init__(self, n_state, config):
        # n_state is the inner width, typically 4 * n_embd (3072 for GPT-2 small).
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        hidden = self.c_fc(x)
        hidden = self.act(hidden)
        projected = self.c_proj(hidden)
        return self.dropout(projected)
334
+
335
+
336
class Block(nn.Module):
    """One GPT-2 transformer layer: pre-LayerNorm self-attention and pre-LayerNorm
    MLP, each wrapped in a residual connection."""

    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        hidden_dim = config.n_embd
        self.ln_1 = LayerNorm(hidden_dim, eps=config.layer_norm_epsilon)
        self.attn = Attention(hidden_dim, n_ctx, config, scale)
        self.ln_2 = LayerNorm(hidden_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * hidden_dim, config)

    def forward(self, x, layer_past=None, head_mask=None):
        # attn_outputs: [attention output, present, (attn_data)]
        attn_outputs = self.attn(self.ln_1(x), layer_past=layer_past, head_mask=head_mask)
        x = x + attn_outputs[0]            # residual around attention
        x = x + self.mlp(self.ln_2(x))     # residual around MLP
        return [x] + attn_outputs[1:]      # x, present, (attentions)
355
+
356
+
357
class GPT2PreTrainedModel(PreTrainedModel):
    """Abstract base class wiring GPT-2's config class, archive maps and weight
    initialization into the shared pretrained-model loading machinery."""

    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """Initialize one sub-module's weights.

        Linear/Embedding/Conv1D weights get normal(0, initializer_range);
        biases are zeroed; LayerNorm is reset to (weight=1, bias=0).
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version, which uses truncated_normal
            # (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
381
+
382
+
383
+ GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
384
+ `Language Models are Unsupervised Multitask Learners`_
385
+ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
386
+ It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
387
+ corpus of ~40 GB of text data.
388
+
389
+ This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
390
+ refer to the PyTorch documentation for all matter related to general usage and behavior.
391
+
392
+ .. _`Language Models are Unsupervised Multitask Learners`:
393
+ https://openai.com/blog/better-language-models/
394
+
395
+ .. _`torch.nn.Module`:
396
+ https://pytorch.org/docs/stable/nn.html#module
397
+
398
+ Parameters:
399
+ config (:class:`~pytorch_transformers.GPT2Config`): Model configuration class with all the parameters of the model.
400
+ """
401
+
402
+ GPT2_INPUTS_DOCSTRING = r""" Inputs:
403
+ **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
404
+ Indices of input sequence tokens in the vocabulary.
405
+ Indices can be obtained using :class:`pytorch_transformers.BPT2Tokenizer`.
406
+ See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
407
+ :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
408
+ **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
409
+ Indices of positions of each input sequence tokens in the position embeddings.
410
+ Selected in the range ``[0, config.max_position_embeddings - 1]``.
411
+ **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
412
+ A parallel sequence of tokens (can be used to indicate various portions of the inputs).
413
+ The embeddings from these tokens will be summed with the respective token embeddings.
414
+ Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
415
+ **past**:
416
+ list of ``torch.FloatTensor`` (one for each layer):
417
+ that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
418
+ (see `past` output below). Can be used to speed up sequential decoding.
419
+ **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
420
+ Mask to avoid performing attention on padding token indices.
421
+ Mask values selected in ``[0, 1]``:
422
+ ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
423
+ **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
424
+ Mask to nullify selected heads of the self-attention modules.
425
+ Mask values selected in ``[0, 1]``:
426
+ ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
427
+ """
428
+
429
@add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.",
                      GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        self.output_hidden_states = config.output_hidden_states
        # Forced on here — presumably so the neuron-view visualization always
        # receives attention data (this file is adapted for visualization).
        config.output_attentions = True
        self.output_attentions = config.output_attentions

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)   # token embeddings
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)  # position embeddings
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        # Swap in a resized token-embedding table and return it.
        self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
        return self.wte

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None):
        # `past` caches per-layer key/value states; position ids continue from
        # the cached length so incremental decoding stays consistent.
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        # Flatten any leading batch dims to (batch, seq) for the embedding lookups.
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            # Token-type ids share the token embedding table (no separate vocab).
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(hidden_states, layer_past, head_mask[i])
            hidden_states, present = outputs[:2]
            presents = presents + (present,)

            if self.output_attentions:
                # outputs[2] is the per-layer attention payload from Attention.forward
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states, presents)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # # let the number of heads free (-1) so we can extract attention even after head pruning
            # attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            # all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, presents, (all hidden_states), (attentions)
552
+
553
+
554
@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for language modeling. The labels **are shifted** inside the model,
        i.e. you can set ``labels = input_ids``. Indices are selected in
        ``[-1, 0, ..., config.vocab_size]``; labels set to ``-1`` are ignored and
        the loss is only computed over labels in ``[0, ..., config.vocab_size]``.

    Outputs: `Tuple` of
        **loss**: (`optional`, when ``labels`` is given) language-modeling loss, shape ``(1,)``.
        **prediction_scores**: logits over the vocabulary,
            shape ``(batch_size, sequence_length, config.vocab_size)``.
        **past**: per-layer cached key/value states (see the `past` input).
        **hidden_states** / **attentions**: optional, depending on config flags.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
        loss, logits = model(input_ids, labels=input_ids)[:2]
    """

    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """Share (or clone, for TorchScript) the LM head weights with the
        input token embeddings."""
        self._tie_or_clone_weights(self.lm_head, self.transformer.wte)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(
            input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            past=past,
            head_mask=head_mask,
        )
        sequence_output = transformer_outputs[0]
        lm_logits = self.lm_head(sequence_output)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Predict token t+1 from position t: drop the final logit and the first label.
            shifted_logits = lm_logits[..., :-1, :].contiguous()
            shifted_labels = labels[..., 1:].contiguous()
            # Labels equal to -1 are masked out of the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            lm_loss = loss_fct(shifted_logits.view(-1, shifted_logits.size(-1)),
                               shifted_labels.view(-1))
            outputs = (lm_loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)
625
+
626
+
627
@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the intput sequence).
""", GPT2_START_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r""" Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary; the second
            dimension (`num_choices`) is the number of choices to score.
        **mc_token_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence,
            selected in ``[0, input_ids.size(-1) - 1[``.
        **position_ids** / **token_type_ids**: (`optional`) same shapes as ``input_ids``;
            see the GPT2 inputs docstring.
        **past**: (`optional`) per-layer cached key/value states for fast decoding.
        **head_mask**: (`optional`) mask of shape ``(num_heads,)`` or ``(num_layers, num_heads)``
            with ``1`` = keep head, ``0`` = mask head.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Language-modeling labels, shifted inside the model; ``-1`` entries are ignored.
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Multiple-choice labels in ``[0, num_choices]``.

    Outputs: `Tuple` of
        **lm_loss**: (`optional`, when ``lm_labels`` is given) LM loss, shape ``(1,)``.
        **mc_loss**: (`optional`, when ``mc_labels`` is given) classification loss, shape ``(1,)``.
        **lm_prediction_scores**: ``(batch_size, num_choices, sequence_length, config.vocab_size)``.
        **mc_prediction_scores**: ``(batch_size, num_choices)``.
        **past** / **hidden_states** / **attentions**: as in ``GPT2Model``.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})  # Add a [CLS] to the vocabulary (we should train it also!)
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        mc_token_ids = torch.tensor([input_ids.size(-1), input_ids.size(-1)]).unsqueeze(0)  # Batch size 1
        lm_prediction_scores, mc_prediction_scores = model(input_ids, mc_token_ids)[:2]

    """
    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        self.apply(self.init_weights)
        # FIX: tie the LM head to the input embeddings at construction time,
        # consistent with GPT2LMHeadModel (the original only tied on
        # from_pretrained, leaving directly-constructed models untied).
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)

    def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                               past=past, head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        # Pool the hidden state at each choice's classification token.
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            # Shift so that tokens < n predict n; -1 labels are ignored.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_roberta.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa model. """
17
+
18
+ from __future__ import (absolute_import, division, print_function,
19
+ unicode_literals)
20
+
21
+ import logging
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ import torch.nn.functional as F
26
+ from torch.nn import CrossEntropyLoss, MSELoss
27
+
28
+ from .modeling_bert import (BertConfig, BertEmbeddings,
29
+ BertLayerNorm, BertModel,
30
+ BertPreTrainedModel, gelu)
31
+
32
+ from .modeling_utils import add_start_docstrings
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+ ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
37
+ 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
38
+ 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
39
+ 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
40
+ }
41
+
42
+ ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
43
+ 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json",
44
+ 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json",
45
+ 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json",
46
+ }
47
+
48
+
49
class RobertaEmbeddings(BertEmbeddings):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing:
    RoBERTa reserves index 1 for padding (fairseq convention), so real position
    indices start at padding_idx + 1.
    """
    def __init__(self, config):
        super(RobertaEmbeddings, self).__init__(config)
        # fairseq-style padding index; positions are offset past it.
        self.padding_idx = 1

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        seq_length = input_ids.size(1)
        if position_ids is None:
            # Position numbers begin at padding_idx+1. Padding symbols are ignored.
            # cf. fairseq's `utils.make_positions`
            position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
65
+
66
+
67
class RobertaConfig(BertConfig):
    # Identical to BertConfig; only the archive map of pretrained configs differs.
    pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
69
+
70
+
71
+ ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
72
+ `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
73
+ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
74
+ Veselin Stoyanov. It is based on Google's BERT model released in 2018.
75
+
76
+ It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
77
+ objective and training with much larger mini-batches and learning rates.
78
+
79
+ This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
80
+ models.
81
+
82
+ This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
83
+ refer to the PyTorch documentation for all matter related to general usage and behavior.
84
+
85
+ .. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
86
+ https://arxiv.org/abs/1907.11692
87
+
88
+ .. _`torch.nn.Module`:
89
+ https://pytorch.org/docs/stable/nn.html#module
90
+
91
+ Parameters:
92
+ config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the
93
+ model.
94
+ """
95
+
96
+ ROBERTA_INPUTS_DOCSTRING = r"""
97
+ Inputs:
98
+ **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
99
+ Indices of input sequence tokens in the vocabulary.
100
+ To match pre-training, RoBERTa input sequence should be formatted with [CLS] and [SEP] tokens as follows:
101
+
102
+ (a) For sequence pairs:
103
+
104
+ ``tokens: [CLS] is this jack ##son ##ville ? [SEP][SEP] no it is not . [SEP]``
105
+
106
+ (b) For single sequences:
107
+
108
+ ``tokens: [CLS] the dog is hairy . [SEP]``
109
+
110
+ Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
111
+ the ``add_special_tokens`` parameter set to ``True``.
112
+ See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
113
+ :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
114
+ **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
115
+ Indices of positions of each input sequence tokens in the position embeddings.
116
+ Selected in the range ``[0, config.max_position_embeddings - 1[``.
117
+ **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
118
+ Mask to avoid performing attention on padding token indices.
119
+ Mask values selected in ``[0, 1]``:
120
+ ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
121
+ **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
122
+ Mask to nullify selected heads of the self-attention modules.
123
+ Mask values selected in ``[0, 1]``:
124
+ ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
125
+ """
126
+
127
@add_start_docstrings("The bare RoBERTa Model transformer outputing raw hidden-states without any specific head on top.",
                      ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaModel(BertModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaModel.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaModel, self).__init__(config)

        # Swap in RoBERTa-specific embeddings (padding-aware position indexing);
        # everything else is inherited from BertModel.
        self.embeddings = RobertaEmbeddings(config)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
        # Every sequence is expected to start with token id 0 (RoBERTa's <s>);
        # a non-zero sum over the first column means some sequence lacks it.
        if input_ids[:, 0].sum().item() != 0:
            logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
                           "This model requires special tokens in order to work. "
                           "Please specify add_special_tokens=True in your encoding.")
        return super(RobertaModel, self).forward(input_ids, token_type_ids, attention_mask, position_ids, head_mask)
174
+
175
+
176
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """,
                      ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForMaskedLM(BertPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMaskedLM.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]

    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForMaskedLM, self).__init__(config)

        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)

        # Initialize weights first, then tie the LM decoder to the input embeddings.
        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, position_ids=None,
                head_mask=None):
        outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                               attention_mask=attention_mask, head_mask=head_mask)
        # outputs[0] is the last hidden state; outputs[1] the pooler output (unused here).
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here

        if masked_lm_labels is not None:
            # Positions labelled -1 are excluded from the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
242
+
243
+
244
class RobertaLMHead(nn.Module):
    """Roberta Head for masked language modeling: dense -> gelu -> LayerNorm -> vocab projection."""

    def __init__(self, config):
        super(RobertaLMHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # The decoder is bias-free so its weight can be tied to the input token
        # embeddings; the output bias is kept as a separate standalone parameter.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, features, **kwargs):
        hidden = self.layer_norm(gelu(self.dense(features)))
        # Project back to vocabulary size, adding the standalone bias.
        return self.decoder(hidden) + self.bias
264
+
265
+
266
@add_start_docstrings("""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
                      ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForSequenceClassification(BertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the sequence classification/regression loss.
        Indices should be in ``[0, ..., config.num_labels]``.
        If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
        If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForSequenceClassification.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]

    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config)
        self.classifier = RobertaClassificationHead(config)

        # FIX: apply the standard weight initialization, as every sibling model
        # in this file does (RobertaModel, RobertaForMaskedLM); without it the
        # classification head is left with raw nn.Linear defaults.
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                               attention_mask=attention_mask, head_mask=head_mask)
        # outputs[0] is the last hidden state; the classifier reads the <s> token itself.
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]  # Add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)
330
+
331
+
332
+
333
class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # The hidden state of the first token (<s>, RoBERTa's [CLS] equivalent)
        # serves as the sentence representation.
        cls_state = features[:, 0, :]
        cls_state = self.dense(self.dropout(cls_state))
        cls_state = self.dropout(torch.tanh(cls_state))
        return self.out_proj(cls_state)
11777-Group11-master/attention_weight_vis/bertviz/transformers_neuron_view/modeling_transfo_xl_utilities.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Utilities for PyTorch Transformer XL model.
17
+ Directly adapted from https://github.com/kimiyoung/transformer-xl.
18
+ """
19
+
20
+ from collections import defaultdict
21
+
22
+ import numpy as np
23
+
24
+ import torch
25
+ import torch.nn as nn
26
+ import torch.nn.functional as F
27
+
28
+ # CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
29
+ # CUDA_MINOR = int(torch.version.cuda.split('.')[1])
30
+
31
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive log-softmax with cluster projections (Transformer-XL).

    The vocabulary is split by ``cutoffs`` into a frequent "head" shortlist and
    one or more rarer tail clusters; with ``div_val > 1`` each successive tail
    cluster uses a smaller embedding dimension and its own projection.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False):
        super(ProjectedAdaptiveLogSoftmax, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        # Close the cutoff list with the full vocab size, e.g. [2000] -> [2000, n_token].
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # The head scores the shortlist tokens plus one "gate" entry per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            # Uniform embedding size: one shared output layer; a projection per
            # cluster only if the model dim differs from the embedding dim.
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(
                        nn.Parameter(torch.FloatTensor(d_proj, d_embed))
                    )
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # Shrinking embedding sizes: one output layer + projection per cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)

                self.out_projs.append(
                    nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))
                )

                self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Optionally project ``hidden`` down, then apply the linear output layer."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        '''
        Params:
            hidden :: [len*bsz x d_proj]
            labels :: [len*bsz]
        Return:
            if labels is not None:
                out :: [len*bsz] Negative log likelihood
            else:
                out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            We could replace this implementation by the native PyTorch one
            if their's had an option to set bias on all clusters in the native one.
            here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
        '''

        if labels is not None:
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size '
                                   'in the batch dimension.')

        if self.n_clusters == 0:
            # Plain softmax over the whole vocabulary.
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                out = -F.log_softmax(logit, dim=-1) \
                        .gather(1, labels.unsqueeze(1)).squeeze(1)
            else:
                out = F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # The head additionally scores the per-cluster gate entries.
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    # Shortlist tokens are predicted directly by the head.
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] \
                                  + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    # Scatter the per-cluster NLLs back to their original positions
                    # (or append in cluster order when keep_order is off).
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        r""" Computes log probabilities for all :math:`n\_classes`
        From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
        Args:
            hidden (Tensor): a minibatch of examples
        Returns:
            log-probabilities of for each class :math:`c`
            in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
            parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
        Shape:
            - Input: :math:`(N, in\_features)`
            - Output: :math:`(N, n\_classes)`
        """
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            return F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases (same layout as in forward)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = F.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # BUGFIX: use the same cluster-gate column as forward() —
                    # `head_logprob[:, -i]` indexed the wrong column — and assign
                    # the slice; `out[:, start_idx, stop_idx]` was a 3-axis index
                    # into a 2-D tensor (IndexError for any multi-cluster config).
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
258
+
259
+
260
class LogUniformSampler(object):
    def __init__(self, range_max, n_sample):
        """
        Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
            `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

        expected count can be approximated by 1 - (1 - p)^n
        and we use a numerically stable version -expm1(num_tries * log1p(-p))

        Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
        """
        with torch.no_grad():
            self.range_max = range_max
            # Log-uniform (Zipfian) class distribution over [0, range_max).
            boundary_logs = torch.arange(1., range_max + 2., 1.).log_()
            self.dist = (boundary_logs[1:] - boundary_logs[:-1]) / boundary_logs[-1]
            # Expected-count approximation 1 - (1 - p)^(2*n_sample), in log space.
            self.log_q = (-(-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()

        self.n_sample = n_sample

    def sample(self, labels):
        """
        labels: [b1, b2]
        Return
            true_log_probs: [b1, b2]
            samp_log_probs: [n_sample]
            neg_samples: [n_sample]
        """
        num_draws = 2 * self.n_sample

        with torch.no_grad():
            # Draw with replacement, then dedupe — the effective sample count varies.
            negatives = torch.multinomial(self.dist, num_draws, replacement=True).unique()
            device = labels.device
            negatives = negatives.to(device)
            return (self.log_q[labels].to(device),
                    self.log_q[negatives].to(device),
                    negatives)
300
+
301
def sample_logits(embedding, bias, labels, inputs, sampler):
    """
    embedding: an nn.Embedding layer
    bias: [n_vocab]
    labels: [b1, b2]
    inputs: [b1, b2, n_emb]
    sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    num_neg = neg_samples.size(0)
    batch, length = labels.size(0), labels.size(1)

    # One embedding lookup for the true targets followed by the shared negatives.
    combined_ids = torch.cat([labels.view(-1), neg_samples])
    combined_w = embedding(combined_ids)
    true_w = combined_w[: -num_neg].view(batch, length, -1)
    sample_w = combined_w[- num_neg:].view(num_neg, -1)

    combined_b = bias[combined_ids]
    true_b = combined_b[: -num_neg].view(batch, length)
    sample_b = combined_b[- num_neg:]

    # Positions where a negative sample collides with the true label must be masked.
    hit = (labels[:, :, None] == neg_samples).detach()

    # Sampled-softmax correction: subtract the sampling log-probabilities.
    true_logits = torch.einsum('ijk,ijk->ij',
                               [true_w, inputs]) + true_b - true_log_probs
    neg_logits = torch.einsum('lk,ijk->ijl',
                              [sample_w, inputs]) + sample_b - samp_log_probs
    neg_logits.masked_fill_(hit, -1e30)

    # Column 0 holds the true-class logit; the rest are the shared negatives.
    return torch.cat([true_logits[:, :, None], neg_logits], -1)
11777-Group11-master/attention_weight_vis/bertviz/util.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
def format_attention(attention):
    """Stack per-layer attentions into one (num_layers, num_heads, seq_len, seq_len) tensor."""
    for layer_attention in attention:
        # each entry must be 1 x num_heads x seq_len x seq_len
        if len(layer_attention.shape) != 4:
            raise ValueError("The attention tensor does not have the correct number of dimensions. Make sure you set "
                             "output_attentions=True when initializing your model.")
    # Drop the (assumed size-1) batch dimension from each layer and stack along a new layer axis.
    return torch.stack([layer_attention.squeeze(0) for layer_attention in attention])
13
+
14
def format_special_chars(tokens):
    """Replace BPE/sentencepiece word-boundary markers with spaces (or drop them)."""
    cleaned = []
    for token in tokens:
        cleaned.append(token.replace('Ġ', ' ').replace('▁', ' ').replace('</w>', ''))
    return cleaned
11777-Group11-master/example_data/example.feature.lineidx ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ 0
2
+ 230089
3
+ 602590
4
+ 854591
5
+ 1292820
6
+ 1774869
7
+ 2169276
8
+ 2519869
11777-Group11-master/example_data/example.feature.tsv ADDED
The diff for this file is too large to render. See raw diff
 
11777-Group11-master/example_data/example_caption.json ADDED
@@ -0,0 +1 @@
 
 
1
+ [{"image_id": "554625", "id": 28241, "caption": "a boy wearing headphones using one computer in a long row of computers"}, {"image_id": "5802", "id": 33147, "caption": "Two men wearing aprons working in a commercial-style kitchen."}, {"image_id": "554625", "id": 34196, "caption": "A little boy with earphones on listening to something."}, {"image_id": "5802", "id": 34992, "caption": "Chefs preparing food in a professional metallic style kitchen."}, {"image_id": "554625", "id": 35393, "caption": "A group of people sitting at desk using computers."}, {"image_id": "5802", "id": 37362, "caption": "Two people standing around in a large kitchen."}, {"image_id": "5802", "id": 38118, "caption": "A commercial kitchen with two men working to prepare several plates."}, {"image_id": "5802", "id": 38577, "caption": "two men in white shirts in a large steel kitchen"}, {"image_id": "554625", "id": 40226, "caption": "Children sitting at computer stations on a long table."}, {"image_id": "554625", "id": 44231, "caption": "A small child wearing headphones plays on the computer."}, {"image_id": "309022", "id": 101131, "caption": "A commercial stainless kitchen with a pot of food cooking. "}, {"image_id": "309022", "id": 103063, "caption": "Some food sits in a pot in a kitchen. 
"}, {"image_id": "309022", "id": 103657, "caption": "A kitchen has all stainless steel appliances and counters."}, {"image_id": "309022", "id": 104869, "caption": "a kitchen with a sink and many cooking machines and a pot of food"}, {"image_id": "309022", "id": 110806, "caption": "Food cooks in a pot on a stove in a kitchen."}, {"image_id": "574769", "id": 561237, "caption": "A woman in a room with a cat."}, {"image_id": "574769", "id": 562344, "caption": "A girl smiles as she holds a cat and wears a brightly colored skirt."}, {"image_id": "574769", "id": 564249, "caption": "a woman is holding a cat in her kitchen"}, {"image_id": "574769", "id": 564444, "caption": "A woman is working in a kitchen carrying a soft toy."}, {"image_id": "574769", "id": 564477, "caption": "A woman is holding a cat in her kitchen."}, {"image_id": "522418", "id": 681330, "caption": "A woman wearing a net on her head cutting a cake. "}, {"image_id": "522418", "id": 686718, "caption": "A woman cutting a large white sheet cake."}, {"image_id": "118113", "id": 688725, "caption": "this is a very dark picture of a room with a shelf"}, {"image_id": "522418", "id": 688839, "caption": "A woman wearing a hair net cutting a large sheet cake."}, {"image_id": "118113", "id": 691068, "caption": "a cluttered room with a table and shelf on the wall."}, {"image_id": "522418", "id": 693159, "caption": "there is a woman that is cutting a white cake"}, {"image_id": "522418", "id": 693204, "caption": "A woman marking a cake with the back of a chef's knife. "}, {"image_id": "118113", "id": 695676, "caption": "A view of a messy room, with shelves on the wall."}, {"image_id": "118113", "id": 698481, "caption": "A dark and cluttered storage area with wood walls."}, {"image_id": "118113", "id": 699174, "caption": "A dim lit room consisting of many objects put together. 
"}, {"image_id": "318219", "id": 721981, "caption": "A young boy standing in front of a computer keyboard."}, {"image_id": "318219", "id": 728503, "caption": "a little boy wearing headphones and looking at a computer monitor"}, {"image_id": "318219", "id": 730555, "caption": "He is listening intently to the computer at school."}, {"image_id": "318219", "id": 731665, "caption": "A young boy stares up at the computer monitor."}, {"image_id": "318219", "id": 734977, "caption": "a young kid with head phones on using a computer "}, {"image_id": "222564", "id": 748361, "caption": "Two chefs in a restaurant kitchen preparing food. "}, {"image_id": "222564", "id": 748622, "caption": "Two cooks are cooking the food someone ordered at this restaurant"}, {"image_id": "222564", "id": 749123, "caption": "The chef is cooking with pans on the stove next to an oven. "}, {"image_id": "222564", "id": 749141, "caption": "Two men that are standing in a kitchen."}, {"image_id": "222564", "id": 750074, "caption": "Two cooks are near the stove in a stainless steel kitchen."}]
11777-Group11-master/idea1OscarSetup.sh ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Environment setup + fine-tuning/eval commands for the Oscar image-captioning
# experiments (idea 1). Intended to be run step by step on a fresh Ubuntu box,
# not as a single unattended script (the Anaconda installer is interactive).

# install Anaconda to /home/ubuntu/anaconda3
# via tutorial here: https://www.digitalocean.com/community/tutorials/how-to-install-the-anaconda-python-distribution-on-ubuntu-18-04
cd /tmp
curl -O https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh
bash Anaconda3-2020.07-Linux-x86_64.sh
source ~/.bashrc
conda list

# following oscar installation guide: https://github.com/microsoft/Oscar/blob/master/INSTALL.md
# create a new environment
conda create --name oscar python=3.7
conda activate oscar

# install pytorch 1.2
conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch

# install apex (pinned to the commit the Oscar install guide uses)
export INSTALL_DIR=/home/ubuntu
cd $INSTALL_DIR
git clone https://github.com/NVIDIA/apex.git
cd apex
git checkout f3a960f80244cf9e80558ab30f7f7e8cbf03c0a0
python setup.py install --cuda_ext --cpp_ext

# install unzip library
sudo apt install unzip

# install oscar
cd $INSTALL_DIR
git clone https://github.com/microsoft/Oscar.git
cd Oscar
# rewrite the ssh submodule URLs to https so no GitHub key is needed
sed -i "s/git@github\.com:/https:\/\/github\.com\//" .gitmodules
git submodule update --init --recursive

cd coco_caption
./get_stanford_models.sh
cd ..
python setup.py build develop

# download pretrained models
chmod +x get_model.sh
./get_model.sh
chmod +x get_datasets_cococaption.sh
./get_datasets_cococaption.sh

# install requirements
pip install -r requirements.txt
unset INSTALL_DIR

# image captioning on coco
# source : https://github.com/microsoft/Oscar/blob/master/MODEL_ZOO.md

# Fine-tune (train) without object tags
mkdir output
python oscar/run_captioning_finetune.py \
    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
    --do_train \
    --do_lower_case \
    --learning_rate 0.00003 \
    --max_seq_length 40 \
    --per_gpu_train_batch_size 64 \
    --save_steps 4000 \
    --max_steps 44000\
    --output_dir output/

# eval without object tags
# NOTE(review): "run_captioning_.py" looks like a typo — the other eval
# invocations use run_captioning_finetune.py; confirm the intended script.
python oscar/run_captioning_.py \
    --eval_model_dir output/checkpoint-14-32000 \
    --do_eval \
    --max_seq_length 40 \
    --do_lower_case

# fine-tune without image features
python oscar/run_captioning_finetune.py \
    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
    --do_train \
    --do_lower_case \
    --add_od_labels \
    --disable_img_features \
    --learning_rate 0.00003 \
    --per_gpu_train_batch_size 64 \
    --save_steps 4000 \
    --max_steps 44000\
    --output_dir output_img/

# eval without image features
python oscar/run_captioning_finetune.py \
    --eval_model_dir output_baseline/output/checkpoint-1-4000 \
    --do_eval \
    --do_lower_case \
    --add_od_labels \
    --disable_img_features

# inference with object tags by confidence ordering
python oscar/run_captioning_finetune.py \
    --eval_model_dir output_baseline/output/checkpoint-14-32000 \
    --do_eval \
    --do_lower_case \
    --add_od_labels \
    --keep_top_percentage_tag_conf_threshold 0.3 \
    --keep_top_percentage_tag 0.1

# fine-tune with confidence added to embedding
python oscar/run_captioning_add_confidence.py \
    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
    --do_train \
    --do_lower_case \
    --add_od_labels \
    --add_conf \
    --learning_rate 0.00003 \
    --per_gpu_train_batch_size 64 \
    --save_steps 20 \
    --max_steps 44000\
    --output_dir output/

# inference with object tags from confidence added to embedding
python oscar/run_captioning_add_confidence.py \
    --eval_model_dir output/checkpoint-18-40000 \
    --do_eval \
    --do_lower_case \
    --add_od_labels

# tmux commands
# exit from oscar virtualenv
# start tmux session
# start oscar virtual environment
sudo apt-get install tmux
# BUGFIX: the original ran `tmux new -s finetune-nolabel finetune-noimg` —
# tmux treats the trailing token as a shell command to execute inside the new
# session, which is not what was intended. Create one named session per job.
tmux new -s finetune-nolabel
tmux ls
tmux attach -t finetune-nolabel
tmux kill-session -t session-name   # replace session-name with the target session
# C-b d to detach current tmux session

aws s3 sync output s3://mmml-idea1/finetune-no-objecttag
echo DONE
11777-Group11-master/oscar/.DS_Store ADDED
Binary file (6.15 kB). View file
 
11777-Group11-master/oscar/distillation/distiller.py ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ The distiller to distil the student.
16
+ Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
17
+ """
18
+ import math
19
+ import os
20
+ import time
21
+
22
+ import psutil
23
+ import torch
24
+ import torch.nn as nn
25
+ import torch.nn.functional as F
26
+ from torch.optim import AdamW
27
+ from torch.utils.data import BatchSampler, DataLoader, RandomSampler
28
+ from torch.utils.data.distributed import DistributedSampler
29
+ from tqdm import tqdm
30
+ import json
31
+
32
+ from .grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
33
+ from transformers import get_linear_schedule_with_warmup
34
+ from oscar.run_captioning import (
35
+ CaptionTSVDataset, CaptionTensorizer, evaluate, build_dataset,
36
+ compute_score_with_logits)
37
+ from .utils import logger
38
+
39
+
40
+
41
+ try:
42
+ from torch.utils.tensorboard import SummaryWriter
43
+ except ImportError:
44
+ from tensorboardX import SummaryWriter
45
+
46
+
47
+ class Distiller:
48
+ def __init__(
49
+ self, params: dict, dataset: CaptionTSVDataset, student: nn.Module, teacher: nn.Module,
50
+ val_dataset, tokenizer
51
+ ):
52
+ logger.info("Initializing Distiller")
53
+ self.params = params
54
+ self.dump_path = params.output_dir
55
+ self.multi_gpu = params.multi_gpu
56
+ self.fp16 = params.fp16
57
+
58
+ self.student = student
59
+ self.teacher = teacher
60
+
61
+ self.student_config = student.config
62
+ self.vocab_size = student.config.vocab_size
63
+
64
+ if params.n_gpu <= 1:
65
+ sampler = RandomSampler(dataset)
66
+ else:
67
+ sampler = DistributedSampler(dataset)
68
+
69
+ # if params.group_by_size:
70
+ # groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size)
71
+ # sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size)
72
+ # else:
73
+ # sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)
74
+
75
+ sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)
76
+
77
+ self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler)
78
+ self.val_dataset = val_dataset
79
+ self.tokenizer = tokenizer
80
+
81
+ self.eval_log = []
82
+
83
+ self.temperature = params.temperature
84
+ assert self.temperature > 0.0
85
+
86
+ self.alpha_ce = params.alpha_ce
87
+ self.alpha_mse = params.alpha_mse
88
+ self.alpha_cos = params.alpha_cos
89
+
90
+ # self.mlm = params.mlm
91
+ # if self.mlm:
92
+ # logger.info("Using MLM loss for LM step.")
93
+ # self.mlm_mask_prop = params.mlm_mask_prop
94
+ # assert 0.0 <= self.mlm_mask_prop <= 1.0
95
+ # assert params.word_mask + params.word_keep + params.word_rand == 1.0
96
+ # self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])
97
+ # self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs
98
+ # self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs
99
+ # if self.fp16:
100
+ # self.pred_probs = self.pred_probs.half()
101
+ # self.token_probs = self.token_probs.half()
102
+ # else:
103
+ # logger.info("Using CLM loss for LM step.")
104
+
105
+ self.epoch = 0
106
+ self.n_iter = 0
107
+ self.n_total_iter = 0
108
+ self.n_sequences_epoch = 0
109
+ self.total_loss_epoch = 0
110
+ self.last_loss = 0
111
+ self.last_loss_ce = 0
112
+ if self.alpha_mse > 0.0:
113
+ self.last_loss_mse = 0
114
+ if self.alpha_cos > 0.0:
115
+ self.last_loss_cos = 0
116
+ self.last_log = 0
117
+
118
+ self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
119
+ if self.alpha_mse > 0.0:
120
+ self.mse_loss_fct = nn.MSELoss(reduction="sum")
121
+ if self.alpha_cos > 0.0:
122
+ self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean")
123
+
124
+ logger.info("--- Initializing model optimizer")
125
+ assert params.gradient_accumulation_steps >= 1
126
+ self.num_steps_epoch = len(self.dataloader)
127
+ num_train_optimization_steps = (
128
+ int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1
129
+ )
130
+
131
+ no_decay = ["bias", "LayerNorm.weight"]
132
+ optimizer_grouped_parameters = [
133
+ {
134
+ "params": [
135
+ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad
136
+ ],
137
+ "weight_decay": params.weight_decay,
138
+ },
139
+ {
140
+ "params": [
141
+ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad
142
+ ],
143
+ "weight_decay": 0.0,
144
+ },
145
+ ]
146
+ logger.info(
147
+ "------ Number of trainable parameters (student): %i"
148
+ % sum([p.numel() for p in self.student.parameters() if p.requires_grad])
149
+ )
150
+ logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()]))
151
+ self.optimizer = AdamW(
152
+ optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98)
153
+ )
154
+
155
+ warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)
156
+ self.scheduler = get_linear_schedule_with_warmup(
157
+ self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps
158
+ )
159
+
160
+ if self.fp16:
161
+ try:
162
+ from apex import amp
163
+ except ImportError:
164
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
165
+ logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level")
166
+ self.student, self.optimizer = amp.initialize(
167
+ self.student, self.optimizer, opt_level=self.params.fp16_opt_level
168
+ )
169
+ self.teacher = self.teacher.half()
170
+
171
+ if self.multi_gpu:
172
+ if self.fp16:
173
+ from apex.parallel import DistributedDataParallel
174
+
175
+ logger.info("Using apex.parallel.DistributedDataParallel for distributed training.")
176
+ self.student = DistributedDataParallel(self.student)
177
+ else:
178
+ from torch.nn.parallel import DistributedDataParallel
179
+
180
+ logger.info("Using nn.parallel.DistributedDataParallel for distributed training.")
181
+ self.student = DistributedDataParallel(
182
+ self.student,
183
+ device_ids=[params.local_rank],
184
+ output_device=params.local_rank,
185
+ find_unused_parameters=True,
186
+ )
187
+
188
+ # self.is_master = params.is_master
189
+ # if self.is_master:
190
+ logger.info("--- Initializing Tensorboard")
191
+ self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train"))
192
+ self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0)
193
+ self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0)
194
+
195
+
196
+ def train(self):
197
+ """
198
+ The real training loop.
199
+ """
200
+ logger.info("Starting training")
201
+ self.last_log = time.time()
202
+ self.student.train()
203
+ self.teacher.eval()
204
+
205
+ for _ in range(self.params.n_epoch):
206
+ logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}")
207
+ if self.multi_gpu:
208
+ torch.distributed.barrier()
209
+
210
+ iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
211
+ for batch in iter_bar:
212
+ if self.params.n_gpu > 0:
213
+ img_key, example = batch
214
+ # img_key = img_key.to(f"cuda:{self.params.local_rank}")
215
+ example = tuple(t.to(f"cuda:{self.params.local_rank}") for t in example)
216
+
217
+ '''CaptionTSVDataset:
218
+ def __getitem__(self, idx):
219
+ img_idx = self.get_image_index(idx)
220
+ img_key = self.image_keys[img_idx]
221
+ features = self.get_image_features(img_idx)
222
+ caption = self.get_caption(idx)
223
+ od_labels = self.get_od_labels(img_idx)
224
+ example = self.tensorizer.tensorize_example(caption, features, text_b=od_labels)
225
+ return img_key, example
226
+ '''
227
+
228
+ # example: (input_ids, attention_mask, segment_ids, img_feat, masked_pos)
229
+
230
+ inputs = {'input_ids': example[0], 'attention_mask': example[1],
231
+ 'token_type_ids': example[2], 'img_feats': example[3],
232
+ 'masked_pos': example[4], 'masked_ids': example[5]
233
+ }
234
+ outputs = self.step(**inputs)
235
+
236
+ iter_bar.update()
237
+ iter_bar.set_postfix(
238
+ {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"}
239
+ )
240
+ iter_bar.close()
241
+
242
+ logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}")
243
+ self.end_epoch()
244
+
245
+ logger.info("Save very last checkpoint as `pytorch_model.bin`.")
246
+ self.save_checkpoint(checkpoint_name="pytorch_model.bin")
247
+ logger.info("Training is finished")
248
+
249
+ def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, token_type_ids: torch.tensor,
250
+ img_feats: torch.tensor, masked_pos: torch.tensor, masked_ids: torch.tensor):
251
+ """
252
+ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation),
253
+ and possibly a parameter update (depending on the gradient accumulation).
254
+
255
+ Input:
256
+ ------
257
+ input_ids: `torch.tensor(bs, seq_length)` - The token ids.
258
+ attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.
259
+ lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM).
260
+ """
261
+
262
+ s_logits, s_hidden_states = self.student(
263
+ input_ids=input_ids, attention_mask=attention_mask, img_feats=img_feats,
264
+ masked_pos=masked_pos, masked_ids=masked_ids, token_type_ids=token_type_ids
265
+ ) # (bs, seq_length, voc_size)
266
+ with torch.no_grad():
267
+ t_output = self.teacher(
268
+ input_ids=input_ids, attention_mask=attention_mask, img_feats=img_feats,
269
+ masked_pos=masked_pos, masked_ids=masked_ids, token_type_ids=token_type_ids
270
+ ) # (bs, seq_length, voc_size)
271
+ _, t_logits, t_hidden_states = t_output
272
+
273
+ # output shape (num_blanks, voc_size)
274
+
275
+ # mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size)
276
+ # s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask
277
+ # s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask
278
+ # t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask
279
+ # t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask
280
+
281
+ s_logits_slct = s_logits
282
+ t_logits_slct = t_logits
283
+ assert t_logits_slct.size() == s_logits_slct.size()
284
+
285
+ loss_ce = (
286
+ self.ce_loss_fct(
287
+ F.log_softmax(s_logits_slct / self.temperature, dim=-1),
288
+ F.softmax(t_logits_slct / self.temperature, dim=-1),
289
+ )
290
+ * (self.temperature) ** 2
291
+ )
292
+ loss = self.alpha_ce * loss_ce
293
+
294
+ if self.alpha_mse > 0.0:
295
+ loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size(
296
+ 0
297
+ ) # Reproducing batchmean reduction
298
+ loss += self.alpha_mse * loss_mse
299
+ if self.alpha_cos > 0.0:
300
+ s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim)
301
+ t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim)
302
+ # mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim)
303
+ # assert s_hidden_states.size() == t_hidden_states.size()
304
+ # dim = s_hidden_states.size(-1)
305
+
306
+ # s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim)
307
+ # s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)
308
+ # t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim)
309
+ # t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)
310
+
311
+ s_hidden_states_slct = s_hidden_states.reshape(1,-1)
312
+ t_hidden_states_slct = t_hidden_states.reshape(1,-1)
313
+
314
+ target = torch.ones(s_hidden_states_slct.shape).to(s_hidden_states_slct.device) # (bs * seq_length,)
315
+ loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)
316
+ loss += self.alpha_cos * loss_cos
317
+
318
+ self.total_loss_epoch += loss.item()
319
+ self.last_loss = loss.item()
320
+ self.last_loss_ce = loss_ce.item()
321
+ if self.alpha_mse > 0.0:
322
+ self.last_loss_mse = loss_mse.item()
323
+ if self.alpha_cos > 0.0:
324
+ self.last_loss_cos = loss_cos.item()
325
+
326
+ self.optimize(loss)
327
+
328
+ self.n_sequences_epoch += input_ids.size(0)
329
+
330
+ def optimize(self, loss):
331
+ """
332
+ Normalization on the loss (gradient accumulation or distributed training), followed by
333
+ backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).
334
+ Also update the metrics for tensorboard.
335
+ """
336
+ # Check for NaN
337
+ if (loss != loss).data.any():
338
+ logger.error("NaN detected")
339
+ exit()
340
+
341
+ if self.multi_gpu:
342
+ loss = loss.mean()
343
+ if self.params.gradient_accumulation_steps > 1:
344
+ loss = loss / self.params.gradient_accumulation_steps
345
+
346
+ if self.fp16:
347
+ from apex import amp
348
+
349
+ with amp.scale_loss(loss, self.optimizer) as scaled_loss:
350
+ scaled_loss.backward()
351
+ else:
352
+ loss.backward()
353
+
354
+ self.iter()
355
+ if self.n_iter % self.params.gradient_accumulation_steps == 0:
356
+ if self.fp16:
357
+ torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)
358
+ else:
359
+ torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)
360
+ self.optimizer.step()
361
+ self.optimizer.zero_grad()
362
+ self.scheduler.step()
363
+
364
+ def iter(self):
365
+ """
366
+ Update global counts, write to tensorboard and save checkpoint.
367
+ """
368
+ self.n_iter += 1
369
+ self.n_total_iter += 1
370
+
371
+ if self.n_total_iter % self.params.log_interval == 0:
372
+ self.log_tensorboard()
373
+ self.last_log = time.time()
374
+ if self.n_total_iter % self.params.checkpoint_interval == 0:
375
+ self.save_checkpoint()
376
+ logger.info("Perform evaluation at step: %d" % (self.n_total_iter))
377
+ try:
378
+ evaluate_file = evaluate(self.params, self.val_dataset, self.student, self.tokenizer,
379
+ self.dump_path)
380
+ with open(evaluate_file, 'r') as f:
381
+ res = json.load(f)
382
+ best_score = max(best_score, res['CIDEr'])
383
+ res['epoch'] = epoch
384
+ res['global_step'] = step
385
+ res['best_CIDEr'] = best_score
386
+ self.eval_log.append(res)
387
+ with open(self.dump_path + '/eval_logs.json', 'w') as f:
388
+ json.dump(eval_log, f)
389
+ except:
390
+ print("An exception was made in the evaluation process. ")
391
+
392
+ def log_tensorboard(self):
393
+ """
394
+ Log into tensorboard. Only by the master process.
395
+ """
396
+ # if not self.is_master:
397
+ # return
398
+
399
+ for param_name, param in self.student.named_parameters():
400
+ self.tensorboard.add_scalar(
401
+ tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter
402
+ )
403
+ self.tensorboard.add_scalar(
404
+ tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter
405
+ )
406
+ if param.grad is None:
407
+ continue
408
+ self.tensorboard.add_scalar(
409
+ tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter
410
+ )
411
+ self.tensorboard.add_scalar(
412
+ tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter
413
+ )
414
+
415
+ self.tensorboard.add_scalar(
416
+ tag="losses/cum_avg_loss_epoch",
417
+ scalar_value=self.total_loss_epoch / self.n_iter,
418
+ global_step=self.n_total_iter,
419
+ )
420
+ self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter)
421
+ self.tensorboard.add_scalar(
422
+ tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter
423
+ )
424
+ if self.alpha_mse > 0.0:
425
+ self.tensorboard.add_scalar(
426
+ tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter
427
+ )
428
+ if self.alpha_cos > 0.0:
429
+ self.tensorboard.add_scalar(
430
+ tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter
431
+ )
432
+ self.tensorboard.add_scalar(
433
+ tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter
434
+ )
435
+
436
+ self.tensorboard.add_scalar(
437
+ tag="global/memory_usage",
438
+ scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000,
439
+ global_step=self.n_total_iter,
440
+ )
441
+ self.tensorboard.add_scalar(
442
+ tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter
443
+ )
444
+
445
+ def end_epoch(self):
446
+ """
447
+ Finally arrived at the end of epoch (full pass on dataset).
448
+ Do some tensorboard logging and checkpoint saving.
449
+ """
450
+ logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.")
451
+
452
+ self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth")
453
+ self.tensorboard.add_scalar(
454
+ tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch
455
+ )
456
+
457
+ self.epoch += 1
458
+ self.n_sequences_epoch = 0
459
+ self.n_iter = 0
460
+ self.total_loss_epoch = 0
461
+
462
+ def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"):
463
+ """
464
+ Save the current state. Only by the master process.
465
+ """
466
+ # if not self.is_master:
467
+ # return
468
+ mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student
469
+ mdl_to_save.config.save_pretrained(self.dump_path)
470
+ state_dict = mdl_to_save.state_dict()
471
+ torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
11777-Group11-master/oscar/distillation/lm_seqs_dataset.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Dataset to distilled models
16
+ adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
17
+ """
18
+ import numpy as np
19
+ import torch
20
+ from torch.utils.data import Dataset
21
+
22
+ from utils import logger
23
+
24
+
25
+ class LmSeqsDataset(Dataset):
26
+ """Custom Dataset wrapping language modeling sequences.
27
+
28
+ Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.
29
+
30
+ Input:
31
+ ------
32
+ params: `NameSpace` parameters
33
+ data: `List[np.array[int]]
34
+ """
35
+
36
+ def __init__(self, params, data):
37
+ self.params = params
38
+
39
+ self.token_ids = np.array(data)
40
+ self.lengths = np.array([len(t) for t in data])
41
+
42
+ self.check()
43
+ self.remove_long_sequences()
44
+ self.remove_empty_sequences()
45
+ self.remove_unknown_sequences()
46
+ self.check()
47
+ self.print_statistics()
48
+
49
+ def __getitem__(self, index):
50
+ return (self.token_ids[index], self.lengths[index])
51
+
52
+ def __len__(self):
53
+ return len(self.lengths)
54
+
55
+ def check(self):
56
+ """
57
+ Some sanity checks
58
+ """
59
+ assert len(self.token_ids) == len(self.lengths)
60
+ assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
61
+
62
+ def remove_long_sequences(self):
63
+ """
64
+ Sequences that are too long are split by chunk of max_model_input_size.
65
+ """
66
+ max_len = self.params.max_model_input_size
67
+ indices = self.lengths > max_len
68
+ logger.info(f"Splitting {sum(indices)} too long sequences.")
69
+
70
+ def divide_chunks(l, n):
71
+ return [l[i : i + n] for i in range(0, len(l), n)]
72
+
73
+ new_tok_ids = []
74
+ new_lengths = []
75
+ if self.params.mlm:
76
+ cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
77
+ else:
78
+ cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
79
+
80
+ for seq_, len_ in zip(self.token_ids, self.lengths):
81
+ assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
82
+ if len_ <= max_len:
83
+ new_tok_ids.append(seq_)
84
+ new_lengths.append(len_)
85
+ else:
86
+ sub_seqs = []
87
+ for sub_s in divide_chunks(seq_, max_len - 2):
88
+ if sub_s[0] != cls_id:
89
+ sub_s = np.insert(sub_s, 0, cls_id)
90
+ if sub_s[-1] != sep_id:
91
+ sub_s = np.insert(sub_s, len(sub_s), sep_id)
92
+ assert len(sub_s) <= max_len
93
+ assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
94
+ sub_seqs.append(sub_s)
95
+
96
+ new_tok_ids.extend(sub_seqs)
97
+ new_lengths.extend([len(l) for l in sub_seqs])
98
+
99
+ self.token_ids = np.array(new_tok_ids)
100
+ self.lengths = np.array(new_lengths)
101
+
102
+ def remove_empty_sequences(self):
103
+ """
104
+ Too short sequences are simply removed. This could be tuned.
105
+ """
106
+ init_size = len(self)
107
+ indices = self.lengths > 11
108
+ self.token_ids = self.token_ids[indices]
109
+ self.lengths = self.lengths[indices]
110
+ new_size = len(self)
111
+ logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
112
+
113
+ def remove_unknown_sequences(self):
114
+ """
115
+ Remove sequences with a (too) high level of unknown tokens.
116
+ """
117
+ if "unk_token" not in self.params.special_tok_ids:
118
+ return
119
+ else:
120
+ unk_token_id = self.params.special_tok_ids["unk_token"]
121
+ init_size = len(self)
122
+ unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
123
+ indices = (unk_occs / self.lengths) < 0.5
124
+ self.token_ids = self.token_ids[indices]
125
+ self.lengths = self.lengths[indices]
126
+ new_size = len(self)
127
+ logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
128
+
129
+ def print_statistics(self):
130
+ """
131
+ Print some statistics on the corpus. Only the master process.
132
+ """
133
+ if not self.params.is_master:
134
+ return
135
+ logger.info(f"{len(self)} sequences")
136
+ # data_len = sum(self.lengths)
137
+ # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
138
+ # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
139
+
140
+ # unk_idx = self.params.special_tok_ids['unk_token']
141
+ # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
142
+ # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
143
+
144
+ def batch_sequences(self, batch):
145
+ """
146
+ Do the padding and transform into torch.tensor.
147
+ """
148
+ token_ids = [t[0] for t in batch]
149
+ lengths = [t[1] for t in batch]
150
+ assert len(token_ids) == len(lengths)
151
+
152
+ # Max for paddings
153
+ max_seq_len_ = max(lengths)
154
+
155
+ # Pad token ids
156
+ if self.params.mlm:
157
+ pad_idx = self.params.special_tok_ids["pad_token"]
158
+ else:
159
+ pad_idx = self.params.special_tok_ids["unk_token"]
160
+ tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
161
+ assert len(tk_) == len(token_ids)
162
+ assert all(len(t) == max_seq_len_ for t in tk_)
163
+
164
+ tk_t = torch.tensor(tk_) # (bs, max_seq_len_)
165
+ lg_t = torch.tensor(lengths) # (bs)
166
+ return tk_t, lg_t
11777-Group11-master/oscar/distillation/scripts/binarized_data 3.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Preprocessing script before distillation.
17
+ """
18
+ import argparse
19
+ import logging
20
+ import pickle
21
+ import random
22
+ import time
23
+
24
+ import numpy as np
25
+
26
+ from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
27
+
28
+
29
# Configure the root logger once at import time; the module-level logger
# below inherits this format and the INFO threshold.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
33
+
34
+
35
def main():
    """
    Tokenize a raw text corpus and dump the resulting token-id sequences to a pickle.

    Each line of ``--file_path`` becomes one sequence wrapped in the tokenizer's
    sentence-boundary special tokens. Ids are stored as numpy uint16 arrays when
    the vocabulary fits in 16 bits, int32 otherwise, and shuffled before dumping.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    n_processed = 0  # fix: previous name `iter` shadowed the builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        n_processed += 1
        if n_processed % interval == 0:
            end = time.time()
            logger.info(f"{n_processed} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the on-disk size when every token id fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
11777-Group11-master/oscar/distillation/scripts/binarized_data.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Preprocessing script before distillation.
17
+ """
18
+ import argparse
19
+ import logging
20
+ import pickle
21
+ import random
22
+ import time
23
+
24
+ import numpy as np
25
+
26
+ from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
27
+
28
+
29
# Configure the root logger once at import time; the module-level logger
# below inherits this format and the INFO threshold.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
33
+
34
+
35
def main():
    """
    Tokenize a raw text corpus and dump the resulting token-id sequences to a pickle.

    Each line of ``--file_path`` becomes one sequence wrapped in the tokenizer's
    sentence-boundary special tokens. Ids are stored as numpy uint16 arrays when
    the vocabulary fits in 16 bits, int32 otherwise, and shuffled before dumping.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    n_processed = 0  # fix: previous name `iter` shadowed the builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        n_processed += 1
        if n_processed % interval == 0:
            end = time.time()
            logger.info(f"{n_processed} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the on-disk size when every token id fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
11777-Group11-master/oscar/distillation/scripts/extract.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Preprocessing script before training the distilled model.
17
+ Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2.
18
+ """
19
+ import argparse
20
+
21
+ import torch
22
+
23
+ from transformers import GPT2LMHeadModel, RobertaForMaskedLM
24
+
25
+
26
# CLI script: copy a subset of a pretrained teacher's weights into a smaller
# student state dict to initialize distillation. Everything runs under the guard.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned Distillation"
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    # The parameter-name prefix inside the state dict differs per architecture.
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}  # the student state dict being assembled

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy teacher layers 0, 2, 4, 7, 9, 11 into consecutive student slots 0..5.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        # Optionally also carry over the vocabulary-transform projection.
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
11777-Group11-master/oscar/distillation/scripts/extract_distilbert 2.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Preprocessing script before training DistilBERT.
17
+ Specific to BERT -> DistilBERT.
18
+ """
19
+ import argparse
20
+
21
+ import torch
22
+
23
+ from oscar.modeling.modeling_bert import BertForImageCaptioning
24
+ from oscar.modeling.modeling_distilbert import DistilBertForImageCaptioning
25
+ from transformers.modeling_bert import BertConfig
26
+
27
if __name__ == "__main__":
    # Build a DistilBERT-style image-captioning student state dict from a
    # BertForImageCaptioning teacher, copying teacher layers 0, 2, 4, 7, 9, 11
    # into consecutive student slots 0..5 and renaming to DistilBERT's layout.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_source", default="", type=str)
    parser.add_argument("--target_config", default="", type=str)
    parser.add_argument("--dump_target", default="", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    model = BertForImageCaptioning.from_pretrained(args.dump_source)
    new_model = DistilBertForImageCaptioning(BertConfig.from_pretrained(args.target_config))
    state_dict = model.state_dict()
    compressed_sd = {}

    # fix: the debug-log handle was previously opened with a bare open() and
    # never closed; the context manager guarantees it is flushed and closed.
    with open("/home/ubuntu/mmml/layers.log", "w") as f:
        # Dump teacher and student trainable-parameter shapes for manual inspection.
        for name, param in model.named_parameters():
            if param.requires_grad:
                print(name, param.data.shape, file=f)

        print("\n\n", file=f)

        for name, param in new_model.named_parameters():
            if param.requires_grad:
                print(name, param.data.shape, file=f)

    prefix = "bert"

    # Embeddings, image-feature projection, and output transform are copied verbatim.
    for w in ["word_embeddings"]:
        compressed_sd[f"bert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"bert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
        compressed_sd[f"bert.img_embedding.{w}"] = state_dict[f"{prefix}.img_embedding.{w}"]
        compressed_sd[f"transform.dense.{w}"] = state_dict[f"transform.dense.{w}"]
        compressed_sd[f"transform.LayerNorm.{w}"] = state_dict[f"transform.LayerNorm.{w}"]

    # Transformer blocks: rename BERT attention/FFN parameters to DistilBERT names.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"bert.encoder.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"bert.encoder.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"bert.encoder.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"bert.encoder.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"bert.encoder.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"bert.encoder.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"bert.encoder.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"bert.encoder.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # Output decoder weights are tied to the copied word embeddings.
    compressed_sd["decoder.weight"] = compressed_sd["bert.embeddings.word_embeddings.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transfered for distillation: {len(compressed_sd.keys())}")

    print(f"Save transfered checkpoint to {args.dump_target}.")
    torch.save(compressed_sd, args.dump_target)
11777-Group11-master/oscar/distillation/train.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Training the distilled model.
17
+ Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
18
+ """
19
+ import argparse
20
+ import json
21
+ import os
22
+ import pickle
23
+ import shutil
24
+
25
+ import numpy as np
26
+ import torch
27
+
28
+ from .distiller import Distiller
29
+ from .lm_seqs_dataset import LmSeqsDataset
30
+ from transformers import (
31
+ BertConfig,
32
+ BertForMaskedLM,
33
+ BertTokenizer,
34
+ DistilBertConfig,
35
+ DistilBertForMaskedLM,
36
+ DistilBertTokenizer,
37
+ GPT2Config,
38
+ GPT2LMHeadModel,
39
+ GPT2Tokenizer,
40
+ RobertaConfig,
41
+ RobertaForMaskedLM,
42
+ RobertaTokenizer,
43
+ )
44
+ from .utils import git_log, init_gpu_params, logger, set_seed
45
+
46
+
47
# Maps each model-type CLI key to its (config class, LM model class,
# tokenizer class) triple; used to resolve both --student_type and --teacher_type.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
53
+
54
+
55
def sanity_checks(args):
    """
    Validate the combination of CLI arguments before any expensive setup.

    Raises:
        AssertionError: on any inconsistent combination of loss weights,
            student/teacher pairing, or missing files.
    """
    # The --mlm flag and the MLM loss weight must agree, and exactly one of
    # the MLM / CLM objectives must be active.
    assert (args.alpha_mlm > 0.0) if args.mlm else (args.alpha_mlm == 0.0)
    assert (args.alpha_clm == 0.0) if args.alpha_mlm > 0.0 else (args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in {"roberta", "distilbert"}) and (args.teacher_type in {"roberta", "bert"})
    else:
        assert (args.student_type == "gpt2") and (args.teacher_type == "gpt2")

    # Same architecture family, or the dedicated BERT -> DistilBERT pairing.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type == "roberta"

    # Loss weights must be non-negative with at least one strictly positive.
    loss_weights = (args.alpha_ce, args.alpha_mlm, args.alpha_clm, args.alpha_mse, args.alpha_cos)
    for weight in loss_weights:
        assert weight >= 0.0
    assert sum(loss_weights) > 0.0
+
84
+
85
def freeze_pos_embeddings(student, args):
    """Disable gradient updates on the student's positional embeddings (roberta / gpt2 only)."""
    kind = args.student_type
    if kind == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif kind == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
90
+
91
+
92
def freeze_token_type_embeddings(student, args):
    """Disable gradient updates on the student's token-type embeddings (roberta only)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
95
+
96
+
97
def main():
    """
    CLI entry point: parse the distillation arguments, load the teacher and
    student models plus the binarized dataset, then run knowledge distillation
    through ``Distiller``.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distilation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    # NOTE(review): action="store_false" means passing --group_by_size DISABLES
    # grouping; the default (without the flag) is True, matching the help text.
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    # Only the master process manages the serialization directory and logs params.
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it"
                    "Use `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    # Map each special-token name (e.g. "pad_token") to its vocabulary id.
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rare tokens get a higher masking probability (count ** -smoothing);
        # special tokens are never selected for prediction.
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    # Student and teacher must agree on vocab / hidden size / sequence length
    # for the distillation losses to line up.
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
11777-Group11-master/oscar/distillation/training_configs/distiloscar 3.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation": "gelu",
3
+ "attention_dropout": 0.1,
4
+ "dim": 768,
5
+ "dropout": 0.1,
6
+ "hidden_dim": 3072,
7
+ "initializer_range": 0.02,
8
+ "max_position_embeddings": 512,
9
+ "n_heads": 12,
10
+ "n_layers": 6,
11
+ "sinusoidal_pos_embds": true,
12
+ "tie_weights_": true,
13
+ "vocab_size": 30522,
14
+ "img_feature_dim": 2054,
15
+ "img_feature_type": "faster_r-cnn",
16
+ "attention_probs_dropout_prob": 0.1,
17
+ "num_labels": 2,
18
+ "type_vocab_size": 2
19
+ }
20
+