body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
e1e93fe5e2ca7bc9742e760e8619e8169dc4d9c47ddf31bbf36390c808f9d232
def verify_password(self, password): '\n 将用户输入的密码加密后与数据库对比\n ' return werkzeug.security.generate_password_hash(password)
将用户输入的密码加密后与数据库对比
app/orm/User.py
verify_password
sevenZz/CouponStatistic
0
python
def verify_password(self, password): '\n \n ' return werkzeug.security.generate_password_hash(password)
def verify_password(self, password): '\n \n ' return werkzeug.security.generate_password_hash(password)<|docstring|>将用户输入的密码加密后与数据库对比<|endoftext|>
71e281791c3eb151c8fd9f23a0b89d0d85465cca3c9d7308c55b1078c9b04234
def word_fits_in_line(pagewidth, x_pos, wordsize_w): ' Return True if a word can fit into a line. ' return (((pagewidth - x_pos) - wordsize_w) > 0)
Return True if a word can fit into a line.
tesseract_trainer/__init__.py
word_fits_in_line
kevin-dunnicliffe/tesseract-trainer
0
python
def word_fits_in_line(pagewidth, x_pos, wordsize_w): ' ' return (((pagewidth - x_pos) - wordsize_w) > 0)
def word_fits_in_line(pagewidth, x_pos, wordsize_w): ' ' return (((pagewidth - x_pos) - wordsize_w) > 0)<|docstring|>Return True if a word can fit into a line.<|endoftext|>
87ff0ae7057901774509099191e370eb6e91698d6b22b459d978f22ca0e158d6
def newline_fits_in_page(pageheight, y_pos, wordsize_h): ' Return True if a new line can be contained in a page. ' return (((pageheight - y_pos) - (2 * wordsize_h)) > 0)
Return True if a new line can be contained in a page.
tesseract_trainer/__init__.py
newline_fits_in_page
kevin-dunnicliffe/tesseract-trainer
0
python
def newline_fits_in_page(pageheight, y_pos, wordsize_h): ' ' return (((pageheight - y_pos) - (2 * wordsize_h)) > 0)
def newline_fits_in_page(pageheight, y_pos, wordsize_h): ' ' return (((pageheight - y_pos) - (2 * wordsize_h)) > 0)<|docstring|>Return True if a new line can be contained in a page.<|endoftext|>
688cca1daa388294dd412e9400a660ce0b5baf33b40ffc9e5d8d584389374778
def pil_coord_to_tesseract(pil_x, pil_y, tif_h): ' Convert PIL coordinates into Tesseract boxfile coordinates:\n in PIL, (0,0) is at the top left corner and\n in tesseract boxfile format, (0,0) is at the bottom left corner.\n ' return (pil_x, (tif_h - pil_y))
Convert PIL coordinates into Tesseract boxfile coordinates: in PIL, (0,0) is at the top left corner and in tesseract boxfile format, (0,0) is at the bottom left corner.
tesseract_trainer/__init__.py
pil_coord_to_tesseract
kevin-dunnicliffe/tesseract-trainer
0
python
def pil_coord_to_tesseract(pil_x, pil_y, tif_h): ' Convert PIL coordinates into Tesseract boxfile coordinates:\n in PIL, (0,0) is at the top left corner and\n in tesseract boxfile format, (0,0) is at the bottom left corner.\n ' return (pil_x, (tif_h - pil_y))
def pil_coord_to_tesseract(pil_x, pil_y, tif_h): ' Convert PIL coordinates into Tesseract boxfile coordinates:\n in PIL, (0,0) is at the top left corner and\n in tesseract boxfile format, (0,0) is at the bottom left corner.\n ' return (pil_x, (tif_h - pil_y))<|docstring|>Convert PIL coordinates into Tesseract boxfile coordinates: in PIL, (0,0) is at the top left corner and in tesseract boxfile format, (0,0) is at the bottom left corner.<|endoftext|>
7ee92a1ebcb592bef4b7d8fbbc7b22c2deb63eaaf54e34a26c3caad7320f9f6b
def display_output(run, verbose): " Display the output/error of a subprocess.Popen object\n if 'verbose' is True.\n " (out, err) = run.communicate() if verbose: print(out.strip()) if err: print(err.strip())
Display the output/error of a subprocess.Popen object if 'verbose' is True.
tesseract_trainer/__init__.py
display_output
kevin-dunnicliffe/tesseract-trainer
0
python
def display_output(run, verbose): " Display the output/error of a subprocess.Popen object\n if 'verbose' is True.\n " (out, err) = run.communicate() if verbose: print(out.strip()) if err: print(err.strip())
def display_output(run, verbose): " Display the output/error of a subprocess.Popen object\n if 'verbose' is True.\n " (out, err) = run.communicate() if verbose: print(out.strip()) if err: print(err.strip())<|docstring|>Display the output/error of a subprocess.Popen object if 'verbose' is True.<|endoftext|>
9d9b3863d9083bff45b01363f3f904a59f53a362647d24099908d99c20dd7f91
def generate_tif(self): ' Create several individual tifs from text and merge them\n into a multi-page tif, and finally delete all individual tifs.\n ' self._fill_pages() self._multipage_tif() self._clean()
Create several individual tifs from text and merge them into a multi-page tif, and finally delete all individual tifs.
tesseract_trainer/__init__.py
generate_tif
kevin-dunnicliffe/tesseract-trainer
0
python
def generate_tif(self): ' Create several individual tifs from text and merge them\n into a multi-page tif, and finally delete all individual tifs.\n ' self._fill_pages() self._multipage_tif() self._clean()
def generate_tif(self): ' Create several individual tifs from text and merge them\n into a multi-page tif, and finally delete all individual tifs.\n ' self._fill_pages() self._multipage_tif() self._clean()<|docstring|>Create several individual tifs from text and merge them into a multi-page tif, and finally delete all individual tifs.<|endoftext|>
60bb5e9ed03bb738077958c1fdfbc2775f6302b81da6f37cdf2b39fb28f2c700
def generate_boxfile(self): ' Generate a boxfile from the multipage tif.\n The boxfile will be named {self.prefix}.box\n ' boxfile_path = (self.prefix + '.box') if self.verbose: print(('Generating boxfile %s' % boxfile_path)) with open(boxfile_path, 'w') as boxfile: for boxline in self.boxlines: boxfile.write((boxline + '\n'))
Generate a boxfile from the multipage tif. The boxfile will be named {self.prefix}.box
tesseract_trainer/__init__.py
generate_boxfile
kevin-dunnicliffe/tesseract-trainer
0
python
def generate_boxfile(self): ' Generate a boxfile from the multipage tif.\n The boxfile will be named {self.prefix}.box\n ' boxfile_path = (self.prefix + '.box') if self.verbose: print(('Generating boxfile %s' % boxfile_path)) with open(boxfile_path, 'w') as boxfile: for boxline in self.boxlines: boxfile.write((boxline + '\n'))
def generate_boxfile(self): ' Generate a boxfile from the multipage tif.\n The boxfile will be named {self.prefix}.box\n ' boxfile_path = (self.prefix + '.box') if self.verbose: print(('Generating boxfile %s' % boxfile_path)) with open(boxfile_path, 'w') as boxfile: for boxline in self.boxlines: boxfile.write((boxline + '\n'))<|docstring|>Generate a boxfile from the multipage tif. The boxfile will be named {self.prefix}.box<|endoftext|>
969bbe5e9dec823931f47930c363e91f1ffd87a361621ae234b088f32a4c2431
def _new_tif(self, color='white'): ' Create and returns a new RGB blank tif, with specified background color (default: white) ' return Image.new('L', (self.W, self.H), color=color)
Create and returns a new RGB blank tif, with specified background color (default: white)
tesseract_trainer/__init__.py
_new_tif
kevin-dunnicliffe/tesseract-trainer
0
python
def _new_tif(self, color='white'): ' ' return Image.new('L', (self.W, self.H), color=color)
def _new_tif(self, color='white'): ' ' return Image.new('L', (self.W, self.H), color=color)<|docstring|>Create and returns a new RGB blank tif, with specified background color (default: white)<|endoftext|>
def390c5d24de46aa693efa51302bbe9ff352773b3156be3ebd1322c4fcfc2c6
def _save_tif(self, tif, page_number): " Save the argument tif using 'page_number' argument in filename.\n The filepath will be {self.indiv_page_prefix}{self.page_number}.tif\n " tif.save(((self.indiv_page_prefix + str(page_number)) + '.tif'))
Save the argument tif using 'page_number' argument in filename. The filepath will be {self.indiv_page_prefix}{self.page_number}.tif
tesseract_trainer/__init__.py
_save_tif
kevin-dunnicliffe/tesseract-trainer
0
python
def _save_tif(self, tif, page_number): " Save the argument tif using 'page_number' argument in filename.\n The filepath will be {self.indiv_page_prefix}{self.page_number}.tif\n " tif.save(((self.indiv_page_prefix + str(page_number)) + '.tif'))
def _save_tif(self, tif, page_number): " Save the argument tif using 'page_number' argument in filename.\n The filepath will be {self.indiv_page_prefix}{self.page_number}.tif\n " tif.save(((self.indiv_page_prefix + str(page_number)) + '.tif'))<|docstring|>Save the argument tif using 'page_number' argument in filename. The filepath will be {self.indiv_page_prefix}{self.page_number}.tif<|endoftext|>
e0258435a226eb0f85d78ea6dd18e4f7e3ea23c99578d46d03297f637fb2baf2
def _fill_pages(self): ' Fill individual tifs with text, and save them to disk.\n Each time a character is written in the tif, its coordinates will be added to the self.boxlines\n list (with the exception of white spaces).\n\n All along the process, we manage to contain the text within the image limits.\n ' tif = self._new_tif() draw = ImageDraw.Draw(tif) page_nb = 0 x_pos = self.start_x y_pos = self.start_y if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) for word in self.text: word += ' ' (wordsize_w, wordsize_h) = self.font.getsize(word) wordsize_w = (len(word) * 28) wordsize_h = 28 if (not word_fits_in_line(self.W, x_pos, wordsize_w)): if newline_fits_in_page(self.H, y_pos, wordsize_h): x_pos = self.start_x y_pos += wordsize_h else: x_pos = self.start_x y_pos = self.start_y self._save_tif(tif, page_nb) page_nb += 1 if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) tif = self._new_tif() draw = ImageDraw.Draw(tif) for char in word: (char_w, char_h) = self.font.getsize(char) char_w = 28 char_h = 28 (char_x0, char_y0) = (x_pos, y_pos) (char_x1, char_y1) = ((x_pos + char_w), (y_pos + char_h)) draw.text((x_pos, y_pos), char, fill='black', font=self.font) if (char != ' '): self._write_boxline(char, char_x0, char_y0, char_x1, char_y1, page_nb) x_pos += char_w self._save_tif(tif, page_nb)
Fill individual tifs with text, and save them to disk. Each time a character is written in the tif, its coordinates will be added to the self.boxlines list (with the exception of white spaces). All along the process, we manage to contain the text within the image limits.
tesseract_trainer/__init__.py
_fill_pages
kevin-dunnicliffe/tesseract-trainer
0
python
def _fill_pages(self): ' Fill individual tifs with text, and save them to disk.\n Each time a character is written in the tif, its coordinates will be added to the self.boxlines\n list (with the exception of white spaces).\n\n All along the process, we manage to contain the text within the image limits.\n ' tif = self._new_tif() draw = ImageDraw.Draw(tif) page_nb = 0 x_pos = self.start_x y_pos = self.start_y if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) for word in self.text: word += ' ' (wordsize_w, wordsize_h) = self.font.getsize(word) wordsize_w = (len(word) * 28) wordsize_h = 28 if (not word_fits_in_line(self.W, x_pos, wordsize_w)): if newline_fits_in_page(self.H, y_pos, wordsize_h): x_pos = self.start_x y_pos += wordsize_h else: x_pos = self.start_x y_pos = self.start_y self._save_tif(tif, page_nb) page_nb += 1 if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) tif = self._new_tif() draw = ImageDraw.Draw(tif) for char in word: (char_w, char_h) = self.font.getsize(char) char_w = 28 char_h = 28 (char_x0, char_y0) = (x_pos, y_pos) (char_x1, char_y1) = ((x_pos + char_w), (y_pos + char_h)) draw.text((x_pos, y_pos), char, fill='black', font=self.font) if (char != ' '): self._write_boxline(char, char_x0, char_y0, char_x1, char_y1, page_nb) x_pos += char_w self._save_tif(tif, page_nb)
def _fill_pages(self): ' Fill individual tifs with text, and save them to disk.\n Each time a character is written in the tif, its coordinates will be added to the self.boxlines\n list (with the exception of white spaces).\n\n All along the process, we manage to contain the text within the image limits.\n ' tif = self._new_tif() draw = ImageDraw.Draw(tif) page_nb = 0 x_pos = self.start_x y_pos = self.start_y if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) for word in self.text: word += ' ' (wordsize_w, wordsize_h) = self.font.getsize(word) wordsize_w = (len(word) * 28) wordsize_h = 28 if (not word_fits_in_line(self.W, x_pos, wordsize_w)): if newline_fits_in_page(self.H, y_pos, wordsize_h): x_pos = self.start_x y_pos += wordsize_h else: x_pos = self.start_x y_pos = self.start_y self._save_tif(tif, page_nb) page_nb += 1 if self.verbose: print(('Generating individual tif image %s' % ((self.indiv_page_prefix + str(page_nb)) + '.tif'))) tif = self._new_tif() draw = ImageDraw.Draw(tif) for char in word: (char_w, char_h) = self.font.getsize(char) char_w = 28 char_h = 28 (char_x0, char_y0) = (x_pos, y_pos) (char_x1, char_y1) = ((x_pos + char_w), (y_pos + char_h)) draw.text((x_pos, y_pos), char, fill='black', font=self.font) if (char != ' '): self._write_boxline(char, char_x0, char_y0, char_x1, char_y1, page_nb) x_pos += char_w self._save_tif(tif, page_nb)<|docstring|>Fill individual tifs with text, and save them to disk. Each time a character is written in the tif, its coordinates will be added to the self.boxlines list (with the exception of white spaces). All along the process, we manage to contain the text within the image limits.<|endoftext|>
bbb7fd484aef18a25e845069a324c207f6f3cec152b8792ede3a0d635a39688f
def _write_boxline(self, char, char_x0, char_y0, char_x1, char_y1, page_nb): ' Generate a boxfile line given a character coordinates, and append it to the\n self.boxlines list.\n ' (tess_char_x0, tess_char_y0) = pil_coord_to_tesseract(char_x0, char_y0, self.H) (tess_char_x1, tess_char_y1) = pil_coord_to_tesseract(char_x1, char_y1, self.H) boxline = ('%s %d %d %d %d %d' % (char, tess_char_x0, tess_char_y1, tess_char_x1, tess_char_y0, page_nb)) self.boxlines.append(boxline)
Generate a boxfile line given a character coordinates, and append it to the self.boxlines list.
tesseract_trainer/__init__.py
_write_boxline
kevin-dunnicliffe/tesseract-trainer
0
python
def _write_boxline(self, char, char_x0, char_y0, char_x1, char_y1, page_nb): ' Generate a boxfile line given a character coordinates, and append it to the\n self.boxlines list.\n ' (tess_char_x0, tess_char_y0) = pil_coord_to_tesseract(char_x0, char_y0, self.H) (tess_char_x1, tess_char_y1) = pil_coord_to_tesseract(char_x1, char_y1, self.H) boxline = ('%s %d %d %d %d %d' % (char, tess_char_x0, tess_char_y1, tess_char_x1, tess_char_y0, page_nb)) self.boxlines.append(boxline)
def _write_boxline(self, char, char_x0, char_y0, char_x1, char_y1, page_nb): ' Generate a boxfile line given a character coordinates, and append it to the\n self.boxlines list.\n ' (tess_char_x0, tess_char_y0) = pil_coord_to_tesseract(char_x0, char_y0, self.H) (tess_char_x1, tess_char_y1) = pil_coord_to_tesseract(char_x1, char_y1, self.H) boxline = ('%s %d %d %d %d %d' % (char, tess_char_x0, tess_char_y1, tess_char_x1, tess_char_y0, page_nb)) self.boxlines.append(boxline)<|docstring|>Generate a boxfile line given a character coordinates, and append it to the self.boxlines list.<|endoftext|>
efa5cb62f67e8df09ae8e4af9e436a07b6de511e7948b2e27ae4eb67c4ddfa7d
def _multipage_tif(self): ' Generate a multipage tif from all the generated tifs.\n The multipage tif will be named {self.prefix}.tif\n ' cmd = ['convert'] tifs = sorted(glob.glob((self.indiv_page_prefix + '*.tif')), key=os.path.getmtime) cmd.extend(tifs) multitif_name = (self.prefix + '.tif') cmd.append(multitif_name) if self.verbose: print(('Generating multipage-tif %s' % multitif_name)) subprocess.call(cmd)
Generate a multipage tif from all the generated tifs. The multipage tif will be named {self.prefix}.tif
tesseract_trainer/__init__.py
_multipage_tif
kevin-dunnicliffe/tesseract-trainer
0
python
def _multipage_tif(self): ' Generate a multipage tif from all the generated tifs.\n The multipage tif will be named {self.prefix}.tif\n ' cmd = ['convert'] tifs = sorted(glob.glob((self.indiv_page_prefix + '*.tif')), key=os.path.getmtime) cmd.extend(tifs) multitif_name = (self.prefix + '.tif') cmd.append(multitif_name) if self.verbose: print(('Generating multipage-tif %s' % multitif_name)) subprocess.call(cmd)
def _multipage_tif(self): ' Generate a multipage tif from all the generated tifs.\n The multipage tif will be named {self.prefix}.tif\n ' cmd = ['convert'] tifs = sorted(glob.glob((self.indiv_page_prefix + '*.tif')), key=os.path.getmtime) cmd.extend(tifs) multitif_name = (self.prefix + '.tif') cmd.append(multitif_name) if self.verbose: print(('Generating multipage-tif %s' % multitif_name)) subprocess.call(cmd)<|docstring|>Generate a multipage tif from all the generated tifs. The multipage tif will be named {self.prefix}.tif<|endoftext|>
4f9259ac1606d33e1576cdcfca44d51309e6155546d4e1204872e44e2dcb85eb
def _clean(self): ' Remove all generated individual tifs ' if self.verbose: print('Removing all individual tif images') tifs = glob.glob(('%s*' % self.indiv_page_prefix)) for tif in tifs: os.remove(tif)
Remove all generated individual tifs
tesseract_trainer/__init__.py
_clean
kevin-dunnicliffe/tesseract-trainer
0
python
def _clean(self): ' ' if self.verbose: print('Removing all individual tif images') tifs = glob.glob(('%s*' % self.indiv_page_prefix)) for tif in tifs: os.remove(tif)
def _clean(self): ' ' if self.verbose: print('Removing all individual tif images') tifs = glob.glob(('%s*' % self.indiv_page_prefix)) for tif in tifs: os.remove(tif)<|docstring|>Remove all generated individual tifs<|endoftext|>
1388352e121a575a908556f4302fa088fc8a292901c69f5ccc1c4276e72ab057
def _generate_boxfile(self): ' Generate a multipage tif, filled with the training text and generate a boxfile\n from the coordinates of the characters inside it\n ' mp = MultiPageTif(self.training_text, 3500, 1024, 20, 20, self.font_name, self.font_path, self.font_size, self.exp_number, self.dictionary_name, self.verbose) mp.generate_tif() mp.generate_boxfile()
Generate a multipage tif, filled with the training text and generate a boxfile from the coordinates of the characters inside it
tesseract_trainer/__init__.py
_generate_boxfile
kevin-dunnicliffe/tesseract-trainer
0
python
def _generate_boxfile(self): ' Generate a multipage tif, filled with the training text and generate a boxfile\n from the coordinates of the characters inside it\n ' mp = MultiPageTif(self.training_text, 3500, 1024, 20, 20, self.font_name, self.font_path, self.font_size, self.exp_number, self.dictionary_name, self.verbose) mp.generate_tif() mp.generate_boxfile()
def _generate_boxfile(self): ' Generate a multipage tif, filled with the training text and generate a boxfile\n from the coordinates of the characters inside it\n ' mp = MultiPageTif(self.training_text, 3500, 1024, 20, 20, self.font_name, self.font_path, self.font_size, self.exp_number, self.dictionary_name, self.verbose) mp.generate_tif() mp.generate_boxfile()<|docstring|>Generate a multipage tif, filled with the training text and generate a boxfile from the coordinates of the characters inside it<|endoftext|>
ebb7cfab6dc09e14a30b241f41166c4eb9d1896fa546ce1ae046ddcfa2e6c19e
def _train_on_boxfile(self): ' Run tesseract on training mode, using the generated boxfiles ' cmd = 'tesseract -psm 5 {prefix}.tif {prefix} nobatch box.train'.format(prefix=self.prefix) print(cmd) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
Run tesseract on training mode, using the generated boxfiles
tesseract_trainer/__init__.py
_train_on_boxfile
kevin-dunnicliffe/tesseract-trainer
0
python
def _train_on_boxfile(self): ' ' cmd = 'tesseract -psm 5 {prefix}.tif {prefix} nobatch box.train'.format(prefix=self.prefix) print(cmd) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
def _train_on_boxfile(self): ' ' cmd = 'tesseract -psm 5 {prefix}.tif {prefix} nobatch box.train'.format(prefix=self.prefix) print(cmd) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)<|docstring|>Run tesseract on training mode, using the generated boxfiles<|endoftext|>
01dac2a36cb5cd1ba933969b15aad481bf5be13ec0e39db805bb39f7261ff7b8
def _compute_character_set(self): " Computes the character properties set: isalpha, isdigit, isupper, islower, ispunctuation\n and encode it in the 'unicharset' data file\n\n examples:\n ';' is an punctuation character. Its properties are thus represented\n by the binary number 10000 (10 in hexadecimal).\n 'b' is an alphabetic character and a lower case character.\n Its properties are thus represented by the binary number 00011 (3 in hexadecimal).\n W' is an alphabetic character and an upper case character. Its properties are\n thus represented by the binary number 00101 (5 in hexadecimal).\n '7' is just a digit. Its properties are thus represented by the binary number 01000 (8 in hexadecimal).\n '=' does is not punctuation not digit or alphabetic character. Its properties\n are thus represented by the binary number 00000 (0 in hexadecimal).\n " cmd = ('unicharset_extractor %s.box' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
Computes the character properties set: isalpha, isdigit, isupper, islower, ispunctuation and encode it in the 'unicharset' data file examples: ';' is an punctuation character. Its properties are thus represented by the binary number 10000 (10 in hexadecimal). 'b' is an alphabetic character and a lower case character. Its properties are thus represented by the binary number 00011 (3 in hexadecimal). W' is an alphabetic character and an upper case character. Its properties are thus represented by the binary number 00101 (5 in hexadecimal). '7' is just a digit. Its properties are thus represented by the binary number 01000 (8 in hexadecimal). '=' does is not punctuation not digit or alphabetic character. Its properties are thus represented by the binary number 00000 (0 in hexadecimal).
tesseract_trainer/__init__.py
_compute_character_set
kevin-dunnicliffe/tesseract-trainer
0
python
def _compute_character_set(self): " Computes the character properties set: isalpha, isdigit, isupper, islower, ispunctuation\n and encode it in the 'unicharset' data file\n\n examples:\n ';' is an punctuation character. Its properties are thus represented\n by the binary number 10000 (10 in hexadecimal).\n 'b' is an alphabetic character and a lower case character.\n Its properties are thus represented by the binary number 00011 (3 in hexadecimal).\n W' is an alphabetic character and an upper case character. Its properties are\n thus represented by the binary number 00101 (5 in hexadecimal).\n '7' is just a digit. Its properties are thus represented by the binary number 01000 (8 in hexadecimal).\n '=' does is not punctuation not digit or alphabetic character. Its properties\n are thus represented by the binary number 00000 (0 in hexadecimal).\n " cmd = ('unicharset_extractor %s.box' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
def _compute_character_set(self): " Computes the character properties set: isalpha, isdigit, isupper, islower, ispunctuation\n and encode it in the 'unicharset' data file\n\n examples:\n ';' is an punctuation character. Its properties are thus represented\n by the binary number 10000 (10 in hexadecimal).\n 'b' is an alphabetic character and a lower case character.\n Its properties are thus represented by the binary number 00011 (3 in hexadecimal).\n W' is an alphabetic character and an upper case character. Its properties are\n thus represented by the binary number 00101 (5 in hexadecimal).\n '7' is just a digit. Its properties are thus represented by the binary number 01000 (8 in hexadecimal).\n '=' does is not punctuation not digit or alphabetic character. Its properties\n are thus represented by the binary number 00000 (0 in hexadecimal).\n " cmd = ('unicharset_extractor %s.box' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)<|docstring|>Computes the character properties set: isalpha, isdigit, isupper, islower, ispunctuation and encode it in the 'unicharset' data file examples: ';' is an punctuation character. Its properties are thus represented by the binary number 10000 (10 in hexadecimal). 'b' is an alphabetic character and a lower case character. Its properties are thus represented by the binary number 00011 (3 in hexadecimal). W' is an alphabetic character and an upper case character. Its properties are thus represented by the binary number 00101 (5 in hexadecimal). '7' is just a digit. Its properties are thus represented by the binary number 01000 (8 in hexadecimal). '=' does is not punctuation not digit or alphabetic character. Its properties are thus represented by the binary number 00000 (0 in hexadecimal).<|endoftext|>
b4c39eabe2f4beddc89ff5b00d80098127205bc26b17a2f8ec421ab1be7a1313
def _clustering(self): ' Cluster character features from all the training pages, and create characters prototype ' cmd = ('mftraining -F font_properties -U unicharset %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
Cluster character features from all the training pages, and create characters prototype
tesseract_trainer/__init__.py
_clustering
kevin-dunnicliffe/tesseract-trainer
0
python
def _clustering(self): ' ' cmd = ('mftraining -F font_properties -U unicharset %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
def _clustering(self): ' ' cmd = ('mftraining -F font_properties -U unicharset %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)<|docstring|>Cluster character features from all the training pages, and create characters prototype<|endoftext|>
cde46f1bd7aa918b14db83ac9db3276cc688e045efc39b18519ef146cd611d2c
def _normalize(self): " Generate the 'normproto' data file (the character normalization sensitivity prototypes) " cmd = ('cntraining %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
Generate the 'normproto' data file (the character normalization sensitivity prototypes)
tesseract_trainer/__init__.py
_normalize
kevin-dunnicliffe/tesseract-trainer
0
python
def _normalize(self): " " cmd = ('cntraining %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
def _normalize(self): " " cmd = ('cntraining %s.tr' % self.prefix) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)<|docstring|>Generate the 'normproto' data file (the character normalization sensitivity prototypes)<|endoftext|>
57d1ae5cd60216e5b87156be5008299340117ea4ac41fe4561dcdf2afe8893d2
def _rename_files(self): ' Add the self.dictionary_name prefix to each file generated during the tesseract training process ' for generated_file in GENERATED_DURING_TRAINING: os.rename(('%s' % generated_file), ('%s.%s' % (self.dictionary_name, generated_file)))
Add the self.dictionary_name prefix to each file generated during the tesseract training process
tesseract_trainer/__init__.py
_rename_files
kevin-dunnicliffe/tesseract-trainer
0
python
def _rename_files(self): ' ' for generated_file in GENERATED_DURING_TRAINING: os.rename(('%s' % generated_file), ('%s.%s' % (self.dictionary_name, generated_file)))
def _rename_files(self): ' ' for generated_file in GENERATED_DURING_TRAINING: os.rename(('%s' % generated_file), ('%s.%s' % (self.dictionary_name, generated_file)))<|docstring|>Add the self.dictionary_name prefix to each file generated during the tesseract training process<|endoftext|>
e705c4d074f9a985f5e1f19249038d824a54b752a67c797f6b468709eaf49989
def _dictionary_data(self): ' Generate dictionaries, coded as a Directed Acyclic Word Graph (DAWG),\n from the list of frequent words if those were submitted during the Trainer initialization.\n ' if self.word_list: cmd = ('wordlist2dawg %s %s.freq-dawg %s.unicharset' % (self.word_list, self.dictionary_name, self.dictionary_name)) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
Generate dictionaries, coded as a Directed Acyclic Word Graph (DAWG), from the list of frequent words if those were submitted during the Trainer initialization.
tesseract_trainer/__init__.py
_dictionary_data
kevin-dunnicliffe/tesseract-trainer
0
python
def _dictionary_data(self): ' Generate dictionaries, coded as a Directed Acyclic Word Graph (DAWG),\n from the list of frequent words if those were submitted during the Trainer initialization.\n ' if self.word_list: cmd = ('wordlist2dawg %s %s.freq-dawg %s.unicharset' % (self.word_list, self.dictionary_name, self.dictionary_name)) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)
def _dictionary_data(self): ' Generate dictionaries, coded as a Directed Acyclic Word Graph (DAWG),\n from the list of frequent words if those were submitted during the Trainer initialization.\n ' if self.word_list: cmd = ('wordlist2dawg %s %s.freq-dawg %s.unicharset' % (self.word_list, self.dictionary_name, self.dictionary_name)) run = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) display_output(run, self.verbose)<|docstring|>Generate dictionaries, coded as a Directed Acyclic Word Graph (DAWG), from the list of frequent words if those were submitted during the Trainer initialization.<|endoftext|>
6e1c0f6637ca655535d09b11b396bdda13802b3b4f4486d1c063ec7e01651e42
def training(self): ' Execute all training steps ' self._generate_boxfile() self._train_on_boxfile() self._compute_character_set() self._clustering() self._normalize() self._rename_files() self._dictionary_data() self._combine_data() if self.verbose: print(('The %s.traineddata file has been generated !' % self.dictionary_name))
Execute all training steps
tesseract_trainer/__init__.py
training
kevin-dunnicliffe/tesseract-trainer
0
python
def training(self): ' ' self._generate_boxfile() self._train_on_boxfile() self._compute_character_set() self._clustering() self._normalize() self._rename_files() self._dictionary_data() self._combine_data() if self.verbose: print(('The %s.traineddata file has been generated !' % self.dictionary_name))
def training(self): ' ' self._generate_boxfile() self._train_on_boxfile() self._compute_character_set() self._clustering() self._normalize() self._rename_files() self._dictionary_data() self._combine_data() if self.verbose: print(('The %s.traineddata file has been generated !' % self.dictionary_name))<|docstring|>Execute all training steps<|endoftext|>
09cca0a01a9f5a6f60e08dde637a87ada686126da548dc753824040eb6071e99
def clean(self): ' Remove all files generated during tesseract training process ' if self.verbose: print('cleaning...') os.remove(('%s.tr' % self.prefix)) os.remove(('%s.txt' % self.prefix)) os.remove(('%s.box' % self.prefix)) os.remove(('%s.inttemp' % self.dictionary_name)) os.remove(('%s.Microfeat' % self.dictionary_name)) os.remove(('%s.normproto' % self.dictionary_name)) os.remove(('%s.pffmtable' % self.dictionary_name)) os.remove(('%s.unicharset' % self.dictionary_name)) if self.word_list: os.remove(('%s.freq-dawg' % self.dictionary_name)) os.remove('mfunicharset')
Remove all files generated during tesseract training process
tesseract_trainer/__init__.py
clean
kevin-dunnicliffe/tesseract-trainer
0
python
def clean(self): ' ' if self.verbose: print('cleaning...') os.remove(('%s.tr' % self.prefix)) os.remove(('%s.txt' % self.prefix)) os.remove(('%s.box' % self.prefix)) os.remove(('%s.inttemp' % self.dictionary_name)) os.remove(('%s.Microfeat' % self.dictionary_name)) os.remove(('%s.normproto' % self.dictionary_name)) os.remove(('%s.pffmtable' % self.dictionary_name)) os.remove(('%s.unicharset' % self.dictionary_name)) if self.word_list: os.remove(('%s.freq-dawg' % self.dictionary_name)) os.remove('mfunicharset')
def clean(self): ' ' if self.verbose: print('cleaning...') os.remove(('%s.tr' % self.prefix)) os.remove(('%s.txt' % self.prefix)) os.remove(('%s.box' % self.prefix)) os.remove(('%s.inttemp' % self.dictionary_name)) os.remove(('%s.Microfeat' % self.dictionary_name)) os.remove(('%s.normproto' % self.dictionary_name)) os.remove(('%s.pffmtable' % self.dictionary_name)) os.remove(('%s.unicharset' % self.dictionary_name)) if self.word_list: os.remove(('%s.freq-dawg' % self.dictionary_name)) os.remove('mfunicharset')<|docstring|>Remove all files generated during tesseract training process<|endoftext|>
abc9acb01ffa50f177973f582f8c80b26f658885d04041eaed1247952fc104ee
def add_trained_data(self): ' Copy the newly trained data to the tessdata/ directory ' traineddata = ('%s.traineddata' % self.dictionary_name) if self.verbose: print(('Copying %s to %s.' % (traineddata, self.tessdata_path))) try: shutil.copyfile(traineddata, join(self.tessdata_path, traineddata)) except IOError: raise IOError(('Permission denied. Super-user rights are required to copy %s to %s.' % (traineddata, self.tessdata_path)))
Copy the newly trained data to the tessdata/ directory
tesseract_trainer/__init__.py
add_trained_data
kevin-dunnicliffe/tesseract-trainer
0
python
def add_trained_data(self): ' ' traineddata = ('%s.traineddata' % self.dictionary_name) if self.verbose: print(('Copying %s to %s.' % (traineddata, self.tessdata_path))) try: shutil.copyfile(traineddata, join(self.tessdata_path, traineddata)) except IOError: raise IOError(('Permission denied. Super-user rights are required to copy %s to %s.' % (traineddata, self.tessdata_path)))
def add_trained_data(self): ' ' traineddata = ('%s.traineddata' % self.dictionary_name) if self.verbose: print(('Copying %s to %s.' % (traineddata, self.tessdata_path))) try: shutil.copyfile(traineddata, join(self.tessdata_path, traineddata)) except IOError: raise IOError(('Permission denied. Super-user rights are required to copy %s to %s.' % (traineddata, self.tessdata_path)))<|docstring|>Copy the newly trained data to the tessdata/ directory<|endoftext|>
0682eb1782cec0703ed2849f39e852226b24eb8583fb1115700326164600a923
def get_dev_risk(weight, error): '\n :param weight: shape [N, 1], the importance weight for N source samples in the validation set\n :param error: shape [N, 1], the error value for each source sample in the validation set\n (typically 0 for correct classification and 1 for wrong classification)\n ' (N, d) = weight.shape (_N, _d) = error.shape assert ((N == _N) and (d == _d)), 'dimension mismatch!' weighted_error = (weight * error) cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1] var_w = np.var(weight, ddof=1) if ((cov == 0) and (var_w == 0)): cov = var_w = 1e-05 if (var_w == 0): var_w = cov eta = ((- cov) / var_w) return ((np.mean(weighted_error) + (eta * np.mean(weight))) - eta)
:param weight: shape [N, 1], the importance weight for N source samples in the validation set :param error: shape [N, 1], the error value for each source sample in the validation set (typically 0 for correct classification and 1 for wrong classification)
visda_classification/dev.py
get_dev_risk
stellaxu/MCD_DA
0
python
def get_dev_risk(weight, error): '\n :param weight: shape [N, 1], the importance weight for N source samples in the validation set\n :param error: shape [N, 1], the error value for each source sample in the validation set\n (typically 0 for correct classification and 1 for wrong classification)\n ' (N, d) = weight.shape (_N, _d) = error.shape assert ((N == _N) and (d == _d)), 'dimension mismatch!' weighted_error = (weight * error) cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1] var_w = np.var(weight, ddof=1) if ((cov == 0) and (var_w == 0)): cov = var_w = 1e-05 if (var_w == 0): var_w = cov eta = ((- cov) / var_w) return ((np.mean(weighted_error) + (eta * np.mean(weight))) - eta)
def get_dev_risk(weight, error): '\n :param weight: shape [N, 1], the importance weight for N source samples in the validation set\n :param error: shape [N, 1], the error value for each source sample in the validation set\n (typically 0 for correct classification and 1 for wrong classification)\n ' (N, d) = weight.shape (_N, _d) = error.shape assert ((N == _N) and (d == _d)), 'dimension mismatch!' weighted_error = (weight * error) cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1] var_w = np.var(weight, ddof=1) if ((cov == 0) and (var_w == 0)): cov = var_w = 1e-05 if (var_w == 0): var_w = cov eta = ((- cov) / var_w) return ((np.mean(weighted_error) + (eta * np.mean(weight))) - eta)<|docstring|>:param weight: shape [N, 1], the importance weight for N source samples in the validation set :param error: shape [N, 1], the error value for each source sample in the validation set (typically 0 for correct classification and 1 for wrong classification)<|endoftext|>
b2d705f984a9b0031b93970a31da52664813e1fccc92bcf27fb61032e1fab4f5
def get_weight(source_feature, target_feature, validation_feature): '\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :param validation_feature: shape [N_v, d], features from validation set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape if ((float(N_s) / N_t) > 2): source_feature = random_select_src(source_feature, target_feature) else: source_feature = source_feature.copy() print('num_source is {}, num_target is {}, ratio is {}\n'.format(N_s, N_t, (float(N_s) / N_t))) (N_s, d) = source_feature.shape target_feature = target_feature.copy() all_feature = np.concatenate((source_feature, target_feature)) all_label = np.asarray((([1] * N_s) + ([0] * N_t)), dtype=np.int32) (feature_for_train, feature_for_test, label_for_train, label_for_test) = train_test_split(all_feature, all_label, train_size=0.8) decays = [0.1, 0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001, 3e-05, 1e-05] val_acc = [] domain_classifiers = [] for decay in decays: domain_classifier = MLPClassifier(hidden_layer_sizes=(d, d, 2), activation='relu', alpha=decay) domain_classifier.fit(feature_for_train, label_for_train) output = domain_classifier.predict(feature_for_test) acc = np.mean((label_for_test == output).astype(np.float32)) val_acc.append(acc) domain_classifiers.append(domain_classifier) print(('decay is %s, val acc is %s' % (decay, acc))) index = val_acc.index(max(val_acc)) domain_classifier = domain_classifiers[index] domain_out = domain_classifier.predict_proba(validation_feature) return ((((domain_out[(:, :1)] / domain_out[(:, 1:)]) * N_s) * 1.0) / N_t)
:param source_feature: shape [N_tr, d], features from training set :param target_feature: shape [N_te, d], features from test set :param validation_feature: shape [N_v, d], features from validation set :return:
visda_classification/dev.py
get_weight
stellaxu/MCD_DA
0
python
def get_weight(source_feature, target_feature, validation_feature): '\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :param validation_feature: shape [N_v, d], features from validation set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape if ((float(N_s) / N_t) > 2): source_feature = random_select_src(source_feature, target_feature) else: source_feature = source_feature.copy() print('num_source is {}, num_target is {}, ratio is {}\n'.format(N_s, N_t, (float(N_s) / N_t))) (N_s, d) = source_feature.shape target_feature = target_feature.copy() all_feature = np.concatenate((source_feature, target_feature)) all_label = np.asarray((([1] * N_s) + ([0] * N_t)), dtype=np.int32) (feature_for_train, feature_for_test, label_for_train, label_for_test) = train_test_split(all_feature, all_label, train_size=0.8) decays = [0.1, 0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001, 3e-05, 1e-05] val_acc = [] domain_classifiers = [] for decay in decays: domain_classifier = MLPClassifier(hidden_layer_sizes=(d, d, 2), activation='relu', alpha=decay) domain_classifier.fit(feature_for_train, label_for_train) output = domain_classifier.predict(feature_for_test) acc = np.mean((label_for_test == output).astype(np.float32)) val_acc.append(acc) domain_classifiers.append(domain_classifier) print(('decay is %s, val acc is %s' % (decay, acc))) index = val_acc.index(max(val_acc)) domain_classifier = domain_classifiers[index] domain_out = domain_classifier.predict_proba(validation_feature) return ((((domain_out[(:, :1)] / domain_out[(:, 1:)]) * N_s) * 1.0) / N_t)
def get_weight(source_feature, target_feature, validation_feature): '\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :param validation_feature: shape [N_v, d], features from validation set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape if ((float(N_s) / N_t) > 2): source_feature = random_select_src(source_feature, target_feature) else: source_feature = source_feature.copy() print('num_source is {}, num_target is {}, ratio is {}\n'.format(N_s, N_t, (float(N_s) / N_t))) (N_s, d) = source_feature.shape target_feature = target_feature.copy() all_feature = np.concatenate((source_feature, target_feature)) all_label = np.asarray((([1] * N_s) + ([0] * N_t)), dtype=np.int32) (feature_for_train, feature_for_test, label_for_train, label_for_test) = train_test_split(all_feature, all_label, train_size=0.8) decays = [0.1, 0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001, 3e-05, 1e-05] val_acc = [] domain_classifiers = [] for decay in decays: domain_classifier = MLPClassifier(hidden_layer_sizes=(d, d, 2), activation='relu', alpha=decay) domain_classifier.fit(feature_for_train, label_for_train) output = domain_classifier.predict(feature_for_test) acc = np.mean((label_for_test == output).astype(np.float32)) val_acc.append(acc) domain_classifiers.append(domain_classifier) print(('decay is %s, val acc is %s' % (decay, acc))) index = val_acc.index(max(val_acc)) domain_classifier = domain_classifiers[index] domain_out = domain_classifier.predict_proba(validation_feature) return ((((domain_out[(:, :1)] / domain_out[(:, 1:)]) * N_s) * 1.0) / N_t)<|docstring|>:param source_feature: shape [N_tr, d], features from training set :param target_feature: shape [N_te, d], features from test set :param validation_feature: shape [N_v, d], features from validation set :return:<|endoftext|>
66923da99a9e9b3e4abf015adedf2ae6df8d479c4059caa2798f8fb02c085f24
def random_select_src(source_feature, target_feature): '\n Select at most 2*Ntr data from source feature randomly\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape items = [i for i in range(1, N_s)] random_list = random.sample(items, ((2 * N_t) - 1)) new_source_feature = source_feature[0].reshape(1, d) for i in range(((2 * N_t) - 1)): new_source_feature = np.concatenate((new_source_feature, source_feature[random_list[i]].reshape(1, d))) print('random_select:') print(new_source_feature.shape) return new_source_feature
Select at most 2*Ntr data from source feature randomly :param source_feature: shape [N_tr, d], features from training set :param target_feature: shape [N_te, d], features from test set :return:
visda_classification/dev.py
random_select_src
stellaxu/MCD_DA
0
python
def random_select_src(source_feature, target_feature): '\n Select at most 2*Ntr data from source feature randomly\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape items = [i for i in range(1, N_s)] random_list = random.sample(items, ((2 * N_t) - 1)) new_source_feature = source_feature[0].reshape(1, d) for i in range(((2 * N_t) - 1)): new_source_feature = np.concatenate((new_source_feature, source_feature[random_list[i]].reshape(1, d))) print('random_select:') print(new_source_feature.shape) return new_source_feature
def random_select_src(source_feature, target_feature): '\n Select at most 2*Ntr data from source feature randomly\n :param source_feature: shape [N_tr, d], features from training set\n :param target_feature: shape [N_te, d], features from test set\n :return:\n ' (N_s, d) = source_feature.shape (N_t, _d) = target_feature.shape items = [i for i in range(1, N_s)] random_list = random.sample(items, ((2 * N_t) - 1)) new_source_feature = source_feature[0].reshape(1, d) for i in range(((2 * N_t) - 1)): new_source_feature = np.concatenate((new_source_feature, source_feature[random_list[i]].reshape(1, d))) print('random_select:') print(new_source_feature.shape) return new_source_feature<|docstring|>Select at most 2*Ntr data from source feature randomly :param source_feature: shape [N_tr, d], features from training set :param target_feature: shape [N_te, d], features from test set :return:<|endoftext|>
f878443af25045d2e956cdb8ea7c9b5a36bc2a1b8e7012fd579b916fc0a9feea
def predict_loss(cls, y_pre): '\n Calculate the cross entropy loss for prediction of one picture\n :param cls:\n :param y_pre:\n :return:\n ' cls_torch = np.full(1, cls) pre_cls_torch = y_pre.double() target = torch.from_numpy(cls_torch).cuda() entropy = nn.CrossEntropyLoss() return entropy(pre_cls_torch, target)
Calculate the cross entropy loss for prediction of one picture :param cls: :param y_pre: :return:
visda_classification/dev.py
predict_loss
stellaxu/MCD_DA
0
python
def predict_loss(cls, y_pre): '\n Calculate the cross entropy loss for prediction of one picture\n :param cls:\n :param y_pre:\n :return:\n ' cls_torch = np.full(1, cls) pre_cls_torch = y_pre.double() target = torch.from_numpy(cls_torch).cuda() entropy = nn.CrossEntropyLoss() return entropy(pre_cls_torch, target)
def predict_loss(cls, y_pre): '\n Calculate the cross entropy loss for prediction of one picture\n :param cls:\n :param y_pre:\n :return:\n ' cls_torch = np.full(1, cls) pre_cls_torch = y_pre.double() target = torch.from_numpy(cls_torch).cuda() entropy = nn.CrossEntropyLoss() return entropy(pre_cls_torch, target)<|docstring|>Calculate the cross entropy loss for prediction of one picture :param cls: :param y_pre: :return:<|endoftext|>
a275647c19d39aa2662a4d2e713005b620e9fb36a95eddff701cbbf81c9016a9
def get_label_list(args, target_list, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu): '\n Return the target list with pesudolabel\n :param target_list: list conatinging all target file path and a wrong label\n :param predict_network: network to perdict label for target image\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() label_list = [] dsets_tar = ImageList(target_list, transform=prep.image_train(resize_size=resize_size, crop_size=crop_size)) dset_loaders_tar = util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=False, num_workers=4) len_train_target = len(dset_loaders_tar) iter_target = iter(dset_loaders_tar) count = 0 for i in range(len_train_target): (input_tar, label_tar) = iter_target.next() if use_gpu: (input_tar, label_tar) = (Variable(input_tar).cuda(), Variable(label_tar).cuda()) else: (input_tar, label_tar) = (Variable(input_tar), Variable(label_tar)) tar_feature = G(input_tar) predict_score = F1(tar_feature) (_, pre_lab) = torch.max(predict_score, 1) predict_label = pre_lab.detach() for num in range(len(predict_label.cpu())): if (target_list[count][(- 3)] == ' '): ind = (- 2) else: ind = (- 3) label_list.append(target_list[count][:ind]) label_list[count] = ((label_list[count] + str(predict_label[num].cpu().numpy())) + '\n') count += 1 return label_list
Return the target list with pesudolabel :param target_list: list conatinging all target file path and a wrong label :param predict_network: network to perdict label for target image :param resize_size: :param crop_size: :param batch_size: :return:
visda_classification/dev.py
get_label_list
stellaxu/MCD_DA
0
python
def get_label_list(args, target_list, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu): '\n Return the target list with pesudolabel\n :param target_list: list conatinging all target file path and a wrong label\n :param predict_network: network to perdict label for target image\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() label_list = [] dsets_tar = ImageList(target_list, transform=prep.image_train(resize_size=resize_size, crop_size=crop_size)) dset_loaders_tar = util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=False, num_workers=4) len_train_target = len(dset_loaders_tar) iter_target = iter(dset_loaders_tar) count = 0 for i in range(len_train_target): (input_tar, label_tar) = iter_target.next() if use_gpu: (input_tar, label_tar) = (Variable(input_tar).cuda(), Variable(label_tar).cuda()) else: (input_tar, label_tar) = (Variable(input_tar), Variable(label_tar)) tar_feature = G(input_tar) predict_score = F1(tar_feature) (_, pre_lab) = torch.max(predict_score, 1) predict_label = pre_lab.detach() for num in range(len(predict_label.cpu())): if (target_list[count][(- 3)] == ' '): ind = (- 2) else: ind = (- 3) label_list.append(target_list[count][:ind]) label_list[count] = ((label_list[count] + str(predict_label[num].cpu().numpy())) + '\n') count += 1 return label_list
def get_label_list(args, target_list, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu): '\n Return the target list with pesudolabel\n :param target_list: list conatinging all target file path and a wrong label\n :param predict_network: network to perdict label for target image\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() label_list = [] dsets_tar = ImageList(target_list, transform=prep.image_train(resize_size=resize_size, crop_size=crop_size)) dset_loaders_tar = util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=False, num_workers=4) len_train_target = len(dset_loaders_tar) iter_target = iter(dset_loaders_tar) count = 0 for i in range(len_train_target): (input_tar, label_tar) = iter_target.next() if use_gpu: (input_tar, label_tar) = (Variable(input_tar).cuda(), Variable(label_tar).cuda()) else: (input_tar, label_tar) = (Variable(input_tar), Variable(label_tar)) tar_feature = G(input_tar) predict_score = F1(tar_feature) (_, pre_lab) = torch.max(predict_score, 1) predict_label = pre_lab.detach() for num in range(len(predict_label.cpu())): if (target_list[count][(- 3)] == ' '): ind = (- 2) else: ind = (- 3) label_list.append(target_list[count][:ind]) label_list[count] = ((label_list[count] + str(predict_label[num].cpu().numpy())) + '\n') count += 1 return label_list<|docstring|>Return the target list with pesudolabel :param target_list: list conatinging all target file path and a wrong label :param predict_network: network to perdict label for target image :param resize_size: :param crop_size: :param batch_size: :return:<|endoftext|>
4fda59657d022b7e52392d1f91ef3e42608ed97a521c54515cac072eb8c63d6e
def cross_validation_loss(args, feature_network_path, predict_network_path, num_layer, src_cls_list, target_path, val_cls_list, class_num, resize_size, crop_size, batch_size, use_gpu): '\n Main function for computing the CV loss\n :param feature_network:\n :param predict_network:\n :param src_cls_list:\n :param target_path:\n :param val_cls_list:\n :param class_num:\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' target_list_no_label = open(target_path).readlines() tar_cls_list = [] cross_val_loss = 0 target_list = get_label_list(args, target_list_no_label, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu) for i in range(class_num): tar_cls_list.append([j for j in target_list if (int(j.split(' ')[1].replace('\n', '')) == i)]) prep_dict = prep.image_train(resize_size=resize_size, crop_size=crop_size) option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() for cls in range(class_num): print(cls) dsets_src = ImageList(src_cls_list[cls], transform=prep_dict) dset_loaders_src = util_data.DataLoader(dsets_src, batch_size=batch_size, shuffle=True, num_workers=4) iter_src = iter(dset_loaders_src) src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature = G(src_input) src_feature_de = src_feature.detach().cpu().numpy() for count_src in range((len(dset_loaders_src) - 1)): src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature_new = G(src_input) src_feature_new_de = src_feature_new.detach().cpu().numpy() src_feature_de = np.append(src_feature_de, src_feature_new_de, axis=0) dsets_tar = ImageList(tar_cls_list[cls], transform=prep_dict) dset_loaders_tar = 
util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=True, num_workers=4) iter_tar = iter(dset_loaders_tar) tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature = G(tar_input) tar_feature_de = tar_feature.detach().cpu().numpy() for count_tar in range((len(dset_loaders_tar) - 1)): tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature_new = G(tar_input) tar_feature_new_de = tar_feature_new.detach().cpu().numpy() tar_feature_de = np.append(tar_feature_de, tar_feature_new_de, axis=0) dsets_val = ImageList(val_cls_list[cls], transform=prep_dict) dset_loaders_val = util_data.DataLoader(dsets_val, batch_size=batch_size, shuffle=True, num_workers=4) iter_val = iter(dset_loaders_val) (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature = G(val_input) pred_label = F1(val_feature) val_feature_de = val_feature.detach().cpu().numpy() w = pred_label[0].shape[0] error = np.zeros(1) error[0] = predict_loss(cls, pred_label[0].reshape(1, w)).item() error = error.reshape(1, 1) for num_image in range(1, len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) for count_val in range((len(dset_loaders_val) - 1)): (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature_new = G(val_input) val_feature_new_de = val_feature_new.detach().cpu().numpy() val_feature_de = 
np.append(val_feature_de, val_feature_new_de, axis=0) val_feature = G(val_input) pred_label = F1(val_feature) for num_image in range(len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) print(src_feature_de.shape) print(tar_feature_de.shape) print(val_feature_de.shape) weight = get_weight(src_feature_de, tar_feature_de, val_feature_de) cross_val_loss = (cross_val_loss + (get_dev_risk(weight, error) / class_num)) return cross_val_loss
Main function for computing the CV loss :param feature_network: :param predict_network: :param src_cls_list: :param target_path: :param val_cls_list: :param class_num: :param resize_size: :param crop_size: :param batch_size: :return:
visda_classification/dev.py
cross_validation_loss
stellaxu/MCD_DA
0
python
def cross_validation_loss(args, feature_network_path, predict_network_path, num_layer, src_cls_list, target_path, val_cls_list, class_num, resize_size, crop_size, batch_size, use_gpu): '\n Main function for computing the CV loss\n :param feature_network:\n :param predict_network:\n :param src_cls_list:\n :param target_path:\n :param val_cls_list:\n :param class_num:\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' target_list_no_label = open(target_path).readlines() tar_cls_list = [] cross_val_loss = 0 target_list = get_label_list(args, target_list_no_label, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu) for i in range(class_num): tar_cls_list.append([j for j in target_list if (int(j.split(' ')[1].replace('\n', )) == i)]) prep_dict = prep.image_train(resize_size=resize_size, crop_size=crop_size) option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() for cls in range(class_num): print(cls) dsets_src = ImageList(src_cls_list[cls], transform=prep_dict) dset_loaders_src = util_data.DataLoader(dsets_src, batch_size=batch_size, shuffle=True, num_workers=4) iter_src = iter(dset_loaders_src) src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature = G(src_input) src_feature_de = src_feature.detach().cpu().numpy() for count_src in range((len(dset_loaders_src) - 1)): src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature_new = G(src_input) src_feature_new_de = src_feature_new.detach().cpu().numpy() src_feature_de = np.append(src_feature_de, src_feature_new_de, axis=0) dsets_tar = ImageList(tar_cls_list[cls], transform=prep_dict) dset_loaders_tar = 
util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=True, num_workers=4) iter_tar = iter(dset_loaders_tar) tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature = G(tar_input) tar_feature_de = tar_feature.detach().cpu().numpy() for count_tar in range((len(dset_loaders_tar) - 1)): tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature_new = G(tar_input) tar_feature_new_de = tar_feature_new.detach().cpu().numpy() tar_feature_de = np.append(tar_feature_de, tar_feature_new_de, axis=0) dsets_val = ImageList(val_cls_list[cls], transform=prep_dict) dset_loaders_val = util_data.DataLoader(dsets_val, batch_size=batch_size, shuffle=True, num_workers=4) iter_val = iter(dset_loaders_val) (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature = G(val_input) pred_label = F1(val_feature) val_feature_de = val_feature.detach().cpu().numpy() w = pred_label[0].shape[0] error = np.zeros(1) error[0] = predict_loss(cls, pred_label[0].reshape(1, w)).item() error = error.reshape(1, 1) for num_image in range(1, len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) for count_val in range((len(dset_loaders_val) - 1)): (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature_new = G(val_input) val_feature_new_de = val_feature_new.detach().cpu().numpy() val_feature_de = 
np.append(val_feature_de, val_feature_new_de, axis=0) val_feature = G(val_input) pred_label = F1(val_feature) for num_image in range(len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) print(src_feature_de.shape) print(tar_feature_de.shape) print(val_feature_de.shape) weight = get_weight(src_feature_de, tar_feature_de, val_feature_de) cross_val_loss = (cross_val_loss + (get_dev_risk(weight, error) / class_num)) return cross_val_loss
def cross_validation_loss(args, feature_network_path, predict_network_path, num_layer, src_cls_list, target_path, val_cls_list, class_num, resize_size, crop_size, batch_size, use_gpu): '\n Main function for computing the CV loss\n :param feature_network:\n :param predict_network:\n :param src_cls_list:\n :param target_path:\n :param val_cls_list:\n :param class_num:\n :param resize_size:\n :param crop_size:\n :param batch_size:\n :return:\n ' target_list_no_label = open(target_path).readlines() tar_cls_list = [] cross_val_loss = 0 target_list = get_label_list(args, target_list_no_label, feature_network_path, predict_network_path, num_layer, resize_size, crop_size, batch_size, use_gpu) for i in range(class_num): tar_cls_list.append([j for j in target_list if (int(j.split(' ')[1].replace('\n', )) == i)]) prep_dict = prep.image_train(resize_size=resize_size, crop_size=crop_size) option = ('resnet' + args.resnet) G = ResBase(option) F1 = ResClassifier(num_layer=num_layer) G.load_state_dict(torch.load(feature_network_path)) F1.load_state_dict(torch.load(predict_network_path)) if use_gpu: G.cuda() F1.cuda() G.eval() F1.eval() for cls in range(class_num): print(cls) dsets_src = ImageList(src_cls_list[cls], transform=prep_dict) dset_loaders_src = util_data.DataLoader(dsets_src, batch_size=batch_size, shuffle=True, num_workers=4) iter_src = iter(dset_loaders_src) src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature = G(src_input) src_feature_de = src_feature.detach().cpu().numpy() for count_src in range((len(dset_loaders_src) - 1)): src_input = iter_src.next()[0] if use_gpu: src_input = Variable(src_input).cuda() else: src_input = Variable(src_input) src_feature_new = G(src_input) src_feature_new_de = src_feature_new.detach().cpu().numpy() src_feature_de = np.append(src_feature_de, src_feature_new_de, axis=0) dsets_tar = ImageList(tar_cls_list[cls], transform=prep_dict) dset_loaders_tar = 
util_data.DataLoader(dsets_tar, batch_size=batch_size, shuffle=True, num_workers=4) iter_tar = iter(dset_loaders_tar) tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature = G(tar_input) tar_feature_de = tar_feature.detach().cpu().numpy() for count_tar in range((len(dset_loaders_tar) - 1)): tar_input = iter_tar.next()[0] if use_gpu: tar_input = Variable(tar_input).cuda() else: tar_input = Variable(tar_input) tar_feature_new = G(tar_input) tar_feature_new_de = tar_feature_new.detach().cpu().numpy() tar_feature_de = np.append(tar_feature_de, tar_feature_new_de, axis=0) dsets_val = ImageList(val_cls_list[cls], transform=prep_dict) dset_loaders_val = util_data.DataLoader(dsets_val, batch_size=batch_size, shuffle=True, num_workers=4) iter_val = iter(dset_loaders_val) (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature = G(val_input) pred_label = F1(val_feature) val_feature_de = val_feature.detach().cpu().numpy() w = pred_label[0].shape[0] error = np.zeros(1) error[0] = predict_loss(cls, pred_label[0].reshape(1, w)).item() error = error.reshape(1, 1) for num_image in range(1, len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) for count_val in range((len(dset_loaders_val) - 1)): (val_input, val_labels) = iter_val.next() if use_gpu: (val_input, val_labels) = (Variable(val_input).cuda(), Variable(val_labels).cuda()) else: (val_input, val_labels) = (Variable(val_input), Variable(val_labels)) val_feature_new = G(val_input) val_feature_new_de = val_feature_new.detach().cpu().numpy() val_feature_de = 
np.append(val_feature_de, val_feature_new_de, axis=0) val_feature = G(val_input) pred_label = F1(val_feature) for num_image in range(len(pred_label)): new_error = np.zeros(1) single_pred_label = pred_label[num_image] w = single_pred_label.shape[0] new_error[0] = predict_loss(cls, single_pred_label.reshape(1, w)).item() new_error = new_error.reshape(1, 1) error = np.append(error, new_error, axis=0) print(src_feature_de.shape) print(tar_feature_de.shape) print(val_feature_de.shape) weight = get_weight(src_feature_de, tar_feature_de, val_feature_de) cross_val_loss = (cross_val_loss + (get_dev_risk(weight, error) / class_num)) return cross_val_loss<|docstring|>Main function for computing the CV loss :param feature_network: :param predict_network: :param src_cls_list: :param target_path: :param val_cls_list: :param class_num: :param resize_size: :param crop_size: :param batch_size: :return:<|endoftext|>
2bfeaaa0138c84b99f59a1551db32bfcf3abc61909d9e799decb5516c2d05267
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' self.data = None
Method: __init__ Description: Class initialization. Arguments:
test/unit/mysql_perf/mysql_stat.py
__init__
deepcoder42/mysql-perf
0
python
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' self.data = None
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' self.data = None<|docstring|>Method: __init__ Description: Class initialization. Arguments:<|endoftext|>
f60f3aa93c7dea41939163d8f956491a89b686cec25e8b4822b655a3554b8625
def add_2_msg(self, data):
    """Stub for Mail.add_2_msg: capture *data* on the instance and report success."""
    self.data = data
    return True
Method: add_2_msg Description: Stub method holder for Mail.add_2_msg. Arguments:
test/unit/mysql_perf/mysql_stat.py
add_2_msg
deepcoder42/mysql-perf
0
python
def add_2_msg(self, data): 'Method: add_2_msg\n\n Description: Stub method holder for Mail.add_2_msg.\n\n Arguments:\n\n ' self.data = data return True
def add_2_msg(self, data): 'Method: add_2_msg\n\n Description: Stub method holder for Mail.add_2_msg.\n\n Arguments:\n\n ' self.data = data return True<|docstring|>Method: add_2_msg Description: Stub method holder for Mail.add_2_msg. Arguments:<|endoftext|>
6ec11ba627a16f906adc756103779e84268ca4625e334b9ee52536be07f1ec16
def send_mail(self, use_mailx=False):
    """Stub for Mail.send_mail: always report success.

    Args:
        use_mailx: accepted for interface compatibility with the real
            Mail.send_mail; it does not change the stubbed outcome.

    Returns:
        True in every case.
    """
    # The original set status = True and then re-assigned True when
    # use_mailx was set -- dead code with a single possible outcome.
    return True
Method: send_mail Description: Stub method holder for Mail.send_mail. Arguments: (input) use_mailx -> True|False - To use mailx command.
test/unit/mysql_perf/mysql_stat.py
send_mail
deepcoder42/mysql-perf
0
python
def send_mail(self, use_mailx=False): 'Method: send_mail\n\n Description: Stub method holder for Mail.send_mail.\n\n Arguments:\n (input) use_mailx -> True|False - To use mailx command.\n\n ' status = True if use_mailx: status = True return status
def send_mail(self, use_mailx=False): 'Method: send_mail\n\n Description: Stub method holder for Mail.send_mail.\n\n Arguments:\n (input) use_mailx -> True|False - To use mailx command.\n\n ' status = True if use_mailx: status = True return status<|docstring|>Method: send_mail Description: Stub method holder for Mail.send_mail. Arguments: (input) use_mailx -> True|False - To use mailx command.<|endoftext|>
9b1b52fbfd616f70322ccc3a6d2c90b3295883f4e0793ac11e119969341badec
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' pass
Method: __init__ Description: Class initialization. Arguments:
test/unit/mysql_perf/mysql_stat.py
__init__
deepcoder42/mysql-perf
0
python
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' pass
def __init__(self): 'Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n ' pass<|docstring|>Method: __init__ Description: Class initialization. Arguments:<|endoftext|>
a004d3f5f4b688006a1c840d52c595808696bd73aab8d7e0238882e16b0e19f9
def setUp(self):
    """Initialize unit-test fixtures.

    Builds stub server/mail doubles plus the option dictionaries
    exercised by the individual tests. Judging by the loop/interval
    tests below, -n is the loop count and -b the interval -- TODO
    confirm against mysql_perf's option parsing.
    """
    self.server = Server()
    self.mail = Mail()
    # Baseline: one loop, interval of one.
    self.args_array = {'-n': 1, '-b': 1}
    # Loop-count and interval variations (including zero and negative).
    self.args_array2 = {'-n': 3, '-b': 1}
    self.args_array3 = {'-n': 1, '-b': 1, '-a': True}
    self.args_array4 = {'-n': 1, '-b': 1, '-f': True}
    self.args_array5 = {'-n': 0, '-b': 1}
    self.args_array6 = {'-n': 2, '-b': 2}
    self.args_array7 = {'-n': 2, '-b': 1}
    self.args_array8 = {'-n': 2, '-b': 0}
    self.args_array10 = {'-n': (- 1), '-b': 1}
    # Email variations: -t recipient, -s subject, -u use mailx.
    self.args_array11 = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line'}
    self.args_array11a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line', '-u': True}
    self.args_array12 = {'-n': 1, '-b': 1, '-t': 'email_addr'}
    self.args_array12a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-u': True}
Function: setUp Description: Initialization for unit testing. Arguments:
test/unit/mysql_perf/mysql_stat.py
setUp
deepcoder42/mysql-perf
0
python
def setUp(self): 'Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n ' self.server = Server() self.mail = Mail() self.args_array = {'-n': 1, '-b': 1} self.args_array2 = {'-n': 3, '-b': 1} self.args_array3 = {'-n': 1, '-b': 1, '-a': True} self.args_array4 = {'-n': 1, '-b': 1, '-f': True} self.args_array5 = {'-n': 0, '-b': 1} self.args_array6 = {'-n': 2, '-b': 2} self.args_array7 = {'-n': 2, '-b': 1} self.args_array8 = {'-n': 2, '-b': 0} self.args_array10 = {'-n': (- 1), '-b': 1} self.args_array11 = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line'} self.args_array11a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line', '-u': True} self.args_array12 = {'-n': 1, '-b': 1, '-t': 'email_addr'} self.args_array12a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-u': True}
def setUp(self): 'Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n ' self.server = Server() self.mail = Mail() self.args_array = {'-n': 1, '-b': 1} self.args_array2 = {'-n': 3, '-b': 1} self.args_array3 = {'-n': 1, '-b': 1, '-a': True} self.args_array4 = {'-n': 1, '-b': 1, '-f': True} self.args_array5 = {'-n': 0, '-b': 1} self.args_array6 = {'-n': 2, '-b': 2} self.args_array7 = {'-n': 2, '-b': 1} self.args_array8 = {'-n': 2, '-b': 0} self.args_array10 = {'-n': (- 1), '-b': 1} self.args_array11 = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line'} self.args_array11a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-s': 'subject_line', '-u': True} self.args_array12 = {'-n': 1, '-b': 1, '-t': 'email_addr'} self.args_array12a = {'-n': 1, '-b': 1, '-t': 'email_addr', '-u': True}<|docstring|>Function: setUp Description: Initialization for unit testing. Arguments:<|endoftext|>
258589b174bf7773a40e415000c1f04d0102e155a3d8b7ac62526765632cb905
@mock.patch('mysql_perf.gen_class.setup_mail')
@mock.patch('mysql_perf.mysql_stat_run')
def test_email_no_subj_mailx(self, mock_process, mock_mail):
    """Test mysql_stat with an email recipient, no subject, and mailx (-u).

    Decorators apply bottom-up: mock_process patches mysql_stat_run,
    mock_mail patches gen_class.setup_mail.
    """
    mock_process.return_value = True
    mock_mail.return_value = self.mail
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12a))
Function: test_email_no_subj_mailx Description: Test with email but no subject using mailx. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_email_no_subj_mailx
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_no_subj_mailx(self, mock_process, mock_mail): 'Function: test_email_no_subj_mailx\n\n Description: Test with email but no subject using mailx.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12a))
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_no_subj_mailx(self, mock_process, mock_mail): 'Function: test_email_no_subj_mailx\n\n Description: Test with email but no subject using mailx.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12a))<|docstring|>Function: test_email_no_subj_mailx Description: Test with email but no subject using mailx. Arguments:<|endoftext|>
fd3de93875e940336965d822f4ab6c5985bb4c1100aeb1ca514bf7cf95f3d4f0
@mock.patch('mysql_perf.gen_class.setup_mail')
@mock.patch('mysql_perf.mysql_stat_run')
def test_email_mailx(self, mock_process, mock_mail):
    """Test mysql_stat with recipient, subject, and the mailx (-u) option.

    Decorators apply bottom-up: mock_process patches mysql_stat_run,
    mock_mail patches gen_class.setup_mail.
    """
    mock_process.return_value = True
    mock_mail.return_value = self.mail
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11a))
Function: test_email_mailx Description: Test with email option set using mailx. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_email_mailx
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_mailx(self, mock_process, mock_mail): 'Function: test_email_mailx\n\n Description: Test with email option set using mailx.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11a))
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_mailx(self, mock_process, mock_mail): 'Function: test_email_mailx\n\n Description: Test with email option set using mailx.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11a))<|docstring|>Function: test_email_mailx Description: Test with email option set using mailx. Arguments:<|endoftext|>
89c8cdf686c39e6dd408dcb674a49f28d6d45043cc81b2ab885f0217f5e02d60
@mock.patch('mysql_perf.gen_class.setup_mail')
@mock.patch('mysql_perf.mysql_stat_run')
def test_email_no_subj(self, mock_process, mock_mail):
    """Test mysql_stat with an email recipient (-t) but no subject (-s)."""
    mock_process.return_value = True
    mock_mail.return_value = self.mail
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12))
Function: test_email_no_subj Description: Test with email but no subject in args. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_email_no_subj
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_no_subj(self, mock_process, mock_mail): 'Function: test_email_no_subj\n\n Description: Test with email but no subject in args.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12))
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email_no_subj(self, mock_process, mock_mail): 'Function: test_email_no_subj\n\n Description: Test with email but no subject in args.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array12))<|docstring|>Function: test_email_no_subj Description: Test with email but no subject in args. Arguments:<|endoftext|>
e5d00aba25a66ac799212790c3fb7c14ee9b1a1a2d383b407b019482e8660ed5
@mock.patch('mysql_perf.gen_class.setup_mail')
@mock.patch('mysql_perf.mysql_stat_run')
def test_email(self, mock_process, mock_mail):
    """Test mysql_stat with email recipient and subject options set."""
    mock_process.return_value = True
    mock_mail.return_value = self.mail
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11))
Function: test_email Description: Test with email option set. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_email
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email(self, mock_process, mock_mail): 'Function: test_email\n\n Description: Test with email option set.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11))
@mock.patch('mysql_perf.gen_class.setup_mail') @mock.patch('mysql_perf.mysql_stat_run') def test_email(self, mock_process, mock_mail): 'Function: test_email\n\n Description: Test with email option set.\n\n Arguments:\n\n ' mock_process.return_value = True mock_mail.return_value = self.mail self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array11))<|docstring|>Function: test_email Description: Test with email option set. Arguments:<|endoftext|>
d49a66b74e3c209c2d67afe455e86382b090366872fdedb668430c120f8916c4
@mock.patch('mysql_perf.mysql_stat_run')
def test_interval_zero(self, mock_process):
    """Test mysql_stat with the -b interval option set to zero."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array8))
Function: test_interval_zero Description: Test with -b option set to zero. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_interval_zero
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_zero(self, mock_process): 'Function: test_interval_zero\n\n Description: Test with -b option set to zero.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array8))
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_zero(self, mock_process): 'Function: test_interval_zero\n\n Description: Test with -b option set to zero.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array8))<|docstring|>Function: test_interval_zero Description: Test with -b option set to zero. Arguments:<|endoftext|>
1ed0a9f9247c0820e470bd79d60f2494e9a0981b0fe91a5ef3bfc549945963a1
@mock.patch('mysql_perf.mysql_stat_run')
def test_interval_one(self, mock_process):
    """Test mysql_stat with the -b interval option set to one.

    The original docstring was mislabeled "test_interval_two"; this
    case uses args_array7 (-b = 1).
    """
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array7))
Function: test_interval_two Description: Test with -b option set to one. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_interval_one
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_one(self, mock_process): 'Function: test_interval_two\n\n Description: Test with -b option set to one.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array7))
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_one(self, mock_process): 'Function: test_interval_two\n\n Description: Test with -b option set to one.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array7))<|docstring|>Function: test_interval_two Description: Test with -b option set to one. Arguments:<|endoftext|>
64b4698d78c5f43a90b93772c66974b24cc19ed2dd2fa002fa03bf1873c8bf78
@mock.patch('mysql_perf.mysql_stat_run')
def test_interval_two(self, mock_process):
    """Test mysql_stat with the -b interval option set greater than one."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array6))
Function: test_interval_two Description: Test with -b option set to > one. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_interval_two
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_two(self, mock_process): 'Function: test_interval_two\n\n Description: Test with -b option set to > one.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array6))
@mock.patch('mysql_perf.mysql_stat_run') def test_interval_two(self, mock_process): 'Function: test_interval_two\n\n Description: Test with -b option set to > one.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array6))<|docstring|>Function: test_interval_two Description: Test with -b option set to > one. Arguments:<|endoftext|>
9f05ff5656ab6b592c8ea23c27c3580208246dcb035abc4c0107a3c1e6d0d718
def test_loop_negative(self):
    """Run mysql_stat with -n set to a negative loop count."""
    result = mysql_perf.mysql_stat(self.server, self.args_array10)
    self.assertFalse(result)
Function: test_loop_negative Description: Test with -n option set to negative number. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_loop_negative
deepcoder42/mysql-perf
0
python
def test_loop_negative(self): 'Function: test_loop_negative\n\n Description: Test with -n option set to negative number.\n\n Arguments:\n\n ' self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array10))
def test_loop_negative(self): 'Function: test_loop_negative\n\n Description: Test with -n option set to negative number.\n\n Arguments:\n\n ' self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array10))<|docstring|>Function: test_loop_negative Description: Test with -n option set to negative number. Arguments:<|endoftext|>
b0dd46a65ed7cae4fa7444791a30eb17b3537e0362c26ae274041486b9b24340
def test_zero_loop(self):
    """Run mysql_stat with -n set to a zero loop count."""
    result = mysql_perf.mysql_stat(self.server, self.args_array5)
    self.assertFalse(result)
Function: test_zero_loop Description: Test with -n option set to zero. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_zero_loop
deepcoder42/mysql-perf
0
python
def test_zero_loop(self): 'Function: test_zero_loop\n\n Description: Test with -n option set to zero.\n\n Arguments:\n\n ' self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array5))
def test_zero_loop(self): 'Function: test_zero_loop\n\n Description: Test with -n option set to zero.\n\n Arguments:\n\n ' self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array5))<|docstring|>Function: test_zero_loop Description: Test with -n option set to zero. Arguments:<|endoftext|>
d4b5e209f4c819cdfdf106b6bd32b3d78cbf09ec6f99f0708ef227e6153928c7
@mock.patch('mysql_perf.mysql_stat_run')
def test_json_flat(self, mock_process):
    """Test mysql_stat with the -f option (flattened JSON indentation)."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array4))
Function: test_json_flat Description: Test with flatten indentation for JSON. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_json_flat
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_json_flat(self, mock_process): 'Function: test_json_flat\n\n Description: Test with flatten indentation for JSON.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array4))
@mock.patch('mysql_perf.mysql_stat_run') def test_json_flat(self, mock_process): 'Function: test_json_flat\n\n Description: Test with flatten indentation for JSON.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array4))<|docstring|>Function: test_json_flat Description: Test with flatten indentation for JSON. Arguments:<|endoftext|>
16d4258d14fcb4d52587d4b64765601b0a7da7e187f8c345df61ca4553c6e4ec
@mock.patch('mysql_perf.mysql_stat_run')
def test_json_indent(self, mock_process):
    """Test mysql_stat with default JSON indentation (no -f flag)."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))
Function: test_json_indent Description: Test with default indentation for JSON. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_json_indent
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_json_indent(self, mock_process): 'Function: test_json_indent\n\n Description: Test with default indentation for JSON.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))
@mock.patch('mysql_perf.mysql_stat_run') def test_json_indent(self, mock_process): 'Function: test_json_indent\n\n Description: Test with default indentation for JSON.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))<|docstring|>Function: test_json_indent Description: Test with default indentation for JSON. Arguments:<|endoftext|>
df75a6c780efe5bb7f36551f8e193d94cd30aa71ba864cdbbc53e5871e4fe3af
@mock.patch('mysql_perf.mysql_stat_run')
def test_file_write(self, mock_process):
    """Test mysql_stat with the file write option set (args_array3)."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))
Function: test_file_write Description: Test with setting file write. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_file_write
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_file_write(self, mock_process): 'Function: test_file_write\n\n Description: Test with setting file write.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))
@mock.patch('mysql_perf.mysql_stat_run') def test_file_write(self, mock_process): 'Function: test_file_write\n\n Description: Test with setting file write.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))<|docstring|>Function: test_file_write Description: Test with setting file write. Arguments:<|endoftext|>
0d4bfb18e387fc4fe4eee8b0fa91e41aa0cc70f1634580a6c96bce977f5b8d7b
@mock.patch('mysql_perf.mysql_stat_run')
def test_file_append(self, mock_process):
    """Test mysql_stat with the file append option set.

    NOTE(review): this uses args_array3, identical to test_file_write;
    an append-specific flag appears to be missing -- confirm intent.
    """
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))
Function: test_file_append Description: Test with setting file append. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_file_append
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_file_append(self, mock_process): 'Function: test_file_append\n\n Description: Test with setting file append.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))
@mock.patch('mysql_perf.mysql_stat_run') def test_file_append(self, mock_process): 'Function: test_file_append\n\n Description: Test with setting file append.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array3))<|docstring|>Function: test_file_append Description: Test with setting file append. Arguments:<|endoftext|>
93a995e07c5f976591d44ba9afeee61c5c521a6432f3d765b3bb0a048c587e62
@mock.patch('mysql_perf.mysql_stat_run')
def test_multi_loop(self, mock_process):
    """Test mysql_stat with multiple loops (-n = 3)."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array2))
Function: test_multi_loop Description: Test with multiple loops. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_multi_loop
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_multi_loop(self, mock_process): 'Function: test_multi_loop\n\n Description: Test with multiple loops.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array2))
@mock.patch('mysql_perf.mysql_stat_run') def test_multi_loop(self, mock_process): 'Function: test_multi_loop\n\n Description: Test with multiple loops.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array2))<|docstring|>Function: test_multi_loop Description: Test with multiple loops. Arguments:<|endoftext|>
2cd57289244804a128229fcd0f26be484d27ba1db2aa958eec90b7dbc6beaaa8
@mock.patch('mysql_perf.mysql_stat_run')
def test_default(self, mock_process):
    """Test mysql_stat with default settings (one loop, interval of one)."""
    mock_process.return_value = True
    self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))
Function: test_default Description: Test with default settings. Arguments:
test/unit/mysql_perf/mysql_stat.py
test_default
deepcoder42/mysql-perf
0
python
@mock.patch('mysql_perf.mysql_stat_run') def test_default(self, mock_process): 'Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))
@mock.patch('mysql_perf.mysql_stat_run') def test_default(self, mock_process): 'Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n ' mock_process.return_value = True self.assertFalse(mysql_perf.mysql_stat(self.server, self.args_array))<|docstring|>Function: test_default Description: Test with default settings. Arguments:<|endoftext|>
8baabeab82162c9d9ed4ecea1ddef81f1f7ad671da7f40e6431aa75400a24ea1
def update_files(data, python=True):
    """Rewrite project files in place, stamping in the new project name.

    For every ``filename -> regex`` pair in *data*, each line matching the
    regex has its ``pre``/``post`` named groups rejoined around the project
    name taken from ``sys.argv[1]``.

    Args:
        data: mapping of file path (relative to BASE_DIR) to a regex with
            named groups ``pre`` and ``post`` surrounding the name to swap.
        python: unused; kept for interface compatibility.

    Raises:
        Exception: if no project name was given on the command line, or if
            a file contains no line matching its regex.
    """
    if len(sys.argv) != 2:
        raise Exception('Specify project name: "%s streamlit"' % sys.argv[0])
    project_name = sys.argv[1]

    for filename, regex in data.items():
        filename = os.path.join(BASE_DIR, filename)
        matched = False
        pattern = re.compile(regex)

        for line in fileinput.input(filename, inplace=True):
            # Strip the incoming newline once; print() below restores
            # exactly one. The original printed unmatched lines with their
            # newline still attached, so print() doubled it and inserted a
            # blank line after every non-matching line.
            line = line.rstrip('\n')
            if pattern.match(line.rstrip()):
                matched = True
                line = re.sub(regex, '\\g<pre>%s\\g<post>' % project_name,
                              line.rstrip())
            print(line)

        if not matched:
            raise Exception('In file "%s", did not find regex "%s"'
                            % (filename, regex))
Update files with new project name.
scripts/update_name.py
update_files
jonathan-eckel/streamlit
19,099
python
def update_files(data, python=True): if (len(sys.argv) != 2): e = Exception(('Specify project name: "%s streamlit"' % sys.argv[0])) raise e project_name = sys.argv[1] for (filename, regex) in data.items(): filename = os.path.join(BASE_DIR, filename) matched = False pattern = re.compile(regex) for line in fileinput.input(filename, inplace=True): if pattern.match(line.rstrip()): matched = True line = re.sub(regex, ('\\g<pre>%s\\g<post>' % project_name), line.rstrip()) print(line) if (not matched): raise Exception(('In file "%s", did not find regex "%s"' % (filename, regex)))
def update_files(data, python=True): if (len(sys.argv) != 2): e = Exception(('Specify project name: "%s streamlit"' % sys.argv[0])) raise e project_name = sys.argv[1] for (filename, regex) in data.items(): filename = os.path.join(BASE_DIR, filename) matched = False pattern = re.compile(regex) for line in fileinput.input(filename, inplace=True): if pattern.match(line.rstrip()): matched = True line = re.sub(regex, ('\\g<pre>%s\\g<post>' % project_name), line.rstrip()) print(line) if (not matched): raise Exception(('In file "%s", did not find regex "%s"' % (filename, regex)))<|docstring|>Update files with new project name.<|endoftext|>
70b5cb39a3a705af28ea47368800b566a56b5a342955660c082fff36731864c8
def main():
    """Run main loop."""
    # Apply the project-name rewrite to the PYTHON path->regex table.
    update_files(PYTHON)
Run main loop.
scripts/update_name.py
main
jonathan-eckel/streamlit
19,099
python
def main(): update_files(PYTHON)
def main(): update_files(PYTHON)<|docstring|>Run main loop.<|endoftext|>
d089b627c0754bf1d568aa2f01434d8a83e0dd128fa906ced70408e68a878b9a
def deep_update(target, source): 'Update a nested dictionary with another nested dictionary.' for (key, value) in source.items(): if isinstance(value, collections.Mapping): target[key] = deep_update(target.get(key, {}), value) else: target[key] = value return target
Update a nested dictionary with another nested dictionary.
homeassistant/components/google_assistant/smart_home.py
deep_update
ellsclytn/home-assistant
37
python
def deep_update(target, source): for (key, value) in source.items(): if isinstance(value, collections.Mapping): target[key] = deep_update(target.get(key, {}), value) else: target[key] = value return target
def deep_update(target, source): for (key, value) in source.items(): if isinstance(value, collections.Mapping): target[key] = deep_update(target.get(key, {}), value) else: target[key] = value return target<|docstring|>Update a nested dictionary with another nested dictionary.<|endoftext|>
9a9103819fe96ffe0e10cfade2d284c64ba3d19dfb674444a9e23ad6265f727a
async def async_handle_message(hass, config, message): 'Handle incoming API messages.' response = (await _process(hass, config, message)) if ('errorCode' in response['payload']): _LOGGER.error('Error handling message %s: %s', message, response['payload']) return response
Handle incoming API messages.
homeassistant/components/google_assistant/smart_home.py
async_handle_message
ellsclytn/home-assistant
37
python
async def async_handle_message(hass, config, message): response = (await _process(hass, config, message)) if ('errorCode' in response['payload']): _LOGGER.error('Error handling message %s: %s', message, response['payload']) return response
async def async_handle_message(hass, config, message): response = (await _process(hass, config, message)) if ('errorCode' in response['payload']): _LOGGER.error('Error handling message %s: %s', message, response['payload']) return response<|docstring|>Handle incoming API messages.<|endoftext|>
322000710b0ca04cddc40e2adca335226722013156f4c397b1bce651443bf638
async def _process(hass, config, message): 'Process a message.' request_id = message.get('requestId') inputs = message.get('inputs') if (len(inputs) != 1): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} handler = HANDLERS.get(inputs[0].get('intent')) if (handler is None): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} try: result = (await handler(hass, config, inputs[0].get('payload'))) return {'requestId': request_id, 'payload': result} except SmartHomeError as err: return {'requestId': request_id, 'payload': {'errorCode': err.code}} except Exception as err: _LOGGER.exception('Unexpected error') return {'requestId': request_id, 'payload': {'errorCode': ERR_UNKNOWN_ERROR}}
Process a message.
homeassistant/components/google_assistant/smart_home.py
_process
ellsclytn/home-assistant
37
python
async def _process(hass, config, message): request_id = message.get('requestId') inputs = message.get('inputs') if (len(inputs) != 1): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} handler = HANDLERS.get(inputs[0].get('intent')) if (handler is None): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} try: result = (await handler(hass, config, inputs[0].get('payload'))) return {'requestId': request_id, 'payload': result} except SmartHomeError as err: return {'requestId': request_id, 'payload': {'errorCode': err.code}} except Exception as err: _LOGGER.exception('Unexpected error') return {'requestId': request_id, 'payload': {'errorCode': ERR_UNKNOWN_ERROR}}
async def _process(hass, config, message): request_id = message.get('requestId') inputs = message.get('inputs') if (len(inputs) != 1): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} handler = HANDLERS.get(inputs[0].get('intent')) if (handler is None): return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}} try: result = (await handler(hass, config, inputs[0].get('payload'))) return {'requestId': request_id, 'payload': result} except SmartHomeError as err: return {'requestId': request_id, 'payload': {'errorCode': err.code}} except Exception as err: _LOGGER.exception('Unexpected error') return {'requestId': request_id, 'payload': {'errorCode': ERR_UNKNOWN_ERROR}}<|docstring|>Process a message.<|endoftext|>
670bb4562eaef9c6fbe189f60908a600c6f7d52d68eb6ebeb5ef762704170656
@HANDLERS.register('action.devices.SYNC') async def async_devices_sync(hass, config, payload): 'Handle action.devices.SYNC request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' devices = [] for state in hass.states.async_all(): if (not config.should_expose(state)): continue entity = _GoogleEntity(hass, config, state) serialized = entity.sync_serialize() if (serialized is None): _LOGGER.debug('No mapping for %s domain', entity.state) continue devices.append(serialized) return {'agentUserId': config.agent_user_id, 'devices': devices}
Handle action.devices.SYNC request. https://developers.google.com/actions/smarthome/create-app#actiondevicessync
homeassistant/components/google_assistant/smart_home.py
async_devices_sync
ellsclytn/home-assistant
37
python
@HANDLERS.register('action.devices.SYNC') async def async_devices_sync(hass, config, payload): 'Handle action.devices.SYNC request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' devices = [] for state in hass.states.async_all(): if (not config.should_expose(state)): continue entity = _GoogleEntity(hass, config, state) serialized = entity.sync_serialize() if (serialized is None): _LOGGER.debug('No mapping for %s domain', entity.state) continue devices.append(serialized) return {'agentUserId': config.agent_user_id, 'devices': devices}
@HANDLERS.register('action.devices.SYNC') async def async_devices_sync(hass, config, payload): 'Handle action.devices.SYNC request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' devices = [] for state in hass.states.async_all(): if (not config.should_expose(state)): continue entity = _GoogleEntity(hass, config, state) serialized = entity.sync_serialize() if (serialized is None): _LOGGER.debug('No mapping for %s domain', entity.state) continue devices.append(serialized) return {'agentUserId': config.agent_user_id, 'devices': devices}<|docstring|>Handle action.devices.SYNC request. https://developers.google.com/actions/smarthome/create-app#actiondevicessync<|endoftext|>
83c74fb034c6f639343ced1b1f11af34f70eac342f0b41f63d7cb63262202429
@HANDLERS.register('action.devices.QUERY') async def async_devices_query(hass, config, payload): 'Handle action.devices.QUERY request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' devices = {} for device in payload.get('devices', []): devid = device['id'] state = hass.states.get(devid) if (not state): devices[devid] = {'online': False} continue devices[devid] = _GoogleEntity(hass, config, state).query_serialize() return {'devices': devices}
Handle action.devices.QUERY request. https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
homeassistant/components/google_assistant/smart_home.py
async_devices_query
ellsclytn/home-assistant
37
python
@HANDLERS.register('action.devices.QUERY') async def async_devices_query(hass, config, payload): 'Handle action.devices.QUERY request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' devices = {} for device in payload.get('devices', []): devid = device['id'] state = hass.states.get(devid) if (not state): devices[devid] = {'online': False} continue devices[devid] = _GoogleEntity(hass, config, state).query_serialize() return {'devices': devices}
@HANDLERS.register('action.devices.QUERY') async def async_devices_query(hass, config, payload): 'Handle action.devices.QUERY request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' devices = {} for device in payload.get('devices', []): devid = device['id'] state = hass.states.get(devid) if (not state): devices[devid] = {'online': False} continue devices[devid] = _GoogleEntity(hass, config, state).query_serialize() return {'devices': devices}<|docstring|>Handle action.devices.QUERY request. https://developers.google.com/actions/smarthome/create-app#actiondevicesquery<|endoftext|>
7e83da98719b8a050ef49244f2090ddb1441393b34229dada12b482d843d326d
@HANDLERS.register('action.devices.EXECUTE') async def handle_devices_execute(hass, config, payload): 'Handle action.devices.EXECUTE request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' entities = {} results = {} for command in payload['commands']: for (device, execution) in product(command['devices'], command['execution']): entity_id = device['id'] if (entity_id in results): continue if (entity_id not in entities): state = hass.states.get(entity_id) if (state is None): results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': ERR_DEVICE_OFFLINE} continue entities[entity_id] = _GoogleEntity(hass, config, state) try: (await entities[entity_id].execute(execution['command'], execution.get('params', {}))) except SmartHomeError as err: results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code} final_results = list(results.values()) for entity in entities.values(): if (entity.entity_id in results): continue entity.async_update() final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': entity.query_serialize()}) return {'commands': final_results}
Handle action.devices.EXECUTE request. https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
homeassistant/components/google_assistant/smart_home.py
handle_devices_execute
ellsclytn/home-assistant
37
python
@HANDLERS.register('action.devices.EXECUTE') async def handle_devices_execute(hass, config, payload): 'Handle action.devices.EXECUTE request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' entities = {} results = {} for command in payload['commands']: for (device, execution) in product(command['devices'], command['execution']): entity_id = device['id'] if (entity_id in results): continue if (entity_id not in entities): state = hass.states.get(entity_id) if (state is None): results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': ERR_DEVICE_OFFLINE} continue entities[entity_id] = _GoogleEntity(hass, config, state) try: (await entities[entity_id].execute(execution['command'], execution.get('params', {}))) except SmartHomeError as err: results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code} final_results = list(results.values()) for entity in entities.values(): if (entity.entity_id in results): continue entity.async_update() final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': entity.query_serialize()}) return {'commands': final_results}
@HANDLERS.register('action.devices.EXECUTE') async def handle_devices_execute(hass, config, payload): 'Handle action.devices.EXECUTE request.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' entities = {} results = {} for command in payload['commands']: for (device, execution) in product(command['devices'], command['execution']): entity_id = device['id'] if (entity_id in results): continue if (entity_id not in entities): state = hass.states.get(entity_id) if (state is None): results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': ERR_DEVICE_OFFLINE} continue entities[entity_id] = _GoogleEntity(hass, config, state) try: (await entities[entity_id].execute(execution['command'], execution.get('params', {}))) except SmartHomeError as err: results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code} final_results = list(results.values()) for entity in entities.values(): if (entity.entity_id in results): continue entity.async_update() final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': entity.query_serialize()}) return {'commands': final_results}<|docstring|>Handle action.devices.EXECUTE request. https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute<|endoftext|>
a0f56b86264c0795a9f796ebd0f5f2d94cd8de19e108c01c249ce6c816b0f8f9
@property def entity_id(self): 'Return entity ID.' return self.state.entity_id
Return entity ID.
homeassistant/components/google_assistant/smart_home.py
entity_id
ellsclytn/home-assistant
37
python
@property def entity_id(self): return self.state.entity_id
@property def entity_id(self): return self.state.entity_id<|docstring|>Return entity ID.<|endoftext|>
4b752b11f4e6abbbf91af5f85354aae643f32141a2e9809f028bf98ffaa39ae8
@callback def traits(self): 'Return traits for entity.' state = self.state domain = state.domain features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) return [Trait(state) for Trait in trait.TRAITS if Trait.supported(domain, features)]
Return traits for entity.
homeassistant/components/google_assistant/smart_home.py
traits
ellsclytn/home-assistant
37
python
@callback def traits(self): state = self.state domain = state.domain features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) return [Trait(state) for Trait in trait.TRAITS if Trait.supported(domain, features)]
@callback def traits(self): state = self.state domain = state.domain features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) return [Trait(state) for Trait in trait.TRAITS if Trait.supported(domain, features)]<|docstring|>Return traits for entity.<|endoftext|>
47b46a9617ab6197ec2d99e341f6613e4f1b9f17d54b8577ee5d4d4c5d39b37a
@callback def sync_serialize(self): 'Serialize entity for a SYNC response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return None entity_config = self.config.entity_config.get(state.entity_id, {}) name = (entity_config.get(CONF_NAME) or state.name).strip() if (not name): return None traits = self.traits() if (not traits): return None device = {'id': state.entity_id, 'name': {'name': name}, 'attributes': {}, 'traits': [trait.name for trait in traits], 'willReportState': False, 'type': DOMAIN_TO_GOOGLE_TYPES[state.domain]} aliases = entity_config.get(CONF_ALIASES) if aliases: device['name']['nicknames'] = aliases room = entity_config.get(CONF_ROOM_HINT) if room: device['roomHint'] = room for trt in traits: device['attributes'].update(trt.sync_attributes()) return device
Serialize entity for a SYNC response. https://developers.google.com/actions/smarthome/create-app#actiondevicessync
homeassistant/components/google_assistant/smart_home.py
sync_serialize
ellsclytn/home-assistant
37
python
@callback def sync_serialize(self): 'Serialize entity for a SYNC response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return None entity_config = self.config.entity_config.get(state.entity_id, {}) name = (entity_config.get(CONF_NAME) or state.name).strip() if (not name): return None traits = self.traits() if (not traits): return None device = {'id': state.entity_id, 'name': {'name': name}, 'attributes': {}, 'traits': [trait.name for trait in traits], 'willReportState': False, 'type': DOMAIN_TO_GOOGLE_TYPES[state.domain]} aliases = entity_config.get(CONF_ALIASES) if aliases: device['name']['nicknames'] = aliases room = entity_config.get(CONF_ROOM_HINT) if room: device['roomHint'] = room for trt in traits: device['attributes'].update(trt.sync_attributes()) return device
@callback def sync_serialize(self): 'Serialize entity for a SYNC response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicessync\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return None entity_config = self.config.entity_config.get(state.entity_id, {}) name = (entity_config.get(CONF_NAME) or state.name).strip() if (not name): return None traits = self.traits() if (not traits): return None device = {'id': state.entity_id, 'name': {'name': name}, 'attributes': {}, 'traits': [trait.name for trait in traits], 'willReportState': False, 'type': DOMAIN_TO_GOOGLE_TYPES[state.domain]} aliases = entity_config.get(CONF_ALIASES) if aliases: device['name']['nicknames'] = aliases room = entity_config.get(CONF_ROOM_HINT) if room: device['roomHint'] = room for trt in traits: device['attributes'].update(trt.sync_attributes()) return device<|docstring|>Serialize entity for a SYNC response. https://developers.google.com/actions/smarthome/create-app#actiondevicessync<|endoftext|>
518b8ed926439c4a987ed4d2ac159e5fe00ec9c2cf306acc8d52d347872c96f2
@callback def query_serialize(self): 'Serialize entity for a QUERY response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return {'online': False} attrs = {'online': True} for trt in self.traits(): deep_update(attrs, trt.query_attributes()) return attrs
Serialize entity for a QUERY response. https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
homeassistant/components/google_assistant/smart_home.py
query_serialize
ellsclytn/home-assistant
37
python
@callback def query_serialize(self): 'Serialize entity for a QUERY response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return {'online': False} attrs = {'online': True} for trt in self.traits(): deep_update(attrs, trt.query_attributes()) return attrs
@callback def query_serialize(self): 'Serialize entity for a QUERY response.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesquery\n ' state = self.state if (state.state == STATE_UNAVAILABLE): return {'online': False} attrs = {'online': True} for trt in self.traits(): deep_update(attrs, trt.query_attributes()) return attrs<|docstring|>Serialize entity for a QUERY response. https://developers.google.com/actions/smarthome/create-app#actiondevicesquery<|endoftext|>
695631e5b7398de881b4178d2d1f9fd78497a65a587540ef8719c2768a73bf31
async def execute(self, command, params): 'Execute a command.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' executed = False for trt in self.traits(): if trt.can_execute(command, params): (await trt.execute(self.hass, command, params)) executed = True break if (not executed): raise SmartHomeError(ERR_NOT_SUPPORTED, 'Unable to execute {} for {}'.format(command, self.state.entity_id))
Execute a command. https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
homeassistant/components/google_assistant/smart_home.py
execute
ellsclytn/home-assistant
37
python
async def execute(self, command, params): 'Execute a command.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' executed = False for trt in self.traits(): if trt.can_execute(command, params): (await trt.execute(self.hass, command, params)) executed = True break if (not executed): raise SmartHomeError(ERR_NOT_SUPPORTED, 'Unable to execute {} for {}'.format(command, self.state.entity_id))
async def execute(self, command, params): 'Execute a command.\n\n https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute\n ' executed = False for trt in self.traits(): if trt.can_execute(command, params): (await trt.execute(self.hass, command, params)) executed = True break if (not executed): raise SmartHomeError(ERR_NOT_SUPPORTED, 'Unable to execute {} for {}'.format(command, self.state.entity_id))<|docstring|>Execute a command. https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute<|endoftext|>
4ca3e5758b72311848298bb564afd3f1401d862e6c474f4117a57d9d4ccfbc31
@callback def async_update(self): 'Update the entity with latest info from Home Assistant.' self.state = self.hass.states.get(self.entity_id)
Update the entity with latest info from Home Assistant.
homeassistant/components/google_assistant/smart_home.py
async_update
ellsclytn/home-assistant
37
python
@callback def async_update(self): self.state = self.hass.states.get(self.entity_id)
@callback def async_update(self): self.state = self.hass.states.get(self.entity_id)<|docstring|>Update the entity with latest info from Home Assistant.<|endoftext|>
f9ba04fc5536239bcfbe0046fd8e8fa13a926308ba9c0fb2f44674f8144bf346
def load_sample(sample_file): 'Load a single example and return it as a SquadExample tuple.' with open(sample_file, 'rt') as handle: sample = json.load(handle) return SquadExample(question=sample['question'], context=sample['context'], sentence_lengths=sample['sent_lengths'], answer_sentence=sample['ans_sentence'], answer_start=sample['ans_start'], answer_end=sample['ans_end'], same_as_question_word=sample['same_as_question_word'], repeated_words=sample['repeated_words'], repeated_word_intensity=sample['repeated_intensity'], id=sample['qa_id'])
Load a single example and return it as a SquadExample tuple.
gnr.py
load_sample
baidu-research/GloballyNormalizedReader
73
python
def load_sample(sample_file): with open(sample_file, 'rt') as handle: sample = json.load(handle) return SquadExample(question=sample['question'], context=sample['context'], sentence_lengths=sample['sent_lengths'], answer_sentence=sample['ans_sentence'], answer_start=sample['ans_start'], answer_end=sample['ans_end'], same_as_question_word=sample['same_as_question_word'], repeated_words=sample['repeated_words'], repeated_word_intensity=sample['repeated_intensity'], id=sample['qa_id'])
def load_sample(sample_file): with open(sample_file, 'rt') as handle: sample = json.load(handle) return SquadExample(question=sample['question'], context=sample['context'], sentence_lengths=sample['sent_lengths'], answer_sentence=sample['ans_sentence'], answer_start=sample['ans_start'], answer_end=sample['ans_end'], same_as_question_word=sample['same_as_question_word'], repeated_words=sample['repeated_words'], repeated_word_intensity=sample['repeated_intensity'], id=sample['qa_id'])<|docstring|>Load a single example and return it as a SquadExample tuple.<|endoftext|>
251559bd809fd7fb99c5471d3acef4acdbadc5f343bf708bb34d45f67c91ecd7
def make_batches(samples, augmented_samples, batch_size, cycle=False): 'Convert samples from the samples generator into\n padded batches with `batch_size` as the first dimension.' current_batch = [] while True: if (augmented_samples is not None): augmented = featurize.random_sample(augmented_samples, 10000) epoch_samples = (samples + augmented) else: epoch_samples = samples random.shuffle(epoch_samples) for (idx, sample) in enumerate(epoch_samples): current_batch.append(load_sample(sample)) if ((len(current_batch) == batch_size) or (idx == (len(samples) - 1))): questions = ops.lists_to_array([sample.question for sample in current_batch], padding=(- 1)) contexts = ops.lists_to_array([sample.context for sample in current_batch], padding=(- 1)) same_as_question_word = ops.lists_to_array([sample.same_as_question_word for sample in current_batch], padding=0) repeated_words = ops.lists_to_array([sample.repeated_words for sample in current_batch], padding=0) repeated_word_intensity = ops.lists_to_array([sample.repeated_word_intensity for sample in current_batch], padding=0) sent_lengths = ops.lists_to_array([sample.sentence_lengths for sample in current_batch], padding=0) answer_sentences = np.array([sample.answer_sentence for sample in current_batch]) answer_starts = np.array([sample.answer_start for sample in current_batch]) answer_ends = np.array([sample.answer_end for sample in current_batch]) ids = [sample.id for sample in current_batch] (yield [questions, contexts, same_as_question_word, repeated_words, repeated_word_intensity, sent_lengths, answer_sentences, answer_starts, answer_ends, ids]) current_batch = [] if (not cycle): break
Convert samples from the samples generator into padded batches with `batch_size` as the first dimension.
gnr.py
make_batches
baidu-research/GloballyNormalizedReader
73
python
def make_batches(samples, augmented_samples, batch_size, cycle=False): 'Convert samples from the samples generator into\n padded batches with `batch_size` as the first dimension.' current_batch = [] while True: if (augmented_samples is not None): augmented = featurize.random_sample(augmented_samples, 10000) epoch_samples = (samples + augmented) else: epoch_samples = samples random.shuffle(epoch_samples) for (idx, sample) in enumerate(epoch_samples): current_batch.append(load_sample(sample)) if ((len(current_batch) == batch_size) or (idx == (len(samples) - 1))): questions = ops.lists_to_array([sample.question for sample in current_batch], padding=(- 1)) contexts = ops.lists_to_array([sample.context for sample in current_batch], padding=(- 1)) same_as_question_word = ops.lists_to_array([sample.same_as_question_word for sample in current_batch], padding=0) repeated_words = ops.lists_to_array([sample.repeated_words for sample in current_batch], padding=0) repeated_word_intensity = ops.lists_to_array([sample.repeated_word_intensity for sample in current_batch], padding=0) sent_lengths = ops.lists_to_array([sample.sentence_lengths for sample in current_batch], padding=0) answer_sentences = np.array([sample.answer_sentence for sample in current_batch]) answer_starts = np.array([sample.answer_start for sample in current_batch]) answer_ends = np.array([sample.answer_end for sample in current_batch]) ids = [sample.id for sample in current_batch] (yield [questions, contexts, same_as_question_word, repeated_words, repeated_word_intensity, sent_lengths, answer_sentences, answer_starts, answer_ends, ids]) current_batch = [] if (not cycle): break
def make_batches(samples, augmented_samples, batch_size, cycle=False): 'Convert samples from the samples generator into\n padded batches with `batch_size` as the first dimension.' current_batch = [] while True: if (augmented_samples is not None): augmented = featurize.random_sample(augmented_samples, 10000) epoch_samples = (samples + augmented) else: epoch_samples = samples random.shuffle(epoch_samples) for (idx, sample) in enumerate(epoch_samples): current_batch.append(load_sample(sample)) if ((len(current_batch) == batch_size) or (idx == (len(samples) - 1))): questions = ops.lists_to_array([sample.question for sample in current_batch], padding=(- 1)) contexts = ops.lists_to_array([sample.context for sample in current_batch], padding=(- 1)) same_as_question_word = ops.lists_to_array([sample.same_as_question_word for sample in current_batch], padding=0) repeated_words = ops.lists_to_array([sample.repeated_words for sample in current_batch], padding=0) repeated_word_intensity = ops.lists_to_array([sample.repeated_word_intensity for sample in current_batch], padding=0) sent_lengths = ops.lists_to_array([sample.sentence_lengths for sample in current_batch], padding=0) answer_sentences = np.array([sample.answer_sentence for sample in current_batch]) answer_starts = np.array([sample.answer_start for sample in current_batch]) answer_ends = np.array([sample.answer_end for sample in current_batch]) ids = [sample.id for sample in current_batch] (yield [questions, contexts, same_as_question_word, repeated_words, repeated_word_intensity, sent_lengths, answer_sentences, answer_starts, answer_ends, ids]) current_batch = [] if (not cycle): break<|docstring|>Convert samples from the samples generator into padded batches with `batch_size` as the first dimension.<|endoftext|>
77735b91bb10ee61b7a2a5229eb8d2208bad0ca185cbe82ecdb8c03c220dab9d
def load_input_data(path, batch_size, validation_size, current_iteration): '\n Load the input data from the provided directory, splitting it into\n validation batches and an infinite generator of training batches.\n\n Arguments:\n - path: Directory with one file or subdirectory per training sample.\n - batch_size: Size of each training batch (per GPU).\n - validation_size: Size of the validation set (not per GPU).\n - current_iteration: The number of training batches to skip.\n Return a tuple of (train data, validation data), where training and validation\n data are both generators of batches. A batch is a list of NumPy arrays,\n in the same order as returned by load_sample.\n ' if (not os.path.exists(os.path.join(path, 'train'))): print('Non-existent directory as input path: {}'.format(path), file=sys.stderr) sys.exit(1) train_samples = glob.glob(os.path.join(path, 'train', '*')) valid_samples = glob.glob(os.path.join(path, 'dev', '*')) augmented_samples = glob.glob(os.path.join(path, 'augmented', '*')) train_samples.sort() valid_samples.sort() augmented_samples.sort() random.shuffle(train_samples) random.shuffle(valid_samples) random.shuffle(augmented_samples) train = ops.prefetch_generator(make_batches(train_samples, augmented_samples, batch_size, cycle=True), to_fetch=(2 * batch_size)) valid = ops.prefetch_generator(make_batches(valid_samples, None, validation_size), to_fetch=validation_size) evals = make_eval_batches(os.path.join(path, 'eval.json'), 1) return (train, valid, evals)
Load the input data from the provided directory, splitting it into validation batches and an infinite generator of training batches. Arguments: - path: Directory with one file or subdirectory per training sample. - batch_size: Size of each training batch (per GPU). - validation_size: Size of the validation set (not per GPU). - current_iteration: The number of training batches to skip. Return a tuple of (train data, validation data), where training and validation data are both generators of batches. A batch is a list of NumPy arrays, in the same order as returned by load_sample.
gnr.py
load_input_data
baidu-research/GloballyNormalizedReader
73
python
def load_input_data(path, batch_size, validation_size, current_iteration): '\n Load the input data from the provided directory, splitting it into\n validation batches and an infinite generator of training batches.\n\n Arguments:\n - path: Directory with one file or subdirectory per training sample.\n - batch_size: Size of each training batch (per GPU).\n - validation_size: Size of the validation set (not per GPU).\n - current_iteration: The number of training batches to skip.\n Return a tuple of (train data, validation data), where training and validation\n data are both generators of batches. A batch is a list of NumPy arrays,\n in the same order as returned by load_sample.\n ' if (not os.path.exists(os.path.join(path, 'train'))): print('Non-existent directory as input path: {}'.format(path), file=sys.stderr) sys.exit(1) train_samples = glob.glob(os.path.join(path, 'train', '*')) valid_samples = glob.glob(os.path.join(path, 'dev', '*')) augmented_samples = glob.glob(os.path.join(path, 'augmented', '*')) train_samples.sort() valid_samples.sort() augmented_samples.sort() random.shuffle(train_samples) random.shuffle(valid_samples) random.shuffle(augmented_samples) train = ops.prefetch_generator(make_batches(train_samples, augmented_samples, batch_size, cycle=True), to_fetch=(2 * batch_size)) valid = ops.prefetch_generator(make_batches(valid_samples, None, validation_size), to_fetch=validation_size) evals = make_eval_batches(os.path.join(path, 'eval.json'), 1) return (train, valid, evals)
def load_input_data(path, batch_size, validation_size, current_iteration): '\n Load the input data from the provided directory, splitting it into\n validation batches and an infinite generator of training batches.\n\n Arguments:\n - path: Directory with one file or subdirectory per training sample.\n - batch_size: Size of each training batch (per GPU).\n - validation_size: Size of the validation set (not per GPU).\n - current_iteration: The number of training batches to skip.\n Return a tuple of (train data, validation data), where training and validation\n data are both generators of batches. A batch is a list of NumPy arrays,\n in the same order as returned by load_sample.\n ' if (not os.path.exists(os.path.join(path, 'train'))): print('Non-existent directory as input path: {}'.format(path), file=sys.stderr) sys.exit(1) train_samples = glob.glob(os.path.join(path, 'train', '*')) valid_samples = glob.glob(os.path.join(path, 'dev', '*')) augmented_samples = glob.glob(os.path.join(path, 'augmented', '*')) train_samples.sort() valid_samples.sort() augmented_samples.sort() random.shuffle(train_samples) random.shuffle(valid_samples) random.shuffle(augmented_samples) train = ops.prefetch_generator(make_batches(train_samples, augmented_samples, batch_size, cycle=True), to_fetch=(2 * batch_size)) valid = ops.prefetch_generator(make_batches(valid_samples, None, validation_size), to_fetch=validation_size) evals = make_eval_batches(os.path.join(path, 'eval.json'), 1) return (train, valid, evals)<|docstring|>Load the input data from the provided directory, splitting it into validation batches and an infinite generator of training batches. Arguments: - path: Directory with one file or subdirectory per training sample. - batch_size: Size of each training batch (per GPU). - validation_size: Size of the validation set (not per GPU). - current_iteration: The number of training batches to skip. 
Return a tuple of (train data, validation data), where training and validation data are both generators of batches. A batch is a list of NumPy arrays, in the same order as returned by load_sample.<|endoftext|>
a1c65974e75d7942cdc9461aa47ba60471186b0a3a52454284cd1d57b916e1f2
def featurize_question(model, questions, embedding_dropout, training): 'Embed the question as the final hidden state of a stack of Bi-LSTMs\n and a "passage-indenpendent" embedding from Rasor.\n\n Arguments:\n model: QA model hyperparameters.\n questions: Question word indices with shape `[batch, length]`.\n embedding_dropout: Dropout probability for the inputs to the LSTM.\n\n Returns:\n hiddens: Vector representation for the entire question with shape\n `[batch, features]`.\n ' with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') question_embeddings = ops.masked_embedding_lookup(embeds, questions) question_embeddings = tf.nn.dropout(question_embeddings, embedding_dropout) with tf.variable_scope('QuestionLSTMs'): (hiddens, final_h, _) = ops.cudnn_lstm(question_embeddings, model.question_layers, model.layer_size, model.weight_noise, training) batch = tf.shape(final_h)[0] final_states = tf.reshape(final_h[(:, (- 2):, :)], [batch, (2 * model.layer_size)]) with tf.variable_scope('PassageIndependentEmbedding'): features = hiddens.get_shape()[(- 1)].value hiddens = tf.contrib.layers.fully_connected(inputs=hiddens, num_outputs=features, activation_fn=None) sentinel = tf.get_variable(shape=[features, 1], initializer=tf.contrib.layers.variance_scaling_initializer(), name='sentinel') alphas = ops.semibatch_matmul(hiddens, sentinel) alphas = tf.nn.softmax(alphas, dim=1) passage_indep_embedding = tf.reduce_sum((alphas * hiddens), axis=1) return tf.concat(axis=1, values=[final_states, passage_indep_embedding])
Embed the question as the final hidden state of a stack of Bi-LSTMs and a "passage-indenpendent" embedding from Rasor. Arguments: model: QA model hyperparameters. questions: Question word indices with shape `[batch, length]`. embedding_dropout: Dropout probability for the inputs to the LSTM. Returns: hiddens: Vector representation for the entire question with shape `[batch, features]`.
gnr.py
featurize_question
baidu-research/GloballyNormalizedReader
73
python
def featurize_question(model, questions, embedding_dropout, training): 'Embed the question as the final hidden state of a stack of Bi-LSTMs\n and a "passage-indenpendent" embedding from Rasor.\n\n Arguments:\n model: QA model hyperparameters.\n questions: Question word indices with shape `[batch, length]`.\n embedding_dropout: Dropout probability for the inputs to the LSTM.\n\n Returns:\n hiddens: Vector representation for the entire question with shape\n `[batch, features]`.\n ' with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') question_embeddings = ops.masked_embedding_lookup(embeds, questions) question_embeddings = tf.nn.dropout(question_embeddings, embedding_dropout) with tf.variable_scope('QuestionLSTMs'): (hiddens, final_h, _) = ops.cudnn_lstm(question_embeddings, model.question_layers, model.layer_size, model.weight_noise, training) batch = tf.shape(final_h)[0] final_states = tf.reshape(final_h[(:, (- 2):, :)], [batch, (2 * model.layer_size)]) with tf.variable_scope('PassageIndependentEmbedding'): features = hiddens.get_shape()[(- 1)].value hiddens = tf.contrib.layers.fully_connected(inputs=hiddens, num_outputs=features, activation_fn=None) sentinel = tf.get_variable(shape=[features, 1], initializer=tf.contrib.layers.variance_scaling_initializer(), name='sentinel') alphas = ops.semibatch_matmul(hiddens, sentinel) alphas = tf.nn.softmax(alphas, dim=1) passage_indep_embedding = tf.reduce_sum((alphas * hiddens), axis=1) return tf.concat(axis=1, values=[final_states, passage_indep_embedding])
def featurize_question(model, questions, embedding_dropout, training): 'Embed the question as the final hidden state of a stack of Bi-LSTMs\n and a "passage-indenpendent" embedding from Rasor.\n\n Arguments:\n model: QA model hyperparameters.\n questions: Question word indices with shape `[batch, length]`.\n embedding_dropout: Dropout probability for the inputs to the LSTM.\n\n Returns:\n hiddens: Vector representation for the entire question with shape\n `[batch, features]`.\n ' with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') question_embeddings = ops.masked_embedding_lookup(embeds, questions) question_embeddings = tf.nn.dropout(question_embeddings, embedding_dropout) with tf.variable_scope('QuestionLSTMs'): (hiddens, final_h, _) = ops.cudnn_lstm(question_embeddings, model.question_layers, model.layer_size, model.weight_noise, training) batch = tf.shape(final_h)[0] final_states = tf.reshape(final_h[(:, (- 2):, :)], [batch, (2 * model.layer_size)]) with tf.variable_scope('PassageIndependentEmbedding'): features = hiddens.get_shape()[(- 1)].value hiddens = tf.contrib.layers.fully_connected(inputs=hiddens, num_outputs=features, activation_fn=None) sentinel = tf.get_variable(shape=[features, 1], initializer=tf.contrib.layers.variance_scaling_initializer(), name='sentinel') alphas = ops.semibatch_matmul(hiddens, sentinel) alphas = tf.nn.softmax(alphas, dim=1) passage_indep_embedding = tf.reduce_sum((alphas * hiddens), axis=1) return tf.concat(axis=1, values=[final_states, passage_indep_embedding])<|docstring|>Embed the question as the final hidden state of a stack of Bi-LSTMs and a "passage-indenpendent" embedding from Rasor. Arguments: model: QA model hyperparameters. questions: Question word indices with shape `[batch, length]`. embedding_dropout: Dropout probability for the inputs to the LSTM. Returns: hiddens: Vector representation for the entire question with shape `[batch, features]`.<|endoftext|>
6bd4869524474bc3e5a85cceeaf153999f4f8e03d763eaeee3d241a0a8c8f968
def featurize_document(model, questions, documents, same_as_question, repeated_words, repeated_word_intensity, question_vector, embedding_dropout, training): "Run a stack of Bi-LSTM's over the document.\n Arguments:\n model: QA model hyperparameters.\n documents: Document word indices with shape `[batch, length]`.\n same_as_question: Boolean: Does the question contain this word? with\n shape `[batch, length]`\n question_vector: Vector representation of the question with\n shape `[batch, features]`\n embedding_dropout: Dropout probability for the LSTM inputs.\n hidden_dropout: Dropout probability for the LSTM hidden states.\n\n Returns:\n hiddens: Vector representation for each word in the document with\n shape `[batch, length, features]`.\n " with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') document_embeddings = ops.masked_embedding_lookup(embeds, documents) question_embeddings = ops.masked_embedding_lookup(embeds, questions) qa_embeds = question_aligned_embeddings(document_embeddings, question_embeddings, model.layer_size) question_vector = tf.tile(tf.expand_dims(question_vector, 1), [1, tf.shape(documents)[1], 1]) same_as_question = tf.expand_dims(same_as_question, 2) repeated_words = tf.expand_dims(repeated_words, 2) repeated_word_intensity = tf.expand_dims(repeated_word_intensity, 2) document_embeddings = tf.concat(axis=2, values=[document_embeddings, same_as_question, repeated_words, repeated_word_intensity, question_vector, qa_embeds]) document_embeddings = tf.nn.dropout(document_embeddings, embedding_dropout) with tf.variable_scope('DocumentLSTMs'): (hiddens, _, _) = ops.cudnn_lstm(document_embeddings, model.document_layers, model.layer_size, model.weight_noise, training) return hiddens
Run a stack of Bi-LSTM's over the document. Arguments: model: QA model hyperparameters. documents: Document word indices with shape `[batch, length]`. same_as_question: Boolean: Does the question contain this word? with shape `[batch, length]` question_vector: Vector representation of the question with shape `[batch, features]` embedding_dropout: Dropout probability for the LSTM inputs. hidden_dropout: Dropout probability for the LSTM hidden states. Returns: hiddens: Vector representation for each word in the document with shape `[batch, length, features]`.
gnr.py
featurize_document
baidu-research/GloballyNormalizedReader
73
python
def featurize_document(model, questions, documents, same_as_question, repeated_words, repeated_word_intensity, question_vector, embedding_dropout, training): "Run a stack of Bi-LSTM's over the document.\n Arguments:\n model: QA model hyperparameters.\n documents: Document word indices with shape `[batch, length]`.\n same_as_question: Boolean: Does the question contain this word? with\n shape `[batch, length]`\n question_vector: Vector representation of the question with\n shape `[batch, features]`\n embedding_dropout: Dropout probability for the LSTM inputs.\n hidden_dropout: Dropout probability for the LSTM hidden states.\n\n Returns:\n hiddens: Vector representation for each word in the document with\n shape `[batch, length, features]`.\n " with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') document_embeddings = ops.masked_embedding_lookup(embeds, documents) question_embeddings = ops.masked_embedding_lookup(embeds, questions) qa_embeds = question_aligned_embeddings(document_embeddings, question_embeddings, model.layer_size) question_vector = tf.tile(tf.expand_dims(question_vector, 1), [1, tf.shape(documents)[1], 1]) same_as_question = tf.expand_dims(same_as_question, 2) repeated_words = tf.expand_dims(repeated_words, 2) repeated_word_intensity = tf.expand_dims(repeated_word_intensity, 2) document_embeddings = tf.concat(axis=2, values=[document_embeddings, same_as_question, repeated_words, repeated_word_intensity, question_vector, qa_embeds]) document_embeddings = tf.nn.dropout(document_embeddings, embedding_dropout) with tf.variable_scope('DocumentLSTMs'): (hiddens, _, _) = ops.cudnn_lstm(document_embeddings, model.document_layers, model.layer_size, model.weight_noise, training) return hiddens
def featurize_document(model, questions, documents, same_as_question, repeated_words, repeated_word_intensity, question_vector, embedding_dropout, training): "Run a stack of Bi-LSTM's over the document.\n Arguments:\n model: QA model hyperparameters.\n documents: Document word indices with shape `[batch, length]`.\n same_as_question: Boolean: Does the question contain this word? with\n shape `[batch, length]`\n question_vector: Vector representation of the question with\n shape `[batch, features]`\n embedding_dropout: Dropout probability for the LSTM inputs.\n hidden_dropout: Dropout probability for the LSTM hidden states.\n\n Returns:\n hiddens: Vector representation for each word in the document with\n shape `[batch, length, features]`.\n " with tf.variable_scope('GloveEmbeddings', reuse=True): embeds = tf.get_variable('GloveEmbeddings') document_embeddings = ops.masked_embedding_lookup(embeds, documents) question_embeddings = ops.masked_embedding_lookup(embeds, questions) qa_embeds = question_aligned_embeddings(document_embeddings, question_embeddings, model.layer_size) question_vector = tf.tile(tf.expand_dims(question_vector, 1), [1, tf.shape(documents)[1], 1]) same_as_question = tf.expand_dims(same_as_question, 2) repeated_words = tf.expand_dims(repeated_words, 2) repeated_word_intensity = tf.expand_dims(repeated_word_intensity, 2) document_embeddings = tf.concat(axis=2, values=[document_embeddings, same_as_question, repeated_words, repeated_word_intensity, question_vector, qa_embeds]) document_embeddings = tf.nn.dropout(document_embeddings, embedding_dropout) with tf.variable_scope('DocumentLSTMs'): (hiddens, _, _) = ops.cudnn_lstm(document_embeddings, model.document_layers, model.layer_size, model.weight_noise, training) return hiddens<|docstring|>Run a stack of Bi-LSTM's over the document. Arguments: model: QA model hyperparameters. documents: Document word indices with shape `[batch, length]`. same_as_question: Boolean: Does the question contain this word? 
with shape `[batch, length]` question_vector: Vector representation of the question with shape `[batch, features]` embedding_dropout: Dropout probability for the LSTM inputs. hidden_dropout: Dropout probability for the LSTM hidden states. Returns: hiddens: Vector representation for each word in the document with shape `[batch, length, features]`.<|endoftext|>
8bfe7704986be9b786047bb583a556b0c47d76c35119240e066601449a4772ad
def score_sentences(model, documents_features, sentence_lengths, hidden_dropout): 'Compute logits for selecting each sentence in the document.\n\n Arguments:\n documents_features: Feature representation of the document\n with shape `[batch, length, features]`.\n sentence_lengths: Length of each sentence in the document\n with shape `[batch, num_sentences]`, used\n for finding sentence boundaries in\n documents_features.\n\n Returns:\n logits: Scores for each sentence with shape\n `[batch, num_sentences]`.\n ' batch_size = tf.shape(documents_features)[0] length = tf.shape(documents_features)[1] features = documents_features.get_shape()[(- 1)].value num_sentences = tf.shape(sentence_lengths)[1] sentence_start_positions = tf.cumsum(sentence_lengths, axis=1, exclusive=True) sentence_end_positions = (tf.cumsum(sentence_lengths, axis=1) - 1) offsets = (length * tf.expand_dims(tf.range(batch_size), 1)) sentence_start_positions = tf.reshape((sentence_start_positions + offsets), [(- 1)]) sentence_end_positions = tf.reshape((sentence_end_positions + offsets), [(- 1)]) documents_features = tf.reshape(documents_features, [(- 1), features]) forward_features = documents_features[(:, :(features // 2))] forward_states = tf.gather(forward_features, sentence_end_positions) backward_features = documents_features[(:, (features // 2):)] backward_states = tf.gather(backward_features, sentence_start_positions) sentence_states = tf.concat(axis=1, values=[forward_states, backward_states]) sentence_states = tf.reshape(sentence_states, [batch_size, num_sentences, features]) with tf.variable_scope('SentenceSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_states, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [2])
Compute logits for selecting each sentence in the document. Arguments: documents_features: Feature representation of the document with shape `[batch, length, features]`. sentence_lengths: Length of each sentence in the document with shape `[batch, num_sentences]`, used for finding sentence boundaries in documents_features. Returns: logits: Scores for each sentence with shape `[batch, num_sentences]`.
gnr.py
score_sentences
baidu-research/GloballyNormalizedReader
73
python
def score_sentences(model, documents_features, sentence_lengths, hidden_dropout): 'Compute logits for selecting each sentence in the document.\n\n Arguments:\n documents_features: Feature representation of the document\n with shape `[batch, length, features]`.\n sentence_lengths: Length of each sentence in the document\n with shape `[batch, num_sentences]`, used\n for finding sentence boundaries in\n documents_features.\n\n Returns:\n logits: Scores for each sentence with shape\n `[batch, num_sentences]`.\n ' batch_size = tf.shape(documents_features)[0] length = tf.shape(documents_features)[1] features = documents_features.get_shape()[(- 1)].value num_sentences = tf.shape(sentence_lengths)[1] sentence_start_positions = tf.cumsum(sentence_lengths, axis=1, exclusive=True) sentence_end_positions = (tf.cumsum(sentence_lengths, axis=1) - 1) offsets = (length * tf.expand_dims(tf.range(batch_size), 1)) sentence_start_positions = tf.reshape((sentence_start_positions + offsets), [(- 1)]) sentence_end_positions = tf.reshape((sentence_end_positions + offsets), [(- 1)]) documents_features = tf.reshape(documents_features, [(- 1), features]) forward_features = documents_features[(:, :(features // 2))] forward_states = tf.gather(forward_features, sentence_end_positions) backward_features = documents_features[(:, (features // 2):)] backward_states = tf.gather(backward_features, sentence_start_positions) sentence_states = tf.concat(axis=1, values=[forward_states, backward_states]) sentence_states = tf.reshape(sentence_states, [batch_size, num_sentences, features]) with tf.variable_scope('SentenceSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_states, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [2])
def score_sentences(model, documents_features, sentence_lengths, hidden_dropout): 'Compute logits for selecting each sentence in the document.\n\n Arguments:\n documents_features: Feature representation of the document\n with shape `[batch, length, features]`.\n sentence_lengths: Length of each sentence in the document\n with shape `[batch, num_sentences]`, used\n for finding sentence boundaries in\n documents_features.\n\n Returns:\n logits: Scores for each sentence with shape\n `[batch, num_sentences]`.\n ' batch_size = tf.shape(documents_features)[0] length = tf.shape(documents_features)[1] features = documents_features.get_shape()[(- 1)].value num_sentences = tf.shape(sentence_lengths)[1] sentence_start_positions = tf.cumsum(sentence_lengths, axis=1, exclusive=True) sentence_end_positions = (tf.cumsum(sentence_lengths, axis=1) - 1) offsets = (length * tf.expand_dims(tf.range(batch_size), 1)) sentence_start_positions = tf.reshape((sentence_start_positions + offsets), [(- 1)]) sentence_end_positions = tf.reshape((sentence_end_positions + offsets), [(- 1)]) documents_features = tf.reshape(documents_features, [(- 1), features]) forward_features = documents_features[(:, :(features // 2))] forward_states = tf.gather(forward_features, sentence_end_positions) backward_features = documents_features[(:, (features // 2):)] backward_states = tf.gather(backward_features, sentence_start_positions) sentence_states = tf.concat(axis=1, values=[forward_states, backward_states]) sentence_states = tf.reshape(sentence_states, [batch_size, num_sentences, features]) with tf.variable_scope('SentenceSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_states, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [2])<|docstring|>Compute logits for selecting each sentence in the document. Arguments: documents_features: Feature representation of the document with shape `[batch, length, features]`. 
sentence_lengths: Length of each sentence in the document with shape `[batch, num_sentences]`, used for finding sentence boundaries in documents_features. Returns: logits: Scores for each sentence with shape `[batch, num_sentences]`.<|endoftext|>
e8bba8c1345c309b7fe878b906f4e8a299383448ea66e47d387a92e858ee9f6a
def slice_sentences(document_features, picks, sentence_lengths): 'Extract selected sentence spans from the document features.\n\n Arguments:\n document_features: A `[batch, length, features]` representation\n of the documents.\n picks: Sentence to extract with shape\n `[batch, selections]`.\n sentence_lengths: Length of each sentence in the document with shape\n `[batch, num_sentences]`.\n\n Returns extracted features for each selected sentence as a tensor with shape\n `[batch, selections, max_sentence_len, features]`\n ' sentence_offsets = tf.cumsum(sentence_lengths, axis=1, exclusive=True) starts = ops.gather_from_rows(sentence_offsets, picks) lengths = ops.gather_from_rows(sentence_lengths, picks) sentence_embeddings = ops.slice_fragments(document_features, starts, lengths) return sentence_embeddings
Extract selected sentence spans from the document features. Arguments: document_features: A `[batch, length, features]` representation of the documents. picks: Sentence to extract with shape `[batch, selections]`. sentence_lengths: Length of each sentence in the document with shape `[batch, num_sentences]`. Returns extracted features for each selected sentence as a tensor with shape `[batch, selections, max_sentence_len, features]`
gnr.py
slice_sentences
baidu-research/GloballyNormalizedReader
73
python
def slice_sentences(document_features, picks, sentence_lengths): 'Extract selected sentence spans from the document features.\n\n Arguments:\n document_features: A `[batch, length, features]` representation\n of the documents.\n picks: Sentence to extract with shape\n `[batch, selections]`.\n sentence_lengths: Length of each sentence in the document with shape\n `[batch, num_sentences]`.\n\n Returns extracted features for each selected sentence as a tensor with shape\n `[batch, selections, max_sentence_len, features]`\n ' sentence_offsets = tf.cumsum(sentence_lengths, axis=1, exclusive=True) starts = ops.gather_from_rows(sentence_offsets, picks) lengths = ops.gather_from_rows(sentence_lengths, picks) sentence_embeddings = ops.slice_fragments(document_features, starts, lengths) return sentence_embeddings
def slice_sentences(document_features, picks, sentence_lengths): 'Extract selected sentence spans from the document features.\n\n Arguments:\n document_features: A `[batch, length, features]` representation\n of the documents.\n picks: Sentence to extract with shape\n `[batch, selections]`.\n sentence_lengths: Length of each sentence in the document with shape\n `[batch, num_sentences]`.\n\n Returns extracted features for each selected sentence as a tensor with shape\n `[batch, selections, max_sentence_len, features]`\n ' sentence_offsets = tf.cumsum(sentence_lengths, axis=1, exclusive=True) starts = ops.gather_from_rows(sentence_offsets, picks) lengths = ops.gather_from_rows(sentence_lengths, picks) sentence_embeddings = ops.slice_fragments(document_features, starts, lengths) return sentence_embeddings<|docstring|>Extract selected sentence spans from the document features. Arguments: document_features: A `[batch, length, features]` representation of the documents. picks: Sentence to extract with shape `[batch, selections]`. sentence_lengths: Length of each sentence in the document with shape `[batch, num_sentences]`. Returns extracted features for each selected sentence as a tensor with shape `[batch, selections, max_sentence_len, features]`<|endoftext|>
67dc8839cfed7df11d699842f679560dd1c3f152deb2b544f8fe988a91a3e556
def slice_end_of_sentence(sentence_features, start_word_picks, sentence_lengths): 'Extract the final span of each sentence after the selected\n starting words.\n\n Arguments:\n sentence_features: Sentence representation with shape\n `[batch, k, words, features]`.\n start_word_picks: Starting word selections with shape\n `[batch, k]`.\n sentence_lengths: Length of each sentence in\n sentence_features of shape `[batch, k]`.\n Used for masking.\n\n Returns extracted sentence spans with shape\n `[batch, k, max_fragment_len, features]`.\n ' fragment_lengths = (sentence_lengths - start_word_picks) beams = tf.shape(sentence_features)[1] words = tf.shape(sentence_features)[2] sentence_features = tf.reshape(sentence_features, [tf.shape(sentence_features)[0], (beams * words), sentence_features.get_shape()[(- 1)].value]) start_word_picks += (words * tf.expand_dims(tf.range(beams), 0)) sentence_fragments = ops.slice_fragments(sentence_features, start_word_picks, fragment_lengths) return sentence_fragments
Extract the final span of each sentence after the selected starting words. Arguments: sentence_features: Sentence representation with shape `[batch, k, words, features]`. start_word_picks: Starting word selections with shape `[batch, k]`. sentence_lengths: Length of each sentence in sentence_features of shape `[batch, k]`. Used for masking. Returns extracted sentence spans with shape `[batch, k, max_fragment_len, features]`.
gnr.py
slice_end_of_sentence
baidu-research/GloballyNormalizedReader
73
python
def slice_end_of_sentence(sentence_features, start_word_picks, sentence_lengths): 'Extract the final span of each sentence after the selected\n starting words.\n\n Arguments:\n sentence_features: Sentence representation with shape\n `[batch, k, words, features]`.\n start_word_picks: Starting word selections with shape\n `[batch, k]`.\n sentence_lengths: Length of each sentence in\n sentence_features of shape `[batch, k]`.\n Used for masking.\n\n Returns extracted sentence spans with shape\n `[batch, k, max_fragment_len, features]`.\n ' fragment_lengths = (sentence_lengths - start_word_picks) beams = tf.shape(sentence_features)[1] words = tf.shape(sentence_features)[2] sentence_features = tf.reshape(sentence_features, [tf.shape(sentence_features)[0], (beams * words), sentence_features.get_shape()[(- 1)].value]) start_word_picks += (words * tf.expand_dims(tf.range(beams), 0)) sentence_fragments = ops.slice_fragments(sentence_features, start_word_picks, fragment_lengths) return sentence_fragments
def slice_end_of_sentence(sentence_features, start_word_picks, sentence_lengths): 'Extract the final span of each sentence after the selected\n starting words.\n\n Arguments:\n sentence_features: Sentence representation with shape\n `[batch, k, words, features]`.\n start_word_picks: Starting word selections with shape\n `[batch, k]`.\n sentence_lengths: Length of each sentence in\n sentence_features of shape `[batch, k]`.\n Used for masking.\n\n Returns extracted sentence spans with shape\n `[batch, k, max_fragment_len, features]`.\n ' fragment_lengths = (sentence_lengths - start_word_picks) beams = tf.shape(sentence_features)[1] words = tf.shape(sentence_features)[2] sentence_features = tf.reshape(sentence_features, [tf.shape(sentence_features)[0], (beams * words), sentence_features.get_shape()[(- 1)].value]) start_word_picks += (words * tf.expand_dims(tf.range(beams), 0)) sentence_fragments = ops.slice_fragments(sentence_features, start_word_picks, fragment_lengths) return sentence_fragments<|docstring|>Extract the final span of each sentence after the selected starting words. Arguments: sentence_features: Sentence representation with shape `[batch, k, words, features]`. start_word_picks: Starting word selections with shape `[batch, k]`. sentence_lengths: Length of each sentence in sentence_features of shape `[batch, k]`. Used for masking. Returns extracted sentence spans with shape `[batch, k, max_fragment_len, features]`.<|endoftext|>
2018a62cb51c8b3b50fefd800c72193642e9bf21ce5a7afbfae0b402ac8a07dd
def score_start_word(model, document_embeddings, sentence_picks, sentence_lengths, hidden_dropout): 'Score each possible span spart word in a sentence by\n passing it through an MLP.\n\n Arguments:\n model: QA model hyperparameters.\n document_embeddings: Document representation with shape\n `[batch, length, features]`.\n sentence_picks: Selected sentences with shape\n `[batch, beam_size]`.\n sentence_lengths: Lengths of each sentence in the document\n with shape `[batch, num_sentences]`.\n\n Returns:\n logits: [batch, beam_size, length] scores for each start\n word in the beam, where `length` is the maximum\n `length` of a selected sentence.\n ' sentence_embeddings = slice_sentences(document_embeddings, sentence_picks, sentence_lengths) with tf.variable_scope('StartWordSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_embeddings, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [(- 1)])
Score each possible span spart word in a sentence by passing it through an MLP. Arguments: model: QA model hyperparameters. document_embeddings: Document representation with shape `[batch, length, features]`. sentence_picks: Selected sentences with shape `[batch, beam_size]`. sentence_lengths: Lengths of each sentence in the document with shape `[batch, num_sentences]`. Returns: logits: [batch, beam_size, length] scores for each start word in the beam, where `length` is the maximum `length` of a selected sentence.
gnr.py
score_start_word
baidu-research/GloballyNormalizedReader
73
python
def score_start_word(model, document_embeddings, sentence_picks, sentence_lengths, hidden_dropout): 'Score each possible span spart word in a sentence by\n passing it through an MLP.\n\n Arguments:\n model: QA model hyperparameters.\n document_embeddings: Document representation with shape\n `[batch, length, features]`.\n sentence_picks: Selected sentences with shape\n `[batch, beam_size]`.\n sentence_lengths: Lengths of each sentence in the document\n with shape `[batch, num_sentences]`.\n\n Returns:\n logits: [batch, beam_size, length] scores for each start\n word in the beam, where `length` is the maximum\n `length` of a selected sentence.\n ' sentence_embeddings = slice_sentences(document_embeddings, sentence_picks, sentence_lengths) with tf.variable_scope('StartWordSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_embeddings, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [(- 1)])
def score_start_word(model, document_embeddings, sentence_picks, sentence_lengths, hidden_dropout): 'Score each possible span spart word in a sentence by\n passing it through an MLP.\n\n Arguments:\n model: QA model hyperparameters.\n document_embeddings: Document representation with shape\n `[batch, length, features]`.\n sentence_picks: Selected sentences with shape\n `[batch, beam_size]`.\n sentence_lengths: Lengths of each sentence in the document\n with shape `[batch, num_sentences]`.\n\n Returns:\n logits: [batch, beam_size, length] scores for each start\n word in the beam, where `length` is the maximum\n `length` of a selected sentence.\n ' sentence_embeddings = slice_sentences(document_embeddings, sentence_picks, sentence_lengths) with tf.variable_scope('StartWordSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(sentence_embeddings, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [(- 1)])<|docstring|>Score each possible span spart word in a sentence by passing it through an MLP. Arguments: model: QA model hyperparameters. document_embeddings: Document representation with shape `[batch, length, features]`. sentence_picks: Selected sentences with shape `[batch, beam_size]`. sentence_lengths: Lengths of each sentence in the document with shape `[batch, num_sentences]`. Returns: logits: [batch, beam_size, length] scores for each start word in the beam, where `length` is the maximum `length` of a selected sentence.<|endoftext|>
fa12ef441529cfb51affe7c0ce563782df8cdb47348ecc35ec36e97d3b26c8b6
def score_end_words(model, document_embeddings, sentence_picks,
                    start_word_picks, sentence_lengths, hidden_dropout,
                    training):
    """Score every candidate span end word for each beam entry.

    Runs a Bi-LSTM over the remainder of each selected sentence
    (from the chosen start word onward) and projects the hidden
    states through an MLP to a single score per word.

    Arguments:
        model: QA model hyperparameters.
        document_embeddings: `[batch, length, features]` document
            representation.
        sentence_picks: `[batch, k]` indices of selected sentences.
        start_word_picks: `[batch, k]` start-word indices within
            each selected sentence.
        sentence_lengths: `[batch, num_sentences]` sentence lengths.
        hidden_dropout: Dropout keep-probability for the MLP input.
        training: Whether the graph is built in training mode.

    Returns scores with shape `[batch, k, max-span-length]`.
    """
    # Embeddings and lengths of only the beam's selected sentences.
    picked_embeddings = slice_sentences(
        document_embeddings, sentence_picks, sentence_lengths)
    picked_lengths = ops.gather_from_rows(sentence_lengths, sentence_picks)
    # Trailing sentence fragments starting at each start-word pick.
    fragments = slice_end_of_sentence(
        picked_embeddings, start_word_picks, picked_lengths)

    # Flatten batch and beam so the LSTM sees a rank-3 input.
    dims = tf.shape(fragments)
    n_batch, n_beam, n_frag = dims[0], dims[1], dims[2]
    n_features = fragments.get_shape()[-1].value
    flat_fragments = tf.reshape(
        fragments, [n_batch * n_beam, n_frag, n_features])

    with tf.variable_scope('PickEndWordLSTMs'):
        hiddens, _, _ = ops.cudnn_lstm(
            flat_fragments, model.pick_end_word_layers, model.layer_size,
            model.weight_noise, training)
    # Restore the [batch, beam, frag, hidden] layout.
    hiddens = tf.reshape(
        hiddens, [n_batch, n_beam, n_frag, hiddens.get_shape()[-1].value])

    with tf.variable_scope('EndWordSelection'):
        logits = tf.contrib.layers.fully_connected(
            inputs=tf.nn.dropout(hiddens, hidden_dropout),
            num_outputs=1,
            activation_fn=None)
    return tf.squeeze(logits, [-1])
Score each possible span end word in the sentence by running a Bi-LSTM over the remaining sentence span and passing the result through an MLP. Arguments: model: QA model hyperparameters document_embeddings: A `[batch, length, features]` representation of a document. sentence_picks: Index of selected sentences in the document with shape `[batch, k]`. start_word_picks: Index of start words in each selected sentence with shape `[batch, k]`. sentence_lengths: Length of each sentence with shape `[batch, num_sentences]`. Returns scores for each possible span end word with shape `[batch, k, max-span-length]`.
gnr.py
score_end_words
baidu-research/GloballyNormalizedReader
73
python
def score_end_words(model, document_embeddings, sentence_picks, start_word_picks, sentence_lengths, hidden_dropout, training): 'Score each possible span end word in the sentence by\n running a Bi-LSTM over the remaining sentence span and\n passing the result through an MLP.\n\n Arguments:\n model: QA model hyperparameters\n document_embeddings: A `[batch, length, features]`\n representation of a document.\n sentence_picks: Index of selected sentences in\n the document with shape `[batch, k]`.\n start_word_picks: Index of start words in each selected\n sentence with shape `[batch, k]`.\n sentence_lengths: Length of each sentence with shape\n `[batch, num_sentences]`.\n\n Returns scores for each possible span end word with shape\n `[batch, k, max-span-length]`.\n ' sentence_embeddings = slice_sentences(document_embeddings, sentence_picks, sentence_lengths) picked_sentence_lengths = ops.gather_from_rows(sentence_lengths, sentence_picks) sentence_fragments = slice_end_of_sentence(sentence_embeddings, start_word_picks, picked_sentence_lengths) batch = tf.shape(sentence_fragments)[0] beam = tf.shape(sentence_fragments)[1] frag_len = tf.shape(sentence_fragments)[2] features = sentence_fragments.get_shape()[(- 1)].value sentence_fragments = tf.reshape(sentence_fragments, [(batch * beam), frag_len, features]) with tf.variable_scope('PickEndWordLSTMs'): (hiddens, _, _) = ops.cudnn_lstm(sentence_fragments, model.pick_end_word_layers, model.layer_size, model.weight_noise, training) hiddens = tf.reshape(hiddens, [batch, beam, frag_len, hiddens.get_shape()[(- 1)].value]) with tf.variable_scope('EndWordSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(hiddens, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [(- 1)])
def score_end_words(model, document_embeddings, sentence_picks, start_word_picks, sentence_lengths, hidden_dropout, training): 'Score each possible span end word in the sentence by\n running a Bi-LSTM over the remaining sentence span and\n passing the result through an MLP.\n\n Arguments:\n model: QA model hyperparameters\n document_embeddings: A `[batch, length, features]`\n representation of a document.\n sentence_picks: Index of selected sentences in\n the document with shape `[batch, k]`.\n start_word_picks: Index of start words in each selected\n sentence with shape `[batch, k]`.\n sentence_lengths: Length of each sentence with shape\n `[batch, num_sentences]`.\n\n Returns scores for each possible span end word with shape\n `[batch, k, max-span-length]`.\n ' sentence_embeddings = slice_sentences(document_embeddings, sentence_picks, sentence_lengths) picked_sentence_lengths = ops.gather_from_rows(sentence_lengths, sentence_picks) sentence_fragments = slice_end_of_sentence(sentence_embeddings, start_word_picks, picked_sentence_lengths) batch = tf.shape(sentence_fragments)[0] beam = tf.shape(sentence_fragments)[1] frag_len = tf.shape(sentence_fragments)[2] features = sentence_fragments.get_shape()[(- 1)].value sentence_fragments = tf.reshape(sentence_fragments, [(batch * beam), frag_len, features]) with tf.variable_scope('PickEndWordLSTMs'): (hiddens, _, _) = ops.cudnn_lstm(sentence_fragments, model.pick_end_word_layers, model.layer_size, model.weight_noise, training) hiddens = tf.reshape(hiddens, [batch, beam, frag_len, hiddens.get_shape()[(- 1)].value]) with tf.variable_scope('EndWordSelection'): logits = tf.contrib.layers.fully_connected(inputs=tf.nn.dropout(hiddens, hidden_dropout), num_outputs=1, activation_fn=None) return tf.squeeze(logits, [(- 1)])<|docstring|>Score each possible span end word in the sentence by running a Bi-LSTM over the remaining sentence span and passing the result through an MLP. 
Arguments: model: QA model hyperparameters document_embeddings: A `[batch, length, features]` representation of a document. sentence_picks: Index of selected sentences in the document with shape `[batch, k]`. start_word_picks: Index of start words in each selected sentence with shape `[batch, k]`. sentence_lengths: Length of each sentence with shape `[batch, num_sentences]`. Returns scores for each possible span end word with shape `[batch, k, max-span-length]`.<|endoftext|>
cb8093731a39e10e2bc1f1d13c5f4b3f67f35eb477b5cb108a035b932a523656
def globally_normalized_loss(beam_states, labels):
    """Globally normalized loss with early updating.

    Arguments:
        beam_states: List of (previous decisions, current decision scores)
            for each step in the search process, as well as the final
            beam. Previous decisions are tensors of indices with shape
            `[batch, beam_size]` corresponding to the selection at
            previous time steps and scores has shape
            `[batch, beam_size, classes]`.
        labels: List of correct labels at each step in the search process,
            each with shape `[batch]`.

    Returns scalar loss averaged across each example in the batch.
    """
    loss, loss_mask = 0.0, 1.0
    # Walk the stages from the final beam back to the first decision.
    # `loss_mask` implements early updating: once a stage is found where
    # the gold path survived on the beam, earlier stages stop contributing.
    for prev_decisions, scores in reversed(beam_states):
        batch = tf.shape(scores)[0]
        beam = tf.shape(scores)[1]
        # `correct[b, j]` is True when beam entry j's decision prefix
        # matches the gold labels for example b.
        correct = tf.cast(tf.ones((batch, beam)), tf.bool)
        for decision, label in zip(prev_decisions, labels):
            correct = tf.logical_and(
                correct, tf.equal(tf.expand_dims(label, 1), decision))
        correct_mask = tf.cast(correct, tf.float32)
        # True per example when the gold path is still on the beam.
        any_correct = tf.cast(tf.reduce_sum(correct_mask, axis=1), tf.bool)
        if len(prev_decisions) == len(labels):
            # Final beam: softmax over beam entries; the target is the
            # entry holding the fully-correct decision sequence.
            targets = tf.argmax(correct_mask, axis=1)
            stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=targets, logits=scores)
            stage_loss = tf.where(
                any_correct, stage_loss, tf.zeros_like(stage_loss))
        else:
            # Intermediate stage: softmax over this stage's classes for
            # every beam entry, but only entries with a correct prefix
            # contribute (others are zeroed by `correct_mask`).
            targets = labels[len(prev_decisions)]
            targets = tf.reshape(
                tf.tile(tf.expand_dims(targets, 1), [1, beam]), [-1])
            # Clamp in case the label exceeds this stage's class count.
            targets = tf.minimum(targets, tf.shape(scores)[2] - 1)
            scores = tf.reshape(scores, [batch * beam, -1])
            stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=targets, logits=scores)
            stage_loss = tf.reshape(stage_loss, [batch, beam])
            stage_loss = tf.where(
                tf.cast(correct_mask, tf.bool), stage_loss,
                tf.zeros_like(stage_loss))
            stage_loss = tf.reduce_sum(stage_loss, axis=1)
        loss += loss_mask * stage_loss
        # Disable earlier (in reversed order: later-processed) stages once
        # a stage with the gold path on the beam has been scored.
        loss_mask *= 1.0 - tf.cast(any_correct, tf.float32)
    return tf.reduce_mean(loss)
Global normalized loss with early updating. Arguments: beam_states: List of previous decisions and current decision scores for each step in the search process, as well as the final beam. Previous decisions are a tensor of indices with shape `[batch, beam_size]` corresponding to the selection at previous time steps and scores has shape `[batch, beam_size, classes]`. labels: List of correct labels at each step in the search process, each with shape `[batch]`. Returns scalar loss averaged across each example in the batch.
gnr.py
globally_normalized_loss
baidu-research/GloballyNormalizedReader
73
python
def globally_normalized_loss(beam_states, labels): 'Global normalized loss with early updating.\n\n Arguments:\n beam_states: List of previous decisions and current decision scores\n for each step in the search process, as well as the final\n beam. Previous decisions are a tensor of indices with shape\n `[batch, beam_size]` corresponding to the selection at\n previous time steps and scores has shape\n `[batch, beam_size, classes]`.\n labels: List of correct labels at each step in the search process,\n each with shape `[batch]`.\n\n Returns scalar loss averaged across each example in the batch.\n ' (loss, loss_mask) = (0.0, 1.0) for (i, (prev_decisions, scores)) in enumerate(reversed(beam_states)): batch = tf.shape(scores)[0] beam = tf.shape(scores)[1] correct = tf.cast(tf.ones((batch, beam)), tf.bool) for (decision, label) in zip(prev_decisions, labels): correct = tf.logical_and(correct, tf.equal(tf.expand_dims(label, 1), decision)) correct_mask = tf.cast(correct, tf.float32) any_correct = tf.cast(tf.reduce_sum(correct_mask, axis=1), tf.bool) if (len(prev_decisions) == len(labels)): targets = tf.argmax(correct_mask, axis=1) stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=scores) stage_loss = tf.where(any_correct, stage_loss, tf.zeros_like(stage_loss)) else: targets = labels[len(prev_decisions)] targets = tf.reshape(tf.tile(tf.expand_dims(targets, 1), [1, beam]), [(- 1)]) targets = tf.minimum(targets, (tf.shape(scores)[2] - 1)) scores = tf.reshape(scores, [(batch * beam), (- 1)]) stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=scores) stage_loss = tf.reshape(stage_loss, [batch, beam]) stage_loss = tf.where(tf.cast(correct_mask, tf.bool), stage_loss, tf.zeros_like(stage_loss)) stage_loss = tf.reduce_sum(stage_loss, axis=1) loss += (loss_mask * stage_loss) loss_mask *= (1.0 - tf.cast(any_correct, tf.float32)) return tf.reduce_mean(loss)
def globally_normalized_loss(beam_states, labels): 'Global normalized loss with early updating.\n\n Arguments:\n beam_states: List of previous decisions and current decision scores\n for each step in the search process, as well as the final\n beam. Previous decisions are a tensor of indices with shape\n `[batch, beam_size]` corresponding to the selection at\n previous time steps and scores has shape\n `[batch, beam_size, classes]`.\n labels: List of correct labels at each step in the search process,\n each with shape `[batch]`.\n\n Returns scalar loss averaged across each example in the batch.\n ' (loss, loss_mask) = (0.0, 1.0) for (i, (prev_decisions, scores)) in enumerate(reversed(beam_states)): batch = tf.shape(scores)[0] beam = tf.shape(scores)[1] correct = tf.cast(tf.ones((batch, beam)), tf.bool) for (decision, label) in zip(prev_decisions, labels): correct = tf.logical_and(correct, tf.equal(tf.expand_dims(label, 1), decision)) correct_mask = tf.cast(correct, tf.float32) any_correct = tf.cast(tf.reduce_sum(correct_mask, axis=1), tf.bool) if (len(prev_decisions) == len(labels)): targets = tf.argmax(correct_mask, axis=1) stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=scores) stage_loss = tf.where(any_correct, stage_loss, tf.zeros_like(stage_loss)) else: targets = labels[len(prev_decisions)] targets = tf.reshape(tf.tile(tf.expand_dims(targets, 1), [1, beam]), [(- 1)]) targets = tf.minimum(targets, (tf.shape(scores)[2] - 1)) scores = tf.reshape(scores, [(batch * beam), (- 1)]) stage_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=scores) stage_loss = tf.reshape(stage_loss, [batch, beam]) stage_loss = tf.where(tf.cast(correct_mask, tf.bool), stage_loss, tf.zeros_like(stage_loss)) stage_loss = tf.reduce_sum(stage_loss, axis=1) loss += (loss_mask * stage_loss) loss_mask *= (1.0 - tf.cast(any_correct, tf.float32)) return tf.reduce_mean(loss)<|docstring|>Global normalized loss with early updating. 
Arguments: beam_states: List of previous decisions and current decision scores for each step in the search process, as well as the final beam. Previous decisions are a tensor of indices with shape `[batch, beam_size]` corresponding to the selection at previous time steps and scores has shape `[batch, beam_size, classes]`. labels: List of correct labels at each step in the search process, each with shape `[batch]`. Returns scalar loss averaged across each example in the batch.<|endoftext|>
2ef4ef615aac27c185bcff559d33c9e8c87760c4ada2f234b6b85b30958ebb80
def build_model(model):
    """Build a Tensorflow graph for the QA model.

    The graph embeds the question and document, then runs a three-stage
    beam search -- pick sentence, pick answer start word, pick answer end
    word -- trained with a globally normalized loss with early updating.

    Arguments:
        model: QA model hyperparameters.

    Returns a model.Model for training, evaluation, etc.
    """
    with tf.name_scope('Inputs'):
        questions = tf.placeholder(
            tf.int32, name='Questions', shape=[None, None])
        documents = tf.placeholder(
            tf.int32, name='Documents', shape=[None, None])
        same_as_question_feature = tf.placeholder(
            tf.float32, name='SameAsQuestionFeature', shape=[None, None])
        repeated_words = tf.placeholder(
            tf.float32, name='RepeatedWordFeature', shape=[None, None])
        repeated_word_intensity = tf.placeholder(
            tf.float32, name='RepeatedWordIntensity', shape=[None, None])
        sentence_lengths = tf.placeholder(
            tf.int32, name='SentenceOffsets', shape=[None, None])
        sentence_labels = tf.placeholder(
            tf.int32, name='SentenceLabels', shape=[None])
        word_start_labels = tf.placeholder(
            tf.int32, name='WordStartLabels', shape=[None])
        word_end_labels = tf.placeholder(
            tf.int32, name='WordEndLabels', shape=[None])
        # Dropout placeholders default to the training probabilities;
        # feed different values at evaluation time.
        embedding_dropout = tf.placeholder_with_default(
            model.embedding_dropout_prob, shape=[])
        hidden_dropout = tf.placeholder_with_default(
            model.hidden_dropout_prob, shape=[])
        training = tf.placeholder_with_default(
            True, shape=[], name='TrainingIndicator')
        # External SQuAD evaluation metrics, fed in for summary logging.
        exact_match = tf.placeholder(tf.float32, name='ExactMatch', shape=[])
        f1 = tf.placeholder(tf.float32, name='F1', shape=[])

    with tf.variable_scope('GloveEmbeddings'):
        # Frozen GloVe table, populated via `embedding_init`.
        embeddings = tf.get_variable(
            shape=[model.vocab_size, EMBEDDING_DIM],
            initializer=tf.zeros_initializer(),
            trainable=False, name='GloveEmbeddings')
        embedding_placeholder = tf.placeholder(
            tf.float32, [model.vocab_size, EMBEDDING_DIM])
        embedding_init = embeddings.assign(embedding_placeholder)

    with tf.name_scope('QuestionEmbeddings'):
        question_vector = featurize_question(
            model, questions, embedding_dropout, training)
    with tf.name_scope('DocumentEmbeddings'):
        document_embeddings = featurize_document(
            model, questions, documents, same_as_question_feature,
            repeated_words, repeated_word_intensity, question_vector,
            embedding_dropout, training)

    # (previous decisions, stage scores) recorded at each search stage,
    # consumed by the globally normalized loss.
    beam_states = []
    with tf.name_scope('PickSentence'):
        sentence_scores = score_sentences(
            model, document_embeddings, sentence_lengths, hidden_dropout)
        beam_states.append(([], tf.expand_dims(sentence_scores, 1)))
        beam_scores, sentence_picks = tf.nn.top_k(
            sentence_scores,
            k=tf.minimum(model.beam_size, tf.shape(sentence_scores)[1]),
            sorted=True)
        sentence_correct = tf.reduce_mean(tf.cast(
            tf.equal(sentence_labels, sentence_picks[:, 0]), tf.float32))

    with tf.name_scope('PickStartWord'):
        start_word_scores = score_start_word(
            model, document_embeddings, sentence_picks, sentence_lengths,
            hidden_dropout)
        beam_scores = tf.expand_dims(beam_scores, 2) + start_word_scores
        beam_states.append(([sentence_picks], beam_scores))
        beam_scores, kept_sentences, start_words = ops.prune_beam(
            beam_scores, sentence_picks, model.beam_size)
        start_word_correct = tf.reduce_mean(tf.cast(
            tf.logical_and(
                tf.equal(word_start_labels, start_words[:, 0]),
                tf.equal(sentence_labels, kept_sentences[:, 0])),
            tf.float32))

    with tf.name_scope('PickEndWord'):
        end_word_scores = score_end_words(
            model, document_embeddings, kept_sentences, start_words,
            sentence_lengths, hidden_dropout, training)
        beam_scores = tf.expand_dims(beam_scores, 2) + end_word_scores
        beam_states.append(([kept_sentences, start_words], beam_scores))
        beam_scores, (kept_sentences, kept_start_words), end_words = \
            ops.prune_beam(
                beam_scores, [kept_sentences, start_words], model.beam_size)
        beam_states.append(
            ([kept_sentences, kept_start_words, end_words], beam_scores))
        # End-word scores are offsets relative to the chosen start word.
        end_word_picks = kept_start_words + end_words
        end_word_correct = tf.reduce_mean(tf.cast(
            tf.logical_and(
                tf.logical_and(
                    tf.equal(word_end_labels, end_word_picks[:, 0]),
                    tf.equal(word_start_labels, kept_start_words[:, 0])),
                tf.equal(sentence_labels, kept_sentences[:, 0])),
            tf.float32))

    with tf.name_scope('Loss'):
        # The end-word stage is trained on offsets from the start word.
        end_labels = word_end_labels - word_start_labels
        labels = (sentence_labels, word_start_labels, end_labels)
        loss = globally_normalized_loss(beam_states, labels)
        l2_penalty = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(model.l2_scale),
            tf.trainable_variables())
        loss += l2_penalty

    with tf.name_scope('TrainStep'):
        iteration, (step, loss, gradnorm) = ops.default_train_step(
            model, loss)

    with tf.name_scope('TrainSummary'):
        train_summary = ops.scalar_summaries({
            'Train-Loss': loss,
            'Gradient-Norm': gradnorm,
            'Sentence-Correct': sentence_correct,
            'Start-Word-Correct': start_word_correct,
            'End-Word-Correct': end_word_correct})
    with tf.name_scope('ValidSummary'):
        valid_summary = ops.scalar_summaries({
            'Validation-Loss': loss,
            'Sentence-Correct': sentence_correct,
            'Start-Word-Correct': start_word_correct,
            'End-Word-Correct': end_word_correct})
    with tf.name_scope('SquadSummary'):
        squad_summary = ops.scalar_summaries({
            'Exact-Match': exact_match,
            'F1': f1})

    return Model(
        inputs=[questions, documents, same_as_question_feature,
                repeated_words, repeated_word_intensity, sentence_lengths,
                sentence_labels, word_start_labels, word_end_labels],
        outputs=[kept_sentences, kept_start_words, end_word_picks,
                 sentence_correct, start_word_correct, end_word_correct],
        loss=loss,
        training=training,
        dropout=[embedding_dropout, hidden_dropout],
        gradnorm=gradnorm,
        step=step,
        iteration=iteration,
        train_summary=train_summary,
        valid_summary=valid_summary,
        embedding_init=embedding_init,
        embedding_placeholder=embedding_placeholder,
        squad_summary=squad_summary,
        squad_inputs=[exact_match, f1])
Build a Tensorflow graph for the QA model. Return a model.Model for training, evaluation, etc.
gnr.py
build_model
baidu-research/GloballyNormalizedReader
73
python
def build_model(model): 'Build a Tensorflow graph for the QA model.\n Return a model.Model for training, evaluation, etc.\n ' with tf.name_scope('Inputs'): questions = tf.placeholder(tf.int32, name='Questions', shape=[None, None]) documents = tf.placeholder(tf.int32, name='Documents', shape=[None, None]) same_as_question_feature = tf.placeholder(tf.float32, name='SameAsQuestionFeature', shape=[None, None]) repeated_words = tf.placeholder(tf.float32, name='RepeatedWordFeature', shape=[None, None]) repeated_word_intensity = tf.placeholder(tf.float32, name='RepeatedWordIntensity', shape=[None, None]) sentence_lengths = tf.placeholder(tf.int32, name='SentenceOffsets', shape=[None, None]) sentence_labels = tf.placeholder(tf.int32, name='SentenceLabels', shape=[None]) word_start_labels = tf.placeholder(tf.int32, name='WordStartLabels', shape=[None]) word_end_labels = tf.placeholder(tf.int32, name='WordEndLabels', shape=[None]) embedding_dropout = tf.placeholder_with_default(model.embedding_dropout_prob, shape=[]) hidden_dropout = tf.placeholder_with_default(model.hidden_dropout_prob, shape=[]) training = tf.placeholder_with_default(True, shape=[], name='TrainingIndicator') exact_match = tf.placeholder(tf.float32, name='ExactMatch', shape=[]) f1 = tf.placeholder(tf.float32, name='F1', shape=[]) with tf.variable_scope('GloveEmbeddings'): embeddings = tf.get_variable(shape=[model.vocab_size, EMBEDDING_DIM], initializer=tf.zeros_initializer(), trainable=False, name='GloveEmbeddings') embedding_placeholder = tf.placeholder(tf.float32, [model.vocab_size, EMBEDDING_DIM]) embedding_init = embeddings.assign(embedding_placeholder) with tf.name_scope('QuestionEmbeddings'): question_vector = featurize_question(model, questions, embedding_dropout, training) with tf.name_scope('DocumentEmbeddings'): document_embeddings = featurize_document(model, questions, documents, same_as_question_feature, repeated_words, repeated_word_intensity, question_vector, embedding_dropout, training) 
beam_states = [] with tf.name_scope('PickSentence'): sentence_scores = score_sentences(model, document_embeddings, sentence_lengths, hidden_dropout) beam_states.append(([], tf.expand_dims(sentence_scores, 1))) (beam_scores, sentence_picks) = tf.nn.top_k(sentence_scores, k=tf.minimum(model.beam_size, tf.shape(sentence_scores)[1]), sorted=True) sentence_correct = tf.reduce_mean(tf.cast(tf.equal(sentence_labels, sentence_picks[(:, 0)]), tf.float32)) with tf.name_scope('PickStartWord'): start_word_scores = score_start_word(model, document_embeddings, sentence_picks, sentence_lengths, hidden_dropout) beam_scores = (tf.expand_dims(beam_scores, 2) + start_word_scores) beam_states.append(([sentence_picks], beam_scores)) (beam_scores, kept_sentences, start_words) = ops.prune_beam(beam_scores, sentence_picks, model.beam_size) start_word_correct = tf.reduce_mean(tf.cast(tf.logical_and(tf.equal(word_start_labels, start_words[(:, 0)]), tf.equal(sentence_labels, kept_sentences[(:, 0)])), tf.float32)) with tf.name_scope('PickEndWord'): end_word_scores = score_end_words(model, document_embeddings, kept_sentences, start_words, sentence_lengths, hidden_dropout, training) beam_scores = (tf.expand_dims(beam_scores, 2) + end_word_scores) beam_states.append(([kept_sentences, start_words], beam_scores)) (beam_scores, (kept_sentences, kept_start_words), end_words) = ops.prune_beam(beam_scores, [kept_sentences, start_words], model.beam_size) beam_states.append(([kept_sentences, kept_start_words, end_words], beam_scores)) end_word_picks = (kept_start_words + end_words) final_states = [kept_sentences, kept_start_words, end_word_picks] end_word_correct = tf.reduce_mean(tf.cast(tf.logical_and(tf.logical_and(tf.equal(word_end_labels, end_word_picks[(:, 0)]), tf.equal(word_start_labels, kept_start_words[(:, 0)])), tf.equal(sentence_labels, kept_sentences[(:, 0)])), tf.float32)) with tf.name_scope('Loss'): end_labels = (word_end_labels - word_start_labels) labels = (sentence_labels, 
word_start_labels, end_labels) loss = globally_normalized_loss(beam_states, labels) l2_penalty = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(model.l2_scale), tf.trainable_variables()) loss += l2_penalty with tf.name_scope('TrainStep'): (iteration, (step, loss, gradnorm)) = ops.default_train_step(model, loss) with tf.name_scope('TrainSummary'): train_summary = ops.scalar_summaries({'Train-Loss': loss, 'Gradient-Norm': gradnorm, 'Sentence-Correct': sentence_correct, 'Start-Word-Correct': start_word_correct, 'End-Word-Correct': end_word_correct}) with tf.name_scope('ValidSummary'): valid_summary = ops.scalar_summaries({'Validation-Loss': loss, 'Sentence-Correct': sentence_correct, 'Start-Word-Correct': start_word_correct, 'End-Word-Correct': end_word_correct}) with tf.name_scope('SquadSummary'): squad_summary = ops.scalar_summaries({'Exact-Match': exact_match, 'F1': f1}) return Model(inputs=[questions, documents, same_as_question_feature, repeated_words, repeated_word_intensity, sentence_lengths, sentence_labels, word_start_labels, word_end_labels], outputs=[kept_sentences, kept_start_words, end_word_picks, sentence_correct, start_word_correct, end_word_correct], loss=loss, training=training, dropout=[embedding_dropout, hidden_dropout], gradnorm=gradnorm, step=step, iteration=iteration, train_summary=train_summary, valid_summary=valid_summary, embedding_init=embedding_init, embedding_placeholder=embedding_placeholder, squad_summary=squad_summary, squad_inputs=[exact_match, f1])
def build_model(model): 'Build a Tensorflow graph for the QA model.\n Return a model.Model for training, evaluation, etc.\n ' with tf.name_scope('Inputs'): questions = tf.placeholder(tf.int32, name='Questions', shape=[None, None]) documents = tf.placeholder(tf.int32, name='Documents', shape=[None, None]) same_as_question_feature = tf.placeholder(tf.float32, name='SameAsQuestionFeature', shape=[None, None]) repeated_words = tf.placeholder(tf.float32, name='RepeatedWordFeature', shape=[None, None]) repeated_word_intensity = tf.placeholder(tf.float32, name='RepeatedWordIntensity', shape=[None, None]) sentence_lengths = tf.placeholder(tf.int32, name='SentenceOffsets', shape=[None, None]) sentence_labels = tf.placeholder(tf.int32, name='SentenceLabels', shape=[None]) word_start_labels = tf.placeholder(tf.int32, name='WordStartLabels', shape=[None]) word_end_labels = tf.placeholder(tf.int32, name='WordEndLabels', shape=[None]) embedding_dropout = tf.placeholder_with_default(model.embedding_dropout_prob, shape=[]) hidden_dropout = tf.placeholder_with_default(model.hidden_dropout_prob, shape=[]) training = tf.placeholder_with_default(True, shape=[], name='TrainingIndicator') exact_match = tf.placeholder(tf.float32, name='ExactMatch', shape=[]) f1 = tf.placeholder(tf.float32, name='F1', shape=[]) with tf.variable_scope('GloveEmbeddings'): embeddings = tf.get_variable(shape=[model.vocab_size, EMBEDDING_DIM], initializer=tf.zeros_initializer(), trainable=False, name='GloveEmbeddings') embedding_placeholder = tf.placeholder(tf.float32, [model.vocab_size, EMBEDDING_DIM]) embedding_init = embeddings.assign(embedding_placeholder) with tf.name_scope('QuestionEmbeddings'): question_vector = featurize_question(model, questions, embedding_dropout, training) with tf.name_scope('DocumentEmbeddings'): document_embeddings = featurize_document(model, questions, documents, same_as_question_feature, repeated_words, repeated_word_intensity, question_vector, embedding_dropout, training) 
beam_states = [] with tf.name_scope('PickSentence'): sentence_scores = score_sentences(model, document_embeddings, sentence_lengths, hidden_dropout) beam_states.append(([], tf.expand_dims(sentence_scores, 1))) (beam_scores, sentence_picks) = tf.nn.top_k(sentence_scores, k=tf.minimum(model.beam_size, tf.shape(sentence_scores)[1]), sorted=True) sentence_correct = tf.reduce_mean(tf.cast(tf.equal(sentence_labels, sentence_picks[(:, 0)]), tf.float32)) with tf.name_scope('PickStartWord'): start_word_scores = score_start_word(model, document_embeddings, sentence_picks, sentence_lengths, hidden_dropout) beam_scores = (tf.expand_dims(beam_scores, 2) + start_word_scores) beam_states.append(([sentence_picks], beam_scores)) (beam_scores, kept_sentences, start_words) = ops.prune_beam(beam_scores, sentence_picks, model.beam_size) start_word_correct = tf.reduce_mean(tf.cast(tf.logical_and(tf.equal(word_start_labels, start_words[(:, 0)]), tf.equal(sentence_labels, kept_sentences[(:, 0)])), tf.float32)) with tf.name_scope('PickEndWord'): end_word_scores = score_end_words(model, document_embeddings, kept_sentences, start_words, sentence_lengths, hidden_dropout, training) beam_scores = (tf.expand_dims(beam_scores, 2) + end_word_scores) beam_states.append(([kept_sentences, start_words], beam_scores)) (beam_scores, (kept_sentences, kept_start_words), end_words) = ops.prune_beam(beam_scores, [kept_sentences, start_words], model.beam_size) beam_states.append(([kept_sentences, kept_start_words, end_words], beam_scores)) end_word_picks = (kept_start_words + end_words) final_states = [kept_sentences, kept_start_words, end_word_picks] end_word_correct = tf.reduce_mean(tf.cast(tf.logical_and(tf.logical_and(tf.equal(word_end_labels, end_word_picks[(:, 0)]), tf.equal(word_start_labels, kept_start_words[(:, 0)])), tf.equal(sentence_labels, kept_sentences[(:, 0)])), tf.float32)) with tf.name_scope('Loss'): end_labels = (word_end_labels - word_start_labels) labels = (sentence_labels, 
word_start_labels, end_labels) loss = globally_normalized_loss(beam_states, labels) l2_penalty = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(model.l2_scale), tf.trainable_variables()) loss += l2_penalty with tf.name_scope('TrainStep'): (iteration, (step, loss, gradnorm)) = ops.default_train_step(model, loss) with tf.name_scope('TrainSummary'): train_summary = ops.scalar_summaries({'Train-Loss': loss, 'Gradient-Norm': gradnorm, 'Sentence-Correct': sentence_correct, 'Start-Word-Correct': start_word_correct, 'End-Word-Correct': end_word_correct}) with tf.name_scope('ValidSummary'): valid_summary = ops.scalar_summaries({'Validation-Loss': loss, 'Sentence-Correct': sentence_correct, 'Start-Word-Correct': start_word_correct, 'End-Word-Correct': end_word_correct}) with tf.name_scope('SquadSummary'): squad_summary = ops.scalar_summaries({'Exact-Match': exact_match, 'F1': f1}) return Model(inputs=[questions, documents, same_as_question_feature, repeated_words, repeated_word_intensity, sentence_lengths, sentence_labels, word_start_labels, word_end_labels], outputs=[kept_sentences, kept_start_words, end_word_picks, sentence_correct, start_word_correct, end_word_correct], loss=loss, training=training, dropout=[embedding_dropout, hidden_dropout], gradnorm=gradnorm, step=step, iteration=iteration, train_summary=train_summary, valid_summary=valid_summary, embedding_init=embedding_init, embedding_placeholder=embedding_placeholder, squad_summary=squad_summary, squad_inputs=[exact_match, f1])<|docstring|>Build a Tensorflow graph for the QA model. Return a model.Model for training, evaluation, etc.<|endoftext|>
5fed9307280de9b498e27f459a998af6064f5cc7510414889890b8072e64689a
def __init__(self, reftrack, parent=None): 'Initialize a new OptionSelector\n\n :param reftrack: the reftrack to show options for\n :type reftrack: :class:`jukeboxcore.reftrack.Reftrack`\n :param parent: the parent widget\n :type parent: :class:`QtGui.QWidget`\n :raises: None\n ' super(OptionSelector, self).__init__(parent) self.setupUi(self) self.selected = None self.reftrack = reftrack self.setup_ui() self.setup_signals() options = reftrack.get_options() self.browser.set_model(options) columns = self.reftrack.get_option_columns() for (i, c) in enumerate(columns): self.browser.get_level(i).setModelColumn(c) self.adjustSize()
Initialize a new OptionSelector :param reftrack: the reftrack to show options for :type reftrack: :class:`jukeboxcore.reftrack.Reftrack` :param parent: the parent widget :type parent: :class:`QtGui.QWidget` :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
__init__
JukeboxPipeline/jukebox-core
2
python
def __init__(self, reftrack, parent=None): 'Initialize a new OptionSelector\n\n :param reftrack: the reftrack to show options for\n :type reftrack: :class:`jukeboxcore.reftrack.Reftrack`\n :param parent: the parent widget\n :type parent: :class:`QtGui.QWidget`\n :raises: None\n ' super(OptionSelector, self).__init__(parent) self.setupUi(self) self.selected = None self.reftrack = reftrack self.setup_ui() self.setup_signals() options = reftrack.get_options() self.browser.set_model(options) columns = self.reftrack.get_option_columns() for (i, c) in enumerate(columns): self.browser.get_level(i).setModelColumn(c) self.adjustSize()
def __init__(self, reftrack, parent=None): 'Initialize a new OptionSelector\n\n :param reftrack: the reftrack to show options for\n :type reftrack: :class:`jukeboxcore.reftrack.Reftrack`\n :param parent: the parent widget\n :type parent: :class:`QtGui.QWidget`\n :raises: None\n ' super(OptionSelector, self).__init__(parent) self.setupUi(self) self.selected = None self.reftrack = reftrack self.setup_ui() self.setup_signals() options = reftrack.get_options() self.browser.set_model(options) columns = self.reftrack.get_option_columns() for (i, c) in enumerate(columns): self.browser.get_level(i).setModelColumn(c) self.adjustSize()<|docstring|>Initialize a new OptionSelector :param reftrack: the reftrack to show options for :type reftrack: :class:`jukeboxcore.reftrack.Reftrack` :param parent: the parent widget :type parent: :class:`QtGui.QWidget` :raises: None<|endoftext|>
f9abf1024ed34a31df9d81edac22a8bb2113abdbc7101bcf4c74ee31841b7ceb
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' labels = self.reftrack.get_option_labels() self.browser = ComboBoxBrowser(len(labels), headers=labels) self.browser_vbox.addWidget(self.browser)
Setup the ui :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setup_ui
JukeboxPipeline/jukebox-core
2
python
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' labels = self.reftrack.get_option_labels() self.browser = ComboBoxBrowser(len(labels), headers=labels) self.browser_vbox.addWidget(self.browser)
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' labels = self.reftrack.get_option_labels() self.browser = ComboBoxBrowser(len(labels), headers=labels) self.browser_vbox.addWidget(self.browser)<|docstring|>Setup the ui :returns: None :rtype: None :raises: None<|endoftext|>
6c256436fe778f0287dfbbd7300b6ea0b843d0fbfbefc64eedc78418e8a297ac
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.select_pb.clicked.connect(self.select)
Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setup_signals
JukeboxPipeline/jukebox-core
2
python
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.select_pb.clicked.connect(self.select)
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.select_pb.clicked.connect(self.select)<|docstring|>Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None<|endoftext|>
b0b026189b56338debfb8054cc31958ddb8996b9c95c41f2b8ae2b5c7242ae3c
def select(self): 'Store the selected taskfileinfo self.selected and accept the dialog\n\n :returns: None\n :rtype: None\n :raises: None\n ' s = self.browser.selected_indexes((self.browser.get_depth() - 1)) if (not s): return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()
Store the selected taskfileinfo self.selected and accept the dialog :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
select
JukeboxPipeline/jukebox-core
2
python
def select(self): 'Store the selected taskfileinfo self.selected and accept the dialog\n\n :returns: None\n :rtype: None\n :raises: None\n ' s = self.browser.selected_indexes((self.browser.get_depth() - 1)) if (not s): return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()
def select(self): 'Store the selected taskfileinfo self.selected and accept the dialog\n\n :returns: None\n :rtype: None\n :raises: None\n ' s = self.browser.selected_indexes((self.browser.get_depth() - 1)) if (not s): return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()<|docstring|>Store the selected taskfileinfo self.selected and accept the dialog :returns: None :rtype: None :raises: None<|endoftext|>
079ff26b0af06842889e283de3b8b37156d8befa65e9d0392d8adb54f4a9c976
def __init__(self, parent=None): 'Initialize a new ReftrackWidget\n\n :param parent: widget parent\n :type parent: QtGui.QWidget\n :raises: None\n ' super(ReftrackWidget, self).__init__(parent) self.setupUi(self) self.reftrack = None self.setup_ui() self.setup_signals() self.upper_fr_default_bg_color = self.upper_fr.palette().color(QtGui.QPalette.Window)
Initialize a new ReftrackWidget :param parent: widget parent :type parent: QtGui.QWidget :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
__init__
JukeboxPipeline/jukebox-core
2
python
def __init__(self, parent=None): 'Initialize a new ReftrackWidget\n\n :param parent: widget parent\n :type parent: QtGui.QWidget\n :raises: None\n ' super(ReftrackWidget, self).__init__(parent) self.setupUi(self) self.reftrack = None self.setup_ui() self.setup_signals() self.upper_fr_default_bg_color = self.upper_fr.palette().color(QtGui.QPalette.Window)
def __init__(self, parent=None): 'Initialize a new ReftrackWidget\n\n :param parent: widget parent\n :type parent: QtGui.QWidget\n :raises: None\n ' super(ReftrackWidget, self).__init__(parent) self.setupUi(self) self.reftrack = None self.setup_ui() self.setup_signals() self.upper_fr_default_bg_color = self.upper_fr.palette().color(QtGui.QPalette.Window)<|docstring|>Initialize a new ReftrackWidget :param parent: widget parent :type parent: QtGui.QWidget :raises: None<|endoftext|>
66eebcae3e078853583412aa8f81a23316752e5e1144a13b46296a19e6156b5a
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.setup_icons()
Setup the ui :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setup_ui
JukeboxPipeline/jukebox-core
2
python
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.setup_icons()
def setup_ui(self): 'Setup the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.setup_icons()<|docstring|>Setup the ui :returns: None :rtype: None :raises: None<|endoftext|>
e11571a56c1f3c5a552763d38fa3cfa628b9202732ead9767ce49e055381a0b3
def setup_icons(self): 'Setup the icons of the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' iconbtns = [('menu_border_24x24.png', self.menu_tb), ('duplicate_border_24x24.png', self.duplicate_tb), ('delete_border_24x24.png', self.delete_tb), ('reference_border_24x24.png', self.reference_tb), ('load_border_24x24.png', self.load_tb), ('unload_border_24x24.png', self.unload_tb), ('replace_border_24x24.png', self.replace_tb), ('import_border_24x24.png', self.importref_tb), ('import_border_24x24.png', self.importtf_tb), ('alien.png', self.alien_tb), ('imported.png', self.imported_tb)] for (iconname, btn) in iconbtns: i = get_icon(iconname, asicon=True) btn.setIcon(i)
Setup the icons of the ui :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setup_icons
JukeboxPipeline/jukebox-core
2
python
def setup_icons(self): 'Setup the icons of the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' iconbtns = [('menu_border_24x24.png', self.menu_tb), ('duplicate_border_24x24.png', self.duplicate_tb), ('delete_border_24x24.png', self.delete_tb), ('reference_border_24x24.png', self.reference_tb), ('load_border_24x24.png', self.load_tb), ('unload_border_24x24.png', self.unload_tb), ('replace_border_24x24.png', self.replace_tb), ('import_border_24x24.png', self.importref_tb), ('import_border_24x24.png', self.importtf_tb), ('alien.png', self.alien_tb), ('imported.png', self.imported_tb)] for (iconname, btn) in iconbtns: i = get_icon(iconname, asicon=True) btn.setIcon(i)
def setup_icons(self): 'Setup the icons of the ui\n\n :returns: None\n :rtype: None\n :raises: None\n ' iconbtns = [('menu_border_24x24.png', self.menu_tb), ('duplicate_border_24x24.png', self.duplicate_tb), ('delete_border_24x24.png', self.delete_tb), ('reference_border_24x24.png', self.reference_tb), ('load_border_24x24.png', self.load_tb), ('unload_border_24x24.png', self.unload_tb), ('replace_border_24x24.png', self.replace_tb), ('import_border_24x24.png', self.importref_tb), ('import_border_24x24.png', self.importtf_tb), ('alien.png', self.alien_tb), ('imported.png', self.imported_tb)] for (iconname, btn) in iconbtns: i = get_icon(iconname, asicon=True) btn.setIcon(i)<|docstring|>Setup the icons of the ui :returns: None :rtype: None :raises: None<|endoftext|>
7a65427bccabe0fffaafb524bddd62135cc348dba3ba8719e96cd22372c2dfef
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.duplicate_tb.clicked.connect(self.duplicate) self.delete_tb.clicked.connect(self.delete) self.load_tb.clicked.connect(self.load) self.unload_tb.clicked.connect(self.unload) self.reference_tb.clicked.connect(self.reference) self.importtf_tb.clicked.connect(self.import_file) self.importref_tb.clicked.connect(self.import_reference) self.replace_tb.clicked.connect(self.replace) self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb)) self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))
Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setup_signals
JukeboxPipeline/jukebox-core
2
python
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.duplicate_tb.clicked.connect(self.duplicate) self.delete_tb.clicked.connect(self.delete) self.load_tb.clicked.connect(self.load) self.unload_tb.clicked.connect(self.unload) self.reference_tb.clicked.connect(self.reference) self.importtf_tb.clicked.connect(self.import_file) self.importref_tb.clicked.connect(self.import_reference) self.replace_tb.clicked.connect(self.replace) self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb)) self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))
def setup_signals(self): 'Connect the signals with the slots to make the ui functional\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.duplicate_tb.clicked.connect(self.duplicate) self.delete_tb.clicked.connect(self.delete) self.load_tb.clicked.connect(self.load) self.unload_tb.clicked.connect(self.unload) self.reference_tb.clicked.connect(self.reference) self.importtf_tb.clicked.connect(self.import_file) self.importref_tb.clicked.connect(self.import_reference) self.replace_tb.clicked.connect(self.replace) self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb)) self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))<|docstring|>Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None<|endoftext|>
e710ac01a8d8ddc3a29b0835f2773e955028c01105a54df7c6969f0537ac88d5
def set_index(self, index): 'Display the data of the given index\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.index = index self.reftrack = index.model().index(index.row(), 18, index.parent()).data(REFTRACK_OBJECT_ROLE) self.set_maintext(self.index) self.set_identifiertext(self.index) self.set_type_icon(self.index) self.disable_restricted() self.hide_restricted() self.set_top_bar_color(self.index) self.set_status_buttons() self.set_menu()
Display the data of the given index :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_index
JukeboxPipeline/jukebox-core
2
python
def set_index(self, index): 'Display the data of the given index\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.index = index self.reftrack = index.model().index(index.row(), 18, index.parent()).data(REFTRACK_OBJECT_ROLE) self.set_maintext(self.index) self.set_identifiertext(self.index) self.set_type_icon(self.index) self.disable_restricted() self.hide_restricted() self.set_top_bar_color(self.index) self.set_status_buttons() self.set_menu()
def set_index(self, index): 'Display the data of the given index\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.index = index self.reftrack = index.model().index(index.row(), 18, index.parent()).data(REFTRACK_OBJECT_ROLE) self.set_maintext(self.index) self.set_identifiertext(self.index) self.set_type_icon(self.index) self.disable_restricted() self.hide_restricted() self.set_top_bar_color(self.index) self.set_status_buttons() self.set_menu()<|docstring|>Display the data of the given index :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None<|endoftext|>
155e8c5c846a8ce29a6e1b068bd3386893a185ab61885feacb3cdb18911efe83
def set_maintext(self, index): 'Set the maintext_lb to display text information about the given reftrack\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole text = '' model = index.model() for i in (1, 2, 3, 5, 6): new = model.index(index.row(), i, index.parent()).data(dr) if (new is not None): text = (' | '.join((text, new)) if text else new) self.maintext_lb.setText(text)
Set the maintext_lb to display text information about the given reftrack :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_maintext
JukeboxPipeline/jukebox-core
2
python
def set_maintext(self, index): 'Set the maintext_lb to display text information about the given reftrack\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole text = model = index.model() for i in (1, 2, 3, 5, 6): new = model.index(index.row(), i, index.parent()).data(dr) if (new is not None): text = (' | '.join((text, new)) if text else new) self.maintext_lb.setText(text)
def set_maintext(self, index): 'Set the maintext_lb to display text information about the given reftrack\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole text = model = index.model() for i in (1, 2, 3, 5, 6): new = model.index(index.row(), i, index.parent()).data(dr) if (new is not None): text = (' | '.join((text, new)) if text else new) self.maintext_lb.setText(text)<|docstring|>Set the maintext_lb to display text information about the given reftrack :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None<|endoftext|>
175bd29776e9be9ce93833722de50f5e14db395821e23492b75ab708756245cc
def set_identifiertext(self, index): 'Set the identifier text on the identifier_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole t = index.model().index(index.row(), 17, index.parent()).data(dr) if (t is None): t = (- 1) else: t = (t + 1) self.identifier_lb.setText(('#%s' % t))
Set the identifier text on the identifier_lb :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_identifiertext
JukeboxPipeline/jukebox-core
2
python
def set_identifiertext(self, index): 'Set the identifier text on the identifier_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole t = index.model().index(index.row(), 17, index.parent()).data(dr) if (t is None): t = (- 1) else: t = (t + 1) self.identifier_lb.setText(('#%s' % t))
def set_identifiertext(self, index): 'Set the identifier text on the identifier_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.DisplayRole t = index.model().index(index.row(), 17, index.parent()).data(dr) if (t is None): t = (- 1) else: t = (t + 1) self.identifier_lb.setText(('#%s' % t))<|docstring|>Set the identifier text on the identifier_lb :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None<|endoftext|>
6971b2168fea57b18e338e123f7e3646a013661d48ac5a69fb435294fe71a513
def set_type_icon(self, index): 'Set the type icon on type_icon_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole) if icon: pix = icon.pixmap(self.type_icon_lb.size()) self.type_icon_lb.setPixmap(pix) else: self.type_icon_lb.setPixmap(None)
Set the type icon on type_icon_lb :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_type_icon
JukeboxPipeline/jukebox-core
2
python
def set_type_icon(self, index): 'Set the type icon on type_icon_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole) if icon: pix = icon.pixmap(self.type_icon_lb.size()) self.type_icon_lb.setPixmap(pix) else: self.type_icon_lb.setPixmap(None)
def set_type_icon(self, index): 'Set the type icon on type_icon_lb\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole) if icon: pix = icon.pixmap(self.type_icon_lb.size()) self.type_icon_lb.setPixmap(pix) else: self.type_icon_lb.setPixmap(None)<|docstring|>Set the type icon on type_icon_lb :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None<|endoftext|>
74460d1ad04d32c9f0f66801e5939295af7f5289f6c541525c8db4dea2111eae
def disable_restricted(self): 'Disable the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' todisable = [(self.reftrack.duplicate, self.duplicate_tb), (self.reftrack.delete, self.delete_tb), (self.reftrack.reference, self.reference_tb), (self.reftrack.replace, self.replace_tb)] for (action, btn) in todisable: res = self.reftrack.is_restricted(action) btn.setDisabled(res)
Disable the restricted buttons :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
disable_restricted
JukeboxPipeline/jukebox-core
2
python
def disable_restricted(self): 'Disable the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' todisable = [(self.reftrack.duplicate, self.duplicate_tb), (self.reftrack.delete, self.delete_tb), (self.reftrack.reference, self.reference_tb), (self.reftrack.replace, self.replace_tb)] for (action, btn) in todisable: res = self.reftrack.is_restricted(action) btn.setDisabled(res)
def disable_restricted(self): 'Disable the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' todisable = [(self.reftrack.duplicate, self.duplicate_tb), (self.reftrack.delete, self.delete_tb), (self.reftrack.reference, self.reference_tb), (self.reftrack.replace, self.replace_tb)] for (action, btn) in todisable: res = self.reftrack.is_restricted(action) btn.setDisabled(res)<|docstring|>Disable the restricted buttons :returns: None :rtype: None :raises: None<|endoftext|>
73cb6f51220fa07ff1708387880e2877d97da0467d6a39b9f6d5e2db8047f30b
def hide_restricted(self): 'Hide the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' tohide = [((self.reftrack.unload, self.unload_tb), (self.reftrack.load, self.load_tb)), ((self.reftrack.import_file, self.importtf_tb), (self.reftrack.import_reference, self.importref_tb))] for ((action1, btn1), (action2, btn2)) in tohide: res1 = self.reftrack.is_restricted(action1) res2 = self.reftrack.is_restricted(action2) if (res1 != res2): btn1.setEnabled(True) btn1.setHidden(res1) btn2.setHidden(res2) else: btn1.setDisabled(True) btn1.setVisible(True) btn2.setVisible(False)
Hide the restricted buttons :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
hide_restricted
JukeboxPipeline/jukebox-core
2
python
def hide_restricted(self): 'Hide the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' tohide = [((self.reftrack.unload, self.unload_tb), (self.reftrack.load, self.load_tb)), ((self.reftrack.import_file, self.importtf_tb), (self.reftrack.import_reference, self.importref_tb))] for ((action1, btn1), (action2, btn2)) in tohide: res1 = self.reftrack.is_restricted(action1) res2 = self.reftrack.is_restricted(action2) if (res1 != res2): btn1.setEnabled(True) btn1.setHidden(res1) btn2.setHidden(res2) else: btn1.setDisabled(True) btn1.setVisible(True) btn2.setVisible(False)
def hide_restricted(self): 'Hide the restricted buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' tohide = [((self.reftrack.unload, self.unload_tb), (self.reftrack.load, self.load_tb)), ((self.reftrack.import_file, self.importtf_tb), (self.reftrack.import_reference, self.importref_tb))] for ((action1, btn1), (action2, btn2)) in tohide: res1 = self.reftrack.is_restricted(action1) res2 = self.reftrack.is_restricted(action2) if (res1 != res2): btn1.setEnabled(True) btn1.setHidden(res1) btn2.setHidden(res2) else: btn1.setDisabled(True) btn1.setVisible(True) btn2.setVisible(False)<|docstring|>Hide the restricted buttons :returns: None :rtype: None :raises: None<|endoftext|>
fc4aa7e0159353f7a2a2fb4ec14661e1491ae84d5df5fff5785d8d520283d70f
def set_top_bar_color(self, index): 'Set the color of the upper frame to the background color of the reftrack status\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.ForegroundRole c = index.model().index(index.row(), 8, index.parent()).data(dr) if (not c): c = self.upper_fr_default_bg_color self.upper_fr.setStyleSheet(('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue())))
Set the color of the upper frame to the background color of the reftrack status :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_top_bar_color
JukeboxPipeline/jukebox-core
2
python
def set_top_bar_color(self, index): 'Set the color of the upper frame to the background color of the reftrack status\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.ForegroundRole c = index.model().index(index.row(), 8, index.parent()).data(dr) if (not c): c = self.upper_fr_default_bg_color self.upper_fr.setStyleSheet(('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue())))
def set_top_bar_color(self, index): 'Set the color of the upper frame to the background color of the reftrack status\n\n :param index: the index\n :type index: :class:`QtGui.QModelIndex`\n :returns: None\n :rtype: None\n :raises: None\n ' dr = QtCore.Qt.ForegroundRole c = index.model().index(index.row(), 8, index.parent()).data(dr) if (not c): c = self.upper_fr_default_bg_color self.upper_fr.setStyleSheet(('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue())))<|docstring|>Set the color of the upper frame to the background color of the reftrack status :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None<|endoftext|>
598e71e9d30ea015627698285272f955984d96984519b60b521c1001bf8d3f59
def set_status_buttons(self): 'Depending on the status of the reftrack, enable or disable\n the status buttons, for imported/alien status buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' imported = (self.reftrack.status() == self.reftrack.IMPORTED) alien = self.reftrack.alien() for (btn, enable) in [(self.imported_tb, imported), (self.alien_tb, alien)]: btn.setEnabled(enable) btn.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
Depending on the status of the reftrack, enable or disable the status buttons, for imported/alien status buttons :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_status_buttons
JukeboxPipeline/jukebox-core
2
python
def set_status_buttons(self): 'Depending on the status of the reftrack, enable or disable\n the status buttons, for imported/alien status buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' imported = (self.reftrack.status() == self.reftrack.IMPORTED) alien = self.reftrack.alien() for (btn, enable) in [(self.imported_tb, imported), (self.alien_tb, alien)]: btn.setEnabled(enable) btn.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
def set_status_buttons(self): 'Depending on the status of the reftrack, enable or disable\n the status buttons, for imported/alien status buttons\n\n :returns: None\n :rtype: None\n :raises: None\n ' imported = (self.reftrack.status() == self.reftrack.IMPORTED) alien = self.reftrack.alien() for (btn, enable) in [(self.imported_tb, imported), (self.alien_tb, alien)]: btn.setEnabled(enable) btn.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)<|docstring|>Depending on the status of the reftrack, enable or disable the status buttons, for imported/alien status buttons :returns: None :rtype: None :raises: None<|endoftext|>
5c1541f17ff19a3427972f98486f97b5222b54a42c0b41c4b0e96e61c3dacc2e
def toggle_tbstyle(self, button): 'Toogle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`\n\n :param button: a tool button\n :type button: :class:`QtGui.QToolButton`\n :returns: None\n :rtype: None\n :raises: None\n ' old = button.toolButtonStyle() if (old == QtCore.Qt.ToolButtonIconOnly): new = QtCore.Qt.ToolButtonTextBesideIcon else: new = QtCore.Qt.ToolButtonIconOnly button.setToolButtonStyle(new)
Toogle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon` :param button: a tool button :type button: :class:`QtGui.QToolButton` :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
toggle_tbstyle
JukeboxPipeline/jukebox-core
2
python
def toggle_tbstyle(self, button): 'Toogle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`\n\n :param button: a tool button\n :type button: :class:`QtGui.QToolButton`\n :returns: None\n :rtype: None\n :raises: None\n ' old = button.toolButtonStyle() if (old == QtCore.Qt.ToolButtonIconOnly): new = QtCore.Qt.ToolButtonTextBesideIcon else: new = QtCore.Qt.ToolButtonIconOnly button.setToolButtonStyle(new)
def toggle_tbstyle(self, button): 'Toogle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`\n\n :param button: a tool button\n :type button: :class:`QtGui.QToolButton`\n :returns: None\n :rtype: None\n :raises: None\n ' old = button.toolButtonStyle() if (old == QtCore.Qt.ToolButtonIconOnly): new = QtCore.Qt.ToolButtonTextBesideIcon else: new = QtCore.Qt.ToolButtonIconOnly button.setToolButtonStyle(new)<|docstring|>Toogle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon` :param button: a tool button :type button: :class:`QtGui.QToolButton` :returns: None :rtype: None :raises: None<|endoftext|>
25849cb210e83e8ff80139b7c566076d3699d27f4d9a3db51884a6b1ad338d86
def set_menu(self): 'Setup the menu that the menu_tb button uses\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.menu = QtGui.QMenu(self) actions = self.reftrack.get_additional_actions() self.actions = [] for a in actions: if a.icon: qaction = QtGui.QAction(a.icon, a.name, self) else: qaction = QtGui.QAction(a.name, self) qaction.setCheckable(a.checkable) qaction.setChecked(a.checked) qaction.setEnabled(a.enabled) qaction.triggered.connect(a.action) self.actions.append(qaction) self.menu.addAction(qaction) self.menu_tb.setMenu(self.menu)
Setup the menu that the menu_tb button uses :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_menu
JukeboxPipeline/jukebox-core
2
python
def set_menu(self): 'Setup the menu that the menu_tb button uses\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.menu = QtGui.QMenu(self) actions = self.reftrack.get_additional_actions() self.actions = [] for a in actions: if a.icon: qaction = QtGui.QAction(a.icon, a.name, self) else: qaction = QtGui.QAction(a.name, self) qaction.setCheckable(a.checkable) qaction.setChecked(a.checked) qaction.setEnabled(a.enabled) qaction.triggered.connect(a.action) self.actions.append(qaction) self.menu.addAction(qaction) self.menu_tb.setMenu(self.menu)
def set_menu(self): 'Setup the menu that the menu_tb button uses\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.menu = QtGui.QMenu(self) actions = self.reftrack.get_additional_actions() self.actions = [] for a in actions: if a.icon: qaction = QtGui.QAction(a.icon, a.name, self) else: qaction = QtGui.QAction(a.name, self) qaction.setCheckable(a.checkable) qaction.setChecked(a.checked) qaction.setEnabled(a.enabled) qaction.triggered.connect(a.action) self.actions.append(qaction) self.menu.addAction(qaction) self.menu_tb.setMenu(self.menu)<|docstring|>Setup the menu that the menu_tb button uses :returns: None :rtype: None :raises: None<|endoftext|>
db4547a21863dddd3c5024fa658358809c6dc787785346b9890d8d52e1bb1ecd
def get_taskfileinfo_selection(self):
    """Ask the user to pick one of the available task file options.

    Opens a modal :class:`OptionSelector` dialog for this reftrack and
    returns whatever option the user selected in it.

    :returns: the chosen taskfileinfo
    :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
    :raises: None
    """
    selector = OptionSelector(self.reftrack)
    selector.exec_()
    return selector.selected
Return a taskfileinfo that the user chose from the available options :returns: the chosen taskfileinfo :rtype: :class:`jukeboxcore.filesys.TaskFileInfo` :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
get_taskfileinfo_selection
JukeboxPipeline/jukebox-core
2
python
def get_taskfileinfo_selection(self): 'Return a taskfileinfo that the user chose from the available options\n\n :returns: the chosen taskfileinfo\n :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`\n :raises: None\n ' sel = OptionSelector(self.reftrack) sel.exec_() return sel.selected
def get_taskfileinfo_selection(self): 'Return a taskfileinfo that the user chose from the available options\n\n :returns: the chosen taskfileinfo\n :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`\n :raises: None\n ' sel = OptionSelector(self.reftrack) sel.exec_() return sel.selected<|docstring|>Return a taskfileinfo that the user chose from the available options :returns: the chosen taskfileinfo :rtype: :class:`jukeboxcore.filesys.TaskFileInfo` :raises: None<|endoftext|>
c59e0b6bdb6f0bf2b49ed20f6cebcc260f49709d3f08f7307a8967be9a8b4533
def duplicate(self):
    """Duplicate the entity tracked by this widget.

    Thin delegate to the wrapped reftrack's ``duplicate`` method.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.reftrack.duplicate()
Duplicate the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
duplicate
JukeboxPipeline/jukebox-core
2
python
def duplicate(self): 'Duplicate the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.duplicate()
def duplicate(self): 'Duplicate the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.duplicate()<|docstring|>Duplicate the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
e2e83b4ac64ab8da566bcf94f225d92d57c4a9e0ca44f79c8d4e8b0c6cc434c1
def delete(self):
    """Delete the entity tracked by this widget.

    Thin delegate to the wrapped reftrack's ``delete`` method.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.reftrack.delete()
Delete the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
delete
JukeboxPipeline/jukebox-core
2
python
def delete(self): 'Delete the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.delete()
def delete(self): 'Delete the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.delete()<|docstring|>Delete the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
f3063a86137df79ed06bc54c280851872a1c9e2b7fb5dbae971a30a60105482c
def load(self):
    """Load the entity tracked by this widget into the scene.

    Thin delegate to the wrapped reftrack's ``load`` method.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.reftrack.load()
Load the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
load
JukeboxPipeline/jukebox-core
2
python
def load(self): 'Load the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.load()
def load(self): 'Load the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.load()<|docstring|>Load the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
d101663f1ee726f8152e2ed222e70477c076542a346384c12dd1754b75885dba
def unload(self):
    """Unload the entity tracked by this widget from the scene.

    Thin delegate to the wrapped reftrack's ``unload`` method.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.reftrack.unload()
Unload the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
unload
JukeboxPipeline/jukebox-core
2
python
def unload(self): 'Unload the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.unload()
def unload(self): 'Unload the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.unload()<|docstring|>Unload the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
c8f1413e46f58abc095bc9d73d81d49fe54861ffbf74ab5c3dffb4a38b5bbddd
def reference(self):
    """Let the user choose a task file and reference it.

    If the user cancels the selection dialog (no taskfileinfo chosen),
    nothing is referenced.

    :returns: None
    :rtype: None
    :raises: None
    """
    taskfileinfo = self.get_taskfileinfo_selection()
    if taskfileinfo:
        self.reftrack.reference(taskfileinfo)
Reference a file :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
reference
JukeboxPipeline/jukebox-core
2
python
def reference(self): 'Reference a file\n\n :returns: None\n :rtype: None\n :raises: None\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.reference(tfi)
def reference(self): 'Reference a file\n\n :returns: None\n :rtype: None\n :raises: None\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.reference(tfi)<|docstring|>Reference a file :returns: None :rtype: None :raises: None<|endoftext|>
d7b21559fd136cbdba58dc5c1b95d90d41bd11977ee19d7859c56fa695601e96
def import_file(self):
    """Let the user choose a task file and import it.

    If the user cancels the selection dialog (no taskfileinfo chosen),
    nothing is imported.

    :returns: None
    :rtype: None
    :raises: None
    """
    taskfileinfo = self.get_taskfileinfo_selection()
    if taskfileinfo:
        self.reftrack.import_file(taskfileinfo)
Import a file :returns: None :rtype: None :raises: NotImplementedError
src/jukeboxcore/gui/widgets/reftrackwidget.py
import_file
JukeboxPipeline/jukebox-core
2
python
def import_file(self): 'Import a file\n\n :returns: None\n :rtype: None\n :raises: NotImplementedError\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.import_file(tfi)
def import_file(self): 'Import a file\n\n :returns: None\n :rtype: None\n :raises: NotImplementedError\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.import_file(tfi)<|docstring|>Import a file :returns: None :rtype: None :raises: NotImplementedError<|endoftext|>