text
stringlengths 1
22.8M
|
|---|
```typescript
import { Component } from '@angular/core';
import { ImportDoc } from '@doc/autofocus/importdoc';
import { BasicDoc } from '@doc/autofocus/basicdoc';

/**
 * Demo page component for the AutoFocus directive. It only supplies the
 * documentation sections that autofocusdemo.html renders.
 */
@Component({
selector: 'autofocusdemo',
templateUrl: './autofocusdemo.html'
})
export class AutoFocusDemo {
// One entry per documentation section shown in the template.
docs = [
{ id: 'import', label: 'Import', component: ImportDoc },
{ id: 'basic', label: 'Basic', component: BasicDoc }
];
}
```
|
```html
<html lang="en">
<head>
<title>PowerPC-Syntax - Using as</title>
<meta http-equiv="Content-Type" content="text/html">
<meta name="description" content="Using as">
<meta name="generator" content="makeinfo 4.8">
<link title="Top" rel="start" href="index.html#Top">
<link rel="up" href="PPC_002dDependent.html#PPC_002dDependent" title="PPC-Dependent">
<link rel="prev" href="PowerPC_002dPseudo.html#PowerPC_002dPseudo" title="PowerPC-Pseudo">
<link href="path_to_url" rel="generator-home" title="Texinfo Homepage">
<!--
This file documents the GNU Assembler "as".
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.1
or any later version published by the Free Software Foundation;
with no Invariant Sections, with no Front-Cover Texts, and with no
Back-Cover Texts.  A copy of the license is included in the
section entitled "GNU Free Documentation License".
-->
<meta http-equiv="Content-Style-Type" content="text/css">
<style type="text/css"><!--
pre.display { font-family:inherit }
pre.format { font-family:inherit }
pre.smalldisplay { font-family:inherit; font-size:smaller }
pre.smallformat { font-family:inherit; font-size:smaller }
pre.smallexample { font-size:smaller }
pre.smalllisp { font-size:smaller }
span.sc { font-variant:small-caps }
span.roman { font-family:serif; font-weight:normal; }
span.sansserif { font-family:sans-serif; font-weight:normal; }
--></style>
</head>
<body>
<div class="node">
<p>
<a name="PowerPC-Syntax"></a>
<a name="PowerPC_002dSyntax"></a>
Previous: <a rel="previous" accesskey="p" href="PowerPC_002dPseudo.html#PowerPC_002dPseudo">PowerPC-Pseudo</a>,
Up: <a rel="up" accesskey="u" href="PPC_002dDependent.html#PPC_002dDependent">PPC-Dependent</a>
<hr>
</div>
<h4 class="subsection">9.35.3 PowerPC Syntax</h4>
<ul class="menu">
<li><a accesskey="1" href="PowerPC_002dChars.html#PowerPC_002dChars">PowerPC-Chars</a>: Special Characters
</ul>
</body></html>
```
|
```javascript
// Doxygen-generated navigation-tree data for the "backgroundNet" module page.
// Each entry has the shape [ displayLabel, linkTarget ("page.html#anchor"), children ],
// where children is a nested list of entries of the same shape, or null for a leaf.
// Do not edit by hand; this file is regenerated by Doxygen.
var group__backgroundNet =
[
[ "backgroundNet", "group__backgroundNet.html#classbackgroundNet", [
[ "~backgroundNet", "group__backgroundNet.html#a6b474d88a5447623257e0f473352b465", null ],
[ "backgroundNet", "group__backgroundNet.html#a4a5cb05216bf994f05887d44b249f6b4", null ],
[ "init", "group__backgroundNet.html#ac3d21c4f9d5982c1e317ce7b01d3dea4", null ],
[ "Process", "group__backgroundNet.html#adf58aca64daa5f7b6267df690bc00c92", null ],
[ "Process", "group__backgroundNet.html#a21a10a6dbc5d74d9f6723cfc95d64b55", null ],
[ "Process", "group__backgroundNet.html#a4c90e4c05c2bfb87b4c87ad7c746609d", null ],
[ "Process", "group__backgroundNet.html#a101e44eee311188da5db39a812295a75", null ]
] ],
[ "BACKGROUNDNET_DEFAULT_INPUT", "group__backgroundNet.html#gaedcfc9671390875215c85dcddd3cff09", null ],
[ "BACKGROUNDNET_DEFAULT_OUTPUT", "group__backgroundNet.html#ga3ebbfc2bb8d09adb2e1505704ebedde6", null ],
[ "BACKGROUNDNET_MODEL_TYPE", "group__backgroundNet.html#ga4ead266677aa864b484cae25a3c6062f", null ],
[ "BACKGROUNDNET_USAGE_STRING", "group__backgroundNet.html#ga554b40e53cb2ec9b6768adaf32087f57", null ]
];
```
|
```c++
* (See accompanying file LICENSE_1_0.txt or copy at
* path_to_url
*
* See path_to_url for library home page.
*/
/* Forward-declaration header for Boost.MultiIndex: lets clients name
 * multi_index_container and its index metafunctions without including
 * the full container definition.
 */
#ifndef BOOST_MULTI_INDEX_FWD_HPP
#define BOOST_MULTI_INDEX_FWD_HPP
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/config.hpp> /* keep it first to prevent nasty warns in MSVC */
#include <boost/multi_index/identity.hpp>
#include <boost/multi_index/indexed_by.hpp>
#include <boost/multi_index/ordered_index_fwd.hpp>
#include <memory>
namespace boost{
namespace multi_index{
/* Default value for IndexSpecifierList specifies a container
 * equivalent to std::set<Value>.
 */
template<
typename Value,
typename IndexSpecifierList=indexed_by<ordered_unique<identity<Value> > >,
typename Allocator=std::allocator<Value> >
class multi_index_container;
/* Metafunctions mapping a container plus an index position (N) or tag (Tag)
 * to the corresponding index type and its (const) iterator types.
 */
template<typename MultiIndexContainer,int N>
struct nth_index;
template<typename MultiIndexContainer,typename Tag>
struct index;
template<typename MultiIndexContainer,int N>
struct nth_index_iterator;
template<typename MultiIndexContainer,int N>
struct nth_index_const_iterator;
template<typename MultiIndexContainer,typename Tag>
struct index_iterator;
template<typename MultiIndexContainer,typename Tag>
struct index_const_iterator;
/* get and project functions not fwd declared due to problems
 * with dependent typenames
 */
/* Heterogeneous comparison operators: both operands may differ in value type,
 * index specification and allocator.
 */
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator==(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator<(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator!=(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator>(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator>=(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
template<
typename Value1,typename IndexSpecifierList1,typename Allocator1,
typename Value2,typename IndexSpecifierList2,typename Allocator2
>
bool operator<=(
const multi_index_container<Value1,IndexSpecifierList1,Allocator1>& x,
const multi_index_container<Value2,IndexSpecifierList2,Allocator2>& y);
/* Swaps the whole container state; declared here so ADL finds it. */
template<typename Value,typename IndexSpecifierList,typename Allocator>
void swap(
multi_index_container<Value,IndexSpecifierList,Allocator>& x,
multi_index_container<Value,IndexSpecifierList,Allocator>& y);
} /* namespace multi_index */
/* multi_index_container, being the main type of this library, is promoted to
 * namespace boost.
 */
using multi_index::multi_index_container;
} /* namespace boost */
#endif
```
|
```java
package chapter1.section4;
import edu.princeton.cs.algs4.StdOut;
import edu.princeton.cs.algs4.StdRandom;
import edu.princeton.cs.algs4.Stopwatch;
import java.util.LinkedList;
import java.util.List;
/**
* Created by Rene Argento on 27/11/16.
*/
/**
 * Compares the running time of a resizing-array stack against a linked-list
 * stack over doubling workloads of push-then-pop operations, printing the
 * array/list time ratio for each workload size.
 */
public class Exercise43_ResizingArraysVersusLinkedList<Item> {

    /** Minimal stack contract shared by the two implementations under test. */
    private interface Stack<Item> {
        boolean isEmpty();
        int size();
        void push(Item item);
        Item pop();
    }

    /** Singly linked node used by StackWithLinkedList. */
    private class Node {
        Item item;
        Node next;
    }

    /** Stack backed by a singly linked list: one node allocation per push. */
    private class StackWithLinkedList implements Stack<Item> {
        private Node first; // top of the stack
        private int size;

        public boolean isEmpty() {
            return size == 0;
        }

        public int size() {
            return size;
        }

        public void push(Item item) {
            Node oldFirst = first;
            first = new Node();
            first.item = item;
            first.next = oldFirst;
            size++;
        }

        /**
         * Removes and returns the most recently pushed item.
         * @throws RuntimeException if the stack is empty
         */
        public Item pop() {
            if (isEmpty()) {
                throw new RuntimeException("Stack underflow");
            }
            Item item = first.item;
            first = first.next;
            size--;
            return item;
        }
    }

    /**
     * Stack backed by an array that doubles when full and halves when only a
     * quarter full (amortized O(1) push/pop).
     */
    @SuppressWarnings("unchecked")
    private class StackWithResizingArray implements Stack<Item> {
        Item[] values;
        int size;

        public StackWithResizingArray(int initialSize) {
            // Guard against a non-positive initial capacity: a zero-length array
            // would make push() request resizeArray(0) forever.
            values = (Item[]) new Object[Math.max(1, initialSize)];
            size = 0;
        }

        public boolean isEmpty() {
            return size == 0;
        }

        public int size() {
            return size;
        }

        public void push(Item item) {
            if (size == values.length) {
                resizeArray(2 * values.length);
            }
            values[size] = item;
            size++;
        }

        /**
         * Removes and returns the most recently pushed item, shrinking the
         * backing array when it becomes three-quarters empty.
         * @throws RuntimeException if the stack is empty
         */
        public Item pop() {
            if (isEmpty()) {
                throw new RuntimeException("Stack underflow");
            }
            Item item = values[size - 1];
            values[size - 1] = null; // to avoid loitering
            size--;
            // size > 0 guard fixes a bug: with values.length == 1 the original
            // shrank to capacity 0 when the stack emptied, and the next push
            // then threw ArrayIndexOutOfBoundsException.
            if (size > 0 && size == values.length / 4) {
                resizeArray(values.length / 2);
            }
            return item;
        }

        /** Copies the live prefix into a fresh array of (at least) the requested capacity. */
        private void resizeArray(int newCapacity) {
            // Never allocate a zero-length backing array.
            Item[] newValues = (Item[]) new Object[Math.max(1, newCapacity)];
            for (int i = 0; i < size; i++) {
                newValues[i] = values[i];
            }
            values = newValues;
        }
    }

    private final int INITIAL_NUMBER_OF_OPERATIONS = 524288; // 2^19 = 524288
    private final int FINAL_NUMBER_OF_OPERATIONS = 67108864; // 2^26 = 67108864

    public static void main(String[] args) {
        Exercise43_ResizingArraysVersusLinkedList<Integer> resizingArrayXLinkedList = new Exercise43_ResizingArraysVersusLinkedList<>();
        //Resizing array stack
        Stack<Integer> resizingArrayStack = resizingArrayXLinkedList.new StackWithResizingArray(10);
        List<Double> resizingArrayRunningTimes = resizingArrayXLinkedList.runExperiments(resizingArrayStack);
        //Linked list stack
        Stack<Integer> linkedListStack = resizingArrayXLinkedList.new StackWithLinkedList();
        List<Double> linkedListRunningTimes = resizingArrayXLinkedList.runExperiments(linkedListStack);
        double[][] runningTimes = new double[resizingArrayRunningTimes.size()][2];
        // Iterate instead of indexed get(i): positional access on a LinkedList
        // is O(n), making the original loops accidentally quadratic.
        int row = 0;
        for (double time : resizingArrayRunningTimes) {
            runningTimes[row++][0] = time;
        }
        row = 0;
        for (double time : linkedListRunningTimes) {
            runningTimes[row++][1] = time;
        }
        resizingArrayXLinkedList.printResults(runningTimes);
    }

    /**
     * Runs timeTrial for doubling operation counts from INITIAL to FINAL
     * (inclusive) and collects each elapsed time.
     */
    private List<Double> runExperiments(Stack<Integer> stack) {
        List<Double> runningTimes = new LinkedList<>();
        for (int n = INITIAL_NUMBER_OF_OPERATIONS; n <= FINAL_NUMBER_OF_OPERATIONS; n += n) {
            double runningTime = timeTrial(n, stack);
            runningTimes.add(runningTime);
        }
        return runningTimes;
    }

    /**
     * Times n pushes followed by pops until empty. Random inputs are generated
     * before the stopwatch starts so only stack operations are measured.
     */
    private double timeTrial(int n, Stack<Integer> stack) {
        int max = 1000000;
        int[] numbers = new int[n];
        for (int i = 0; i < n; i++) {
            numbers[i] = StdRandom.uniform(-max, max);
        }
        Stopwatch timer = new Stopwatch();
        //N pushes and pop operations
        for (int number : numbers) {
            stack.push(number);
        }
        while (!stack.isEmpty()) {
            stack.pop();
        }
        return timer.elapsedTime();
    }

    /** Prints one table row per workload: size, both times, and their ratio. */
    private void printResults(double[][] runningTimes) {
        StdOut.printf("%13s %7s %6s %9s\n", "N operations", "Array", "List", "Ratio");
        int numberOfOperations = INITIAL_NUMBER_OF_OPERATIONS;
        for (int i = 0; i < runningTimes.length; i++) {
            StdOut.printf("%13d %7.1f %6.1f", numberOfOperations, runningTimes[i][0], runningTimes[i][1]);
            // NOTE(review): ratio is Infinity/NaN when the list time rounds to 0.
            StdOut.printf("%9.1f\n", runningTimes[i][0] / runningTimes[i][1]);
            numberOfOperations *= 2;
        }
    }
}
```
|
```yaml
category: Utilities
sectionOrder:
- Connect
- Collect
commonfields:
id: GitHub
version: -1
configuration:
- display: Fetch incidents
name: isFetch
type: 8
section: Collect
required: false
- defaultvalue: Issue
  display: Select whether Issues or Pull requests are fetched
name: fetch_object
options:
- Issue
- Pull_requests
type: 15
section: Collect
advanced: true
required: false
- defaultvalue: path_to_url
display: Server URL
name: url
type: 0
additionalinfo: The REST API URL
section: Connect
required: false
- displaypassword: API Token
name: api_token
type: 9
hiddenusername: true
section: Connect
required: false
- display: Credentials
name: credentials
type: 9
section: Connect
required: false
- display: 'Username of the repository owner, for example: github.com/repos/{_owner_}/{repo}/issues'
name: user
type: 0
section: Connect
required: false
- display: The name of the requested repository
name: repository
type: 0
section: Connect
advanced: true
required: false
- defaultvalue: '3'
display: First fetch interval (in days)
name: fetch_time
type: 0
section: Collect
required: false
- display: Use system proxy settings
name: proxy
type: 8
section: Connect
advanced: true
required: false
- defaultvalue: 'false'
display: Trust any certificate (not secure)
name: insecure
type: 8
section: Connect
advanced: true
required: false
- display: Incident type
name: incidentType
type: 13
section: Connect
required: false
- display: GitHub app integration ID
name: integration_id
type: 0
section: Connect
advanced: true
required: false
- display: GitHub app installation ID
name: installation_id
type: 0
section: Connect
advanced: true
required: false
- display: API Token
name: token
type: 4
hidden: true
section: Connect
advanced: true
required: false
description: Integration to GitHub API.
display: GitHub
name: GitHub
script:
commands:
- arguments:
- description: The title of the issue.
name: title
required: true
- description: The contents of the issue.
name: body
- description: Labels to associate with this issue.
isArray: true
name: labels
- description: Logins for Users to assign to this issue.
isArray: true
name: assignees
description: Creates an issue in GitHub.
name: GitHub-create-issue
outputs:
- contextPath: GitHub.Issue.ID
description: The ID of the created issue.
type: Number
- contextPath: GitHub.Issue.Repository
description: The repository of the created issue.
type: String
- contextPath: GitHub.Issue.Title
description: The title of the created issue.
type: String
- contextPath: GitHub.Issue.Body
description: The body of the created issue.
type: Unknown
- contextPath: GitHub.Issue.State
description: The state of the created issue.
type: String
- contextPath: GitHub.Issue.Labels
description: The labels applied to the issue.
type: String
- contextPath: GitHub.Issue.Assignees
description: The users assigned to this issue.
type: String
- contextPath: GitHub.Issue.Created_at
description: The date the issue was created.
type: Date
- contextPath: GitHub.Issue.Updated_at
description: The date the issue was last updated.
type: Date
- contextPath: GitHub.Issue.Closed_at
description: The date the issue was closed.
type: Date
- contextPath: GitHub.Issue.Closed_by
description: The user who closed the issue.
type: String
- contextPath: GitHub.Issue.Organization
description: The repository organization.
type: String
- arguments:
- description: The number of the issue to close.
name: ID
required: true
description: Closes an existing issue.
name: GitHub-close-issue
outputs:
- contextPath: GitHub.Issue.ID
description: The ID of the closed issue.
type: Number
- contextPath: GitHub.Issue.Repository
description: The repository of the closed issue.
type: String
- contextPath: GitHub.Issue.Title
description: The title of the closed issue.
type: String
- contextPath: GitHub.Issue.Body
description: The body of the closed issue.
type: Unknown
- contextPath: GitHub.Issue.State
description: The state of the closed issue.
type: String
- contextPath: GitHub.Issue.Labels
description: The labels applied to the issue.
type: String
- contextPath: GitHub.Issue.Assignees
description: Users assigned to the issue.
type: String
- contextPath: GitHub.Issue.Created_at
description: The date the issue was created.
type: Date
- contextPath: GitHub.Issue.Updated_at
description: The date the issue was last updated.
type: Date
- contextPath: GitHub.Issue.Closed_at
description: The date the issue was closed.
type: Date
- contextPath: GitHub.Issue.Closed_by
description: The user who closed the issue.
type: String
- contextPath: GitHub.Issue.Organization
description: The repository organization.
type: String
- arguments:
- description: The number of the issue to update.
name: ID
required: true
- description: The title of the issue.
name: title
- description: The contents of the issue.
name: body
- description: State of the issue. Either open or closed.
name: state
- description: 'Labels to apply to this issue. Pass one or more Labels to replace the set of Labels on this Issue. Send an empty array ([]) to clear all Labels from the Issue. '
isArray: true
name: labels
- description: Logins for Users to assign to this issue. Pass one or more user logins to replace the set of assignees on this Issue. Send an empty array ([]) to clear all assignees from the Issue.
isArray: true
name: assignees
description: Updates the parameters of a specified issue.
name: GitHub-update-issue
outputs:
- contextPath: GitHub.Issue.ID
description: The ID of the updated issue.
type: Number
- contextPath: GitHub.Issue.Repository
description: The repository of the updated issue.
type: String
- contextPath: GitHub.Issue.Title
description: The title of the updated issue.
type: String
- contextPath: GitHub.Issue.Body
description: The body of the updated issue.
type: Unknown
- contextPath: GitHub.Issue.State
description: The state of the updated issue.
type: String
- contextPath: GitHub.Issue.Labels
description: The labels applied to the issue.
type: String
- contextPath: GitHub.Issue.Assignees
description: Users assigned to the issue.
type: String
- contextPath: GitHub.Issue.Created_at
description: The date the issue was created.
type: Date
- contextPath: GitHub.Issue.Updated_at
description: The date the issue was last updated.
type: Date
- contextPath: GitHub.Issue.Closed_at
description: The date the issue was closed.
type: Date
- contextPath: GitHub.Issue.Closed_by
description: The user who closed the issue.
type: String
- contextPath: GitHub.Issue.Organization
description: The repository organization.
type: String
- arguments:
- auto: PREDEFINED
defaultValue: open
description: The state of the issues to return. Can be 'open', 'closed' or 'all'. Default is 'open'.
name: state
predefined:
- open
- closed
- all
required: true
- defaultValue: '50'
description: The number of issues to return. Default is 50. Maximum is 100.
name: limit
description: Lists all issues that the user has access to view.
name: GitHub-list-all-issues
outputs:
- contextPath: GitHub.Issue.ID
description: The ID of the issue.
type: Number
- contextPath: GitHub.Issue.Repository
description: The repository of the issue.
type: String
- contextPath: GitHub.Issue.Title
description: The title of the issue.
type: String
- contextPath: GitHub.Issue.Body
description: The body of the issue.
type: Unknown
- contextPath: GitHub.Issue.State
description: The state of the issue.
type: String
- contextPath: GitHub.Issue.Labels
description: The labels applied to the issue.
type: String
- contextPath: GitHub.Issue.Assignees
description: Users assigned to the issue.
type: String
- contextPath: GitHub.Issue.Created_at
description: The date the issue was created.
type: Date
- contextPath: GitHub.Issue.Updated_at
description: The date the issue was last updated.
type: Date
- contextPath: GitHub.Issue.Closed_at
description: The date the issue was closed.
type: Date
- contextPath: GitHub.Issue.Closed_by
description: The user who closed the issue.
type: String
- contextPath: GitHub.Issue.Organization
description: The repository organization.
type: String
- arguments:
- description: The query line for the search. For more information see the GitHub documentation at path_to_url
name: query
required: true
- description: The page number.
name: page_number
- description: The size of the requested page. Maximum is 100.
name: page_size
- description: The number of results to return. Default is 50.
name: limit
description: Searches for code in repositories that match a given query.
name: GitHub-search-code
outputs:
- contextPath: GitHub.CodeSearchResults.name
description: The file name where the code is found.
type: String
- contextPath: GitHub.CodeSearchResults.path
description: The full file path where the code is found.
type: String
- contextPath: GitHub.CodeSearchResults.html_url
description: The URL to the file.
type: String
- contextPath: GitHub.CodeSearchResults.repository.full_name
description: The repository name.
type: String
- contextPath: GitHub.CodeSearchResults.repository.html_url
description: The URL to the repository.
type: String
- contextPath: GitHub.CodeSearchResults.repository.description
description: The repository description.
type: String
- contextPath: GitHub.CodeSearchResults.repository.private
description: True if the repository is private, false if public.
type: Boolean
- contextPath: GitHub.CodeSearchResults.repository.id
description: The ID of the repository.
type: String
- contextPath: GitHub.CodeSearchResults.repository.releases_url
description: The URL to the releases of the repository.
type: String
- contextPath: GitHub.CodeSearchResults.repository.branches_url
description: The URL to the branches of the repository.
type: String
- contextPath: GitHub.CodeSearchResults.repository.commits_url
description: The URL to the commits of the repository.
type: String
- arguments:
- description: The query line for the search. For more information see the GitHub documentation at path_to_url
name: query
required: true
- defaultValue: '50'
description: The number of issues to return. Default is 50. Maximum is 100.
name: limit
description: Searches for and returns issues that match a given query.
name: GitHub-search-issues
outputs:
- contextPath: GitHub.Issue.ID
description: The ID of the issue.
type: Number
- contextPath: GitHub.Issue.Repository
description: The repository of the issue.
type: String
- contextPath: GitHub.Issue.Title
description: The title of the issue.
type: String
- contextPath: GitHub.Issue.Body
description: The body of the issue.
type: Unknown
- contextPath: GitHub.Issue.State
description: The state of the issue.
type: String
- contextPath: GitHub.Issue.Labels
description: The labels applied to the issue.
type: String
- contextPath: GitHub.Issue.Assignees
description: Users assigned to the issue.
type: String
- contextPath: GitHub.Issue.Created_at
description: The date the issue was created.
type: Date
- contextPath: GitHub.Issue.Updated_at
description: The date the issue was last updated.
type: Date
- contextPath: GitHub.Issue.Closed_at
description: The date the issue was closed.
type: Date
- contextPath: GitHub.Issue.Closed_by
description: The user who closed the issue.
type: String
- contextPath: GitHub.Issue.Organization
description: The repository organization.
type: String
- description: Returns the total number of downloads for all releases for the specified repository.
name: GitHub-get-download-count
outputs:
- contextPath: GitHub.Release.ID
description: ID of the release.
type: Number
- contextPath: GitHub.Release.Download_count
description: The download count for the release.
type: Number
- contextPath: GitHub.Release.Name
description: The name of the release.
type: String
- contextPath: GitHub.Release.Body
description: The body of the release.
type: String
- contextPath: GitHub.Release.Created_at
description: The date when the release was created.
type: Date
- contextPath: GitHub.Release.Published_at
description: The date when the release was published.
type: Date
- arguments:
- default: true
defaultValue: 3 days
description: Time of inactivity after which a PR is considered stale.
name: stale_time
required: true
- description: The label used to identify PRs of interest.
name: label
description: Gets inactive pull requests.
name: GitHub-get-stale-prs
outputs:
- contextPath: GitHub.PR.URL
description: The html URL of the PR.
type: String
- contextPath: GitHub.PR.Number
description: The GitHub pull request number.
type: Number
- contextPath: GitHub.PR.RequestedReviewer
description: A list of the PR's requested reviewers.
type: Unknown
- arguments:
- description: The name of the branch to retrieve.
name: branch_name
required: true
description: Gets a branch.
name: GitHub-get-branch
outputs:
- contextPath: GitHub.Branch.Name
description: The name of the branch.
type: String
- contextPath: GitHub.Branch.CommitSHA
description: The SHA of the commit the branch references.
type: String
- contextPath: GitHub.Branch.CommitNodeID
description: The Node ID of the commit the branch references.
type: String
- contextPath: GitHub.Branch.CommitAuthorID
description: The GitHub ID number of the author of the commit the branch references.
type: Number
- contextPath: GitHub.Branch.CommitAuthorLogin
description: The GitHub login of the author of the commit the branch references.
type: String
- contextPath: GitHub.Branch.CommitParentSHA
description: The SHAs of parent commits.
type: String
- contextPath: GitHub.Branch.Protected
description: Whether the branch is protected.
type: Boolean
- arguments:
- description: The name for the new branch.
name: branch_name
required: true
- description: The SHA hash of the commit to reference. Try executing the 'GitHub-get-branch' command to find a commit SHA hash to reference.
name: commit_sha
required: true
description: Create a new branch.
name: GitHub-create-branch
- arguments:
- description: The ID number by which the team is identified. Try executing the 'GitHub-list-teams' command to find team IDs to reference.
name: team_id
required: true
- description: The login of the user whose membership you wish to check.
name: user_name
required: true
description: Retrieves a user membership status with a team.
name: GitHub-get-team-membership
outputs:
- contextPath: GitHub.Team.Member.Role
description: The user's role on a team.
type: String
- contextPath: GitHub.Team.Member.State
description: The user's state for a team.
type: String
- contextPath: GitHub.Team.ID
description: The ID number of the team.
type: Number
- contextPath: GitHub.Team.Member.Login
description: The login of the team member.
type: String
- arguments:
- description: The number of the pull request you want to request review for.
name: pull_number
required: true
- description: A CSV list of GitHub users to request review from for a pull request.
isArray: true
name: reviewers
required: true
description: Requests reviews from GitHub users for a given pull request.
name: GitHub-request-review
outputs:
- contextPath: GitHub.PR.Number
description: The number of the pull request.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.Login
description: The login of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.ID
description: The ID of the user requested for review.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.NodeID
description: The node ID of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.Type
description: The type of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.SiteAdmin
description: Whether the user requested for review is a site admin.
type: Boolean
- arguments:
- description: The number of the issue to comment on.
name: issue_number
required: true
- description: The contents of the comment.
name: body
required: true
description: Creates a comment for a given issue.
name: GitHub-create-comment
outputs:
- contextPath: GitHub.Comment.IssueNumber
description: The number of the issue to which the comment belongs.
type: Number
- contextPath: GitHub.Comment.ID
description: The ID of the comment.
type: Number
- contextPath: GitHub.Comment.NodeID
description: The node ID of the comment.
type: String
- contextPath: GitHub.Comment.Body
description: The body content of the comment.
type: String
- contextPath: GitHub.Comment.User.Login
description: The login of the user who commented.
type: String
- contextPath: GitHub.Comment.User.ID
description: The ID of the user who commented.
type: Number
- contextPath: GitHub.Comment.User.NodeID
description: The node ID of the user who commented.
type: String
- contextPath: GitHub.Comment.User.Type
description: The type of the user who commented.
type: String
- contextPath: GitHub.Comment.User.SiteAdmin
description: Whether the user who commented is a site admin.
type: Boolean
- arguments:
- description: The number of the issue to list comments for.
name: issue_number
required: true
- description: 'Only show notifications updated after the given time. This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ.'
name: since
description: Lists comments on an issue.
name: GitHub-list-issue-comments
outputs:
- contextPath: GitHub.Comment.IssueNumber
description: The number of the issue to which the comment belongs.
type: Number
- contextPath: GitHub.Comment.ID
description: The ID of the comment.
type: Number
- contextPath: GitHub.Comment.NodeID
description: The node ID of the comment.
type: String
- contextPath: GitHub.Comment.Body
description: The body content of the comment.
type: String
- contextPath: GitHub.Comment.User.Login
description: The login of the user who commented.
type: String
- contextPath: GitHub.Comment.User.ID
description: The ID of the user who commented.
type: Number
- contextPath: GitHub.Comment.User.NodeID
description: The node ID of the user who commented.
type: String
- contextPath: GitHub.Comment.User.Type
description: The type of the user who commented.
type: String
- contextPath: GitHub.Comment.User.SiteAdmin
description: Whether the user who commented is a site admin.
type: Boolean
- arguments:
- description: The number of the pull request.
name: pull_number
required: true
- description: The name of the organization.
name: organization
- description: The repository of the pull request.
name: repository
description: Lists the pull request files.
name: GitHub-list-pr-files
outputs:
- contextPath: GitHub.PR.Number
description: The number of the pull request.
type: Number
- contextPath: GitHub.PR.File.SHA
description: The SHA hash of the last commit involving the file.
type: String
- contextPath: GitHub.PR.File.Name
description: The name of the file.
type: String
- contextPath: GitHub.PR.File.Status
description: The status of the file.
type: String
- contextPath: GitHub.PR.File.Additions
description: The number of additions to the file.
type: Number
- contextPath: GitHub.PR.File.Deletions
description: The number of deletions in the file.
type: Number
- contextPath: GitHub.PR.File.Changes
description: The number of changes made in the file.
type: Number
- arguments:
- description: The number of the pull request.
name: pull_number
required: true
description: Lists reviews on a pull request.
name: GitHub-list-pr-reviews
outputs:
- contextPath: GitHub.PR.Number
description: The number of the pull request.
type: Number
- contextPath: GitHub.PR.Review.ID
description: The ID of the review.
type: Number
- contextPath: GitHub.PR.Review.NodeID
description: The node ID of the review.
type: String
- contextPath: GitHub.PR.Review.Body
description: The content of the review.
type: String
- contextPath: GitHub.PR.Review.CommitID
description: The ID of the commit the review is for.
type: String
- contextPath: GitHub.PR.Review.State
description: The state of the review.
type: String
- contextPath: GitHub.PR.Review.User.Login
description: The reviewer's user login.
type: String
- contextPath: GitHub.PR.Review.User.ID
description: The reviewer's user ID.
type: Number
- contextPath: GitHub.PR.Review.User.NodeID
description: The reviewer's user node ID.
type: String
- contextPath: GitHub.PR.Review.User.Type
description: The reviewer user type.
type: String
- contextPath: GitHub.PR.Review.User.SiteAdmin
description: Whether the reviewer is a site admin.
type: Boolean
- arguments:
- description: The SHA hash of the commit. Try executing the 'GitHub-get-branch' command to find a commit SHA hash to reference.
name: commit_sha
required: true
description: Gets a commit.
name: GitHub-get-commit
outputs:
- contextPath: GitHub.Commit.SHA
description: The SHA hash of the commit.
type: String
- contextPath: GitHub.Commit.Author.Date
description: The commit author date.
type: String
- contextPath: GitHub.Commit.Author.Name
description: The name of the author.
type: String
- contextPath: GitHub.Commit.Author.Email
description: The email of the author.
type: String
- contextPath: GitHub.Commit.Committer.Date
description: The date the committer committed.
type: String
- contextPath: GitHub.Commit.Committer.Name
description: The name of the committer.
type: String
- contextPath: GitHub.Commit.Committer.Email
description: The email of the committer.
type: String
- contextPath: GitHub.Commit.Message
description: The message associated with the commit.
type: String
- contextPath: GitHub.Commit.Parent
description: Lists of parent SHA hashes.
type: Unknown
- contextPath: GitHub.Commit.TreeSHA
description: The SHA hash of the commit's tree.
type: String
- contextPath: GitHub.Commit.Verification.Verified
description: Whether the commit was verified.
type: Boolean
- contextPath: GitHub.Commit.Verification.Reason
description: The reason why the commit was or was not verified.
type: String
- contextPath: GitHub.Commit.Verification.Signature
description: The commit verification signature.
type: Unknown
- contextPath: GitHub.Commit.Verification.Payload
description: The commit verification payload.
type: Unknown
- arguments:
- description: The number of the issue to add labels to.
name: issue_number
required: true
- description: A CSV list of labels to add to an issue.
isArray: true
name: labels
required: true
description: Adds labels to an issue.
name: GitHub-add-label
- arguments:
- description: The number of the pull request to retrieve.
name: pull_number
required: true
- description: The name of the organization.
name: organization
- description: The repository of the pull request.
name: repository
description: Gets a pull request.
name: GitHub-get-pull-request
outputs:
- contextPath: GitHub.PR.ID
description: The ID number of the pull request.
type: Number
- contextPath: GitHub.PR.NodeID
description: The node ID of the pull request.
type: String
- contextPath: GitHub.PR.Number
description: The issue number of the pull request.
type: Number
- contextPath: GitHub.PR.State
description: The state of the pull request.
type: String
- contextPath: GitHub.PR.Locked
description: Whether the pull request is locked.
type: Boolean
- contextPath: GitHub.PR.Title
description: The title of the pull request.
type: String
- contextPath: GitHub.PR.User.Login
description: The login of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.ID
description: The ID of the user who opened the pull request.
type: Number
- contextPath: GitHub.PR.User.NodeID
description: The node ID of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.Type
description: The type of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.SiteAdmin
description: Whether the user who opened the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.Body
description: The body content of the pull request.
type: String
- contextPath: GitHub.PR.Label.ID
description: The ID of the label.
type: Number
- contextPath: GitHub.PR.Label.NodeID
description: The node ID of the label.
type: String
- contextPath: GitHub.PR.Label.Name
description: The name of the label.
type: String
- contextPath: GitHub.PR.Label.Description
description: The description of the label.
type: String
- contextPath: GitHub.PR.Label.Color
description: The hex color value of the label.
type: String
- contextPath: GitHub.PR.Label.Default
description: Whether the label is a default.
type: Boolean
- contextPath: GitHub.PR.Milestone.ID
description: The ID of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.NodeID
description: The node ID of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Number
description: The number of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.State
description: The state of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Title
description: The title of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Description
description: The description of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Login
description: The login of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.ID
      description: The ID of the milestone creator.
type: Number
- contextPath: GitHub.PR.Milestone.Creator.NodeID
description: The node ID of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Type
description: The type of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.SiteAdmin
description: Whether the milestone creator is a site admin.
type: Boolean
- contextPath: GitHub.PR.Milestone.OpenIssues
description: The number of open issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.ClosedIssues
description: The number of closed issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.CreatedAt
description: The date the milestone was created.
type: String
- contextPath: GitHub.PR.Milestone.UpdatedAt
description: The date the milestone was updated.
type: String
- contextPath: GitHub.PR.Milestone.ClosedAt
description: The date the milestone was closed.
type: String
- contextPath: GitHub.PR.Milestone.DueOn
description: The due date for the milestone.
type: String
- contextPath: GitHub.PR.ActiveLockReason
description: The reason the pull request is locked.
type: String
- contextPath: GitHub.PR.CreatedAt
description: The date the pull request was created.
type: String
- contextPath: GitHub.PR.UpdatedAt
description: The date the pull request was updated.
type: String
- contextPath: GitHub.PR.ClosedAt
description: The date the pull request was closed.
type: String
- contextPath: GitHub.PR.MergedAt
description: The date the pull request was merged.
type: String
- contextPath: GitHub.PR.MergeCommitSHA
description: The SHA hash of the pull request's merge commit.
type: String
- contextPath: GitHub.PR.Assignee.Login
description: The login of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.ID
description: The ID of the user assigned to the pull request.
type: Number
- contextPath: GitHub.PR.Assignee.NodeID
description: The node ID of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.Type
description: The type of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.SiteAdmin
description: Whether the user assigned to the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.RequestedReviewer.Login
description: The login of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.ID
description: The ID of the user requested for review.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.NodeID
description: The node ID of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.Type
description: The type of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.SiteAdmin
description: Whether the user requested for review is a site admin.
type: Boolean
- contextPath: GitHub.PR.RequestedTeam.ID
description: The ID of the team requested for review.
type: Number
- contextPath: GitHub.PR.RequestedTeam.NodeID
description: The node ID of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Name
description: The name of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Slug
description: The slug of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Description
description: The description of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Privacy
description: The privacy setting of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Permission
description: The permissions of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Parent
description: The parent of the team requested for review.
type: Unknown
- contextPath: GitHub.PR.Head.Label
description: The label of the branch that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.Ref
description: The reference of the branch that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.SHA
description: The SHA hash of the commit that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.User.Login
description: The login of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.ID
description: The ID of the committer of the HEAD commit of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.User.NodeID
description: The node ID of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.Type
      description: The type of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.SiteAdmin
description: Whether the committer of the HEAD commit of the checked out branch is a site admin.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.ID
description: The ID of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.NodeID
description: The node ID of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Name
description: The name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.FullName
description: The full name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Login
description: The user login of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.ID
description: The user ID of the owner of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Owner.NodeID
description: The user node ID of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Type
description: The user type of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.SiteAdmin
description: Whether the owner of the repository of the checked out branch is a site admin.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Private
description: Whether the repository of the checked out branch is private or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Description
description: The description of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Fork
description: Whether the repository of the checked out branch is a fork.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Language
description: The language of the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.ForksCount
description: The number of forks of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.StargazersCount
description: The number of stars of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.WatchersCount
description: The number of entities watching the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Size
description: The size of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.DefaultBranch
description: The default branch of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.OpenIssuesCount
description: The open issues of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Topics
description: The topics listed for the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.HasIssues
description: Whether the repository of the checked out branch has issues or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasProjects
description: Whether the repository of the checked out branch has projects or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasWiki
description: Whether the repository of the checked out branch has a wiki or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasPages
description: Whether the repository of the checked out branch has pages.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasDownloads
      description: Whether the repository of the checked out branch has downloads.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Archived
      description: Whether the repository of the checked out branch has been archived.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Disabled
      description: Whether the repository of the checked out branch has been disabled.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.PushedAt
description: The date of the latest push to the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.CreatedAt
description: The date of creation of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.UpdatedAt
description: The date the repository of the checked out branch was last updated.
type: String
- contextPath: GitHub.PR.Head.Repo.AllowRebaseMerge
description: Whether the repository of the checked out branch permits rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowSquashMerge
description: Whether the repository of the checked out branch permits squash merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowMergeCommit
description: Whether the repository of the checked out branch permits merge commits.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.SubscribersCount
description: The number of entities subscribing to the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Base.Label
description: The label of the base branch.
type: String
- contextPath: GitHub.PR.Base.Ref
description: The reference of the base branch.
type: String
- contextPath: GitHub.PR.Base.SHA
description: The SHA hash of the base branch.
type: String
- contextPath: GitHub.PR.Base.User.Login
description: The login of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.ID
description: The ID of the committer of the commit that the base branch points to.
type: Number
- contextPath: GitHub.PR.Base.User.NodeID
description: The node ID of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.Type
description: The user type of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.SiteAdmin
description: Whether the committer of the commit that the base branch points to is a site admin.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.ID
description: The ID of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.NodeID
description: The node ID of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Name
description: The name of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.FullName
description: The full name of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Login
description: The user login of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.ID
description: The user ID of the owner of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Owner.NodeID
description: The user node ID of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Type
description: The user type of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.SiteAdmin
description: Whether the owner of the repository that the base branch belongs to is a site admin.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Private
      description: Whether the repository that the base branch belongs to is private.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Description
description: The description of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Fork
      description: Whether the repository that the base branch belongs to is a fork.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Language
description: The language of the repository that the base branch belongs to.
type: Unknown
- contextPath: GitHub.PR.Base.Repo.ForksCount
description: The number of times that the repository that the base branch belongs to has been forked.
type: Number
- contextPath: GitHub.PR.Base.Repo.StargazersCount
description: The number of times that the repository that the base branch belongs to has been starred.
type: Number
- contextPath: GitHub.PR.Base.Repo.WatchersCount
description: The number of entities watching the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Size
description: The size of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.DefaultBranch
description: The default branch of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.OpenIssuesCount
description: The number of open issues in the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Topics
description: The topics listed for the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.HasIssues
      description: Whether the repository that the base branch belongs to has issues.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasProjects
      description: Whether the repository that the base branch belongs to has projects.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasWiki
      description: Whether the repository that the base branch belongs to has a wiki.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasPages
      description: Whether the repository that the base branch belongs to has pages.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasDownloads
      description: Whether the repository that the base branch belongs to has downloads.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Archived
      description: Whether the repository that the base branch belongs to is archived.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Disabled
      description: Whether the repository that the base branch belongs to is disabled.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.PushedAt
description: The date that the repository that the base branch belongs to was last pushed to.
type: String
- contextPath: GitHub.PR.Base.Repo.CreatedAt
description: The date of creation of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.UpdatedAt
description: The date that the repository that the base branch belongs to was last updated.
type: String
- contextPath: GitHub.PR.Base.Repo.AllowRebaseMerge
description: Whether the repository that the base branch belongs to allows rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowSquashMerge
description: Whether the repository that the base branch belongs to allows squash merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowMergeCommit
description: Whether the repository that the base branch belongs to allows merge commits.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.SubscribersCount
description: The number of entities that subscribe to the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.AuthorAssociation
description: The pull request author association.
type: String
- contextPath: GitHub.PR.Draft
description: Whether the pull request is a draft.
type: Boolean
- contextPath: GitHub.PR.Merged
description: Whether the pull request is merged.
type: Boolean
- contextPath: GitHub.PR.Mergeable
description: Whether the pull request is mergeable.
type: Boolean
- contextPath: GitHub.PR.Rebaseable
description: Whether the pull request is rebaseable.
type: Boolean
- contextPath: GitHub.PR.MergeableState
description: The mergeable state of the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Login
description: The login of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.ID
description: The ID of the user who merged the pull request.
type: Number
- contextPath: GitHub.PR.MergedBy.NodeID
description: The node ID of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Type
description: The type of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.SiteAdmin
description: Whether the user who merged the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.Comments
description: The number of comments on the pull request.
type: Number
- contextPath: GitHub.PR.ReviewComments
description: The number of review comments on the pull request.
type: Number
- contextPath: GitHub.PR.MaintainerCanModify
description: Whether the maintainer can modify the pull request.
type: Boolean
- contextPath: GitHub.PR.Commits
description: The number of commits in the pull request.
type: Number
- contextPath: GitHub.PR.Additions
description: The number of additions in the pull request.
type: Number
- contextPath: GitHub.PR.Deletions
description: The number of deletions in the pull request.
type: Number
- contextPath: GitHub.PR.ChangedFiles
description: The number of changed files in the pull request.
type: Number
- arguments:
- description: The name of the organization.
name: organization
required: true
description: Lists the teams for an organization. Note that this API call is only available to authenticated members of the organization.
name: GitHub-list-teams
outputs:
- contextPath: GitHub.Team.ID
description: The ID of the team.
type: Number
- contextPath: GitHub.Team.NodeID
description: The node ID of the team.
type: String
- contextPath: GitHub.Team.Name
description: The name of the team.
type: String
- contextPath: GitHub.Team.Slug
description: The slug of the team.
type: String
- contextPath: GitHub.Team.Description
description: The description of the team.
type: String
- contextPath: GitHub.Team.Privacy
description: The privacy setting of the team.
type: String
- contextPath: GitHub.Team.Permission
description: The permissions of the team.
type: String
- contextPath: GitHub.Team.Parent
description: The parent of the team.
type: Unknown
- arguments:
- description: The name of the branch to delete.
name: branch_name
required: true
description: Deletes a branch.
name: GitHub-delete-branch
- arguments:
- description: The issue number of the pull request.
name: pull_number
required: true
description: Lists all the review comments for a pull request.
name: GitHub-list-pr-review-comments
outputs:
- contextPath: GitHub.PR.Number
description: The issue number of the pull request.
type: Number
- contextPath: GitHub.PR.ReviewComment.ID
description: The ID number of the pull request review comment.
type: Number
- contextPath: GitHub.PR.ReviewComment.NodeID
description: The Node ID of the pull request review comment.
type: String
- contextPath: GitHub.PR.ReviewComment.PullRequestReviewID
description: The ID of the pull request review.
type: Number
- contextPath: GitHub.PR.ReviewComment.DiffHunk
description: The diff hunk the review comment applies to.
type: String
- contextPath: GitHub.PR.ReviewComment.Path
description: The file path of the proposed file changes the review comment applies to.
type: String
- contextPath: GitHub.PR.ReviewComment.Position
description: The position of the change the review comment applies to.
type: Number
- contextPath: GitHub.PR.ReviewComment.OriginalPosition
description: The original position of the change the review comment applies to.
type: Number
- contextPath: GitHub.PR.ReviewComment.CommitID
description: The commit ID of the proposed change.
type: String
- contextPath: GitHub.PR.ReviewComment.OriginalCommitID
description: The commit ID of the commit before the proposed change.
type: String
- contextPath: GitHub.PR.ReviewComment.InReplyToID
description: The reply ID of the comment the review comment applies to.
type: Number
- contextPath: GitHub.PR.ReviewComment.User.Login
description: The login of the user who created the review comment.
type: String
- contextPath: GitHub.PR.ReviewComment.User.ID
description: The ID of the user who created the review comment.
type: Number
- contextPath: GitHub.PR.ReviewComment.User.NodeID
description: The Node ID of the user who created the review comment.
type: String
- contextPath: GitHub.PR.ReviewComment.User.Type
description: The type of the user who created the review comment.
type: String
- contextPath: GitHub.PR.ReviewComment.User.SiteAdmin
description: Whether the user who created the review comment is a site administrator.
type: Boolean
- contextPath: GitHub.PR.ReviewComment.Body
description: The body content of the review comment.
type: String
- contextPath: GitHub.PR.ReviewComment.CreatedAt
description: The time the review comment was created.
type: String
- contextPath: GitHub.PR.ReviewComment.UpdatedAt
description: The time the review comment was updated.
type: String
- contextPath: GitHub.PR.ReviewComment.AuthorAssociation
description: The association of the user who created the review comment.
type: String
- arguments:
- description: The new title of the pull request.
name: title
- description: The new body content of the pull request.
name: body
- auto: PREDEFINED
description: The new state of the pull request. Can be "open", or "closed".
name: state
predefined:
- open
- closed
- description: The name of the branch to pull your changes from. It must be an existing branch in the current repository. You cannot update the base branch in a pull request to point to another repository.
name: base
- auto: PREDEFINED
description: Indicates whether maintainers can modify the pull request.
name: maintainer_can_modify
predefined:
- 'true'
- 'false'
- description: The issue number of the pull request to modify.
name: pull_number
required: true
description: Updates a pull request in a repository.
name: GitHub-update-pull-request
outputs:
- contextPath: GitHub.PR.ID
description: The ID number of the pull request.
type: Number
- contextPath: GitHub.PR.NodeID
description: The Node ID of the pull request.
type: String
- contextPath: GitHub.PR.Number
description: The issue number of the pull request.
type: Number
- contextPath: GitHub.PR.State
description: The state of the pull request.
type: String
- contextPath: GitHub.PR.Locked
description: Whether the pull request is locked.
type: Boolean
- contextPath: GitHub.PR.Title
description: The title of the pull request.
type: String
- contextPath: GitHub.PR.User.Login
description: The login of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.ID
description: The ID of the user who opened the pull request.
type: Number
- contextPath: GitHub.PR.User.NodeID
description: The Node ID of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.Type
description: The type of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.SiteAdmin
description: Whether the user who opened the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Body
description: The body content of the pull request.
type: String
- contextPath: GitHub.PR.Label.ID
description: The ID of the label.
type: Number
- contextPath: GitHub.PR.Label.NodeID
description: The Node ID of the label.
type: String
- contextPath: GitHub.PR.Label.Name
description: The name of the label.
type: String
- contextPath: GitHub.PR.Label.Description
description: The description of the label.
type: String
- contextPath: GitHub.PR.Label.Color
description: The hex color value of the label.
type: String
- contextPath: GitHub.PR.Label.Default
description: Whether the label is a default.
type: Boolean
- contextPath: GitHub.PR.Milestone.ID
description: The ID of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.NodeID
description: The Node ID of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Number
description: The number of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.State
description: The state of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Title
description: The title of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Description
description: The description of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Login
description: The login of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.ID
      description: The ID of the milestone creator.
type: Number
- contextPath: GitHub.PR.Milestone.Creator.NodeID
description: The Node ID of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Type
description: The type of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.SiteAdmin
description: Whether the milestone creator is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Milestone.OpenIssues
description: The number of open issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.ClosedIssues
description: The number of closed issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.CreatedAt
description: The date the milestone was created.
type: String
- contextPath: GitHub.PR.Milestone.UpdatedAt
description: The date the milestone was updated.
type: String
- contextPath: GitHub.PR.Milestone.ClosedAt
description: The date the milestone was closed.
type: String
- contextPath: GitHub.PR.Milestone.DueOn
description: The due date for the milestone.
type: String
- contextPath: GitHub.PR.ActiveLockReason
description: The reason the pull request is locked.
type: String
- contextPath: GitHub.PR.CreatedAt
description: The date the pull request was created.
type: String
- contextPath: GitHub.PR.UpdatedAt
description: The date the pull request was updated.
type: String
- contextPath: GitHub.PR.ClosedAt
description: The date the pull request was closed.
type: String
- contextPath: GitHub.PR.MergedAt
description: The date the pull request was merged.
type: String
- contextPath: GitHub.PR.MergeCommitSHA
description: The SHA hash of the pull request's merge commit.
type: String
- contextPath: GitHub.PR.Assignee.Login
description: The login of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.ID
description: The ID of the user assigned to the pull request.
type: Number
- contextPath: GitHub.PR.Assignee.NodeID
description: The Node ID of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.Type
description: The type of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.SiteAdmin
      description: Whether the user assigned to the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.RequestedReviewer.Login
description: The login of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.ID
description: The ID of the user requested for review.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.NodeID
description: The Node ID of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.Type
description: The type of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.SiteAdmin
description: Whether the user requested for review is a site administrator.
type: Boolean
- contextPath: GitHub.PR.RequestedTeam.ID
description: The ID of the team requested for review.
type: Number
- contextPath: GitHub.PR.RequestedTeam.NodeID
description: The Node ID of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Name
description: The name of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Slug
description: The slug of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Description
description: The description of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Privacy
description: The privacy setting of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Permission
description: The permissions of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Parent
description: The parent of the team requested for review.
type: Unknown
- contextPath: GitHub.PR.Head.Label
description: The label of the branch the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.Ref
description: The reference of the branch the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.SHA
description: The SHA hash of the commit the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.User.Login
description: The committer login of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.ID
description: The committer ID of the HEAD commit of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.User.NodeID
description: The Node committer ID of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.Type
description: The committer type of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.SiteAdmin
description: Whether the committer of the HEAD commit of the checked out branch is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.ID
description: The ID of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.NodeID
description: The Node ID of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Name
description: The name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.FullName
description: The full name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Login
description: The user login of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.ID
description: The user ID of the owner of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Owner.NodeID
description: The user node ID of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Type
description: The user type of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.SiteAdmin
description: Whether the owner of the repository of the checked out branch is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Private
description: Whether the repository of the checked out branch is private.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Description
description: The description of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Fork
description: Whether the repository of the checked out branch is a fork.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Language
description: The language of the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.ForksCount
description: The number of forks of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.StargazersCount
description: The number of stars of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.WatchersCount
description: The number of entities watching the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Size
description: The size of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.DefaultBranch
description: The default branch of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.OpenIssuesCount
description: The open issues of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Topics
description: The topics listed for the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.HasIssues
description: Whether the repository of the checked out branch has issues.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasProjects
description: Whether the repository of the checked out branch has projects.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasWiki
description: Whether the repository of the checked out branch has a wiki.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasPages
description: Whether the repository of the checked out branch has pages.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasDownloads
description: Whether the repository of the checked out branch has downloads.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Archived
description: Whether the repository of the checked out branch has been archived.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Disabled
description: Whether the repository of the checked out branch has been disabled.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.PushedAt
description: The date of the latest push to the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.CreatedAt
description: The date of creation of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.UpdatedAt
description: The date the repository of the checked out branch was last updated.
type: String
- contextPath: GitHub.PR.Head.Repo.AllowRebaseMerge
description: Whether the repository of the checked out branch permits rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowSquashMerge
description: Whether the repository of the checked out branch permits squash merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowMergeCommit
description: Whether the repository of the checked out branch permits merge commits.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.SubscribersCount
description: The number of entities subscribing to the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Base.Label
description: The label of the base branch.
type: String
- contextPath: GitHub.PR.Base.Ref
description: The reference of the base branch.
type: String
- contextPath: GitHub.PR.Base.SHA
description: The SHA hash of the base branch.
type: String
- contextPath: GitHub.PR.Base.User.Login
description: The committer login of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.ID
description: The ID of the committer of the commit the base branch points to.
type: Number
- contextPath: GitHub.PR.Base.User.NodeID
description: The committer Node ID of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.Type
description: The user committer type of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.SiteAdmin
description: Whether the committer of the commit the base branch points to is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.ID
description: The ID of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.NodeID
description: The Node ID of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Name
description: The name of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.FullName
description: The full name of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Login
description: The user login of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.ID
description: The user ID of the owner of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Owner.NodeID
description: The user node ID of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Type
description: The user type of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.SiteAdmin
description: Whether the owner of the repository the base branch belongs to is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Private
description: Whether the repository the base branch belongs to is private.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Description
description: The description of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Fork
description: Whether the repository the base branch belongs to is a fork.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Language
description: The language of the repository the base branch belongs to.
type: Unknown
- contextPath: GitHub.PR.Base.Repo.ForksCount
description: The number of times that the repository the base branch belongs to has been forked.
type: Number
- contextPath: GitHub.PR.Base.Repo.StargazersCount
description: The number of times that the repository the base branch belongs to has been starred.
type: Number
- contextPath: GitHub.PR.Base.Repo.WatchersCount
description: The number of entities watching the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Size
description: The size of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.DefaultBranch
description: The default branch of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.OpenIssuesCount
description: The number of open issues in the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Topics
description: The topics listed for the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.HasIssues
description: Whether the repository the base branch belongs to has issues.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasProjects
description: Whether the repository the base branch belongs to has projects.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasWiki
description: Whether the repository the base branch belongs to has a wiki.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasPages
description: Whether the repository the base branch belongs to has pages.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasDownloads
description: Whether the repository the base branch belongs to has downloads.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Archived
description: Whether the repository the base branch belongs to is archived.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Disabled
description: Whether the repository the base branch belongs to is disabled.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.PushedAt
description: The date that the repository the base branch belongs to was last pushed.
type: String
- contextPath: GitHub.PR.Base.Repo.CreatedAt
description: The date of creation of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.UpdatedAt
description: The date that the repository the base branch belongs to was last updated.
type: String
- contextPath: GitHub.PR.Base.Repo.AllowRebaseMerge
description: Whether the repository the base branch belongs to allows rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowSquashMerge
description: Whether the repository the base branch belongs to allows squash merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowMergeCommit
description: Whether the repository the base branch belongs to allows merge commits.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.SubscribersCount
description: The number of entities subscribed to the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.AuthorAssociation
description: The pull request author association.
type: String
- contextPath: GitHub.PR.Draft
description: Whether the pull request is a draft.
type: Boolean
- contextPath: GitHub.PR.Merged
description: Whether the pull request is merged.
type: Boolean
- contextPath: GitHub.PR.Mergeable
description: Whether the pull request is mergeable.
type: Boolean
- contextPath: GitHub.PR.Rebaseable
description: Whether the pull request is rebaseable.
type: Boolean
- contextPath: GitHub.PR.MergeableState
description: The mergeable state of the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Login
description: The login of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.ID
description: The ID of the user who merged the pull request.
type: Number
- contextPath: GitHub.PR.MergedBy.NodeID
description: The Node ID of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Type
description: The type of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.SiteAdmin
description: Whether the user who merged the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Comments
description: The number of comments on the pull request.
type: Number
- contextPath: GitHub.PR.ReviewComments
description: The number of review comments on the pull request.
type: Number
- contextPath: GitHub.PR.MaintainerCanModify
description: Whether the maintainer can modify the pull request.
type: Boolean
- contextPath: GitHub.PR.Commits
description: The number of commits in the pull request.
type: Number
- contextPath: GitHub.PR.Additions
description: The number of additions in the pull request.
type: Number
- contextPath: GitHub.PR.Deletions
description: The number of deletions in the pull request.
type: Number
- contextPath: GitHub.PR.ChangedFiles
description: The number of changed files in the pull request.
type: Number
- arguments:
- description: The issue number of the pull request to check.
name: pull_number
required: true
description: 'Returns a merged pull request. If the pull request has been merged, the API returns ''Status: 204 No Content''. If the pull request has not been merged the API returns ''Status: 404 Not Found''.'
name: GitHub-is-pr-merged
- arguments:
- description: The title of the pull request.
name: title
required: true
- description: The name of the branch where the changes are made.
name: head
required: true
- description: The name of the branch you want the changes pulled into, which must be an existing branch on the current repository.
name: base
required: true
- description: The contents of the pull request.
name: body
- auto: PREDEFINED
description: Indicates whether maintainers can modify the pull request.
name: maintainer_can_modify
predefined:
- 'true'
- 'false'
- auto: PREDEFINED
description: Indicates whether the pull request is a draft. For more information, see path_to_url#draft-pull-requests.
name: draft
predefined:
- 'true'
- 'false'
description: Creates a new pull request.
name: GitHub-create-pull-request
outputs:
- contextPath: GitHub.PR.ID
description: The ID number of the pull request.
type: Number
- contextPath: GitHub.PR.NodeID
description: The Node ID of the pull request.
type: String
- contextPath: GitHub.PR.Number
description: The issue number of the pull request.
type: Number
- contextPath: GitHub.PR.State
description: The state of the pull request.
type: String
- contextPath: GitHub.PR.Locked
description: Whether the pull request is locked.
type: Boolean
- contextPath: GitHub.PR.Title
description: The title of the pull request.
type: String
- contextPath: GitHub.PR.User.Login
description: The login of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.ID
description: The ID of the user who opened the pull request.
type: Number
- contextPath: GitHub.PR.User.NodeID
description: The Node ID of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.Type
description: The user type who opened the pull request.
type: String
- contextPath: GitHub.PR.User.SiteAdmin
description: Whether the user who opened the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Body
description: The body content of the pull request.
type: String
- contextPath: GitHub.PR.Label.ID
description: The ID of the label.
type: Number
- contextPath: GitHub.PR.Label.NodeID
description: The Node ID of the label.
type: String
- contextPath: GitHub.PR.Label.Name
description: The name of the label.
type: String
- contextPath: GitHub.PR.Label.Description
description: The description of the label.
type: String
- contextPath: GitHub.PR.Label.Color
description: The hex color value of the label.
type: String
- contextPath: GitHub.PR.Label.Default
description: Whether the label is a default.
type: Boolean
- contextPath: GitHub.PR.Milestone.ID
description: The ID of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.NodeID
description: The Node ID of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Number
description: The number of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.State
description: The state of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Title
description: The title of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Description
description: The description of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Login
description: The login of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.ID
description: The ID of the milestone creator.
type: Number
- contextPath: GitHub.PR.Milestone.Creator.NodeID
description: The Node ID of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Type
description: The type of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.SiteAdmin
description: Whether the milestone creator is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Milestone.OpenIssues
description: The number of open issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.ClosedIssues
description: The number of closed issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.CreatedAt
description: The date the milestone was created.
type: String
- contextPath: GitHub.PR.Milestone.UpdatedAt
description: The date the milestone was updated.
type: String
- contextPath: GitHub.PR.Milestone.ClosedAt
description: The date the milestone was closed.
type: String
- contextPath: GitHub.PR.Milestone.DueOn
description: The due date for the milestone.
type: String
- contextPath: GitHub.PR.ActiveLockReason
description: The reason the pull request is locked.
type: String
- contextPath: GitHub.PR.CreatedAt
description: The date the pull request was created.
type: String
- contextPath: GitHub.PR.UpdatedAt
description: The date the pull request was updated.
type: String
- contextPath: GitHub.PR.ClosedAt
description: The date the pull request was closed.
type: String
- contextPath: GitHub.PR.MergedAt
description: The date the pull request was merged.
type: String
- contextPath: GitHub.PR.MergeCommitSHA
description: The SHA hash of the pull request's merge commit.
type: String
- contextPath: GitHub.PR.Assignee.Login
description: The login of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.ID
description: The ID of the user assigned to the pull request.
type: Number
- contextPath: GitHub.PR.Assignee.NodeID
description: The Node ID of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.Type
description: The type of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.SiteAdmin
description: Whether the user assigned to the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.RequestedReviewer.Login
description: The login of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.ID
description: The ID of the user requested for review.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.NodeID
description: The Node ID of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.Type
description: The type of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.SiteAdmin
description: Whether the user requested for review is a site administrator.
type: Boolean
- contextPath: GitHub.PR.RequestedTeam.ID
description: The ID of the team requested for review.
type: Number
- contextPath: GitHub.PR.RequestedTeam.NodeID
description: The Node ID of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Name
description: The name of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Slug
description: The slug of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Description
description: The description of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Privacy
description: The privacy setting of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Permission
description: The permissions of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Parent
description: The parent of the team requested for review.
type: Unknown
- contextPath: GitHub.PR.Head.Label
description: The label of the branch the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.Ref
description: The reference of the branch the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.SHA
description: The SHA hash of the commit the HEAD points to.
type: String
- contextPath: GitHub.PR.Head.User.Login
description: The committer login of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.ID
description: The committer ID of the HEAD commit of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.User.NodeID
description: The Node ID of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.Type
description: The committer type of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.SiteAdmin
description: Whether the committer of the HEAD commit of the checked out branch is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.ID
description: The ID of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.NodeID
description: The Node ID of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Name
description: The name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.FullName
description: The full name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Login
description: The user login of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.ID
description: The user ID of the owner of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Owner.NodeID
description: The user Node ID of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Type
description: The user type of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.SiteAdmin
description: Whether the owner of the repository of the checked out branch is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Private
description: Whether the repository of the checked out branch is private.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Description
description: The description of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Fork
description: Whether the repository of the checked out branch is a fork.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Language
description: The language of the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.ForksCount
description: The number of forks of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.StargazersCount
description: The number of stars of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.WatchersCount
description: The number of entities watching the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Size
description: The size of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.DefaultBranch
description: The default branch of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.OpenIssuesCount
description: The open issues of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Topics
description: The topics listed for the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.HasIssues
description: Whether the repository of the checked out branch has issues.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasProjects
description: Whether the repository of the checked out branch has projects.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasWiki
description: Whether the repository of the checked out branch has a wiki.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasPages
description: Whether the repository of the checked out branch has pages.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasDownloads
description: Whether the repository of the checked out branch has downloads.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Archived
description: Whether the repository of the checked out branch has been archived.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Disabled
description: Whether the repository of the checked out branch has been disabled.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.PushedAt
description: The date of the latest push to the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.CreatedAt
description: The date of creation of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.UpdatedAt
description: The date the repository of the checked out branch was last updated.
type: String
- contextPath: GitHub.PR.Head.Repo.AllowRebaseMerge
description: Whether the repository of the checked out branch permits rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowSquashMerge
description: Whether the repository of the checked out branch permits squash merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowMergeCommit
description: Whether the repository of the checked out branch permits merge commits.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.SubscribersCount
description: The number of entities subscribing to the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Base.Label
description: The label of the base branch.
type: String
- contextPath: GitHub.PR.Base.Ref
description: The reference of the base branch.
type: String
- contextPath: GitHub.PR.Base.SHA
description: The SHA hash of the base branch.
type: String
- contextPath: GitHub.PR.Base.User.Login
description: The committer login of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.ID
description: The ID of the committer of the commit the base branch points to.
type: Number
- contextPath: GitHub.PR.Base.User.NodeID
description: The committer Node ID of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.Type
description: The user type of the committer of the commit the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.SiteAdmin
description: Whether the committer of the commit the base branch points to is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.ID
description: The ID of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.NodeID
description: The Node ID of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Name
description: The name of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.FullName
description: The full name of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Login
description: The user login of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.ID
description: The user ID of the owner of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Owner.NodeID
description: The user node ID of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Type
description: The user type of the owner of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.SiteAdmin
description: Whether the owner of the repository that the base branch belongs to is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Private
description: Whether the repository the base branch belongs to is private.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Description
description: The description of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Fork
description: Whether the repository that the base branch belongs to is a fork.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Language
description: The language of the repository the base branch belongs to.
type: Unknown
- contextPath: GitHub.PR.Base.Repo.ForksCount
description: The number of times that the repository the base branch belongs to has been forked.
type: Number
- contextPath: GitHub.PR.Base.Repo.StargazersCount
description: The number of times that the repository that the base branch belongs to has been starred.
type: Number
- contextPath: GitHub.PR.Base.Repo.WatchersCount
description: The number of entities watching the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Size
description: The size of the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.DefaultBranch
description: The default branch of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.OpenIssuesCount
description: The number of open issues in the repository the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Topics
description: The topics listed for the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.HasIssues
description: Whether the repository the base branch belongs to has issues.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasProjects
description: Whether the repository the base branch belongs to has projects.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasWiki
description: Whether the repository the base branch belongs to has a wiki.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasPages
description: Whether the repository the base branch belongs to has pages.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasDownloads
description: Whether the repository the base branch belongs to has downloads.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Archived
description: Whether the repository the base branch belongs to is archived.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Disabled
description: Whether the repository the base branch belongs to is disabled.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.PushedAt
description: The date that the repository the base branch belongs to was last pushed.
type: String
- contextPath: GitHub.PR.Base.Repo.CreatedAt
description: The date of creation of the repository the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.UpdatedAt
description: The date that the repository the base branch belongs to was last updated.
type: String
- contextPath: GitHub.PR.Base.Repo.AllowRebaseMerge
description: Whether the repository the base branch belongs to allows rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowSquashMerge
description: Whether the repository the base branch belongs to allows squash merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowMergeCommit
description: Whether the repository the base branch belongs to allows merge commits.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.SubscribersCount
description: The number of entities that subscribe to the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.AuthorAssociation
description: The pull request author association.
type: String
- contextPath: GitHub.PR.Draft
description: Whether the pull request is a draft.
type: Boolean
- contextPath: GitHub.PR.Merged
description: Whether the pull request is merged.
type: Boolean
- contextPath: GitHub.PR.Mergeable
description: Whether the pull request is mergeable.
type: Boolean
- contextPath: GitHub.PR.Rebaseable
description: Whether the pull request is rebaseable.
type: Boolean
- contextPath: GitHub.PR.MergeableState
description: The mergeable state of the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Login
description: The login of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.ID
description: The ID of the user who merged the pull request.
type: Number
- contextPath: GitHub.PR.MergedBy.NodeID
description: The Node ID of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Type
description: The type of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.SiteAdmin
description: Whether the user who merged the pull request is a site administrator.
type: Boolean
- contextPath: GitHub.PR.Comments
description: The number of comments on the pull request.
type: Number
- contextPath: GitHub.PR.ReviewComments
description: The number of review comments on the pull request.
type: Number
- contextPath: GitHub.PR.MaintainerCanModify
description: Whether the maintainer can modify the pull request.
type: Boolean
- contextPath: GitHub.PR.Commits
description: The number of commits in the pull request.
type: Number
- contextPath: GitHub.PR.Additions
description: The number of additions in the pull request.
type: Number
- contextPath: GitHub.PR.Deletions
description: The number of deletions in the pull request.
type: Number
- contextPath: GitHub.PR.ChangedFiles
description: The number of changed files in the pull request.
type: Number
- arguments:
- description: The repository owner.
name: owner
required: true
description: Gets the usage details of GitHub action workflows of private repositories by repository owner.
name: Github-get-github-actions-usage
outputs:
- contextPath: GitHub.ActionsUsage.RepositoryName
description: The name of the private repository.
type: String
- contextPath: GitHub.ActionsUsage.WorkflowID
description: The workflow ID of the GitHub action.
type: Number
- contextPath: GitHub.ActionsUsage.WorkflowName
description: The display name of the GitHub action workflow.
type: String
- contextPath: GitHub.ActionsUsage.WorkflowUsage
description: The GitHub action workflow usage on different OS.
type: Unknown
- arguments:
- description: Organization or Owner.
name: owner
required: true
- description: Git Repository Name.
name: repository
required: true
- description: Check Run ID.
name: run_id
- description: Head Commit ID.
name: commit_id
description: Gets a check run details.
name: Github-get-check-run
outputs:
- contextPath: GitHub.CheckRuns.CheckRunConclusion
description: Check Run Conclusion.
type: String
- contextPath: GitHub.CheckRuns.CheckRunAppName
description: Check Run App Name.
type: String
- contextPath: GitHub.CheckRuns.CheckExternalID
description: Check Run External ID.
type: String
- contextPath: GitHub.CheckRuns.CheckRunName
description: Check Run Name.
type: String
- contextPath: GitHub.CheckRuns.CheckRunStatus
description: Check Run Status.
type: String
- contextPath: GitHub.CheckRuns.CheckRunID
description: Check Run ID.
type: String
- arguments:
- default: true
description: The path of the file.
name: file_path
required: true
- description: The branch name to get the file from.
name: branch_name
- defaultValue: raw
description: 'The media type in which the file contents will be fetched. Possible values are: "raw" and "html". Default value is "raw".'
name: media_type
predefined:
- raw
- html
- defaultValue: 'false'
description: 'Whether to create a file entry in the War Room with the file contents. Possible values are: "true" and "false". Default value is "false".'
name: create_file_from_content
predefined:
- 'true'
- 'false'
- description: The name of the organization.
name: organization
- description: The name of the repository.
name: repository
description: Gets the content of a file from GitHub.
name: GitHub-get-file-content
outputs:
- contextPath: GitHub.FileContent.Path
description: The path of the file.
type: String
- contextPath: GitHub.FileContent.Content
description: The content of the file.
type: Number
- contextPath: GitHub.FileContent.MediaType
description: The media type in which the file was fetched.
type: String
- contextPath: GitHub.FileContent.Branch
description: The branch from which the file was fetched.
type: Unknown
- arguments:
- description: The path in the branch to get the files from.
name: path
- description: The name of the organization.
name: organization
- description: The name of the repository.
name: repository
- description: The branch name from which to get the files.
name: branch
description: Gets list of files from the given path in the repository.
name: Github-list-files
outputs:
- contextPath: GitHub.File.Name
description: The name of the file.
type: String
- contextPath: GitHub.File.Type
description: Whether the item is file or directory.
type: String
- contextPath: GitHub.File.Size
description: The size of the file in bytes.
type: Number
- contextPath: GitHub.File.Path
description: The file path inside the repository.
type: String
- contextPath: GitHub.File.DownloadUrl
description: The link to download the file content.
type: String
- contextPath: GitHub.File.SHA
description: The SHA of the file.
type: String
- arguments:
- description: The name of the organization.
name: organization
required: true
- description: Team name.
name: team_slug
required: true
- defaultValue: '30'
description: Maximum number of users to return.
name: maximum_users
description: Lists team members.
name: GitHub-list-team-members
outputs:
- contextPath: GitHub.TeamMember.ID
description: The ID of the team member.
type: Number
- contextPath: GitHub.TeamMember.Login
description: The login name of the team member.
type: String
- contextPath: GitHub.TeamMember.Team
description: The user's team.
type: String
- arguments:
- description: Commit message.
name: commit_message
required: true
- description: The path to the file in the Github repo (including file name and file ending).
name: path_to_file
required: true
- description: The entry ID for the file to commit. Either "entry_id" or "file_text" must be provided.
name: entry_id
- description: The plain text for the file to commit. Either "entry_id" or "file_text" must be provided.
name: file_text
- description: The branch name.
name: branch_name
required: true
- description: The blob SHA of the file being replaced. Use the GitHub-list-files command to get the SHA value of the file. Required if you are updating a file.
name: file_sha
description: Commits a given file.
name: Github-commit-file
- arguments:
- description: The name of the release.
name: name
- description: The name of the release tag.
name: tag_name
required: true
- description: Text describing the contents of the tag.
name: body
- description: The target branch/commit SHA from where to create the release.
name: ref
- auto: PREDEFINED
defaultValue: 'True'
description: Whether to create a draft (unpublished) release.
name: draft
predefined:
- 'True'
- 'False'
description: Create a release.
name: GitHub-create-release
outputs:
- contextPath: GitHub.Release.draft
description: Whether the release is a draft.
type: Boolean
- contextPath: GitHub.Release.html_url
description: The release URL.
type: String
- contextPath: GitHub.Release.id
description: The ID of the release.
type: Number
- contextPath: GitHub.Release.url
description: GitHub API URL link to the release.
type: String
- arguments:
- description: The branch name from which to retrieve pull requests.
name: branch_name
required: true
- description: The name of the organization.
name: organization
- description: The repository for the pull request. Defaults to the repository parameter if not provided.
name: repository
description: Gets pull requests corresponding to the given branch name.
name: GitHub-list-branch-pull-requests
outputs:
- contextPath: GitHub.PR.ID
description: The ID number of the pull request.
type: Number
- contextPath: GitHub.PR.NodeID
description: The node ID of the pull request.
type: String
- contextPath: GitHub.PR.Number
description: The issue number of the pull request.
type: Number
- contextPath: GitHub.PR.State
description: The state of the pull request.
type: String
- contextPath: GitHub.PR.Locked
description: Whether the pull request is locked.
type: Boolean
- contextPath: GitHub.PR.User.Login
description: The login of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.ID
description: The ID of the user who opened the pull request.
type: Number
- contextPath: GitHub.PR.User.NodeID
description: The node ID of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.Type
description: The type of the user who opened the pull request.
type: String
- contextPath: GitHub.PR.User.SiteAdmin
description: Whether the user who opened the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.Body
description: The body content of the pull request.
type: String
- contextPath: GitHub.PR.Label.ID
description: The ID of the label.
type: Number
- contextPath: GitHub.PR.Label.NodeID
description: The node ID of the label.
type: String
- contextPath: GitHub.PR.Label.Name
description: The name of the label.
type: String
- contextPath: GitHub.PR.Label.Description
description: The description of the label.
type: String
- contextPath: GitHub.PR.Label.Color
description: The hex color value of the label.
type: String
- contextPath: GitHub.PR.Label.Default
description: Whether the label is a default.
type: Boolean
- contextPath: GitHub.PR.Milestone.ID
description: The ID of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.NodeID
description: The node ID of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Number
description: The number of the milestone.
type: Number
- contextPath: GitHub.PR.Milestone.State
description: The state of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Title
description: The title of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Description
description: The description of the milestone.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Login
description: The login of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.ID
description: The ID of the milestone creator.
type: Number
- contextPath: GitHub.PR.Milestone.Creator.NodeID
description: The node ID of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.Type
description: The type of the milestone creator.
type: String
- contextPath: GitHub.PR.Milestone.Creator.SiteAdmin
description: Whether the milestone creator is a site admin.
type: Boolean
- contextPath: GitHub.PR.Milestone.OpenIssues
description: The number of open issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.ClosedIssues
description: The number of closed issues with this milestone.
type: Number
- contextPath: GitHub.PR.Milestone.CreatedAt
description: The date the milestone was created.
type: String
- contextPath: GitHub.PR.Milestone.UpdatedAt
description: The date the milestone was updated.
type: String
- contextPath: GitHub.PR.Milestone.ClosedAt
description: The date the milestone was closed.
type: String
- contextPath: GitHub.PR.Milestone.DueOn
description: The due date for the milestone.
type: String
- contextPath: GitHub.PR.ActiveLockReason
description: The reason the pull request is locked.
type: String
- contextPath: GitHub.PR.CreatedAt
description: The date the pull request was created.
type: String
- contextPath: GitHub.PR.UpdatedAt
description: The date the pull request was updated.
type: String
- contextPath: GitHub.PR.ClosedAt
description: The date the pull request was closed.
type: String
- contextPath: GitHub.PR.MergedAt
description: The date the pull request was merged.
type: String
- contextPath: GitHub.PR.MergeCommitSHA
description: The SHA hash of the pull request's merge commit.
type: String
- contextPath: GitHub.PR.Assignee.Login
description: The login of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.ID
description: The ID of the user assigned to the pull request.
type: Number
- contextPath: GitHub.PR.Assignee.NodeID
description: The node ID of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.Type
description: The type of the user assigned to the pull request.
type: String
- contextPath: GitHub.PR.Assignee.SiteAdmin
description: Whether the user assigned to the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.RequestedReviewer.Login
description: The login of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.ID
description: The ID of the user requested for review.
type: Number
- contextPath: GitHub.PR.RequestedReviewer.NodeID
description: The node ID of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.Type
description: The type of the user requested for review.
type: String
- contextPath: GitHub.PR.RequestedReviewer.SiteAdmin
description: Whether the user requested for review is a site admin.
type: Boolean
- contextPath: GitHub.PR.RequestedTeam.ID
description: The ID of the team requested for review.
type: Number
- contextPath: GitHub.PR.RequestedTeam.NodeID
description: The node ID of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Name
description: The name of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Slug
description: The slug of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Description
description: The description of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Privacy
description: The privacy setting of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Permission
description: The permissions of the team requested for review.
type: String
- contextPath: GitHub.PR.RequestedTeam.Parent
description: The parent of the team requested for review.
type: Unknown
- contextPath: GitHub.PR.Head.Label
description: The label of the branch that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.Ref
description: The reference of the branch that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.SHA
description: The SHA hash of the commit that HEAD points to.
type: String
- contextPath: GitHub.PR.Head.User.Login
description: The login of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.ID
description: The ID of the committer of the HEAD commit of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.User.NodeID
description: The node ID of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.Type
description: The type of the committer of the HEAD commit of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.User.SiteAdmin
description: Whether the committer of the HEAD commit of the checked out branch is a site admin.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.ID
description: The ID of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.NodeID
description: The node ID of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Name
description: The name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.FullName
description: The full name of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Login
description: The user login of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.ID
description: The user ID of the owner of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Owner.NodeID
description: The user node ID of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.Type
description: The user type of the owner of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Owner.SiteAdmin
description: Whether the owner of the repository of the checked out branch is a site admin.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Private
description: Whether the repository of the checked out branch is private or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Description
description: The description of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.Fork
description: Whether the repository of the checked out branch is a fork.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Language
description: The language of the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.ForksCount
description: The number of forks of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.StargazersCount
description: The number of stars of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.WatchersCount
description: The number of entities watching the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Size
description: The size of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.DefaultBranch
description: The default branch of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.OpenIssuesCount
description: The open issues of the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Head.Repo.Topics
description: The topics listed for the repository of the checked out branch.
type: Unknown
- contextPath: GitHub.PR.Head.Repo.HasIssues
description: Whether the repository of the checked out branch has issues or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasProjects
description: Whether the repository of the checked out branch has projects or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasWiki
description: Whether the repository of the checked out branch has a wiki or not.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasPages
description: Whether the repository of the checked out branch has pages.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.HasDownloads
description: Whether the repository of the checked out branch has downloads.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Archived
description: Whether the repository of the checked out branch has been archived.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.Disabled
description: Whether the repository of the checked out branch has been disabled.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.PushedAt
description: The date of the latest push to the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.CreatedAt
description: The date of creation of the repository of the checked out branch.
type: String
- contextPath: GitHub.PR.Head.Repo.UpdatedAt
description: The date the repository of the checked out branch was last updated.
type: String
- contextPath: GitHub.PR.Head.Repo.AllowRebaseMerge
description: Whether the repository of the checked out branch permits rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowSquashMerge
description: Whether the repository of the checked out branch permits squash merges.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.AllowMergeCommit
description: Whether the repository of the checked out branch permits merge commits.
type: Boolean
- contextPath: GitHub.PR.Head.Repo.SubscribersCount
description: The number of entities subscribing to the repository of the checked out branch.
type: Number
- contextPath: GitHub.PR.Base.Label
description: The label of the base branch.
type: String
- contextPath: GitHub.PR.Base.Ref
description: The reference of the base branch.
type: String
- contextPath: GitHub.PR.Base.SHA
description: The SHA hash of the base branch.
type: String
- contextPath: GitHub.PR.Base.User.Login
description: The login of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.ID
description: The ID of the committer of the commit that the base branch points to.
type: Number
- contextPath: GitHub.PR.Base.User.NodeID
description: The node ID of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.Type
description: The user type of the committer of the commit that the base branch points to.
type: String
- contextPath: GitHub.PR.Base.User.SiteAdmin
description: Whether the committer of the commit that the base branch points to is a site admin.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.ID
description: The ID of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.NodeID
description: The node ID of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Name
description: The name of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.FullName
description: The full name of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Login
description: The user login of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.ID
description: The user ID of the owner of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Owner.NodeID
description: The user node ID of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.Type
description: The user type of the owner of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Owner.SiteAdmin
description: Whether the owner of the repository that the base branch belongs to is a site admin.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Private
description: Whether the repository that the base branch belongs to is private.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Description
description: The description of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.Fork
description: Whether the repository that the base branch belongs to is a fork.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Language
description: The language of the repository that the base branch belongs to.
type: Unknown
- contextPath: GitHub.PR.Base.Repo.ForksCount
description: The number of times that the repository that the base branch belongs to has been forked.
type: Number
- contextPath: GitHub.PR.Base.Repo.StargazersCount
description: The number of times that the repository that the base branch belongs to has been starred.
type: Number
- contextPath: GitHub.PR.Base.Repo.WatchersCount
description: The number of entities watching the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Size
description: The size of the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.DefaultBranch
description: The default branch of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.OpenIssuesCount
description: The number of open issues in the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.Base.Repo.Topics
description: The topics listed for the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.HasIssues
description: Whether the repository that the base branch belongs to has issues.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasProjects
description: Whether the repository that the base branch belongs to has projects.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasWiki
description: Whether the repository that the base branch belongs to has a wiki.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasPages
description: Whether the repository that the base branch belongs to has pages.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.HasDownloads
description: Whether the repository that the base branch belongs to has downloads.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Archived
description: Whether the repository that the base branch belongs to is archived.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.Disabled
description: Whether the repository that the base branch belongs to is disabled.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.PushedAt
description: The date that the repository that the base branch belongs to was last pushed to.
type: String
- contextPath: GitHub.PR.Base.Repo.CreatedAt
description: The date of creation of the repository that the base branch belongs to.
type: String
- contextPath: GitHub.PR.Base.Repo.UpdatedAt
description: The date that the repository that the base branch belongs to was last updated.
type: String
- contextPath: GitHub.PR.Base.Repo.AllowRebaseMerge
description: Whether the repository that the base branch belongs to allows rebase-style merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowSquashMerge
description: Whether the repository that the base branch belongs to allows squash merges.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.AllowMergeCommit
description: Whether the repository that the base branch belongs to allows merge commits.
type: Boolean
- contextPath: GitHub.PR.Base.Repo.SubscribersCount
description: The number of entities that subscribe to the repository that the base branch belongs to.
type: Number
- contextPath: GitHub.PR.AuthorAssociation
description: The pull request author association.
type: String
- contextPath: GitHub.PR.Draft
description: Whether the pull request is a draft.
type: Boolean
- contextPath: GitHub.PR.Merged
description: Whether the pull request is merged.
type: Boolean
- contextPath: GitHub.PR.Mergeable
description: Whether the pull request is mergeable.
type: Boolean
- contextPath: GitHub.PR.Rebaseable
description: Whether the pull request is rebaseable.
type: Boolean
- contextPath: GitHub.PR.MergeableState
description: The mergeable state of the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Login
description: The login of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.ID
description: The ID of the user who merged the pull request.
type: Number
- contextPath: GitHub.PR.MergedBy.NodeID
description: The node ID of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.Type
description: The type of the user who merged the pull request.
type: String
- contextPath: GitHub.PR.MergedBy.SiteAdmin
description: Whether the user who merged the pull request is a site admin or not.
type: Boolean
- contextPath: GitHub.PR.Comments
description: The number of comments on the pull request.
type: Number
- contextPath: GitHub.PR.ReviewComments
description: The number of review comments on the pull request.
type: Number
- contextPath: GitHub.PR.MaintainerCanModify
description: Whether the maintainer can modify the pull request.
type: Boolean
- contextPath: GitHub.PR.Commits
description: The number of commits in the pull request.
type: Number
- contextPath: GitHub.PR.Additions
description: The number of additions in the pull request.
type: Number
- contextPath: GitHub.PR.Deletions
description: The number of deletions in the pull request.
type: Number
- contextPath: GitHub.PR.ChangedFiles
description: The number of changed files in the pull request.
type: Number
- arguments:
- description: The issue number to retrieve events for.
name: issue_number
required: true
description: Returns events corresponding to the given issue.
name: Github-list-issue-events
outputs:
- contextPath: GitHub.IssueEvent.id
description: Event ID.
type: Number
- contextPath: GitHub.IssueEvent.node_id
description: Event node ID.
type: String
- contextPath: GitHub.IssueEvent.url
description: Event URL.
type: String
- contextPath: GitHub.IssueEvent.actor.login
description: Event actor login username.
type: String
- contextPath: GitHub.IssueEvent.actor.id
description: Event actor ID.
type: Number
- contextPath: GitHub.IssueEvent.actor.node_id
description: Event actor node ID.
type: String
- contextPath: GitHub.IssueEvent.actor.avatar_url
description: Event actor avatar URL.
type: String
- contextPath: GitHub.IssueEvent.actor.gravatar_id
description: Event actor gravatar ID.
type: String
- contextPath: GitHub.IssueEvent.actor.url
description: Event actor URL.
type: String
- contextPath: GitHub.IssueEvent.actor.html_url
description: Event actor HTML URL.
type: String
- contextPath: GitHub.IssueEvent.actor.followers_url
description: Event actor followers URL.
type: String
- contextPath: GitHub.IssueEvent.actor.following_url
description: Event actor following URL.
type: String
- contextPath: GitHub.IssueEvent.actor.gists_url
description: Event actor gists URL.
type: String
- contextPath: GitHub.IssueEvent.actor.starred_url
description: Event actor starred URL.
type: String
- contextPath: GitHub.IssueEvent.actor.subscriptions_url
description: Event actor subscriptions URL.
type: String
- contextPath: GitHub.IssueEvent.actor.organizations_url
description: Event actor organizations URL.
type: String
- contextPath: GitHub.IssueEvent.actor.repos_url
description: Event actor repos URL.
type: String
- contextPath: GitHub.IssueEvent.actor.events_url
description: Event actor events URL.
type: String
- contextPath: GitHub.IssueEvent.actor.received_events_url
description: Event actor received events URL.
type: String
- contextPath: GitHub.IssueEvent.actor.type
description: Event actor type.
type: String
- contextPath: GitHub.IssueEvent.actor.site_admin
description: Indicates whether the event actor is site admin.
type: Boolean
- contextPath: GitHub.IssueEvent.event
description: Issue event type, for example labeled, closed.
type: String
- contextPath: GitHub.IssueEvent.commit_id
description: Event commit ID.
type: Unknown
- contextPath: GitHub.IssueEvent.commit_url
description: Event commit URL.
type: Unknown
- contextPath: GitHub.IssueEvent.created_at
description: Event created time.
type: Date
- contextPath: GitHub.IssueEvent.label.name
description: Event label name.
type: String
- contextPath: GitHub.IssueEvent.label.color
description: Event label color.
type: String
- contextPath: GitHub.IssueEvent.performed_via_github_app
description: Indicates whether event was performed via a GitHub application.
type: Unknown
- contextPath: GitHub.IssueEvent.assignee.login
description: Assignee login username.
type: String
- contextPath: GitHub.IssueEvent.assignee.id
description: Assignee ID.
type: Number
- contextPath: GitHub.IssueEvent.assignee.node_id
description: Assignee node ID.
type: String
- contextPath: GitHub.IssueEvent.assignee.avatar_url
description: Assignee avatar URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.gravatar_id
description: Assignee gravatar ID.
type: String
- contextPath: GitHub.IssueEvent.assignee.url
description: Assignee URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.html_url
description: Assignee HTML URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.followers_url
description: Assignee followers URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.following_url
description: Assignee following URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.gists_url
description: Assignee gists URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.starred_url
description: Assignee starred URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.subscriptions_url
description: Assignee subscriptions URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.organizations_url
description: Assignee organizations URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.repos_url
description: Assignee repos URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.events_url
description: Assignee events URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.received_events_url
description: Assignee received events URL.
type: String
- contextPath: GitHub.IssueEvent.assignee.type
description: Assignee type.
type: String
- contextPath: GitHub.IssueEvent.assignee.site_admin
description: Indicates whether the assignee is a site admin.
type: Boolean
- contextPath: GitHub.IssueEvent.assigner.login
description: Assigner login username.
type: String
- contextPath: GitHub.IssueEvent.assigner.id
description: Assigner ID.
type: Number
- contextPath: GitHub.IssueEvent.assigner.node_id
description: Assigner node ID.
type: String
- contextPath: GitHub.IssueEvent.assigner.avatar_url
description: Assigner avatar URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.gravatar_id
description: Assigner gravatar ID.
type: String
- contextPath: GitHub.IssueEvent.assigner.url
description: Assigner URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.html_url
description: Assigner HTML URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.followers_url
description: Assigner followers URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.following_url
description: Assigner following URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.gists_url
description: Assigner gists URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.starred_url
description: Assigner starred URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.subscriptions_url
description: Assigner subscriptions URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.organizations_url
description: Assigner organizations URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.repos_url
description: Assigner repos URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.events_url
description: Assigner events URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.received_events_url
description: Assigner received events URL.
type: String
- contextPath: GitHub.IssueEvent.assigner.type
description: Assigner type.
type: String
- contextPath: GitHub.IssueEvent.assigner.site_admin
    description: Indicates whether the assigner is a site admin.
type: Boolean
- arguments:
- description: Only list projects with the following numbers.
isArray: true
name: project_filter
- defaultValue: '20'
description: The number of projects to return. Default is 20. Maximum is 100.
name: limit
  description: Lists all project boards that the user has access to view.
name: GitHub-list-all-projects
outputs:
- contextPath: GitHub.Project.Name
description: The name of the project board.
type: String
- contextPath: GitHub.Project.ID
description: The ID of the project board.
type: Number
- contextPath: GitHub.Project.Number
description: The project board number.
type: Number
- contextPath: GitHub.Project.Columns.Name
description: The column name.
type: String
- contextPath: GitHub.Project.Columns.ColumnID
description: The ID of the column.
type: Number
- contextPath: GitHub.Project.Columns.Cards.CardID
description: The ID of the card.
type: Number
- contextPath: GitHub.Project.Columns.Cards.ContentNumber
description: The content number of this card, usually this is the issue number.
type: Number
- contextPath: GitHub.Project.Issues
description: Lists of all issue numbers that are in this project board.
type: Unknown
- arguments:
- description: 'Column unique id.'
name: column_id
required: true
- description: Issue unique ID.
name: issue_unique_id
required: true
- defaultValue: Issue
description: Content type of the project card.
name: content_type
  description: Adds an issue as a card in a column of a specific project.
name: GitHub-add-issue-to-project-board
- arguments:
- default: true
description: Relative path to retrieve its data.
name: relative_path
required: true
- description: The branch name from which to get the file data. Default is master.
name: branch_name
- description: The name of the organization containing the file.
name: organization
- description: The repository of the file.
name: repository
  description: Gets the data of a given path.
name: GitHub-get-path-data
outputs:
- contextPath: GitHub.PathData.name
description: The path name.
type: String
- contextPath: GitHub.PathData.path
    description: The relative path for the given repository.
type: String
- contextPath: GitHub.PathData.sha
description: The path SHA.
type: String
- contextPath: GitHub.PathData.size
description: The path size in bytes. Will be 0 if path to a dir was given.
type: Number
- contextPath: GitHub.PathData.url
description: The path URL.
type: String
- contextPath: GitHub.PathData.html_url
description: The path HTML URL.
type: String
- contextPath: GitHub.PathData.git_url
description: The path Git URL.
type: String
- contextPath: GitHub.PathData.download_url
description: The path download URL. If a directory path was given will be empty.
type: String
- contextPath: GitHub.PathData.type
description: The path data, for example file, dir.
type: String
- contextPath: GitHub.PathData.content
description: The content of the path if a file path was given.
type: String
- contextPath: GitHub.PathData.encoding
description: The encoding method if path to a file was given.
type: String
- contextPath: GitHub.PathData.entries.name
description: If a dir was given in file_path, name of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.path
description: If a dir was given in file_path, path of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.sha
description: If a dir was given in file_path, SHA of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.size
description: If a dir was given in file_path, size of the dir entry. Will be 0 if entry is also a dir.
type: Number
- contextPath: GitHub.PathData.entries.url
description: If a dir was given in file_path, URL of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.html_url
description: If a dir was given in file_path, HTML URL of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.git_url
description: If a dir was given in file_path, Git URL of the dir entry.
type: String
- contextPath: GitHub.PathData.entries.download_url
description: If a dir was given in file_path, download URL of the dir entry. Will be empty if entry is also a dir.
type: String
- contextPath: GitHub.PathData.entries.type
description: If a dir was given in file_path, type of the dir entry.
type: String
- arguments:
- description: The page number to retrieve releases from. If limit argument is not given, defaults to 1.
name: page
- description: The size of the page. If limit argument is not given, defaults to 50.
name: page_size
- description: The maximum number of releases data to retrieve. Will get results of the first pages. Cannot be given with page_size or page arguments.
name: limit
- description: The name of the organization containing the repository. Defaults to organization instance parameter if not given.
name: organization
- description: The repository containing the releases. Defaults to repository instance parameter if not given.
name: repository
description: Gets releases data from a given repository and organization.
name: GitHub-releases-list
outputs:
- contextPath: GitHub.Release.url
description: The release URL.
type: String
- contextPath: GitHub.Release.assets_url
description: The release assets URL.
type: String
- contextPath: GitHub.Release.upload_url
description: Upload URL.
type: String
- contextPath: GitHub.Release.html_url
description: HTML URL.
type: String
- contextPath: GitHub.Release.id
description: The release ID.
type: Number
- contextPath: GitHub.Release.author.login
description: The release author login username.
type: String
- contextPath: GitHub.Release.author.id
description: The release author user ID.
type: Number
- contextPath: GitHub.Release.author.node_id
description: The release author node ID.
type: String
- contextPath: GitHub.Release.author.avatar_url
description: The release author avatar URL.
type: String
- contextPath: GitHub.Release.author.gravatar_id
description: The release author gravatar ID.
type: String
- contextPath: GitHub.Release.author.url
description: The release author URL.
type: String
- contextPath: GitHub.Release.author.html_url
description: The release author HTML URL.
type: String
- contextPath: GitHub.Release.author.followers_url
description: The release author followers URL.
type: String
- contextPath: GitHub.Release.author.following_url
description: The release author following URL.
type: String
- contextPath: GitHub.Release.author.gists_url
description: The release author gists URL.
type: String
- contextPath: GitHub.Release.author.starred_url
description: The release author starred URL.
type: String
- contextPath: GitHub.Release.author.subscriptions_url
description: The release author subscriptions URL.
type: String
- contextPath: GitHub.Release.author.organizations_url
description: The release author organizations URL.
type: String
- contextPath: GitHub.Release.author.repos_url
description: The release author repos URL.
type: String
- contextPath: GitHub.Release.author.events_url
description: The release author events URL.
type: String
- contextPath: GitHub.Release.author.received_events_url
description: The release author received events URL.
type: String
- contextPath: GitHub.Release.author.type
description: The release author type. (E.g, "User").
type: String
- contextPath: GitHub.Release.author.site_admin
description: Whether the release author is a site admin.
type: Boolean
- contextPath: GitHub.Release.node_id
description: The release Node ID.
type: String
- contextPath: GitHub.Release.tag_name
description: The release tag name.
type: String
- contextPath: GitHub.Release.target_commitish
description: The release target commit.
type: String
- contextPath: GitHub.Release.name
description: The release name.
type: String
- contextPath: GitHub.Release.draft
description: Whether release is draft.
type: Boolean
- contextPath: GitHub.Release.prerelease
description: Whether release is pre release.
type: Boolean
- contextPath: GitHub.Release.created_at
description: Date when release was created.
type: Date
- contextPath: GitHub.Release.published_at
description: Date when release was published.
type: Date
- contextPath: GitHub.Release.tarball_url
description: The release tar URL download.
type: String
- contextPath: GitHub.Release.zipball_url
description: The release zip URL download.
type: String
- contextPath: GitHub.Release.body
description: The release body.
type: String
- arguments:
- description: The number of the issue to comment on.
name: issue_number
required: true
- description: the comment id to update.
name: comment_id
required: true
- description: The contents of the comment.
name: body
required: true
description: Update an already existing comment.
name: GitHub-update-comment
outputs:
- contextPath: GitHub.Comment.IssueNumber
description: The number of the issue to which the comment belongs.
type: Number
- contextPath: GitHub.Comment.ID
description: The ID of the comment.
type: Number
- contextPath: GitHub.Comment.NodeID
description: The node ID of the comment.
type: String
- contextPath: GitHub.Comment.Body
description: The body content of the comment.
type: String
- contextPath: GitHub.Comment.User.Login
description: The login of the user who commented.
type: String
- contextPath: GitHub.Comment.User.ID
description: The ID of the user who commented.
type: Number
- contextPath: GitHub.Comment.User.NodeID
description: The node ID of the user who commented.
type: String
- contextPath: GitHub.Comment.User.Type
description: The type of the user who commented.
type: String
- contextPath: GitHub.Comment.User.SiteAdmin
description: Whether the user who commented is a site admin.
type: Boolean
- arguments:
- description: The id of comment to delete.
name: comment_id
required: true
description: Deletes a comment.
name: GitHub-delete-comment
- arguments:
- description: The number of PR/Issue to assign users to.
name: pull_request_number
required: true
- description: Users to assign, can be a list of users.
isArray: true
name: assignee
required: true
description: Adds up to 10 assignees to an issue/PR. Users already assigned to an issue are not replaced.
name: GitHub-add-assignee
outputs:
- contextPath: GitHub.Assignees.assignees
description: Assignees to the issue.
type: String
- contextPath: GitHub.Assignees.assignees.login
description: Login of assigned user.
type: String
- contextPath: GitHub.Assignees.assignees.gists_url
description: Gists URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.following_url
description: Following URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.followers_url
description: Followers URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.subscriptions_url
description: Subscriptions URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.received_events_url
description: Received events URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.events_url
description: Events URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.avatar_url
description: Avatar URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.url
    description: URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.starred_url
description: Starred URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.organizations_url
description: Organizations URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.repos_url
description: Repos URL for user.
type: String
- contextPath: GitHub.Assignees.assignees.gravatar_id
description: Gravatar_id for user.
type: String
- contextPath: GitHub.Assignees.assignees.site_admin
description: Is user site admin.
type: String
- contextPath: GitHub.Assignees.assignees.node_id
description: Node ID for user.
type: String
- contextPath: GitHub.Assignees.assignees.type
description: Type of user.
type: String
- contextPath: GitHub.Assignees.assignees.id
description: User ID.
type: String
- contextPath: GitHub.Assignees.assignees.html_url
description: HTML URL for user.
type: String
- arguments:
- description: The GitHub owner (organization or username) of the repository.
name: owner
- description: The GitHub repository name.
name: repository
- description: The branch to trigger the workflow on.
name: branch
- description: The name of your workflow file.
name: workflow
required: true
- description: The inputs for the workflow.
name: inputs
description: Triggers a GitHub workflow on a given repository and workflow.
name: GitHub-trigger-workflow
- arguments:
- description: The GitHub owner (organization or username) of the repository.
name: owner
- description: The GitHub repository name.
name: repository
- description: The ID of the workflow to cancel.
name: workflow_id
required: true
description: Cancels a GitHub workflow.
name: GitHub-cancel-workflow
- arguments:
- description: The GitHub owner (organization or username) of the repository.
name: owner
- description: The GitHub repository name.
isArray: true
name: repository
- description: The name of your workflow file.
isArray: true
name: workflow
required: true
- description: The number of workflows to return. Default is 100.
isArray: true
name: limit
description: Returns a list of GitHub workflows on a given repository.
name: GitHub-list-workflows
outputs:
- contextPath: GitHub.Workflow.id
description: The GitHub workflow ID (per run).
type: Number
- contextPath: GitHub.Workflow.name
description: The GitHub workflow name.
type: String
- contextPath: GitHub.Workflow.head_branch
description: The branch on which the workflow ran.
type: String
- contextPath: GitHub.Workflow.head_sha
description: The commit SHA on which the workflow ran.
type: String
- contextPath: GitHub.Workflow.path
description: The GitHub workflow name path.
type: String
- contextPath: GitHub.Workflow.display_title
description: The GitHub workflow title.
type: String
- contextPath: GitHub.Workflow.run_number
description: The GitHub workflow run number.
type: Number
- contextPath: GitHub.Workflow.event
description: The GitHub workflow trigger type (scheduled, dispatch).
type: String
- contextPath: GitHub.Workflow.status
description: The GitHub workflow status (in_progress, completed).
type: String
- contextPath: GitHub.Workflow.conclusion
description: The GitHub workflow conclusion (cancelled, success).
type: String
- contextPath: GitHub.Workflow.workflow_id
description: The GitHub workflow ID (per workflow).
type: String
- contextPath: GitHub.Workflow.url
description: The GitHub workflow API URL.
type: String
- contextPath: GitHub.Workflow.html_url
description: The GitHub workflow HTML URL.
type: String
- contextPath: GitHub.Workflow.created_at
description: Datetime the GitHub workflow was created at.
type: Date
- contextPath: GitHub.Workflow.updated_at
description: Datetime the GitHub workflow was updated at.
type: Date
dockerimage: demisto/auth-utils:1.0.0.100419
isfetch: true
runonce: false
script: '-'
subtype: python3
type: python
tests:
- No tests (auto formatted)
fromversion: 5.0.0
```
|
```python
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
import warnings
import numpy as np
from ._base import make_dataset
from ._sag_fast import sag32, sag64
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.validation import _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
                       n_samples=None,
                       is_saga=False):
    """Compute automatic step size for SAG solver.
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.
    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.
    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.
    loss : {'log', 'squared', 'multinomial'}
        The loss function used in SAG solver.
    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.
    n_samples : int, default=None
        Number of rows in X. Useful if is_saga=True.
    is_saga : bool, default=False
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.
    Returns
    -------
    step_size : float
        Step size used in SAG solver.
    Raises
    ------
    ValueError
        If ``loss`` is not one of 'log', 'squared' or 'multinomial'.
    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    path_to_url
    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    path_to_url
    """
    if loss in ('log', 'multinomial'):
        # The logistic gradient is 1/4-Lipschitz per unit of squared feature
        # norm; the intercept acts as one extra constant feature.
        L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
    elif loss == 'squared':
        # inverse Lipschitz constant for squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        # BUGFIX: the previous message omitted 'multinomial', which this
        # function (and its docstring) explicitly accepts.
        raise ValueError("Unknown loss function for SAG solver, got %s "
                         "instead of 'log', 'squared' or 'multinomial'" % loss)
    if is_saga:
        # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
        # See Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1. / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
        # see path_to_url
        # slide 65
        step = 1. / L
    return step
@_deprecate_positional_args
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None,
               is_saga=False):
    """SAG solver for Ridge and LogisticRegression.
    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.
    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.
    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.
    .. versionadded:: 0.17
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.
    y : ndarray of shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).
    sample_weight : array-like of shape (n_samples,), default=None
        Weights applied to individual samples (1. for unweighted).
    loss : {'log', 'squared', 'multinomial'}, default='log'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.
        .. versionadded:: 0.18
           *loss='multinomial'*
    alpha : float, default=1.
        L2 regularization term in the objective function
        ``(0.5 * alpha * || W ||_F^2)``.
    beta : float, default=0.
        L1 regularization term in the objective function
        ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
    max_iter : int, default=1000
        The max number of passes over the training data if the stopping
        criteria is not reached.
    tol : double, default=0.001
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol.
    verbose : int, default=0
        The verbosity level.
    random_state : int, RandomState instance or None, default=None
        Used when shuffling the data. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.
    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default=None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.
    warm_start_mem : dict, default=None
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.
    is_saga : bool, default=False
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.
    Returns
    -------
    coef_ : ndarray of shape (n_features,)
        Weight vector.
    n_iter_ : int
        The number of full pass on all samples.
    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(
    ...     solver='sag', multi_class='multinomial')
    >>> clf.fit(X, y)
    LogisticRegression(multi_class='multinomial', solver='sag')
    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    path_to_url
    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    path_to_url
    See Also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000
    if check_input:
        # Coerce X and y to C-ordered float32/float64 (CSR accepted for X) so
        # the dtype-specialized Cython routines can run without extra copies.
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse='csr', order='C')
        y = check_array(y, dtype=_dtype, ensure_2d=False, order='C')
    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples
    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
    # initialization: each state array is restored from warm_start_mem when
    # present, otherwise zero-initialized with X's dtype.
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
    if 'coef' in warm_start_mem.keys():
        coef_init = warm_start_mem['coef']
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype,
                             order='C')
    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        # Split the warm-started array into weights (first n_features rows)
        # and intercept (last row).
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)
    if 'intercept_sum_gradient' in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
    if 'gradient_memory' in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros((n_samples, n_classes),
                                        dtype=X.dtype, order='C')
    if 'sum_gradient' in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros((n_features, n_classes),
                                     dtype=X.dtype, order='C')
    if 'seen' in warm_start_mem.keys():
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
    if 'num_seen' in warm_start_mem.keys():
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0
    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
    if max_squared_sum is None:
        # Not precomputed by the caller; scan all rows once.
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept, n_samples=n_samples,
                                   is_saga=is_saga)
    # step_size * alpha_scaled == 1 is a degenerate case the Cython update
    # rule cannot handle; fail loudly rather than produce garbage.
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")
    # Dispatch to the dtype-specialized Cython implementation. The state
    # arrays are updated in place by this call.
    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(dataset, coef_init,
                            intercept_init, n_samples,
                            n_features, n_classes, tol,
                            max_iter,
                            loss,
                            step_size, alpha_scaled,
                            beta_scaled,
                            sum_gradient_init,
                            gradient_memory_init,
                            seen_init,
                            num_seen_init,
                            fit_intercept,
                            intercept_sum_gradient,
                            intercept_decay,
                            is_saga,
                            verbose)
    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)
    if fit_intercept:
        # Re-append the intercept as the last row, matching the warm-start
        # 'coef' layout documented above.
        coef_init = np.vstack((coef_init, intercept_init))
    warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}
    if loss == 'multinomial':
        # Internally weights are (n_features, n_classes); callers expect
        # (n_classes, n_features), hence the transpose.
        coef_ = coef_init.T
    else:
        # Binary/regression case: return a 1-D weight vector.
        coef_ = coef_init[:, 0]
    return coef_, n_iter_, warm_start_mem
```
|
Jimmy Doherty (born 24 May 1975) is an English television presenter and farmer. A childhood friend of Jamie Oliver, Doherty is known for the show Jimmy's Farm, detailing the operation of the Essex Pig Company that he and his wife Michaela Furney own in Suffolk.
Early life
Born in Ilford, Doherty moved to Clavering in Essex at the age of three. A childhood friend of Jamie Oliver, he attended Clavering Primary School and then studied at Newport Free Grammar School. Whilst at Newport Grammar School Doherty ran a magazine called 'The Natural Choice' sparking his love of nature and animals.
From the age of 13 he worked in the tropical butterfly house at Mole Hall Wildlife Park in Saffron Walden, assisting with the menagerie of different animals ranging from otters to chimpanzees. Doherty left Mole Hall, aged 24, to focus on his academic commitments.
He has a degree in animal biology from the University of East London and studied for a PhD in entomology at Coventry University's zoology department.
Career
Doherty served for five years in the Royal Corps of Signals.
In 2002, he appeared as a friend and guest on Oliver's Twist in the episodes "Painting Party" in 2002 and "Flash in the Pan" in 2003.
He then trained as a pig farmer, where, in 2002, he met runner Furney, when Channel 4's filming for Jamie's Kitchen took it to the Cumbrian farm where he was working.
After returning to Essex to run his own farm, Doherty and Furney set up The Essex Pig Company using free range meat production practices. Doherty gained the funds to set up the farm using the proceeds from his first book On The Farm which formed a diary of his farming ventures.
The Essex Pig Company, based in Ipswich, Suffolk, raised various rare breeds, most notably the endangered Essex, and now the Berkshire, Gloucestershire Old Spots, Middle Whites, and Norfolk Horn sheep among many others.
People were eager to visit the farm to see the native domestic animals and buy fresh produce, so Doherty and Furney opened the doors to the farm in 2002. To enter the farm a sign read 'Jimmy's Farm' and when BBC Two followed their efforts with a series of fly-on-the-wall documentaries the name stuck.
In 2008, Doherty presented a series for BBC2 called Jimmy Doherty's Farming Heroes which aired from July 2008 to August 2008, followed by various other series and single documentaries for the BBC. Notably, Jimmy's Food Factory, in which he demonstrated the industrial techniques used in the production of processed foods, ran for two series on BBC One. When the Controller of BBC One at the time, Jay Hunt, left to become chief creative officer at Channel 4 at the end of 2010, she signed up Doherty to present exclusively for Channel 4. Doherty's last series for the BBC, A Farmer's Life for Me, was broadcast in February and March 2011.
Since 18 June 2011, Jimmy's Food Factory airs as ProSieben BBC Spezial – Jimmy's Food Factory every Saturday on German TV channel ProSieben.
In 2011, Jimmy's Farm 'Cambridge Sausage' was named in the top 10 sausages in the UK by The Independent. The sausage contains 93% pork from the farm and the recipe dates back to 1917.
In December 2012, Jimmy and Jamie Oliver presented the Channel 4 series Jamie & Jimmy's Food Fight Club. Since 2014, Doherty has co-presented the prime-time Channel 4 series Jamie & Jimmy's Friday Night Feast alongside Jamie Oliver.
Doherty and the team ensured conservation was at the forefront of their decisions and created areas at the farm for nature to blossom. As well as a flair for farming Doherty holds a passion for non-native species and, with a great team behind him, the farm applied for zoo status to help endangered species further afield. On 5 October 2016 Jimmy's Farm was awarded its zoo license from the local authority and Jimmy's Farm & Wildlife Park was born. The first three exotic species they welcomed were Reindeer, Tapirs and Meerkats.
In 2016 Jimmy became the youngest ever President of the Rare Breeds Survival Trust, a charity closely linked to his work and passions. In 2019 he became a patron of the British Beekeepers Association. He is also the patron of the British Hen Welfare Trust.
In May 2019 Jimmy's Farm & Wildlife Park became accredited by the British and Irish Association of Zoos and Aquariums (BIAZA) who represent the best zoos and aquariums in Britain and Ireland that pride themselves on their excellent animal welfare, education, and conservation work. In July 2021 Jimmy's Farm & Wildlife Park became the first recipient of the Rare Breed Survival Trust's new Rare Breeds Approved Associate accreditation. This has been awarded in recognition of the farm's excellence in education about the importance of Britain's endangered native livestock and equine breeds and its work to conserve their unique genetic characteristics.
Personal life
Jimmy and Michaela Furney were married on 22 August 2009. The reception was held at their farm, Pannington Hall in Wherstead, Suffolk.
They have four daughters, born between 2010 and 2018.
In August 2014, Doherty was one of 200 public figures who were signatories to a letter to The Guardian expressing their hope that Scotland would vote to remain part of the United Kingdom in September's referendum on that issue.
Doherty has close links to Ipswich Town Football Club and follows their campaigns. In 2013 Jimmy's Farm created an Ipswich 'Super Blue' sausage named in honour of The Blues. The sausage, like all their sausages, was made with pork from the farm and this one included stilton and garlic.
Academic awards
Certificate of Fellowship from Harper Adams University College (September-2010)
Honorary Graduate at University of Suffolk (October-2010)
Honorary Degree of Doctor of the university from University of East Anglia and University of Essex (November-2010)
Certificate of Associateship from the Council for Awards of Royal Agricultural Societies in recognition of distinguished achievement in the agricultural industry (August-2011)
Honorary Doctor of Business Administration (Hon DBA) from Coventry University in recognition of contribution to food production and science by promoting responsible, sustainable farming (2014)
Chancellor of Writtle University College in Essex (from 2022)
Awards
Outstanding Communicator from Royal Agricultural Society of England (July-2009)
Duly Elected as Member of the Institute of Meat (October-2013)
Elsie M J Evans Award from RSPCA for his work to raise the profile of farm animal welfare (March-2014)
Charities supported worldwide
The Gurkha Welfare Trust
Rare Breeds Survival Trust (RBST)
Lowland Tapir Conservation Initiative
Animal Advocacy and Protection (AAP)
Books
On the Farm (2004)
A Taste of the Country (2007)
A Farmer's Life for Me (2011)
Television series
Jimmy Doherty's New Zealand Escape (Channel 4 2023)
Builds on Wheels (discovery+ 2021)
Jimmy's Farm (Channel 4, 2020–present)
Escape to the Wild (Channel 4, 2017)
Jamie & Jimmy's Friday Night Feast (Channel 4, 2014–present)
Food Unwrapped (Channel 4, 2012–present)
Jamie & Jimmy's Food Fight Club (Channel 4, December 2012)
Jimmy and the Whale Whisperer (Channel 4 2012)
Jimmy and the Giant Supermarket (Channel 4 2012)
A Farmer's Life for Me (BBC 2011)
Museum of Life (BBC 2010)
Jimmy's Global Harvest (BBC 2010)
The Private Life of... (BBC 2010)
Jimmy's Food Factory (BBC 2009)
Jimmy Doherty in Darwin's Garden (OU & BBC 2009)
Jimmy's GM Food Fight (BBC Horizon 2008)
Jimmy and the Wild Honey Hunters (BBC 2008)
Jimmy's Farming Heroes (BBC 2008)
Crisis on Jimmy's Farm (BBC 2007)
Jimmy's Farm Diaries (BBC 2007)
Jimmy's Farm (BBC 2004–2006)
Richard & Judy (Channel 4 2004 & 2006)
Back on Jimmy's Farm (BBC 2004)
References
External links
Jimmy's Farm
1975 births
Living people
People from Ilford
People from Clavering, Essex
People from Babergh District
British people of Irish descent
People educated at Newport Free Grammar School
Alumni of Coventry University
Alumni of the University of East London
21st-century English farmers
English television presenters
English television personalities
|
Walter Place (born 1869, deceased) was an English professional footballer who played as a wing half.
Place also competed as a pigeon shooter.
References
1869 births
Year of death unknown
Footballers from Burnley
English men's footballers
Men's association football wing halves
Burnley F.C. players
Colne F.C. players
Bacup Borough F.C. players
English Football League players
|
```javascript
import _ from 'lodash-es';
import angular from 'angular';
import { AccessControlFormData } from 'Portainer/components/accessControlForm/porAccessControlFormModel';
import { confirmWebEditorDiscard } from '@@/modals/confirm';
/**
 * Controller backing the "create config" view for a Docker environment.
 *
 * Two entry modes:
 *  - fresh creation (no route `id` param): starts with an empty editor;
 *  - clone (route param `id`): pre-fills the form from an existing config,
 *    appending `_copy` to its name.
 */
class CreateConfigController {
  /* @ngInject */
  constructor($async, $state, $transition$, $window, Notifications, ConfigService, Authentication, FormValidator, ResourceControlService, endpoint) {
    this.$state = $state;
    this.$transition$ = $transition$;
    this.$window = $window;
    this.Notifications = Notifications;
    this.ConfigService = ConfigService;
    this.Authentication = Authentication;
    this.FormValidator = FormValidator;
    this.ResourceControlService = ResourceControlService;
    this.$async = $async;
    this.endpoint = endpoint;

    this.formValues = {
      Name: '',
      Labels: [],
      AccessControlData: new AccessControlFormData(),
      ConfigContent: '',
    };

    this.state = {
      formValidationError: '',
      isEditorDirty: false,
    };

    // Bound once so these can be handed to templates / $async without losing `this`.
    this.editorUpdate = this.editorUpdate.bind(this);
    this.createAsync = this.createAsync.bind(this);
  }

  async $onInit() {
    // Trigger the browser's native "unsaved changes" prompt when the page is
    // unloaded while the editor holds unsaved content.
    this.$window.onbeforeunload = () => {
      if (this.formValues.displayCodeEditor && this.formValues.ConfigContent && this.state.isEditorDirty) {
        return '';
      }
    };

    if (!this.$transition$.params().id) {
      // Plain creation: nothing to pre-load.
      this.formValues.displayCodeEditor = true;
      return;
    }

    // Clone mode: pre-fill name, content and labels from the referenced config.
    try {
      const data = await this.ConfigService.config(this.endpoint.Id, this.$transition$.params().id);
      this.formValues.Name = data.Name + '_copy';
      this.formValues.ConfigContent = data.Data;
      const labels = _.keys(data.Labels);
      for (let i = 0; i < labels.length; i++) {
        const labelName = labels[i];
        const labelValue = data.Labels[labelName];
        this.formValues.Labels.push({ name: labelName, value: labelValue });
      }
      this.formValues.displayCodeEditor = true;
    } catch (err) {
      this.formValues.displayCodeEditor = true;
      this.Notifications.error('Failure', err, 'Unable to clone config');
    }
  }

  $onDestroy() {
    this.state.isEditorDirty = false;
  }

  // ui-router exit guard: ask for confirmation before navigating away with
  // unsaved editor changes.
  async uiCanExit() {
    if (this.formValues.displayCodeEditor && this.formValues.ConfigContent && this.state.isEditorDirty) {
      return confirmWebEditorDiscard();
    }
  }

  addLabel() {
    this.formValues.Labels.push({ name: '', value: '' });
  }

  removeLabel(index) {
    this.formValues.Labels.splice(index, 1);
  }

  // Collect the label rows that have both a name and a value into the
  // API's Labels map on `config`.
  prepareLabelsConfig(config) {
    const labels = {};
    this.formValues.Labels.forEach(function (label) {
      if (label.name && label.value) {
        labels[label.name] = label.value;
      }
    });
    config.Labels = labels;
  }

  // Base64-encode the UTF-8 bytes of the editor content, as the Docker API
  // expects. TextEncoder replaces the deprecated
  // btoa(unescape(encodeURIComponent(...))) idiom and yields identical output.
  prepareConfigData(config) {
    const utf8Bytes = new TextEncoder().encode(this.formValues.ConfigContent);
    let binary = '';
    utf8Bytes.forEach((byte) => {
      binary += String.fromCharCode(byte);
    });
    config.Data = btoa(binary);
  }

  // Build the payload sent to the Docker config creation endpoint.
  prepareConfiguration() {
    const config = {};
    config.Name = this.formValues.Name;
    this.prepareConfigData(config);
    this.prepareLabelsConfig(config);
    return config;
  }

  // Returns true when the access-control section validates; otherwise stores
  // the validation message in state.formValidationError and returns false.
  validateForm(accessControlData, isAdmin) {
    this.state.formValidationError = '';
    const error = this.FormValidator.validateAccessControl(accessControlData, isAdmin);
    if (error) {
      this.state.formValidationError = error;
      return false;
    }
    return true;
  }

  create() {
    return this.$async(this.createAsync);
  }

  // Validate, create the config, apply resource-control, then return to the
  // config list. Errors surface through the Notifications service.
  async createAsync() {
    const accessControlData = this.formValues.AccessControlData;
    const userDetails = this.Authentication.getUserDetails();
    const isAdmin = this.Authentication.isAdmin();

    if (this.formValues.ConfigContent === '') {
      this.state.formValidationError = 'Config content must not be empty';
      return;
    }

    if (!this.validateForm(accessControlData, isAdmin)) {
      return;
    }

    const config = this.prepareConfiguration();

    try {
      const data = await this.ConfigService.create(this.endpoint.Id, config);
      const resourceControl = data.Portainer.ResourceControl;
      const userId = userDetails.ID;
      await this.ResourceControlService.applyResourceControl(userId, accessControlData, resourceControl);
      this.Notifications.success('Success', 'Configuration successfully created');
      this.state.isEditorDirty = false;
      this.$state.go('docker.configs', {}, { reload: true });
    } catch (err) {
      this.Notifications.error('Failure', err, 'Unable to create config');
    }
  }

  editorUpdate(value) {
    this.formValues.ConfigContent = value;
    this.state.isEditorDirty = true;
  }
}
// Expose the controller both as an ES module default export and through the
// AngularJS DI registry so templates can resolve it by name.
export default CreateConfigController;
angular.module('portainer.docker').controller('CreateConfigController', CreateConfigController);
```
|
```python
from django import forms
from django.utils.translation import gettext_lazy as _
from core.models import DataFile, DataSource
from utilities.forms.fields import DynamicModelChoiceField
__all__ = (
'SyncedDataMixin',
)
class SyncedDataMixin(forms.Form):
    """
    Form mixin adding optional data source / data file selection fields.

    Field declaration order matters: Django renders ``data_source`` before
    ``data_file``, and the file choices are narrowed to the chosen source.
    """
    data_source = DynamicModelChoiceField(
        queryset=DataSource.objects.all(),
        required=False,
        label=_('Data source')
    )
    # `$data_source` is resolved client-side: the file widget filters its
    # choices by the currently selected data source's id.
    data_file = DynamicModelChoiceField(
        queryset=DataFile.objects.all(),
        required=False,
        label=_('File'),
        query_params={
            'source_id': '$data_source',
        }
    )
```
|
```c++
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuDB, Tokutek Fractal Tree Indexing Library.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
*/
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#include <my_config.h>
extern "C" {
#include "stdint.h"
#define __STDC_FORMAT_MACROS
#include "inttypes.h"
#if defined(_WIN32)
#include "misc.h"
#endif
}
#define MYSQL_SERVER 1
#include "mysql_version.h"
#include "sql_table.h"
#include "handler.h"
#include "table.h"
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
#include "discover.h"
#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
#include <binlog.h>
#endif
#include "db.h"
#include "toku_os.h"
#include "hatoku_defines.h"
#include "hatoku_cmp.h"
#include "partitioned_counter.h"
static inline uint get_key_parts(const KEY *key);
#undef PACKAGE
#undef VERSION
#undef HAVE_DTRACE
#undef _DTRACE_VERSION
/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
#ifdef HAVE_DTRACE
#define _DTRACE_VERSION 1
#else
#endif
#include "tokudb_buffer.h"
#include "tokudb_status.h"
#include "tokudb_card.h"
#include "ha_tokudb.h"
#include "hatoku_hton.h"
#include <mysql/plugin.h>
// NullS-terminated list of file extensions owned by this storage engine;
// handed back to the server by ha_tokudb::bas_ext() below.
static const char *ha_tokudb_exts[] = {
    ha_tokudb_ext,
    NullS
};
//
// Total width of the fixed-size fields that are NOT filtered out for key
// `keynr`. This offset is calculated starting from AFTER the NULL bytes.
//
static inline uint32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    uint total = 0;
    const uint num_fields = table_share->fields;
    for (uint field_idx = 0; field_idx < num_fields; field_idx++) {
        if (!is_fixed_field(kc_info, field_idx)) {
            continue;
        }
        if (!bitmap_is_set(&kc_info->key_filters[keynr], field_idx)) {
            total += kc_info->field_lengths[field_idx];
        }
    }
    return total;
}
// Number of bytes needed to store the offset entries of all variable-length
// fields not covered by key `keynr` (each entry is num_offset_bytes wide).
static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    uint total_bytes = 0;
    const uint num_fields = table_share->fields;
    for (uint field_idx = 0; field_idx < num_fields; field_idx++) {
        if (!is_variable_field(kc_info, field_idx)) {
            continue;
        }
        if (!bitmap_is_set(&kc_info->key_filters[keynr], field_idx)) {
            total_bytes += kc_info->num_offset_bytes;
        }
    }
    return total_bytes;
}
// Allocate the per-table key/column metadata: one column bitmap per possible
// key slot, plus the arrays describing each field's type and length (all four
// arrays live in a single multi-malloc'd chunk, kc_info->multi_ptr).
// Returns 0 on success or an error code; on failure, everything allocated so
// far is released before returning.
static int allocate_key_and_col_info ( TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info) {
    int error;
    //
    // initialize all of the bitmaps
    //
    for (uint i = 0; i < MAX_KEY + 1; i++) {
        error = bitmap_init(
            &kc_info->key_filters[i],
            NULL,
            table_share->fields,
            false
            );
        if (error) {
            goto exit;
        }
    }
    //
    // create the field lengths
    //
    kc_info->multi_ptr = tokudb_my_multi_malloc(MYF(MY_WME+MY_ZEROFILL),
        &kc_info->field_types, (uint)(table_share->fields * sizeof (uint8_t)),
        &kc_info->field_lengths, (uint)(table_share->fields * sizeof (uint16_t)),
        &kc_info->length_bytes, (uint)(table_share->fields * sizeof (uint8_t)),
        &kc_info->blob_fields, (uint)(table_share->fields * sizeof (uint32_t)),
        NullS);
    if (kc_info->multi_ptr == NULL) {
        error = ENOMEM;
        goto exit;
    }
exit:
    if (error) {
        // BUGFIX: the cleanup loop's condition was `MAX_KEY + 1` (a constant,
        // always true) instead of `i < MAX_KEY + 1`, so on the error path it
        // ran past the end of key_filters indefinitely.
        for (uint i = 0; i < MAX_KEY + 1; i++) {
            bitmap_free(&kc_info->key_filters[i]);
        }
        tokudb_my_free(kc_info->multi_ptr);
    }
    return error;
}
// Release everything allocated by allocate_key_and_col_info(), plus any
// per-key column-pack info (cp_info) added afterwards. Pointers are reset to
// NULL so a double call is harmless.
static void free_key_and_col_info (KEY_AND_COL_INFO* kc_info) {
    // The bitmap and cp_info entries for each key slot are independent, so a
    // single pass over the slots releases both.
    for (uint key_slot = 0; key_slot < MAX_KEY + 1; key_slot++) {
        bitmap_free(&kc_info->key_filters[key_slot]);
        tokudb_my_free(kc_info->cp_info[key_slot]);
        kc_info->cp_info[key_slot] = NULL; // 3144
    }
    // All four field-description arrays came from one multi-malloc'd chunk.
    tokudb_my_free(kc_info->multi_ptr);
    kc_info->field_types = NULL;
    kc_info->field_lengths = NULL;
    kc_info->length_bytes = NULL;
    kc_info->blob_fields = NULL;
}
// Initialize the synchronization primitives of a freshly-allocated share.
// The share begins with no users and in the CLOSED state.
void TOKUDB_SHARE::init(void) {
    use_count = 0;
    thr_lock_init(&lock);
    tokudb_pthread_mutex_init(&mutex, MY_MUTEX_INIT_FAST);
    // rwlock over the share's DB handle array (see free_share's comments on
    // add_index growing it)
    my_rwlock_init(&num_DBs_lock, 0);
    // signalled on open/close state transitions (broadcast in free_share)
    tokudb_pthread_cond_init(&m_openclose_cond, NULL);
    m_state = CLOSED;
}
// Tear down everything init() created plus the rec_per_key statistics array.
// The share must already be CLOSED (all DB handles released).
void TOKUDB_SHARE::destroy(void) {
    assert(m_state == CLOSED);
    thr_lock_delete(&lock);
    tokudb_pthread_mutex_destroy(&mutex);
    rwlock_destroy(&num_DBs_lock);
    tokudb_pthread_cond_destroy(&m_openclose_cond);
    tokudb_my_free(rec_per_key);
    rec_per_key = NULL;
}
// MUST have tokudb_mutex locked on input
// Look up (or lazily create) the TOKUDB_SHARE for `table_name`, keyed by the
// name in the tokudb_open_tables hash. Returns NULL only if inserting a
// newly-created share into the hash fails.
static TOKUDB_SHARE *get_share(const char *table_name, TABLE_SHARE* table_share) {
    TOKUDB_SHARE *share = NULL;
    int error = 0;
    uint length = (uint) strlen(table_name);
    if (!(share = (TOKUDB_SHARE *) my_hash_search(&tokudb_open_tables, (uchar *) table_name, length))) {
        char *tmp_name;

        // create share and fill it with all zeroes
        // hence, all pointers are initialized to NULL
        // (the share struct and its name string come from one allocation;
        // note &share is both the destination and the first output slot)
        share = (TOKUDB_SHARE *) tokudb_my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
            &share, sizeof(*share),
            &tmp_name, length + 1,
            NullS
            );
        assert(share);

        share->init();

        share->table_name_length = length;
        share->table_name = tmp_name;
        strmov(share->table_name, table_name);

        error = my_hash_insert(&tokudb_open_tables, (uchar *) share);
        if (error) {
            // kc_info was zero-filled above, so this frees only NULLs/empty
            // bitmaps before the share itself is destroyed below.
            free_key_and_col_info(&share->kc_info);
            goto exit;
        }
    }

exit:
    if (error) {
        share->destroy();
        tokudb_my_free((uchar *) share);
        share = NULL;
    }
    return share;
}
// Drop one reference to the share. When the count reaches zero: close every
// open DB handle, close the status dictionary, free the key/column metadata,
// and either free the share or (if it was re-opened concurrently) wake the
// waiters on m_openclose_cond. Returns 0, or the last close() error seen.
static int free_share(TOKUDB_SHARE * share) {
    int error, result = 0;

    tokudb_pthread_mutex_lock(&share->mutex);
    DBUG_PRINT("info", ("share->use_count %u", share->use_count));
    if (!--share->use_count) {
        share->m_state = TOKUDB_SHARE::CLOSING;
        tokudb_pthread_mutex_unlock(&share->mutex);

        //
        // number of open DB's may not be equal to number of keys we have because add_index
        // may have added some. So, we loop through entire array and close any non-NULL value
        // It is imperative that we reset a DB to NULL once we are done with it.
        //
        for (uint i = 0; i < sizeof(share->key_file)/sizeof(share->key_file[0]); i++) {
            if (share->key_file[i]) {
                if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
                    TOKUDB_TRACE("dbclose:%p", share->key_file[i]);
                }
                error = share->key_file[i]->close(share->key_file[i], 0);
                assert(error == 0);
                // NOTE(review): the assert above makes this branch dead in
                // debug builds; in release builds it propagates the error.
                if (error) {
                    result = error;
                }
                if (share->key_file[i] == share->file)
                    share->file = NULL;
                share->key_file[i] = NULL;
            }
        }

        error = tokudb::close_status(&share->status_block);
        assert(error == 0);

        free_key_and_col_info(&share->kc_info);

        // Re-acquire in global-then-share lock order before publishing CLOSED.
        tokudb_pthread_mutex_lock(&tokudb_mutex);
        tokudb_pthread_mutex_lock(&share->mutex);
        share->m_state = TOKUDB_SHARE::CLOSED;
        if (share->use_count > 0) {
            // Someone re-referenced the share while we were closing it:
            // wake them instead of freeing the share.
            tokudb_pthread_cond_broadcast(&share->m_openclose_cond);
            tokudb_pthread_mutex_unlock(&share->mutex);
            tokudb_pthread_mutex_unlock(&tokudb_mutex);
        } else {
            my_hash_delete(&tokudb_open_tables, (uchar *) share);
            tokudb_pthread_mutex_unlock(&share->mutex);
            tokudb_pthread_mutex_unlock(&tokudb_mutex);
            share->destroy();
            tokudb_my_free((uchar *) share);
        }
    } else {
        tokudb_pthread_mutex_unlock(&share->mutex);
    }

    return result;
}
// Bail out of the enclosing function (via its local `cleanup` label) when the
// handler's cursor was already closed by a previous error; requires `error`
// and `last_cursor_error` in scope at the expansion site.
#define HANDLE_INVALID_CURSOR() \
    if (cursor == NULL) { \
        error = last_cursor_error; \
        goto cleanup; \
    }
// Engine name reported to the server (e.g. in SHOW TABLE STATUS); the string
// itself is defined elsewhere in the plugin.
const char *ha_tokudb::table_type() const {
    extern const char * const tokudb_hton_name;
    return tokudb_hton_name;
}
// Every index is presented to the server as a BTREE-style ordered index,
// regardless of `inx`.
const char *ha_tokudb::index_type(uint inx) {
    return "BTREE";
}
/*
 * returns NULL terminated file extension string
 * (the ha_tokudb_exts table defined above)
 */
const char **ha_tokudb::bas_ext() const {
    TOKUDB_HANDLER_DBUG_ENTER("");
    DBUG_RETURN(ha_tokudb_exts);
}
// True for INSERT IGNORE statements: the ignore flag is set while duplicate
// handling stays at DUP_ERROR (REPLACE would carry DUP_REPLACE instead).
static inline bool is_insert_ignore (THD* thd) {
    return thd->lex->ignore && thd->lex->duplicates == DUP_ERROR;
}
// True for REPLACE [INTO] statements (duplicate handling set to DUP_REPLACE).
static inline bool is_replace_into(THD* thd) {
    return thd->lex->duplicates == DUP_REPLACE;
}
// Decide whether the REPLACE / INSERT IGNORE fast path may be used for this
// statement. Eligibility, statement kind, the pk_insert_mode setting (with
// triggers taken into account), and statement-format binlogging all gate it.
static inline bool do_ignore_flag_optimization(THD* thd, TABLE* table, bool opt_eligible) {
    if (!opt_eligible)
        return false;
    if (!is_replace_into(thd) && !is_insert_ignore(thd))
        return false;
    uint pk_insert_mode = get_pk_insert_mode(thd);
    // De Morgan of the original: (!triggers && mode < 2) || mode == 0
    if ((table->triggers || pk_insert_mode >= 2) && pk_insert_mode != 0)
        return false;
    // Disallowed when the binlog is open in a non-statement format.
    return !(mysql_bin_log.is_open() && thd->variables.binlog_format != BINLOG_FORMAT_STMT);
}
// Number of key parts the user declared for this key. MySQL 5.6.9+/5.7 and
// MariaDB 10.0.9+ renamed KEY::key_parts to user_defined_key_parts.
static inline uint get_key_parts(const KEY *key) {
#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
    (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
    (100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
    return key->user_defined_key_parts;
#else
    return key->key_parts;
#endif
}
#if TOKU_INCLUDE_EXTENDED_KEYS
// Key part count including extended (implicitly appended) parts; the field
// name differs between MySQL (actual_key_parts) and MariaDB (ext_key_parts).
// Unsupported server versions fail the build via #error.
static inline uint get_ext_key_parts(const KEY *key) {
#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
    (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
    return key->actual_key_parts;
#elif defined(MARIADB_BASE_VERSION)
    return key->ext_key_parts;
#else
#error
#endif
}
#endif
// Handler capability bits: the per-handler base flags plus binlog row/stmt
// capability.
ulonglong ha_tokudb::table_flags() const {
    return int_table_flags | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE;
}
//
// Returns a bit mask of capabilities of the key or its part specified by
// the arguments. The capabilities are defined in sql/handler.h.
//
ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
    TOKUDB_HANDLER_DBUG_ENTER("");
    assert(table_share);
    // All indexes support ordered scans in both directions, key-only reads,
    // and range reads.
    ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
    // Index condition pushdown is only available on these server versions.
    flags |= HA_DO_INDEX_COND_PUSHDOWN;
#endif
    if (key_is_clustering(&table_share->key_info[idx])) {
        flags |= HA_CLUSTERED_INDEX;
    }
    DBUG_RETURN(flags);
}
//
// struct that will be used as a context for smart DBT callbacks
// contains parameters needed to complete the smart DBT cursor call
//
typedef struct smart_dbt_info {
    ha_tokudb* ha; //instance to ha_tokudb needed for reading the row
    uchar* buf; // output buffer where row will be written
    uint keynr; // index into share->key_file that represents DB we are currently operating on
} *SMART_DBT_INFO;

// Context for bulk-fetch callbacks.
// NOTE(review): field meanings below are inferred from names and from the
// callbacks visible in this file — verify against their users.
typedef struct smart_dbt_bf_info {
    ha_tokudb* ha;       // handler performing the read
    bool need_val;       // presumably: whether the row value must be fetched
    int direction;       // presumably: scan direction
    THD* thd;            // connection (e.g. for kill checks)
    uchar* buf;          // destination row buffer in MySQL format
    DBT* key_to_compare; // presumably: bound key for range termination
} *SMART_DBT_BF_INFO;

// Context for index_read-style lookups: wraps smart_dbt_info and records how
// the found key compared to the key originally requested (see the
// smart_dbt_callback_ir_* functions below).
typedef struct index_read_info {
    struct smart_dbt_info smart_dbt_info;
    int cmp;       // prefix-comparison result against orig_key (0 == match)
    DBT* orig_key; // the key the caller asked for
} *INDEX_READ_INFO;
// Progress-poll callback used while adding indexes: aborts if the connection
// was killed, otherwise publishes a human-readable progress message (and a
// progress report where the server supports it). Returns 0 to continue.
static int ai_poll_fun(void *extra, float progress) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
    if (thd_killed(context->thd)) {
        sprintf(context->write_status_msg, "The process has been killed, aborting add index.");
        return ER_ABORTING_CONNECTION;
    }
    float percentage = progress * 100;
    sprintf(context->write_status_msg, "Adding of indexes about %.1f%% done", percentage);
    thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
    return 0;
}
// Progress-poll callback for the bulk loader; same structure as ai_poll_fun
// but with load-specific status messages.
static int loader_poll_fun(void *extra, float progress) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
    if (thd_killed(context->thd)) {
        sprintf(context->write_status_msg, "The process has been killed, aborting bulk load.");
        return ER_ABORTING_CONNECTION;
    }
    float percentage = progress * 100;
    sprintf(context->write_status_msg, "Loading of data about %.1f%% done", percentage);
    thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
    return 0;
}
// Error callback for the add-index loader: records the error on the handler.
static void loader_ai_err_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
    assert(context->ha);
    context->ha->set_loader_error(err);
}
// Duplicate-key callback for the bulk loader: records the error and, for
// DB_KEYEXIST, also captures the offending key so the duplicate value can be
// reported to the user.
static void loader_dup_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
    assert(context->ha);
    context->ha->set_loader_error(err);
    if (err == DB_KEYEXIST) {
        context->ha->set_dup_value_for_pk(key);
    }
}
//
// smart DBT callback function for optimize
// in optimize, we want to flatten DB by doing
// a full table scan. Therefore, we don't
// want to actually do anything with the data, hence
// callback does nothing
//
static int smart_dbt_do_nothing (DBT const *key, DBT const *row, void *context) {
    return 0;
}
// Smart DBT callback for point queries: recover the hidden primary key (if
// this table has one) and unpack the fetched row into the MySQL buffer.
static int
smart_dbt_callback_rowread_ptquery (DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    return info->ha->read_row_callback(info->buf,info->keynr,row,key);
}
//
// Smart DBT callback function in case where we have a covering index
// (the key alone is enough to satisfy the read; the row is never unpacked)
//
static int
smart_dbt_callback_keyread(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    info->ha->read_key_only(info->buf,info->keynr,key);
    return 0;
}
//
// Smart DBT callback function in case where we do NOT have a covering index:
// recover the hidden primary key, then unpack the full row via the PK.
//
static int
smart_dbt_callback_rowread(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    // Propagate read_primary_key's status directly to the cursor layer.
    return info->ha->read_primary_key(info->buf, info->keynr, row, key);
}
//
// Smart DBT callback function in case where we have a covering index
// (index_read variant: first check the found key still matches the
// requested prefix; only unpack when cmp == 0)
//
static int
smart_dbt_callback_ir_keyread(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    if (ir_info->cmp) {
        return 0;
    }
    return smart_dbt_callback_keyread(key, row, &ir_info->smart_dbt_info);
}
// Records only the prefix-comparison result between the found key and the
// requested key; no row data is read or unpacked.
static int
smart_dbt_callback_lookup(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    return 0;
}
//
// Smart DBT callback function in case where we do NOT have a covering index
// (index_read variant: skip the row unpack when the found key no longer
// matches the requested prefix)
//
static int
smart_dbt_callback_ir_rowread(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    if (ir_info->cmp) {
        return 0;
    }
    return smart_dbt_callback_rowread(key, row, &ir_info->smart_dbt_info);
}
//
// macro for Smart DBT callback function,
// so we do not need to put this long line of code in multiple places
//
#define SMART_DBT_CALLBACK(do_key_read) ((do_key_read) ? smart_dbt_callback_keyread : smart_dbt_callback_rowread )
#define SMART_DBT_IR_CALLBACK(do_key_read) ((do_key_read) ? smart_dbt_callback_ir_keyread : smart_dbt_callback_ir_rowread )
//
// macro that modifies read flag for cursor operations depending on whether
// we have preacquired lock or not
// (reads range_lock_grabbed / use_write_locks from the enclosing handler scope)
//
#define SET_PRELOCK_FLAG(flg) ((flg) | (range_lock_grabbed ? (use_write_locks ? DB_PRELOCKED_WRITE : DB_PRELOCKED) : 0))
//
// This method retrieves the value of the auto increment column of a record in MySQL format
// This was basically taken from MyISAM
// Parameters:
//      type - the type of the auto increment column (e.g. int, float, double...)
//      offset - offset into the record where the auto increment column is stored
//      [in]    record - MySQL row whose auto increment value we want to extract
// Returns:
//      The value of the auto increment column in record
//
static ulonglong retrieve_auto_increment(uint16 type, uint32 offset,const uchar *record)
{
    const uchar *key;     /* Key */
    ulonglong   unsigned_autoinc = 0; /* Unsigned auto-increment */
    longlong      signed_autoinc = 0; /* Signed auto-increment */
    enum { unsigned_type, signed_type } autoinc_type;
    float float_tmp;   /* Temporary variable */
    double double_tmp; /* Temporary variable */

    key = ((uchar *) record) + offset;

    /* Set default autoincrement type */
    autoinc_type = unsigned_type;

    // Each case uses the byte-order macro matching the column's storage
    // format; signed types also flip autoinc_type so the clamp below applies.
    switch (type) {
    case HA_KEYTYPE_INT8:
        signed_autoinc   = (longlong) *(char*)key;
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_BINARY:
        unsigned_autoinc = (ulonglong) *(uchar*) key;
        break;

    case HA_KEYTYPE_SHORT_INT:
        signed_autoinc   = (longlong) sint2korr(key);
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_USHORT_INT:
        unsigned_autoinc = (ulonglong) uint2korr(key);
        break;

    case HA_KEYTYPE_LONG_INT:
        signed_autoinc   = (longlong) sint4korr(key);
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_ULONG_INT:
        unsigned_autoinc = (ulonglong) uint4korr(key);
        break;

    case HA_KEYTYPE_INT24:
        signed_autoinc   = (longlong) sint3korr(key);
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_UINT24:
        unsigned_autoinc = (ulonglong) tokudb_uint3korr(key);
        break;

    case HA_KEYTYPE_LONGLONG:
        signed_autoinc   = sint8korr(key);
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_ULONGLONG:
        unsigned_autoinc = uint8korr(key);
        break;

    /* The remaining two cases should not be used but are included for
       compatibility */
    case HA_KEYTYPE_FLOAT:
        float4get(float_tmp, key);  /* Note: float4get is a macro */
        signed_autoinc   = (longlong) float_tmp;
        autoinc_type     = signed_type;
        break;

    case HA_KEYTYPE_DOUBLE:
        float8get(double_tmp, key); /* Note: float8get is a macro */
        signed_autoinc   = (longlong) double_tmp;
        autoinc_type     = signed_type;
        break;

    default:
        // Unknown key type: trap in debug builds, return 0 in release.
        DBUG_ASSERT(0);
        unsigned_autoinc = 0;
    }

    // Negative signed values are clamped to 0, matching MyISAM behavior.
    if (signed_autoinc < 0) {
        signed_autoinc = 0;
    }

    return autoinc_type == unsigned_type ?
           unsigned_autoinc : (ulonglong) signed_autoinc;
}
// True when `field` is nullable and its NULL bit is set in `record`.
static inline bool
is_null_field( TABLE* table, Field* field, const uchar* record) {
    // Fields that can never be NULL need no bitmap inspection.
    if (!field->real_maybe_null()) {
        return false;
    }
    uint null_offset = get_null_offset(table, field);
    return (record[null_offset] & field->null_bit) ? true : false;
}
// Byte offset of `field` within the table's default record buffer.
static inline ulong field_offset(Field* field, TABLE* table) {
    return((ulong) (field->ptr - table->record[0]));
}
// Map MySQL's transaction isolation level onto the engine's internal level;
// anything other than the three weaker levels maps to serializable.
static inline HA_TOKU_ISO_LEVEL tx_to_toku_iso(ulong tx_isolation) {
    switch (tx_isolation) {
    case ISO_READ_UNCOMMITTED:
        return hatoku_iso_read_uncommitted;
    case ISO_READ_COMMITTED:
        return hatoku_iso_read_committed;
    case ISO_REPEATABLE_READ:
        return hatoku_iso_repeatable_read;
    default:
        return hatoku_iso_serializable;
    }
}
// Translate the engine isolation level into the flag passed when beginning a
// TokuDB transaction; serializable requires no flag (0).
static inline uint32_t toku_iso_to_txn_flag (HA_TOKU_ISO_LEVEL lvl) {
    switch (lvl) {
    case hatoku_iso_read_uncommitted:
        return DB_READ_UNCOMMITTED;
    case hatoku_iso_read_committed:
        return DB_READ_COMMITTED;
    case hatoku_iso_repeatable_read:
        return DB_TXN_SNAPSHOT;
    default:
        return 0;
    }
}
// qsort comparator ordering FILTER_KEY_PART_INFO entries by field offset.
// NOTE(review): returns the raw offset difference; assumes offsets are small
// enough that the subtraction cannot overflow int — confirm against the
// declared type of FILTER_KEY_PART_INFO::offset.
static int filter_key_part_compare (const void* left, const void* right) {
    FILTER_KEY_PART_INFO* left_part= (FILTER_KEY_PART_INFO *)left;
    FILTER_KEY_PART_INFO* right_part = (FILTER_KEY_PART_INFO *)right;
    return left_part->offset - right_part->offset;
}
//
// Be very careful with parameters passed to this function. Who knows
// if key, table have proper info set. I had to verify by checking
// in the debugger.
//
// Sets, in key_filter, the bit of every table field that is fully covered by
// `key` (so its bytes need not be stored again in the row). Key parts are
// sorted by their record offset first so the single pass over table fields
// below can match them in order.
void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offset_from_keypart) {
    FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
    uint curr_skip_index = 0;

    for (uint i = 0; i < get_key_parts(key); i++) {
        //
        // horrendous hack due to bugs in mysql, basically
        // we cannot always reliably get the offset from the same source
        //
        parts[i].offset = get_offset_from_keypart ? key->key_part[i].offset : field_offset(key->key_part[i].field, table);
        parts[i].part_index = i;
    }
    qsort(
        parts, // start of array
        get_key_parts(key), //num elements
        sizeof(*parts), //size of each element
        filter_key_part_compare
        );

    for (uint i = 0; i < table->s->fields; i++) {
        Field* field = table->field[i];
        uint curr_field_offset = field_offset(field, table);
        if (curr_skip_index < get_key_parts(key)) {
            uint curr_skip_offset = 0;
            curr_skip_offset = parts[curr_skip_index].offset;
            if (curr_skip_offset == curr_field_offset) {
                //
                // we have hit a field that is a portion of the primary key
                //
                uint curr_key_index = parts[curr_skip_index].part_index;
                curr_skip_index++;
                //
                // only choose to continue over the key if the key's length matches the field's length
                // otherwise, we may have a situation where the column is a varchar(10), the
                // key is only the first 3 characters, and we end up losing the last 7 bytes of the
                // column
                //
                TOKU_TYPE toku_type = mysql_to_toku_type(field);
                switch (toku_type) {
                case toku_type_blob:
                    // blobs are never treated as covered by the key
                    break;
                case toku_type_varbinary:
                case toku_type_varstring:
                case toku_type_fixbinary:
                case toku_type_fixstring:
                    if (key->key_part[curr_key_index].length == field->field_length) {
                        bitmap_set_bit(key_filter,i);
                    }
                    break;
                default:
                    bitmap_set_bit(key_filter,i);
                    break;
                }
            }
        }
    }
}
//
// Copy a fixed-size field (num_bytes bytes) from the mysql row into the
// tokudb row buffer. Returns a pointer just past the bytes written.
// (The original enumerated the common sizes in a switch; every arm
// performed the same memcpy, so a single call is equivalent.)
//
static inline uchar* pack_fixed_field(
    uchar* to_tokudb,
    const uchar* from_mysql,
    uint32_t num_bytes
    )
{
    memcpy(to_tokudb, from_mysql, num_bytes);
    return to_tokudb + num_bytes;
}
//
// Copy a fixed-size field (num_bytes bytes) from the tokudb row back into
// the mysql row buffer. Returns a pointer just past the bytes read.
// (The original enumerated the common sizes in a switch; every arm
// performed the same memcpy, so a single call is equivalent.)
//
static inline const uchar* unpack_fixed_field(
    uchar* to_mysql,
    const uchar* from_tokudb,
    uint32_t num_bytes
    )
{
    memcpy(to_mysql, from_tokudb, num_bytes);
    return from_tokudb + num_bytes;
}
//
// Write one variable-length field's payload into the tokudb row and record
// its end offset (relative to the start of the offset area) in the offset
// slot, using 1 or 2 bytes as requested. Returns a pointer just past the
// written payload.
//
static inline uchar* write_var_field(
    uchar* to_tokudb_offset_ptr, //location where offset data is going to be written
    uchar* to_tokudb_data, // location where data is going to be written
    uchar* to_tokudb_offset_start, //location where offset starts, IS THIS A BAD NAME????
    const uchar * data, // the data to write
    uint32_t data_length, // length of data to write
    uint32_t offset_bytes // number of offset bytes
    )
{
    // Write the payload first so we know where it ends.
    memcpy(to_tokudb_data, data, data_length);
    uchar* data_end = to_tokudb_data + data_length;
    //
    // for offset, we pack the offset where the data ENDS!
    //
    uint32_t end_offset = (uint32_t)(data_end - to_tokudb_offset_start);
    if (offset_bytes == 1) {
        to_tokudb_offset_ptr[0] = (uchar)end_offset;
    }
    else if (offset_bytes == 2) {
        int2store(to_tokudb_offset_ptr, end_offset);
    }
    else {
        assert(false);
    }
    return data_end;
}
//
// Decode the payload length of a variable-length field from its mysql-side
// length prefix (1 or 2 little-endian bytes).
// Fix: `data_length` is now initialized, so a build with assertions
// compiled out (NDEBUG) no longer returns an uninitialized value when
// mysql_length_bytes is unsupported.
//
static inline uint32_t get_var_data_length(
    const uchar * from_mysql,
    uint32_t mysql_length_bytes
    )
{
    uint32_t data_length = 0; // stays 0 if the prefix size is unsupported
    switch(mysql_length_bytes) {
    case(1):
        data_length = from_mysql[0];
        break;
    case(2):
        data_length = uint2korr(from_mysql);
        break;
    default:
        // should be unreachable: callers only pass 1 or 2
        assert(false);
        break;
    }
    return data_length;
}
//
// Pack one variable-length field from the mysql row into the tokudb row:
// decode the mysql length prefix, then store the raw payload and its end
// offset via write_var_field. Returns a pointer just past the payload.
//
static inline uchar* pack_var_field(
    uchar* to_tokudb_offset_ptr, //location where offset data is going to be written
    uchar* to_tokudb_data, // pointer to where tokudb data should be written
    uchar* to_tokudb_offset_start, //location where data starts, IS THIS A BAD NAME????
    const uchar * from_mysql, // mysql data
    uint32_t mysql_length_bytes, //number of bytes used to store length in from_mysql
    uint32_t offset_bytes //number of offset_bytes used in tokudb row
    )
{
    uint32_t payload_length = get_var_data_length(from_mysql, mysql_length_bytes);
    const uchar* payload = from_mysql + mysql_length_bytes;
    return write_var_field(
        to_tokudb_offset_ptr,
        to_tokudb_data,
        to_tokudb_offset_start,
        payload,
        payload_length,
        offset_bytes
        );
}
//
// Unpack one variable-length field from the tokudb row into the mysql row:
// write the length prefix (1 or 2 bytes) followed by the payload.
//
static inline void unpack_var_field(
    uchar* to_mysql,
    const uchar* from_tokudb_data,
    uint32_t from_tokudb_data_len,
    uint32_t mysql_length_bytes
    )
{
    // First write the length prefix in mysql's format ...
    if (mysql_length_bytes == 1) {
        to_mysql[0] = (uchar)from_tokudb_data_len;
    }
    else if (mysql_length_bytes == 2) {
        int2store(to_mysql, from_tokudb_data_len);
    }
    else {
        assert(false);
    }
    // ... then the payload immediately after it.
    memcpy(to_mysql + mysql_length_bytes, from_tokudb_data, from_tokudb_data_len);
}
// Pack a mysql blob field into the tokudb row: the length prefix
// (len_bytes bytes) is copied verbatim, then the blob payload is copied
// inline after it. In the mysql record the payload is NOT inline -- the
// record stores a pointer to it right after the length bytes.
// Returns a pointer just past the packed bytes.
static uchar* pack_toku_field_blob(
    uchar* to_tokudb,
    const uchar* from_mysql,
    Field* field
    )
{
    uint32_t len_bytes = field->row_pack_length();
    uint32_t length = 0;
    uchar* data_ptr = NULL;
    // copy the length prefix verbatim
    memcpy(to_tokudb, from_mysql, len_bytes);
    // decode the payload length from the prefix
    switch (len_bytes) {
    case (1):
        length = (uint32_t)(*from_mysql);
        break;
    case (2):
        length = uint2korr(from_mysql);
        break;
    case (3):
        length = tokudb_uint3korr(from_mysql);
        break;
    case (4):
        length = uint4korr(from_mysql);
        break;
    default:
        assert(false);
    }
    if (length > 0) {
        // extract the payload pointer stored after the length bytes;
        // memcpy avoids misaligned pointer reads
        memcpy((uchar *)(&data_ptr), from_mysql + len_bytes, sizeof(uchar*));
        memcpy(to_tokudb + len_bytes, data_ptr, length);
    }
    return (to_tokudb + len_bytes + length);
}
// Allocate a zero-filled per-connection transaction context into *out_trx.
// Returns 0 on success, ENOMEM on allocation failure (out_trx untouched).
static int create_tokudb_trx_data_instance(tokudb_trx_data** out_trx) {
    tokudb_trx_data* trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL));
    if (trx == NULL) {
        return ENOMEM;
    }
    *out_trx = trx;
    return 0;
}
//
// Build the key (and, when dest_val is non-NULL and the key is clustering,
// the value) for dest_db from a primary row (src_key/src_val), driven by the
// packed row descriptor stored in dest_db's descriptor DBT. Shared by the
// del/put row-generation callbacks below. Returns 0; allocation and sizing
// problems are caught by asserts.
//
static inline int tokudb_generate_row(
    DB *dest_db,
    DB *src_db,
    DBT *dest_key,
    DBT *dest_val,
    const DBT *src_key,
    const DBT *src_val
    )
{
    int error;
    DB* curr_db = dest_db;
    uchar* row_desc = NULL;
    uint32_t desc_size;
    uchar* buff = NULL;
    uint32_t max_key_len = 0;
    // The descriptor begins with a uint32 offset to the key description,
    // which in turn begins with its own uint32 size (inclusive of the size
    // field itself, hence the "- 4").
    row_desc = (uchar *)curr_db->descriptor->dbt.data;
    row_desc += (*(uint32_t *)row_desc);
    desc_size = (*(uint32_t *)row_desc) - 4;
    row_desc += 4;
    if (is_key_pk(row_desc, desc_size)) {
        // Destination is the primary key: alias the source DBTs directly,
        // freeing any REALLOC-owned buffers first and clearing flags so the
        // aliased memory is not freed or realloc'ed later.
        if (dest_key->flags == DB_DBT_REALLOC && dest_key->data != NULL) {
            free(dest_key->data);
        }
        if (dest_val != NULL) {
            if (dest_val->flags == DB_DBT_REALLOC && dest_val->data != NULL) {
                free(dest_val->data);
            }
        }
        dest_key->data = src_key->data;
        dest_key->size = src_key->size;
        dest_key->flags = 0;
        if (dest_val != NULL) {
            dest_val->data = src_val->data;
            dest_val->size = src_val->size;
            dest_val->flags = 0;
        }
        error = 0;
        goto cleanup;
    }
    // at this point, we need to create the key/val and set it
    // in the DBTs
    if (dest_key->flags == 0) {
        dest_key->ulen = 0;
        dest_key->size = 0;
        dest_key->data = NULL;
        dest_key->flags = DB_DBT_REALLOC;
    }
    if (dest_key->flags == DB_DBT_REALLOC) {
        // grow the caller-owned buffer to the worst-case key size
        max_key_len = max_key_size_from_desc(row_desc, desc_size);
        max_key_len += src_key->size;
        if (max_key_len > dest_key->ulen) {
            void* old_ptr = dest_key->data;
            void* new_ptr = NULL;
            new_ptr = realloc(old_ptr, max_key_len);
            assert(new_ptr);
            dest_key->data = new_ptr;
            dest_key->ulen = max_key_len;
        }
        buff = (uchar *)dest_key->data;
        assert(buff != NULL && max_key_len > 0);
    }
    else {
        // only zero-initialized or REALLOC DBTs are supported here
        assert(false);
    }
    dest_key->size = pack_key_from_desc(
        buff,
        row_desc,
        desc_size,
        src_key,
        src_val
        );
    assert(dest_key->ulen >= dest_key->size);
    if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY && !max_key_len) {
        max_key_len = max_key_size_from_desc(row_desc, desc_size);
        max_key_len += src_key->size;
    }
    if (max_key_len) {
        assert(max_key_len >= dest_key->size);
    }
    // advance to the value (clustering) description in the descriptor
    row_desc += desc_size;
    desc_size = (*(uint32_t *)row_desc) - 4;
    row_desc += 4;
    if (dest_val != NULL) {
        if (!is_key_clustering(row_desc, desc_size) || src_val->size == 0) {
            // non-clustering keys store an empty value
            dest_val->size = 0;
        }
        else {
            uchar* buff = NULL;
            if (dest_val->flags == 0) {
                dest_val->ulen = 0;
                dest_val->size = 0;
                dest_val->data = NULL;
                dest_val->flags = DB_DBT_REALLOC;
            }
            if (dest_val->flags == DB_DBT_REALLOC){
                // the clustering value never exceeds the source row size
                if (dest_val->ulen < src_val->size) {
                    void* old_ptr = dest_val->data;
                    void* new_ptr = NULL;
                    new_ptr = realloc(old_ptr, src_val->size);
                    assert(new_ptr);
                    dest_val->data = new_ptr;
                    dest_val->ulen = src_val->size;
                }
                buff = (uchar *)dest_val->data;
                assert(buff != NULL);
            }
            else {
                assert(false);
            }
            dest_val->size = pack_clustering_val_from_desc(
                buff,
                row_desc,
                desc_size,
                src_val
                );
            assert(dest_val->ulen >= dest_val->size);
        }
    }
    error = 0;
cleanup:
    return error;
}
//
// Row-generation callback for deletes: only the destination key is needed,
// so the value slot passed to tokudb_generate_row is NULL.
//
static int generate_row_for_del(
    DB *dest_db,
    DB *src_db,
    DBT_ARRAY *dest_key_arrays,
    const DBT *src_key,
    const DBT *src_val
    )
{
    DBT* key_slot = &dest_key_arrays->dbts[0];
    return tokudb_generate_row(dest_db, src_db, key_slot, NULL, src_key, src_val);
}
//
// Row-generation callback for puts: fills the destination key and, when a
// value array is provided, the destination value as well.
//
static int generate_row_for_put(
    DB *dest_db,
    DB *src_db,
    DBT_ARRAY *dest_key_arrays,
    DBT_ARRAY *dest_val_arrays,
    const DBT *src_key,
    const DBT *src_val
    )
{
    DBT* key_slot = &dest_key_arrays->dbts[0];
    DBT* val_slot = NULL;
    if (dest_val_arrays != NULL) {
        val_slot = &dest_val_arrays->dbts[0];
    }
    return tokudb_generate_row(dest_db, src_db, key_slot, val_slot, src_key, src_val);
}
//
// Handler constructor: sets the handler capability flags and resets every
// per-handler member to its empty default. No dictionaries are opened here
// (that happens in ::open()).
//
ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, table_arg) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    share = NULL;
    int_table_flags = HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_PRIMARY_KEY_IN_READ_INDEX | HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
        HA_FILE_BASED | HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX | HA_CAN_WRITE_DURING_OPTIMIZE;
    // buffers and per-query column state
    alloc_ptr = NULL;
    rec_buff = NULL;
    rec_update_buff = NULL;
    transaction = NULL;
    cursor = NULL;
    fixed_cols_for_query = NULL;
    var_cols_for_query = NULL;
    num_fixed_cols_for_query = 0;
    num_var_cols_for_query = 0;
    unpack_entire_row = true;
    read_blobs = false;
    read_key = false;
    added_rows = 0;
    deleted_rows = 0;
    last_dup_key = UINT_MAX;
    using_ignore = false;
    using_ignore_no_key = false;
    last_cursor_error = 0;
    range_lock_grabbed = false;
    blob_buff = NULL;
    num_blob_bytes = 0;
    delay_updating_ai_metadata = false;
    ai_metadata_update_required = false;
    // DBT arrays used for multi-key put/del operations
    memset(mult_key_dbt_array, 0, sizeof(mult_key_dbt_array));
    memset(mult_rec_dbt_array, 0, sizeof(mult_rec_dbt_array));
    for (uint32_t i = 0; i < sizeof(mult_key_dbt_array)/sizeof(mult_key_dbt_array[0]); i++) {
        toku_dbt_array_init(&mult_key_dbt_array[i], 1);
    }
    for (uint32_t i = 0; i < sizeof(mult_rec_dbt_array)/sizeof(mult_rec_dbt_array[0]); i++) {
        toku_dbt_array_init(&mult_rec_dbt_array[i], 1);
    }
    loader = NULL;
    abort_loader = false;
    memset(&lc, 0, sizeof(lc));
    lock.type = TL_IGNORE;
    // per-index flags used on multi-dictionary operations
    for (uint32_t i = 0; i < MAX_KEY+1; i++) {
        mult_put_flags[i] = 0;
        mult_del_flags[i] = DB_DELETE_ANY;
        mult_dbt_flags[i] = DB_DBT_REALLOC;
    }
    num_DBs_locked_in_bulk = false;
    lock_count = 0;
    use_write_locks = false;
    // bulk-fetch / range-query state
    range_query_buff = NULL;
    size_range_query_buff = 0;
    bytes_used_in_range_query_buff = 0;
    curr_range_query_buff_offset = 0;
    doing_bulk_fetch = false;
    prelocked_left_range_size = 0;
    prelocked_right_range_size = 0;
    tokudb_active_index = MAX_KEY;
    invalidate_icp();
    trx_handler_list.data = this;
    in_rpl_write_rows = in_rpl_delete_rows = in_rpl_update_rows = false;
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
// Destructor: release the DBT arrays allocated by the constructor.
ha_tokudb::~ha_tokudb() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    const uint32_t num_key_arrays = sizeof(mult_key_dbt_array)/sizeof(mult_key_dbt_array[0]);
    for (uint32_t i = 0; i < num_key_arrays; i++) {
        toku_dbt_array_destroy(&mult_key_dbt_array[i]);
    }
    const uint32_t num_rec_arrays = sizeof(mult_rec_dbt_array)/sizeof(mult_rec_dbt_array[0]);
    for (uint32_t i = 0; i < num_rec_arrays; i++) {
        toku_dbt_array_destroy(&mult_rec_dbt_array[i]);
    }
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
//
// states if table has an auto increment column, if so, sets index where auto inc column is to index
// Parameters:
// [out] index - if auto inc exists, then this param is set to where it exists in table, if not, then unchanged
// Returns:
// true if auto inc column exists, false otherwise
//
bool ha_tokudb::has_auto_increment_flag(uint* index) {
    // Scan the table's fields for the one carrying AUTO_INCREMENT_FLAG;
    // report its position through *index (left unchanged when not found).
    for (uint i = 0; i < table_share->fields; i++) {
        Field* field = table->field[i];
        if (field->flags & AUTO_INCREMENT_FLAG) {
            *index = i;
            return true;
        }
    }
    return false;
}
// Build the path of the per-table status dictionary and open it into *ptr.
// Returns 0 on success, ENOMEM or the open error otherwise.
static int open_status_dictionary(DB** ptr, const char* name, DB_TXN* txn) {
    char* newname = (char *)tokudb_my_malloc(
        get_max_dict_name_path_length(name),
        MYF(MY_WME));
    if (newname == NULL) {
        return ENOMEM;
    }
    make_name(newname, name, "status");
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_TRACE("open:%s", newname);
    }
    int error = tokudb::open_status(db_env, ptr, newname, txn);
    tokudb_my_free(newname);
    return error;
}
//
// Opens the table's primary "main" dictionary. On success both share->file
// and share->key_file[primary_key] point at the open DB; on failure any
// partially created handle is closed and both pointers are reset to NULL.
// Parameters:
//      [in] name - canonical table name used to build the dictionary path
//      is_read_only - open with DB_RDONLY when true
//      [in] txn - transaction under which the open runs
// Returns: 0 on success, error otherwise
//
int ha_tokudb::open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn) {
    int error;
    char* newname = NULL;
    uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;
    // must not already be open
    assert(share->file == NULL);
    assert(share->key_file[primary_key] == NULL);
    newname = (char *)tokudb_my_malloc(
        get_max_dict_name_path_length(name),
        MYF(MY_WME|MY_ZEROFILL)
        );
    if (newname == NULL) {
        error = ENOMEM;
        goto exit;
    }
    make_name(newname, name, "main");
    error = db_create(&share->file, db_env, 0);
    if (error) {
        goto exit;
    }
    // the primary key uses the same dictionary as the main row store
    share->key_file[primary_key] = share->file;
    error = share->file->open(share->file, txn, newname, NULL, DB_BTREE, open_flags, 0);
    if (error) {
        goto exit;
    }
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_HANDLER_TRACE("open:%s:file=%p", newname, share->file);
    }
    error = 0;
exit:
    if (error) {
        // undo a partially completed open
        if (share->file) {
            int r = share->file->close(
                share->file,
                0
                );
            assert(r==0);
            share->file = NULL;
            share->key_file[primary_key] = NULL;
        }
    }
    tokudb_my_free(newname);
    return error;
}
//
// Open a secondary table, the key will be a secondary index, the data will be a primary key
//
//
// Open a secondary-index dictionary into *ptr; the dictionary's key is the
// secondary index and its data is the primary key. On failure any partially
// created handle is closed and *ptr reset to NULL.
// Fix: use snprintf instead of sprintf so a long index name cannot overflow
// the fixed-size dict_name buffer.
// Parameters:
//      [out] ptr - receives the opened DB handle
//      [in] key_info - the secondary key being opened
//      [in] name - canonical table name used to build the dictionary path
//      is_read_only - open with DB_RDONLY when true
//      [in] txn - transaction under which the open runs
// Returns: 0 on success, error otherwise
//
int ha_tokudb::open_secondary_dictionary(DB** ptr, KEY* key_info, const char* name, bool is_read_only, DB_TXN* txn) {
    int error = ENOSYS;
    char dict_name[MAX_DICT_NAME_LEN];
    uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;
    char* newname = NULL;
    uint newname_len = 0;
    // bounded print: truncates rather than overflowing on a long index name
    snprintf(dict_name, sizeof(dict_name), "key-%s", key_info->name);
    newname_len = get_max_dict_name_path_length(name);
    newname = (char *)tokudb_my_malloc(newname_len, MYF(MY_WME|MY_ZEROFILL));
    if (newname == NULL) {
        error = ENOMEM;
        goto cleanup;
    }
    make_name(newname, name, dict_name);
    if ((error = db_create(ptr, db_env, 0))) {
        my_errno = error;
        goto cleanup;
    }
    if ((error = (*ptr)->open(*ptr, txn, newname, NULL, DB_BTREE, open_flags, 0))) {
        my_errno = error;
        goto cleanup;
    }
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_HANDLER_TRACE("open:%s:file=%p", newname, *ptr);
    }
cleanup:
    if (error) {
        // undo a partially completed open
        if (*ptr) {
            int r = (*ptr)->close(*ptr, 0);
            assert(r==0);
            *ptr = NULL;
        }
    }
    tokudb_my_free(newname);
    return error;
}
//
// Allocate and fill kc_info->cp_info[keynr]: for every field NOT covered by
// the key's filter, record either its byte offset among the fixed fields or
// its index among the variable-length fields. Also computes the per-key
// mcp_info sizes. Returns 0 on success, ENOMEM on allocation failure.
//
static int initialize_col_pack_info(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    int error = ENOSYS;
    //
    // set up the cp_info
    //
    assert(kc_info->cp_info[keynr] == NULL);
    kc_info->cp_info[keynr] = (COL_PACK_INFO *)tokudb_my_malloc(
        table_share->fields*sizeof(COL_PACK_INFO),
        MYF(MY_WME | MY_ZEROFILL)
        );
    if (kc_info->cp_info[keynr] == NULL) {
        error = ENOMEM;
        goto exit;
    }
    {
    uint32_t curr_fixed_offset = 0;
    uint32_t curr_var_index = 0;
    for (uint j = 0; j < table_share->fields; j++) {
        COL_PACK_INFO* curr = &kc_info->cp_info[keynr][j];
        //
        // need to set the offsets / indexes
        // offsets are calculated AFTER the NULL bytes
        //
        if (!bitmap_is_set(&kc_info->key_filters[keynr],j)) {
            if (is_fixed_field(kc_info, j)) {
                // fixed fields are located by byte offset
                curr->col_pack_val = curr_fixed_offset;
                curr_fixed_offset += kc_info->field_lengths[j];
            }
            else if (is_variable_field(kc_info, j)) {
                // variable fields are located by ordinal index
                curr->col_pack_val = curr_var_index;
                curr_var_index++;
            }
        }
    }
    //
    // set up the mcp_info
    //
    kc_info->mcp_info[keynr].fixed_field_size = get_fixed_field_size(
        kc_info,
        table_share,
        keynr
        );
    kc_info->mcp_info[keynr].len_of_offsets = get_len_of_offsets(
        kc_info,
        table_share,
        keynr
        );
    error = 0;
    }
exit:
    return error;
}
// Return kc_info's per-key state for `keynr` to its freshly-allocated form:
// clear the key filter bitmap, release the column pack info, and zero the
// multi-column pack info.
static void reset_key_and_col_info(KEY_AND_COL_INFO *kc_info, uint keynr) {
    bitmap_clear_all(&kc_info->key_filters[keynr]);
    tokudb_my_free(kc_info->cp_info[keynr]);
    kc_info->cp_info[keynr] = NULL;
    MULTI_COL_PACK_INFO zeroed = { 0, 0 };
    kc_info->mcp_info[keynr] = zeroed;
}
//
// Classify every table field as fixed / variable / blob in kc_info, record
// field lengths and length-prefix sizes, decide how many bytes variable-field
// offsets need, then build the key filters and column pack info for the
// primary key and every clustering key.
// Returns 0 on success, error otherwise.
//
static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, KEY_AND_COL_INFO* kc_info, uint hidden_primary_key, uint primary_key) {
    int error = 0;
    uint32_t curr_blob_field_index = 0;
    uint32_t max_var_bytes = 0;
    //
    // fill in the field lengths. 0 means it is a variable sized field length
    // fill in length_bytes, 0 means it is fixed or blob
    //
    for (uint i = 0; i < table_share->fields; i++) {
        Field* field = table_share->field[i];
        TOKU_TYPE toku_type = mysql_to_toku_type(field);
        uint32 pack_length = 0;
        switch (toku_type) {
        case toku_type_int:
        case toku_type_double:
        case toku_type_float:
        case toku_type_fixbinary:
        case toku_type_fixstring:
            pack_length = field->pack_length();
            assert(pack_length < 1<<16);
            kc_info->field_types[i] = KEY_AND_COL_INFO::TOKUDB_FIXED_FIELD;
            kc_info->field_lengths[i] = (uint16_t)pack_length;
            kc_info->length_bytes[i] = 0;
            break;
        case toku_type_blob:
            kc_info->field_types[i] = KEY_AND_COL_INFO::TOKUDB_BLOB_FIELD;
            kc_info->field_lengths[i] = 0;
            kc_info->length_bytes[i] = 0;
            kc_info->blob_fields[curr_blob_field_index] = i;
            curr_blob_field_index++;
            break;
        case toku_type_varstring:
        case toku_type_varbinary:
            kc_info->field_types[i] = KEY_AND_COL_INFO::TOKUDB_VARIABLE_FIELD;
            kc_info->field_lengths[i] = 0;
            kc_info->length_bytes[i] = (uchar)((Field_varstring *)field)->length_bytes;
            max_var_bytes += field->field_length;
            break;
        default:
            assert(false);
        }
    }
    kc_info->num_blobs = curr_blob_field_index;
    //
    // initialize share->num_offset_bytes
    // because MAX_REF_LENGTH is 65536, we
    // can safely set num_offset_bytes to 1 or 2
    //
    if (max_var_bytes < 256) {
        kc_info->num_offset_bytes = 1;
    }
    else {
        kc_info->num_offset_bytes = 2;
    }
    for (uint i = 0; i < table_share->keys + tokudb_test(hidden_primary_key); i++) {
        //
        // do the cluster/primary key filtering calculations
        //
        // a hidden primary key covers no user columns, so it gets no filter
        if (! (i==primary_key && hidden_primary_key) ){
            if ( i == primary_key ) {
                set_key_filter(
                    &kc_info->key_filters[primary_key],
                    &table_share->key_info[primary_key],
                    table,
                    true
                    );
            }
            else {
                // secondary keys filter both their own columns and (when the
                // pk is not hidden) the primary key's columns
                set_key_filter(
                    &kc_info->key_filters[i],
                    &table_share->key_info[i],
                    table,
                    true
                    );
                if (!hidden_primary_key) {
                    set_key_filter(
                        &kc_info->key_filters[i],
                        &table_share->key_info[primary_key],
                        table,
                        true
                        );
                }
            }
        }
        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
            error = initialize_col_pack_info(kc_info,table_share,i);
            if (error) {
                goto exit;
            }
        }
    }
exit:
    return error;
}
// REPLACE INTO can take the fast path when there are no secondary keys to
// maintain, or when every field of every secondary key is covered by both
// that key's filter and the primary key's filter.
bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
    uint curr_num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
    // only the primary dictionary exists: nothing else to keep in sync
    if (curr_num_DBs == 1) {
        return true;
    }
    for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
        if (curr_index == pk) continue;
        KEY* curr_key_info = &table_share->key_info[curr_index];
        for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
            uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
            if (!bitmap_is_set(&kc_info->key_filters[curr_index], curr_field_index)) {
                return false;
            }
            // the field is in the secondary key's filter; it must also be
            // in the primary key's filter
            if (!bitmap_is_set(&kc_info->key_filters[pk], curr_field_index)) {
                return false;
            }
        }
    }
    return true;
}
//
// One-time initialization of the table share: reads the status dictionary,
// verifies the stored frm data, builds key/column info, opens the main and
// secondary dictionaries, computes ref_length, the row estimate,
// auto-increment state and cardinality info. Called from ::open() while the
// share is in the OPENING state.
// Returns 0 on success, error otherwise.
//
int ha_tokudb::initialize_share(const char* name, int mode) {
    int error = 0;
    uint64_t num_rows = 0;
    DB_TXN* txn = NULL;
    bool do_commit = false;
    THD* thd = ha_thd();
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
    // During CREATE TABLE reuse the statement's sub-transaction; otherwise
    // run under a private transaction committed at exit.
    if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
        txn = trx->sub_sp_level;
    }
    else {
        do_commit = true;
        error = txn_begin(db_env, 0, &txn, 0, thd);
        if (error) { goto exit; }
    }
    DBUG_PRINT("info", ("share->use_count %u", share->use_count));
    share->m_initialize_count++;
    error = get_status(txn);
    if (error) {
        goto exit;
    }
    // refuse to open a table created by an incompatible on-disk version
    if (share->version != HA_TOKU_VERSION) {
        error = ENOSYS;
        goto exit;
    }
#if WITH_PARTITION_STORAGE_ENGINE
    // verify frm data for non-partitioned tables
    if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) {
        error = verify_frm_data(table->s->path.str, txn);
        if (error)
            goto exit;
    } else {
        // remove the frm data for partitions since we are not maintaining it
        error = remove_frm_data(share->status_block, txn);
        if (error)
            goto exit;
    }
#else
    error = verify_frm_data(table->s->path.str, txn);
    if (error)
        goto exit;
#endif
    error = initialize_key_and_col_info(
        table_share,
        table,
        &share->kc_info,
        hidden_primary_key,
        primary_key
        );
    if (error) { goto exit; }
    error = open_main_dictionary(name, mode == O_RDONLY, txn);
    if (error) { goto exit; }
    share->has_unique_keys = false;
    /* Open other keys;  These are part of the share structure */
    for (uint i = 0; i < table_share->keys; i++) {
        if (table_share->key_info[i].flags & HA_NOSAME) {
            share->has_unique_keys = true;
        }
        if (i != primary_key) {
            error = open_secondary_dictionary(
                &share->key_file[i],
                &table_share->key_info[i],
                name,
                mode == O_RDONLY,
                txn
                );
            if (error) {
                goto exit;
            }
        }
    }
    share->replace_into_fast = can_replace_into_be_fast(
        table_share,
        &share->kc_info,
        primary_key
        );
    share->pk_has_string = false;
    if (!hidden_primary_key) {
        //
        // We need to set the ref_length to start at 5, to account for
        // the "infinity byte" in keys, and for placing the DBT size in the first four bytes
        //
        ref_length = sizeof(uint32_t) + sizeof(uchar);
        KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
        KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
        for (; key_part != end; key_part++) {
            ref_length += key_part->field->max_packed_col_length(key_part->length);
            // remember whether the pk contains any string-like part
            TOKU_TYPE toku_type = mysql_to_toku_type(key_part->field);
            if (toku_type == toku_type_fixstring ||
                toku_type == toku_type_varstring ||
                toku_type == toku_type_blob
                )
            {
                share->pk_has_string = true;
            }
        }
        share->status |= STATUS_PRIMARY_KEY_INIT;
    }
    share->ref_length = ref_length;
    error = estimate_num_rows(share->file, &num_rows, txn);
    //
    // estimate_num_rows should not fail under normal conditions
    //
    if (error == 0) {
        share->rows = num_rows;
    }
    else {
        goto exit;
    }
    //
    // initialize auto increment data
    //
    share->has_auto_inc = has_auto_increment_flag(&share->ai_field_index);
    if (share->has_auto_inc) {
        init_auto_increment();
    }
    if (may_table_be_empty(txn)) {
        share->try_table_lock = true;
    }
    else {
        share->try_table_lock = false;
    }
    share->num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
    init_hidden_prim_key_info(txn);
    // initialize cardinality info from the status dictionary
    share->n_rec_per_key = tokudb::compute_total_key_parts(table_share);
    share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE + MY_ALLOW_ZERO_PTR));
    error = tokudb::get_card_from_status(share->status_block, txn, share->n_rec_per_key, share->rec_per_key);
    if (error) {
        // no stored cardinality: fall back to zeros
        for (uint i = 0; i < share->n_rec_per_key; i++)
            share->rec_per_key[i] = 0;
    }
    error = 0;
exit:
    if (do_commit && txn) {
        commit_txn(txn,0);
    }
    return error;
}
//
// Creates and opens a handle to a table which already exists in a tokudb
// database.
// Parameters:
// [in] name - table name
// mode - seems to specify if table is read only
// test_if_locked - unused
// Returns:
// 0 on success
// 1 on error
//
int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
    TOKUDB_HANDLER_DBUG_ENTER("%s %o %u", name, mode, test_if_locked);
    THD* thd = ha_thd();
    int error = 0;
    int ret_val = 0;
    transaction = NULL;
    cursor = NULL;
    /* Open primary key */
    hidden_primary_key = 0;
    if ((primary_key = table_share->primary_key) >= MAX_KEY) {
        // No primary key
        primary_key = table_share->keys;
        key_used_on_scan = MAX_KEY;
        hidden_primary_key = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        ref_length = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t);
    }
    else {
        key_used_on_scan = primary_key;
    }
    /* Need some extra memory in case of packed keys */
    // the "+ 1" is for the first byte that states +/- infinity
    // multiply everything by 2 to account for clustered keys having a key and primary key together
    max_key_length = 2*(table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar));
    // one allocation serving all the per-handler key/column buffers
    alloc_ptr = tokudb_my_multi_malloc(MYF(MY_WME),
        &key_buff, max_key_length,
        &key_buff2, max_key_length,
        &key_buff3, max_key_length,
        &key_buff4, max_key_length,
        &prelocked_left_range, max_key_length,
        &prelocked_right_range, max_key_length,
        &primary_key_buff, (hidden_primary_key ? 0 : max_key_length),
        &fixed_cols_for_query, table_share->fields*sizeof(uint32_t),
        &var_cols_for_query, table_share->fields*sizeof(uint32_t),
        NullS
        );
    if (alloc_ptr == NULL) {
        ret_val = 1;
        goto exit;
    }
    size_range_query_buff = get_tokudb_read_buf_size(thd);
    range_query_buff = (uchar *)tokudb_my_malloc(size_range_query_buff, MYF(MY_WME));
    if (range_query_buff == NULL) {
        ret_val = 1;
        goto exit;
    }
    alloced_rec_buff_length = table_share->rec_buff_length + table_share->fields;
    rec_buff = (uchar *) tokudb_my_malloc(alloced_rec_buff_length, MYF(MY_WME));
    if (rec_buff == NULL) {
        ret_val = 1;
        goto exit;
    }
    alloced_update_rec_buff_length = alloced_rec_buff_length;
    rec_update_buff = (uchar *) tokudb_my_malloc(alloced_update_rec_buff_length, MYF(MY_WME));
    if (rec_update_buff == NULL) {
        ret_val = 1;
        goto exit;
    }
    // lookup or create share
    tokudb_pthread_mutex_lock(&tokudb_mutex);
    share = get_share(name, table_share);
    assert(share);
    thr_lock_data_init(&share->lock, &lock, NULL);
    // take the share mutex before releasing the global one so the share
    // cannot change state in between
    tokudb_pthread_mutex_lock(&share->mutex);
    tokudb_pthread_mutex_unlock(&tokudb_mutex);
    share->use_count++;
    // wait for any in-flight open/close of this share to settle
    while (share->m_state == TOKUDB_SHARE::OPENING || share->m_state == TOKUDB_SHARE::CLOSING) {
        tokudb_pthread_cond_wait(&share->m_openclose_cond, &share->mutex);
    }
    if (share->m_state == TOKUDB_SHARE::CLOSED) {
        // this handler performs the actual initialization, with the share
        // marked OPENING and the mutex released for the duration
        share->m_state = TOKUDB_SHARE::OPENING;
        tokudb_pthread_mutex_unlock(&share->mutex);
        ret_val = allocate_key_and_col_info(table_share, &share->kc_info);
        if (ret_val == 0) {
            ret_val = initialize_share(name, mode);
        }
        tokudb_pthread_mutex_lock(&share->mutex);
        if (ret_val == 0) {
            share->m_state = TOKUDB_SHARE::OPENED;
        } else {
            share->m_state = TOKUDB_SHARE::ERROR;
            share->m_error = ret_val;
        }
        // wake any waiters blocked in the loop above
        tokudb_pthread_cond_broadcast(&share->m_openclose_cond);
    }
    if (share->m_state == TOKUDB_SHARE::ERROR) {
        ret_val = share->m_error;
        tokudb_pthread_mutex_unlock(&share->mutex);
        free_share(share);
        goto exit;
    } else {
        assert(share->m_state == TOKUDB_SHARE::OPENED);
        tokudb_pthread_mutex_unlock(&share->mutex);
    }
    ref_length = share->ref_length;     // If second open
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_HANDLER_TRACE("tokudbopen:%p:share=%p:file=%p:table=%p:table->s=%p:%d",
                     this, share, share->file, table, table->s, share->use_count);
    }
    key_read = false;
    stats.block_size = 1<<20;    // QQQ Tokudb DB block size
    info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
exit:
    if (ret_val) {
        // failed open: release everything allocated above
        tokudb_my_free(range_query_buff);
        range_query_buff = NULL;
        tokudb_my_free(alloc_ptr);
        alloc_ptr = NULL;
        tokudb_my_free(rec_buff);
        rec_buff = NULL;
        tokudb_my_free(rec_update_buff);
        rec_update_buff = NULL;
        if (error) {
            my_errno = error;
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(ret_val);
}
//
// estimate the number of rows in a DB
// Parameters:
// [in] db - DB whose number of rows will be estimated
// [out] num_rows - number of estimated rows in db
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) {
    DB_BTREE_STAT64 dict_stats;
    DB_TXN* txn_to_use = txn;
    bool do_commit = false;
    int error;
    // With no caller-supplied transaction, run under a private
    // read-uncommitted one that is committed before returning.
    if (txn_to_use == NULL) {
        error = txn_begin(db_env, 0, &txn_to_use, DB_READ_UNCOMMITTED, ha_thd());
        if (error) {
            return error;
        }
        do_commit = true;
    }
    error = db->stat64(db, txn_to_use, &dict_stats);
    if (error == 0) {
        *num_rows = dict_stats.bt_ndata;
    }
    if (do_commit) {
        commit_txn(txn_to_use, 0);
    }
    return error;
}
// Store `data` (size bytes) under metadata key `curr_key_data` in the status
// dictionary `db`, under `txn` (or a private transaction when txn is NULL).
int ha_tokudb::write_to_status(DB* db, HA_METADATA_KEY curr_key_data, void* data, uint size, DB_TXN* txn ){
    return write_metadata(db, &curr_key_data, sizeof curr_key_data, data, size, txn);
}
// Delete the metadata entry `curr_key_data` from the status dictionary `db`,
// under `txn` (or a private transaction when txn is NULL).
int ha_tokudb::remove_from_status(DB *db, HA_METADATA_KEY curr_key_data, DB_TXN *txn) {
    return remove_metadata(db, &curr_key_data, sizeof curr_key_data, txn);
}
// Delete the entry keyed by key_data/key_size from the status dictionary
// `db`. When `transaction` is NULL the delete runs under a private
// transaction, committed (NOSYNC) on success or aborted on error.
int ha_tokudb::remove_metadata(DB* db, void* key_data, uint key_size, DB_TXN* transaction){
    DBT key;
    DB_TXN* txn = transaction;
    bool do_commit = false;
    int error;
    //
    // transaction to be used for putting metadata into status.tokudb
    //
    if (txn == NULL) {
        error = txn_begin(db_env, 0, &txn, 0, ha_thd());
        if (error) {
            return error;
        }
        do_commit = true;
    }
    memset(&key, 0, sizeof(key));
    key.data = key_data;
    key.size = key_size;
    error = db->del(db, txn, &key, DB_DELETE_ANY);
    if (do_commit) {
        if (error == 0) {
            commit_txn(txn, DB_TXN_NOSYNC);
        }
        else {
            abort_txn(txn);
        }
    }
    return error;
}
//
// helper function to write a piece of metadata in to status.tokudb
//
// Store val_data/val_size under key_data/key_size in the status dictionary
// `db`. When `transaction` is NULL the put runs under a private transaction,
// committed (NOSYNC) on success or aborted on error.
int ha_tokudb::write_metadata(DB* db, void* key_data, uint key_size, void* val_data, uint val_size, DB_TXN* transaction ){
    DBT key;
    DBT value;
    DB_TXN* txn = transaction;
    bool do_commit = false;
    int error;
    //
    // transaction to be used for putting metadata into status.tokudb
    //
    if (txn == NULL) {
        error = txn_begin(db_env, 0, &txn, 0, ha_thd());
        if (error) {
            return error;
        }
        do_commit = true;
    }
    memset(&key, 0, sizeof(key));
    memset(&value, 0, sizeof(value));
    key.data = key_data;
    key.size = key_size;
    value.data = val_data;
    value.size = val_size;
    error = db->put(db, txn, &key, &value, 0);
    if (do_commit) {
        if (error == 0) {
            commit_txn(txn, DB_TXN_NOSYNC);
        }
        else {
            abort_txn(txn);
        }
    }
    return error;
}
// Read the table's frm image (from the table share on MariaDB 10.0.x,
// otherwise from the .frm file) and store it in the status dictionary `db`
// under the hatoku_frm_data key.
int ha_tokudb::write_frm_data(DB* db, DB_TXN* txn, const char* frm_name) {
    TOKUDB_HANDLER_DBUG_ENTER("%p %p %s", db, txn, frm_name);
    uchar* frm_data = NULL;
    size_t frm_len = 0;
    int error = 0;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
    error = table_share->read_frm_image((const uchar**)&frm_data,&frm_len);
    if (error) { goto cleanup; }
#else
    error = readfrm(frm_name,&frm_data,&frm_len);
    if (error) { goto cleanup; }
#endif
    error = write_to_status(db,hatoku_frm_data,frm_data,(uint)frm_len, txn);
    if (error) { goto cleanup; }
    error = 0;
cleanup:
    tokudb_my_free(frm_data);
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Delete the stored frm image from the status dictionary (used for
// partitioned tables, whose frm data is not maintained here).
int ha_tokudb::remove_frm_data(DB *db, DB_TXN *txn) {
    return remove_from_status(db, hatoku_frm_data, txn);
}
// getf_set callback: copy the stored frm image out of `row` into the
// caller-supplied DBT passed via `context` (caller frees the copy).
static int smart_dbt_callback_verify_frm (DBT const *key, DBT const *row, void *context) {
    DBT* stored_frm = (DBT *)context;
    uchar* image_copy = (uchar *)tokudb_my_malloc(row->size, MYF(MY_WME));
    assert(image_copy);
    memcpy(image_copy, row->data, row->size);
    stored_frm->data = image_copy;
    stored_frm->size = row->size;
    return 0;
}
// Compare the frm image stored in the status dictionary with the one MySQL
// currently has. If none is stored yet, store the current image. Returns
// HA_ERR_TABLE_DEF_CHANGED when the two images differ, 0 when they match.
int ha_tokudb::verify_frm_data(const char* frm_name, DB_TXN* txn) {
    TOKUDB_HANDLER_DBUG_ENTER("%s", frm_name);
    uchar* mysql_frm_data = NULL;
    size_t mysql_frm_len = 0;
    DBT key = {};
    DBT stored_frm = {};
    int error = 0;
    HA_METADATA_KEY curr_key = hatoku_frm_data;
    // get the frm data from MySQL
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
    error = table_share->read_frm_image((const uchar**)&mysql_frm_data,&mysql_frm_len);
    if (error) {
        goto cleanup;
    }
#else
    error = readfrm(frm_name,&mysql_frm_data,&mysql_frm_len);
    if (error) {
        goto cleanup;
    }
#endif
    // fetch the stored image; the callback allocates stored_frm.data
    key.data = &curr_key;
    key.size = sizeof(curr_key);
    error = share->status_block->getf_set(
        share->status_block,
        txn,
        0,
        &key,
        smart_dbt_callback_verify_frm,
        &stored_frm
        );
    if (error == DB_NOTFOUND) {
        // if not found, write it
        error = write_frm_data(share->status_block, txn, frm_name);
        goto cleanup;
    } else if (error) {
        goto cleanup;
    }
    // any difference in size or content means the table definition changed
    if (stored_frm.size != mysql_frm_len || memcmp(stored_frm.data, mysql_frm_data, stored_frm.size)) {
        error = HA_ERR_TABLE_DEF_CHANGED;
        goto cleanup;
    }
    error = 0;
cleanup:
    tokudb_my_free(mysql_frm_data);
    tokudb_my_free(stored_frm.data);
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Updates status.tokudb with a new max value used for the auto increment column
// Parameters:
// [in] db - this will always be status.tokudb
// val - value to store
// Returns:
// 0 on success, error otherwise
//
//
int ha_tokudb::update_max_auto_inc(DB* db, ulonglong val){
    // NULL txn: the status write is performed outside any user transaction
    return write_to_status(db,hatoku_max_ai,&val,sizeof(val), NULL);
}
//
// Writes the initial auto increment value, as specified by create table
// so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
// then the value 100 will be stored here in val
// Parameters:
// [in] db - this will always be status.tokudb
// val - value to store
// Returns:
// 0 on success, error otherwise
//
//
int ha_tokudb::write_auto_inc_create(DB* db, ulonglong val, DB_TXN* txn){
    // persists the CREATE TABLE ... AUTO_INCREMENT=val starting point
    return write_to_status(db,hatoku_ai_create_value,&val,sizeof(val), txn);
}
//
// Closes a handle to a table.
//
int ha_tokudb::close(void) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    // all real teardown happens in __close(); this wrapper exists for tracing
    int r = __close();
    TOKUDB_HANDLER_DBUG_RETURN(r);
}
int ha_tokudb::__close() {
TOKUDB_HANDLER_DBUG_ENTER("");
if (tokudb_debug & TOKUDB_DEBUG_OPEN)
TOKUDB_HANDLER_TRACE("close:%p", this);
tokudb_my_free(rec_buff);
tokudb_my_free(rec_update_buff);
tokudb_my_free(blob_buff);
tokudb_my_free(alloc_ptr);
tokudb_my_free(range_query_buff);
for (uint32_t i = 0; i < sizeof(mult_key_dbt_array)/sizeof(mult_key_dbt_array[0]); i++) {
toku_dbt_array_destroy(&mult_key_dbt_array[i]);
}
for (uint32_t i = 0; i < sizeof(mult_rec_dbt_array)/sizeof(mult_rec_dbt_array[0]); i++) {
toku_dbt_array_destroy(&mult_rec_dbt_array[i]);
}
rec_buff = NULL;
rec_update_buff = NULL;
alloc_ptr = NULL;
ha_tokudb::reset();
int retval = free_share(share);
TOKUDB_HANDLER_DBUG_RETURN(retval);
}
//
// Ensure rec_buff can hold at least `length` bytes, growing it if necessary.
// Does nothing when the current allocation is already large enough.
// Parameters:
//      length - required capacity in bytes
// Returns 0 on success, 1 if the reallocation failed.
//
bool ha_tokudb::fix_rec_buff_for_blob(ulong length) {
    if (rec_buff != NULL && length <= alloced_rec_buff_length) {
        return 0;
    }
    uchar* grown = (uchar *) tokudb_my_realloc((void *) rec_buff, length, MYF(MY_ALLOW_ZERO_PTR));
    if (grown == NULL) {
        return 1;
    }
    rec_buff = grown;
    alloced_rec_buff_length = length;
    return 0;
}
//
// Ensure rec_update_buff can hold at least `length` bytes, growing it if
// necessary. Does nothing when the current allocation is already large enough.
// Parameters:
//      length - required capacity in bytes
// Returns 0 on success, 1 if the reallocation failed.
//
bool ha_tokudb::fix_rec_update_buff_for_blob(ulong length) {
    if (rec_update_buff != NULL && length <= alloced_update_rec_buff_length) {
        return 0;
    }
    uchar* grown = (uchar *) tokudb_my_realloc((void *) rec_update_buff, length, MYF(MY_ALLOW_ZERO_PTR));
    if (grown == NULL) {
        return 1;
    }
    rec_update_buff = grown;
    alloced_update_rec_buff_length = length;
    return 0;
}
/*
  Calculate the maximum packed length needed for a row: the fixed record
  length plus 2 length bytes per field, plus the actual payload of every
  blob field in `buf` (again with 2 extra bytes each).
*/
ulong ha_tokudb::max_row_length(const uchar * buf) {
    ulong total = table_share->reclength + table_share->fields * 2;
    uint* blob_idx = table_share->blob_field;
    uint* blob_idx_end = blob_idx + table_share->blob_fields;
    while (blob_idx != blob_idx_end) {
        Field_blob* blob = (Field_blob *) table->field[*blob_idx];
        total += 2 + blob->get_length((uchar *) (buf + field_offset(blob, table)));
        ++blob_idx;
    }
    return total;
}
/*
*/
//
// take the row passed in as a DBT*, and convert it into a row in MySQL format in record
// Pack a row for storage.
// If the row is of fixed length, just store the row 'as is'.
// If not, we will generate a packed row suitable for storage.
// This will only fail if we don't have enough memory to pack the row,
// which may only happen in rows with blobs, as the default row length is
// pre-allocated.
// Parameters:
// [out] row - row stored in DBT to be converted
// [out] buf - buffer where row is packed
// [in] record - row in MySQL format
//
// Pack the MySQL-format row `record` into `row_buff` and describe the result
// in the DBT `row`. Layout produced: null bytes | fixed fields | var-field
// offsets | var-field data | blob data. Fields covered by the index's key
// filter are skipped (they live in the key, not the row).
// Parameters:
//      [out] row      - DBT pointing into row_buff on return
//      [in]  record   - row in MySQL format
//      index          - which key's packing metadata (mcp_info) to use
//      [out] row_buff - destination buffer (must be pre-sized by caller)
// Returns 0 on success.
int ha_tokudb::pack_row_in_buff(
    DBT * row,
    const uchar* record,
    uint index,
    uchar* row_buff
    )
{
    uchar* fixed_field_ptr = NULL;
    uchar* var_field_offset_ptr = NULL;
    uchar* start_field_data_ptr = NULL;
    uchar* var_field_data_ptr = NULL;
    int r = ENOSYS;
    memset((void *) row, 0, sizeof(*row));
    my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
    // Copy null bytes
    memcpy(row_buff, record, table_share->null_bytes);
    // carve the buffer into its regions using the precomputed sizes
    fixed_field_ptr = row_buff + table_share->null_bytes;
    var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[index].fixed_field_size;
    start_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
    var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
    // assert that when the hidden primary key exists, primary_key_offsets is NULL
    for (uint i = 0; i < table_share->fields; i++) {
        Field* field = table->field[i];
        uint curr_field_offset = field_offset(field, table);
        if (bitmap_is_set(&share->kc_info.key_filters[index],i)) {
            // field is stored in the key for this index; not repeated in the row
            continue;
        }
        if (is_fixed_field(&share->kc_info, i)) {
            fixed_field_ptr = pack_fixed_field(
                fixed_field_ptr,
                record + curr_field_offset,
                share->kc_info.field_lengths[i]
                );
        }
        else if (is_variable_field(&share->kc_info, i)) {
            // writes the data AND its end-offset entry; advances data ptr
            var_field_data_ptr = pack_var_field(
                var_field_offset_ptr,
                var_field_data_ptr,
                start_field_data_ptr,
                record + curr_field_offset,
                share->kc_info.length_bytes[i],
                share->kc_info.num_offset_bytes
                );
            var_field_offset_ptr += share->kc_info.num_offset_bytes;
        }
    }
    // blobs are appended last, after all variable-length field data
    for (uint i = 0; i < share->kc_info.num_blobs; i++) {
        Field* field = table->field[share->kc_info.blob_fields[i]];
        var_field_data_ptr = pack_toku_field_blob(
            var_field_data_ptr,
            record + field_offset(field, table),
            field
            );
    }
    row->data = row_buff;
    row->size = (size_t) (var_field_data_ptr - row_buff);
    r = 0;
    dbug_tmp_restore_column_map(table->write_set, old_map);
    return r;
}
// Pack `record` into the general-purpose row buffer (rec_buff).
// Thin wrapper around pack_row_in_buff; see that function for the layout.
int ha_tokudb::pack_row(
    DBT * row,
    const uchar* record,
    uint index
    )
{
    return pack_row_in_buff(row,record,index,rec_buff);
}
// Pack the pre-update image of a row into the dedicated update buffer
// (rec_update_buff) so it does not clobber rec_buff, which may still hold
// the new row image during an UPDATE.
int ha_tokudb::pack_old_row_for_update(
    DBT * row,
    const uchar* record,
    uint index
    )
{
    return pack_row_in_buff(row,record,index,rec_update_buff);
}
// Unpack the blob section of a stored row into `record`. The raw blob bytes
// are first copied into the handler-owned blob_buff (grown as needed) because
// MySQL blob fields keep pointers into the buffer rather than copies.
// Parameters:
//      [out] record          - row in MySQL format to receive blob pointers
//      [in]  from_tokudb_blob - start of the packed blob section
//      num_bytes             - total size of the packed blob section
//      check_bitmap          - if true, blobs not in read_set/write_set are skipped
// Returns 0 on success, ENOMEM, or a negative internal sentinel on corruption.
int ha_tokudb::unpack_blobs(
    uchar* record,
    const uchar* from_tokudb_blob,
    uint32_t num_bytes,
    bool check_bitmap
    )
{
    uint error = 0;
    uchar* ptr = NULL;
    const uchar* buff = NULL;
    //
    // assert that num_bytes > 0 iff share->num_blobs > 0
    //
    assert( !((share->kc_info.num_blobs == 0) && (num_bytes > 0)) );
    // grow blob_buff if this row's blob section is larger than anything seen so far
    if (num_bytes > num_blob_bytes) {
        ptr = (uchar *)tokudb_my_realloc((void *)blob_buff, num_bytes, MYF(MY_ALLOW_ZERO_PTR));
        if (ptr == NULL) {
            error = ENOMEM;
            goto exit;
        }
        blob_buff = ptr;
        num_blob_bytes = num_bytes;
    }
    memcpy(blob_buff, from_tokudb_blob, num_bytes);
    buff= blob_buff;
    for (uint i = 0; i < share->kc_info.num_blobs; i++) {
        uint32_t curr_field_index = share->kc_info.blob_fields[i];
        // with check_bitmap, skip blobs the current statement never touches
        bool skip = check_bitmap ?
            !(bitmap_is_set(table->read_set,curr_field_index) ||
              bitmap_is_set(table->write_set,curr_field_index)) :
            false;
        Field* field = table->field[curr_field_index];
        uint32_t len_bytes = field->row_pack_length();
        const uchar* end_buff = unpack_toku_field_blob(
            record + field_offset(field, table),
            buff,
            len_bytes,
            skip
            );
        // verify that the pointers to the blobs are all contained within the blob_buff
        if (!(blob_buff <= buff && end_buff <= blob_buff + num_bytes)) {
            error = -3000000;  // internal sentinel: blob pointer escaped blob_buff
            goto exit;
        }
        buff = end_buff;
    }
    // verify that the entire blob buffer was parsed
    if (share->kc_info.num_blobs > 0 && !(num_bytes > 0 && buff == blob_buff + num_bytes)) {
        error = -4000000;  // internal sentinel: trailing bytes left unparsed
        goto exit;
    }
    error = 0;
exit:
    return error;
}
//
// take the row passed in as a DBT*, and convert it into a row in MySQL format in record
// Parameters:
// [out] record - row in MySQL format
// [in] row - row stored in DBT to be converted
//
// Convert a stored TokuDB row (value DBT plus its key DBT) back into a row in
// MySQL format. Two modes: unpack everything (unpack_entire_row), or unpack
// only the columns listed in fixed_cols_for_query / var_cols_for_query.
// Parameters:
//      [out] record - row in MySQL format
//      [in]  row    - stored row value
//      [in]  key    - stored key (key-resident columns are unpacked from here)
//      index        - which key's packing metadata to use
// Returns 0 on success, error from unpack_blobs otherwise.
int ha_tokudb::unpack_row(
    uchar* record,
    DBT const *row,
    DBT const *key,
    uint index
    )
{
    //
    // two cases, fixed length row, and variable length row
    // fixed length row is first below
    //
    /* Copy null bits */
    int error = 0;
    const uchar* fixed_field_ptr = (const uchar *) row->data;
    const uchar* var_field_offset_ptr = NULL;
    const uchar* var_field_data_ptr = NULL;
    uint32_t data_end_offset = 0;
    memcpy(record, fixed_field_ptr, table_share->null_bytes);
    // carve the stored row into its regions (mirrors pack_row_in_buff layout)
    fixed_field_ptr += table_share->null_bytes;
    var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[index].fixed_field_size;
    var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
    //
    // unpack the key, if necessary
    //
    if (!(hidden_primary_key && index == primary_key)) {
        unpack_key(record,key,index);
    }
    uint32_t last_offset = 0;
    //
    // we have two methods of unpacking, one if we need to unpack the entire row
    // the second if we unpack a subset of the entire row
    // first method here is if we unpack the entire row
    //
    if (unpack_entire_row) {
        //
        // fill in parts of record that are not part of the key
        //
        for (uint i = 0; i < table_share->fields; i++) {
            Field* field = table->field[i];
            if (bitmap_is_set(&share->kc_info.key_filters[index],i)) {
                // column lives in the key; already handled by unpack_key above
                continue;
            }
            if (is_fixed_field(&share->kc_info, i)) {
                fixed_field_ptr = unpack_fixed_field(
                    record + field_offset(field, table),
                    fixed_field_ptr,
                    share->kc_info.field_lengths[i]
                    );
            }
            //
            // here, we DO modify var_field_data_ptr or var_field_offset_ptr
            // as we unpack variable sized fields
            //
            else if (is_variable_field(&share->kc_info, i)) {
                // offsets are stored as 1- or 2-byte end positions
                switch (share->kc_info.num_offset_bytes) {
                case (1):
                    data_end_offset = var_field_offset_ptr[0];
                    break;
                case (2):
                    data_end_offset = uint2korr(var_field_offset_ptr);
                    break;
                default:
                    assert(false);
                    break;
                }
                unpack_var_field(
                    record + field_offset(field, table),
                    var_field_data_ptr,
                    data_end_offset - last_offset,
                    share->kc_info.length_bytes[i]
                    );
                var_field_offset_ptr += share->kc_info.num_offset_bytes;
                var_field_data_ptr += data_end_offset - last_offset;
                last_offset = data_end_offset;
            }
        }
        // whatever follows the var-field data is the blob section
        error = unpack_blobs(
            record,
            var_field_data_ptr,
            row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data),
            false
            );
        if (error) {
            goto exit;
        }
    }
    //
    // in this case, we unpack only what is specified
    // in fixed_cols_for_query and var_cols_for_query
    //
    else {
        //
        // first the fixed fields
        //
        for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
            uint field_index = fixed_cols_for_query[i];
            Field* field = table->field[field_index];
            // col_pack_val is the field's byte offset within the fixed region
            unpack_fixed_field(
                record + field_offset(field, table),
                fixed_field_ptr + share->kc_info.cp_info[index][field_index].col_pack_val,
                share->kc_info.field_lengths[field_index]
                );
        }
        //
        // now the var fields
        // here, we do NOT modify var_field_data_ptr or var_field_offset_ptr
        //
        for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
            uint field_index = var_cols_for_query[i];
            Field* field = table->field[field_index];
            uint32_t var_field_index = share->kc_info.cp_info[index][field_index].col_pack_val;
            uint32_t data_start_offset;
            uint32_t field_len;
            // locate this field's slice inside the var-data region
            get_var_field_info(
                &field_len,
                &data_start_offset,
                var_field_index,
                var_field_offset_ptr,
                share->kc_info.num_offset_bytes
                );
            unpack_var_field(
                record + field_offset(field, table),
                var_field_data_ptr + data_start_offset,
                field_len,
                share->kc_info.length_bytes[field_index]
                );
        }
        if (read_blobs) {
            //
            // now the blobs
            //
            // find where the var-data region ends: that is where blobs begin
            get_blob_field_info(
                &data_end_offset,
                share->kc_info.mcp_info[index].len_of_offsets,
                var_field_data_ptr,
                share->kc_info.num_offset_bytes
                );
            var_field_data_ptr += data_end_offset;
            error = unpack_blobs(
                record,
                var_field_data_ptr,
                row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data),
                true
                );
            if (error) {
                goto exit;
            }
        }
    }
    error = 0;
exit:
    return error;
}
// Unpack one packed key (starting at `data`, WITHOUT its leading infinity
// byte) into the MySQL row buffer `record`, setting/clearing null bits as
// dictated by the per-part null markers.
// Returns the number of bytes consumed from `data`.
uint32_t ha_tokudb::place_key_into_mysql_buff(
    KEY* key_info,
    uchar * record,
    uchar* data
    )
{
    KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
    uchar *pos = data;
    for (; key_part != end; key_part++) {
        if (key_part->field->null_bit) {
            // nullable parts are prefixed with a one-byte null marker
            uint null_offset = get_null_offset(table, key_part->field);
            if (*pos++ == NULL_COL_VAL) { // Null value
                //
                // We don't need to reset the record data as we will not access it
                // if the null data is set
                //
                record[null_offset] |= key_part->field->null_bit;
                continue;
            }
            record[null_offset] &= ~key_part->field->null_bit;
        }
#if !defined(MARIADB_BASE_VERSION)
        //
        // HOPEFULLY TEMPORARY
        //
        assert(table->s->db_low_byte_first);
#endif
        pos = unpack_toku_key_field(
            record + field_offset(key_part->field, table),
            pos,
            key_part->field,
            key_part->length
            );
    }
    return pos-data;
}
//
// Store the key and the primary key into the row
// Parameters:
// [out] record - key stored in MySQL format
// [in] key - key stored in DBT to be converted
// index -index into key_file that represents the DB
// unpacking a key of
//
// Unpack a stored key DBT into `record`. The first byte of key->data is the
// infinity byte and is skipped. For secondary indexes (non-hidden PK), the
// primary key columns appended after the secondary key are unpacked as well.
void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
    uint32_t bytes_read;
    uchar *pos = (uchar *) key->data + 1;  // +1 skips the infinity byte
    bytes_read = place_key_into_mysql_buff(
        &table->key_info[index],
        record,
        pos
        );
    if( (index != primary_key) && !hidden_primary_key) {
        //
        // also unpack primary key
        //
        place_key_into_mysql_buff(
            &table->key_info[primary_key],
            record,
            pos+bytes_read
            );
    }
}
// Pack the key parts of `key_info` from the MySQL row `record` into `buff`,
// emitting a one-byte NULL/NOT-NULL marker before each nullable part.
// `key_length` bounds how many key bytes to pack; `*has_null` is set if any
// part was NULL. Returns the number of bytes written to `buff`.
uint32_t ha_tokudb::place_key_into_dbt_buff(
    KEY* key_info,
    uchar * buff,
    const uchar * record,
    bool* has_null,
    int key_length
    )
{
    KEY_PART_INFO *key_part = key_info->key_part;
    KEY_PART_INFO *end = key_part + get_key_parts(key_info);
    uchar* curr_buff = buff;
    *has_null = false;
    for (; key_part != end && key_length > 0; key_part++) {
        //
        // accessing key_part->field->null_bit instead off key_part->null_bit
        // because key_part->null_bit is not set in add_index
        // filed ticket 862 to look into this
        //
        if (key_part->field->null_bit) {
            /* Store 0 if the key part is a NULL part */
            uint null_offset = get_null_offset(table, key_part->field);
            if (record[null_offset] & key_part->field->null_bit) {
                *curr_buff++ = NULL_COL_VAL;
                *has_null = true;
                // a NULL part contributes only its marker byte; skip the data
                continue;
            }
            *curr_buff++ = NONNULL_COL_VAL;        // Store NOT NULL marker
        }
#if !defined(MARIADB_BASE_VERSION)
        //
        // HOPEFULLY TEMPORARY
        //
        assert(table->s->db_low_byte_first);
#endif
        //
        // accessing field_offset(key_part->field) instead off key_part->offset
        // because key_part->offset is SET INCORRECTLY in add_index
        // filed ticket 862 to look into this
        //
        curr_buff = pack_toku_key_field(
            curr_buff,
            (uchar *) (record + field_offset(key_part->field, table)),
            key_part->field,
            key_part->length
            );
        key_length -= key_part->length;
    }
    return curr_buff - buff;
}
//
// Create a packed key from a row. This key will be written as such
// to the index tree. This will never fail as the key buffer is pre-allocated.
// Parameters:
// [out] key - DBT that holds the key
// [in] key_info - holds data about the key, such as it's length and offset into record
// [out] buff - buffer that will hold the data for key (unless
// we have a hidden primary key)
// [in] record - row from which to create the key
// key_length - currently set to MAX_KEY_LENGTH, is it size of buff?
// Returns:
// the parameter key
//
// Build a packed key DBT from a MySQL row: one infinity byte, then the packed
// key parts, then (unless dont_pack_pk) the primary key — either the hidden
// PK bytes or the packed PK columns. Returns the `key` parameter.
DBT* ha_tokudb::create_dbt_key_from_key(
    DBT * key,
    KEY* key_info,
    uchar * buff,
    const uchar * record,
    bool* has_null,
    bool dont_pack_pk,
    int key_length,
    uint8_t inf_byte
    )
{
    uint32_t size = 0;
    uchar* tmp_buff = buff;
    my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
    key->data = buff;
    //
    // first put the "infinity" byte at beginning. States if missing columns are implicitly
    // positive infinity or negative infinity or zero. For this, because we are creating key
    // from a row, there is no way that columns can be missing, so in practice,
    // this will be meaningless. Might as well put in a value
    //
    *tmp_buff++ = inf_byte;
    size++;
    size += place_key_into_dbt_buff(
        key_info,
        tmp_buff,
        record,
        has_null,
        key_length
        );
    if (!dont_pack_pk) {
        tmp_buff = buff + size;
        if (hidden_primary_key) {
            // hidden PK: append the raw hidden-key bytes
            memcpy(tmp_buff, current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
            size += TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        }
        else {
            bool tmp_bool = false;
            size += place_key_into_dbt_buff(
                &table->key_info[primary_key],
                tmp_buff,
                record,
                &tmp_bool,
                MAX_KEY_LENGTH //this parameter does not matter
                );
        }
    }
    key->size = size;
    DBUG_DUMP("key", (uchar *) key->data, key->size);
    dbug_tmp_restore_column_map(table->write_set, old_map);
    return key;
}
//
// Create a packed key from a row. This key will be written as such
// to the index tree. This will never fail as the key buffer is pre-allocated.
// Parameters:
// [out] key - DBT that holds the key
// keynr - index for which to create the key
// [out] buff - buffer that will hold the data for key (unless
// we have a hidden primary key)
// [in] record - row from which to create the key
// [out] has_null - says if the key has a NULL value for one of its columns
// key_length - currently set to MAX_KEY_LENGTH, is it size of buff?
// Returns:
// the parameter key
//
// Build a packed key DBT for index `keynr` from the MySQL row `record`.
// For a hidden primary key the key is simply the current hidden-key counter
// bytes; otherwise delegates to create_dbt_key_from_key with a COL_ZERO
// infinity byte. Returns the `key` parameter.
DBT *ha_tokudb::create_dbt_key_from_table(
    DBT * key,
    uint keynr,
    uchar * buff,
    const uchar * record,
    bool* has_null,
    int key_length
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("");
    memset((void *) key, 0, sizeof(*key));
    if (hidden_primary_key && keynr == primary_key) {
        key->data = buff;
        // Fixed: the source text contained a mis-encoded "&current_ident"
        // (mojibake "¤t_ident"), which does not compile.
        memcpy(buff, &current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
        key->size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        *has_null = false;
        DBUG_RETURN(key);
    }
    DBUG_RETURN(create_dbt_key_from_key(key, &table->key_info[keynr],buff,record, has_null, (keynr == primary_key), key_length, COL_ZERO));
}
// Build a packed lookup key (no PK appended) with a COL_NEG_INF infinity
// byte so cursor positioning lands before any matching stored key.
DBT* ha_tokudb::create_dbt_key_for_lookup(
    DBT * key,
    KEY* key_info,
    uchar * buff,
    const uchar * record,
    bool* has_null,
    int key_length
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("");
    // override the infinity byte, needed in case the pk is a string
    // to make sure that the cursor that uses this key properly positions
    // it at the right location. If the table stores "D", but we look up for "d",
    // and the infinity byte is 0, then we will skip the "D", because
    // in bytes, "d" > "D".
    DBT* ret = create_dbt_key_from_key(key, key_info, buff, record, has_null, true, key_length, COL_NEG_INF);
    DBUG_RETURN(ret);
}
//
// Create a packed key from from a MySQL unpacked key (like the one that is
// sent from the index_read() This key is to be used to read a row
// Parameters:
// [out] key - DBT that holds the key
// keynr - index for which to pack the key
// [out] buff - buffer that will hold the data for key
// [in] key_ptr - MySQL unpacked key
// key_length - length of key_ptr
// Returns:
// the parameter key
//
// Pack a MySQL "unpacked" search key (as passed to index_read) into TokuDB
// key format: infinity byte, then per-part null markers and packed values.
// For secondary indexes with extended keys enabled, delegates to pack_ext_key.
// Returns the `key` parameter.
DBT *ha_tokudb::pack_key(
    DBT * key,
    uint keynr,
    uchar * buff,
    const uchar * key_ptr,
    uint key_length,
    int8_t inf_byte
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x inf=%d", key_ptr, key_length, key_length > 0 ? key_ptr[0] : 0, inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
    if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
        DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
    }
#endif
    KEY *key_info = &table->key_info[keynr];
    KEY_PART_INFO *key_part = key_info->key_part;
    KEY_PART_INFO *end = key_part + get_key_parts(key_info);
    my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
    memset((void *) key, 0, sizeof(*key));
    key->data = buff;
    // first put the "infinity" byte at beginning. States if missing columns are implicitly
    // positive infinity or negative infinity
    *buff++ = (uchar)inf_byte;
    for (; key_part != end && (int) key_length > 0; key_part++) {
        uint offset = 0;
        if (key_part->null_bit) {
            // in MySQL's key format, a leading 0 byte means NOT NULL
            if (!(*key_ptr == 0)) {
                *buff++ = NULL_COL_VAL;
                key_length -= key_part->store_length;
                key_ptr += key_part->store_length;
                continue;
            }
            *buff++ = NONNULL_COL_VAL;
            offset = 1;         // Data is at key_ptr+1
        }
#if !defined(MARIADB_BASE_VERSION)
        assert(table->s->db_low_byte_first);
#endif
        buff = pack_key_toku_key_field(
            buff,
            (uchar *) key_ptr + offset,
            key_part->field,
            key_part->length
            );
        key_ptr += key_part->store_length;
        key_length -= key_part->store_length;
    }
    key->size = (buff - (uchar *) key->data);
    DBUG_DUMP("key", (uchar *) key->data, key->size);
    dbug_tmp_restore_column_map(table->write_set, old_map);
    DBUG_RETURN(key);
}
#if TOKU_INCLUDE_EXTENDED_KEYS
// Pack a search key for a secondary index using MariaDB/MySQL extended keys:
// first the SK parts (remembering which of them are also PK columns), then —
// if key bytes remain — the PK parts in PK order, reusing already-seen values
// for PK columns that appeared in the SK instead of consuming more key bytes.
// Returns the `key` parameter.
DBT *ha_tokudb::pack_ext_key(
    DBT * key,
    uint keynr,
    uchar * buff,
    const uchar * key_ptr,
    uint key_length,
    int8_t inf_byte
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("");
    // build a list of PK parts that are in the SK. we will use this list to build the
    // extended key if necessary.
    KEY *pk_key_info = &table->key_info[primary_key];
    uint pk_parts = get_key_parts(pk_key_info);
    uint pk_next = 0;
    // NOTE: variable-length array — a GCC/Clang extension in C++
    struct {
        const uchar *key_ptr;
        KEY_PART_INFO *key_part;
    } pk_info[pk_parts];
    KEY *key_info = &table->key_info[keynr];
    KEY_PART_INFO *key_part = key_info->key_part;
    KEY_PART_INFO *end = key_part + get_key_parts(key_info);
    my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
    memset((void *) key, 0, sizeof(*key));
    key->data = buff;
    // first put the "infinity" byte at beginning. States if missing columns are implicitly
    // positive infinity or negative infinity
    *buff++ = (uchar)inf_byte;
    for (; key_part != end && (int) key_length > 0; key_part++) {
        // if the SK part is part of the PK, then append it to the list.
        if (key_part->field->part_of_key.is_set(primary_key)) {
            assert(pk_next < pk_parts);
            pk_info[pk_next].key_ptr = key_ptr;
            pk_info[pk_next].key_part = key_part;
            pk_next++;
        }
        uint offset = 0;
        if (key_part->null_bit) {
            // in MySQL's key format, a leading 0 byte means NOT NULL
            if (!(*key_ptr == 0)) {
                *buff++ = NULL_COL_VAL;
                key_length -= key_part->store_length;
                key_ptr += key_part->store_length;
                continue;
            }
            *buff++ = NONNULL_COL_VAL;
            offset = 1;         // Data is at key_ptr+1
        }
#if !defined(MARIADB_BASE_VERSION)
        assert(table->s->db_low_byte_first);
#endif
        buff = pack_key_toku_key_field(
            buff,
            (uchar *) key_ptr + offset,
            key_part->field,
            key_part->length
            );
        key_ptr += key_part->store_length;
        key_length -= key_part->store_length;
    }
    if (key_length > 0) {
        assert(key_part == end);
        end = key_info->key_part + get_ext_key_parts(key_info);
        // pack PK in order of PK key parts
        for (uint pk_index = 0; key_part != end && (int) key_length > 0 && pk_index < pk_parts; pk_index++) {
            uint i;
            // was this PK column already supplied as part of the SK?
            for (i = 0; i < pk_next; i++) {
                if (pk_info[i].key_part->fieldnr == pk_key_info->key_part[pk_index].fieldnr)
                    break;
            }
            if (i < pk_next) {
                // yes: repack the value we saw earlier; consumes no new key bytes
                const uchar *this_key_ptr = pk_info[i].key_ptr;
                KEY_PART_INFO *this_key_part = pk_info[i].key_part;
                buff = pack_key_toku_key_field(buff, (uchar *) this_key_ptr, this_key_part->field, this_key_part->length);
            } else {
                // no: consume the next extended-key part from the input
                buff = pack_key_toku_key_field(buff, (uchar *) key_ptr, key_part->field, key_part->length);
                key_ptr += key_part->store_length;
                key_length -= key_part->store_length;
                key_part++;
            }
        }
    }
    key->size = (buff - (uchar *) key->data);
    DBUG_DUMP("key", (uchar *) key->data, key->size);
    dbug_tmp_restore_column_map(table->write_set, old_map);
    DBUG_RETURN(key);
}
#endif
//
// get max used hidden primary key value
//
//
// get max used hidden primary key value: position a cursor at the last row
// of the primary dictionary and decode its hidden key into share->auto_ident.
// Runs once per share (guarded by STATUS_PRIMARY_KEY_INIT).
//
void ha_tokudb::init_hidden_prim_key_info(DB_TXN *txn) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    if (!(share->status & STATUS_PRIMARY_KEY_INIT)) {
        int error = 0;
        DBC* c = NULL;
        error = share->key_file[primary_key]->cursor(share->key_file[primary_key], txn, &c, 0);
        assert(error == 0);
        DBT key,val;
        memset(&key, 0, sizeof(key));
        memset(&val, 0, sizeof(val));
        error = c->c_get(c, &key, &val, DB_LAST);
        if (error == 0) {
            // the last key holds the highest hidden-PK value assigned so far
            assert(key.size == TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
            share->auto_ident = hpk_char_to_num((uchar *)key.data);
        }
        // DB_NOTFOUND (empty table) falls through: auto_ident stays at its default
        error = c->c_close(c);
        assert(error == 0);
        share->status |= STATUS_PRIMARY_KEY_INIT;
    }
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
/** @brief
Get metadata info stored in status.tokudb
*/
// Load per-table metadata (version, capabilities) from status.tokudb into
// the share, opening the status dictionary if needed. Tables created before
// the hatoku_new_version key existed get it written now (and the old-version
// key zeroed) so pre-5.0.4 servers can no longer open them.
// Returns 0 on success, error otherwise.
int ha_tokudb::get_status(DB_TXN* txn) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    DBT key, value;
    HA_METADATA_KEY curr_key;
    int error;
    //
    // open status.tokudb
    //
    if (!share->status_block) {
        error = open_status_dictionary(
            &share->status_block,
            share->table_name,
            txn
            );
        if (error) {
            goto cleanup;
        }
    }
    //
    // transaction to be used for putting metadata into status.tokudb
    //
    memset(&key, 0, sizeof(key));
    memset(&value, 0, sizeof(value));
    key.data = &curr_key;
    key.size = sizeof(curr_key);
    value.flags = DB_DBT_USERMEM;  // gets below write directly into our fields
    assert(share->status_block);
    //
    // get version
    //
    value.ulen = sizeof(share->version);
    value.data = &share->version;
    curr_key = hatoku_new_version;
    error = share->status_block->get(
        share->status_block,
        txn,
        &key,
        &value,
        0
        );
    if (error == DB_NOTFOUND) {
        //
        // hack to keep handle the issues of going back and forth
        // between 5.0.3 to 5.0.4
        // the problem with going back and forth
        // is with storing the frm file, 5.0.4 stores it, 5.0.3 does not
        // so, if a user goes back and forth and alters the schema
        // the frm stored can get out of sync with the schema of the table
        // This can cause issues.
        // To take care of this, we are doing this versioning work here.
        // We change the key that stores the version.
        // In 5.0.3, it is hatoku_old_version, in 5.0.4 it is hatoku_new_version
        // When we encounter a table that does not have hatoku_new_version
        // set, we give it the right one, and overwrite the old one with zero.
        // This ensures that 5.0.3 cannot open the table. Once it has been opened by 5.0.4
        //
        uint dummy_version = 0;
        share->version = HA_TOKU_ORIG_VERSION;
        error = write_to_status(
            share->status_block,
            hatoku_new_version,
            &share->version,
            sizeof(share->version),
            txn
            );
        if (error) { goto cleanup; }
        error = write_to_status(
            share->status_block,
            hatoku_old_version,
            &dummy_version,
            sizeof(dummy_version),
            txn
            );
        if (error) { goto cleanup; }
    }
    else if (error || value.size != sizeof(share->version)) {
        if (error == 0) {
            error = HA_ERR_INTERNAL_ERROR;
        }
        goto cleanup;
    }
    //
    // get capabilities
    //
    curr_key = hatoku_capabilities;
    value.ulen = sizeof(share->capabilities);
    value.data = &share->capabilities;
    error = share->status_block->get(
        share->status_block,
        txn,
        &key,
        &value,
        0
        );
    if (error == DB_NOTFOUND) {
        share->capabilities= 0;
    }
    // Fixed copy-paste bug: the stored-size check previously compared the
    // capabilities value against sizeof(share->version).
    else if (error || value.size != sizeof(share->capabilities)) {
        if (error == 0) {
            error = HA_ERR_INTERNAL_ERROR;
        }
        goto cleanup;
    }
    error = 0;
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
/** @brief
Return an estimated of the number of rows in the table.
Used when sorting to allocate buffers and by the optimizer.
This is used in filesort.cc.
*/
ha_rows ha_tokudb::estimate_rows_upper_bound() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    // padded with HA_TOKUDB_EXTRA_ROWS so the estimate is a true upper bound
    DBUG_RETURN(share->rows + HA_TOKUDB_EXTRA_ROWS);
}
//
// Function that compares two primary keys that were saved as part of rnd_pos
// and ::position
//
//
// Function that compares two primary keys that were saved as part of rnd_pos
// and ::position. Each ref is a 4-byte length followed by the packed key;
// the comparison uses the table's key descriptor (skipping its 4-byte header;
// presumably a size/version prefix — TODO confirm against descriptor layout).
//
int ha_tokudb::cmp_ref(const uchar * ref1, const uchar * ref2) {
    int ret_val = 0;
    bool read_string = false;
    ret_val = tokudb_compare_two_keys(
        ref1 + sizeof(uint32_t),
        *(uint32_t *)ref1,
        ref2 + sizeof(uint32_t),
        *(uint32_t *)ref2,
        (uchar *)share->file->descriptor->dbt.data + 4,
        *(uint32_t *)share->file->descriptor->dbt.data - 4,
        false,
        &read_string
        );
    return ret_val;
}
//
// Decide whether an ALTER can reuse the existing data files.
// Mirrors the InnoDB hack: any "ALTER TABLE ... AUTO_INCREMENT=n" is
// reported as incompatible so the whole table gets rebuilt; likewise
// anything other than an exactly-equal definition. This will need to be fixed.
//
bool ha_tokudb::check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes) {
    bool auto_inc_changed =
        (info->used_fields & HA_CREATE_USED_AUTO) != 0 &&
        info->auto_increment_value != 0;
    if (auto_inc_changed || table_changes != IS_EQUAL_YES) {
        return COMPATIBLE_DATA_NO;
    }
    return COMPATIBLE_DATA_YES;
}
//
// Method that is called before the beginning of many calls
// to insert rows (ha_tokudb::write_row). There is no guarantee
// that start_bulk_insert is called, however there is a guarantee
// that if start_bulk_insert is called, then end_bulk_insert may be
// called as well.
// Parameters:
// [in] rows - an estimate of the number of rows that will be inserted
// if number of rows is unknown (such as if doing
// "insert into foo select * from bar), then rows
// will be 0
//
//
// This function returns true if the table MAY be empty.
// It is NOT meant to be a 100% check for emptiness.
// This is used for a bulk load optimization.
//
//
// This function returns true if the table MAY be empty.
// It is NOT meant to be a 100% check for emptiness.
// This is used for a bulk load optimization. A cursor probes for one row
// from the left or right end (per the empty_scan session variable);
// DB_NOTFOUND means the table looked empty at probe time.
//
bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
    int error;
    bool ret_val = false;
    DBC* tmp_cursor = NULL;
    DB_TXN* tmp_txn = NULL;
    const int empty_scan = THDVAR(ha_thd(), empty_scan);
    if (empty_scan == TOKUDB_EMPTY_SCAN_DISABLED)
        goto cleanup;
    // no caller transaction: run the probe under a private one
    if (txn == NULL) {
        error = txn_begin(db_env, 0, &tmp_txn, 0, ha_thd());
        if (error) {
            goto cleanup;
        }
        txn = tmp_txn;
    }
    error = share->file->cursor(share->file, txn, &tmp_cursor, 0);
    if (error)
        goto cleanup;
    // let KILL interrupt the probe
    tmp_cursor->c_set_check_interrupt_callback(tmp_cursor, tokudb_killed_thd_callback, ha_thd());
    if (empty_scan == TOKUDB_EMPTY_SCAN_LR)
        error = tmp_cursor->c_getf_next(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
    else
        error = tmp_cursor->c_getf_prev(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
    error = map_to_handler_error(error);
    if (error == DB_NOTFOUND)
        ret_val = true;
    else
        ret_val = false;
    error = 0;
cleanup:
    if (tmp_cursor) {
        int r = tmp_cursor->c_close(tmp_cursor);
        assert(r == 0);
        tmp_cursor = NULL;
    }
    if (tmp_txn) {
        commit_txn(tmp_txn, 0);
        tmp_txn = NULL;
    }
    return ret_val;
}
// Called before a batch of row inserts. Takes the shared num_DBs read lock
// for the whole bulk operation, and — on the first bulk insert into a table
// that may be empty — tries to either prelock the table or set up a bulk
// loader, depending on session settings and duplicate-handling mode.
// MariaDB 10.0+ passes an extra flags argument; both signatures share a body.
#if MYSQL_VERSION_ID >= 100000
void ha_tokudb::start_bulk_insert(ha_rows rows, uint flags) {
    TOKUDB_HANDLER_DBUG_ENTER("%llu %u txn %p", (unsigned long long) rows, flags, transaction);
#else
void ha_tokudb::start_bulk_insert(ha_rows rows) {
    TOKUDB_HANDLER_DBUG_ENTER("%llu txn %p", (unsigned long long) rows, transaction);
#endif
    THD* thd = ha_thd();
    // NOTE(review): trx is dereferenced below without a NULL check — presumably
    // guaranteed non-NULL by the time bulk insert starts; confirm.
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    delay_updating_ai_metadata = true;
    ai_metadata_update_required = false;
    abort_loader = false;
    // held until end_bulk_insert; prevents concurrent DB add/drop
    rw_rdlock(&share->num_DBs_lock);
    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
    num_DBs_locked_in_bulk = true;
    lock_count = 0;
    // rows == 0 means "unknown count" (e.g. INSERT ... SELECT)
    if ((rows == 0 || rows > 1) && share->try_table_lock) {
        if (get_prelock_empty(thd) && may_table_be_empty(transaction) && transaction != NULL) {
            if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR) {
                // duplicate-tolerant modes can't use the loader; just lock the table
                acquire_table_lock(transaction, lock_write);
            }
            else {
                mult_dbt_flags[primary_key] = 0;
                if (!thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS) && !hidden_primary_key) {
                    mult_put_flags[primary_key] = DB_NOOVERWRITE;
                }
                uint32_t loader_flags = (get_load_save_space(thd)) ?
                    LOADER_COMPRESS_INTERMEDIATES : 0;
                int error = db_env->create_loader(
                    db_env,
                    transaction,
                    &loader,
                    NULL, // no src_db needed
                    curr_num_DBs,
                    share->key_file,
                    mult_put_flags,
                    mult_dbt_flags,
                    loader_flags
                    );
                if (error) {
                    assert(loader == NULL);
                    goto exit_try_table_lock;
                }
                lc.thd = thd;
                lc.ha = this;
                error = loader->set_poll_function(loader, loader_poll_fun, &lc);
                assert(!error);
                error = loader->set_error_callback(loader, loader_dup_fun, &lc);
                assert(!error);
                trx->stmt_progress.using_loader = true;
            }
        }
exit_try_table_lock:
        // only the first bulk insert per share attempts the table lock/loader
        tokudb_pthread_mutex_lock(&share->mutex);
        share->try_table_lock = false;
        tokudb_pthread_mutex_unlock(&share->mutex);
    }
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
//
// Method that is called at the end of many calls to insert rows
// (ha_tokudb::write_row). If start_bulk_insert is called, then
// this is guaranteed to be called.
//
// Finish (or abort) a bulk insert: flush delayed auto-increment metadata,
// close or abort the bulk loader, verify uniqueness of HA_NOSAME indexes
// after a loader close, and release the num_DBs lock taken by
// start_bulk_insert. Returns 0 on success, error or loader_error otherwise.
int ha_tokudb::end_bulk_insert(bool abort) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    THD* thd = ha_thd();
    // NOTE(review): trx is dereferenced at the bottom without a NULL check — confirm
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    bool using_loader = (loader != NULL);
    if (ai_metadata_update_required) {
        // flush the auto-increment high-water mark delayed by start_bulk_insert
        tokudb_pthread_mutex_lock(&share->mutex);
        error = update_max_auto_inc(share->status_block, share->last_auto_increment);
        tokudb_pthread_mutex_unlock(&share->mutex);
        if (error) { goto cleanup; }
    }
    delay_updating_ai_metadata = false;
    ai_metadata_update_required = false;
    loader_error = 0;
    if (loader) {
        if (!abort_loader && !thd_killed(thd)) {
            DBUG_EXECUTE_IF("tokudb_end_bulk_insert_sleep", {
                const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
                thd_proc_info(thd, "DBUG sleep");
                my_sleep(20000000);
                thd_proc_info(thd, orig_proc_info);
            });
            error = loader->close(loader);
            loader = NULL;
            if (error) {
                if (thd_killed(thd)) {
                    my_error(ER_QUERY_INTERRUPTED, MYF(0));
                }
                goto cleanup;
            }
            // the loader does not enforce uniqueness; check each unique index now
            for (uint i = 0; i < table_share->keys; i++) {
                if (table_share->key_info[i].flags & HA_NOSAME) {
                    bool is_unique;
                    if (i == primary_key && !share->pk_has_string) {
                        // non-string PK uniqueness was enforced via DB_NOOVERWRITE
                        continue;
                    }
                    error = is_index_unique(&is_unique, transaction, share->key_file[i], &table->key_info[i],
                                            DB_PRELOCKED_WRITE);
                    if (error) goto cleanup;
                    if (!is_unique) {
                        error = HA_ERR_FOUND_DUPP_KEY;
                        last_dup_key = i;
                        goto cleanup;
                    }
                }
            }
        }
        else {
            // NOTE(review): sprintf's return (char count) is assigned to `error`,
            // which makes the abort path return a nonzero, meaningless code — confirm
            // whether this is intentional before changing.
            error = sprintf(write_status_msg, "aborting bulk load");
            thd_proc_info(thd, write_status_msg);
            loader->abort(loader);
            loader = NULL;
            share->try_table_lock = true;
        }
    }
cleanup:
    if (num_DBs_locked_in_bulk) {
        rw_unlock(&share->num_DBs_lock);
    }
    num_DBs_locked_in_bulk = false;
    lock_count = 0;
    if (loader) {
        // an error path above left the loader open; abort it here
        // NOTE(review): same sprintf-into-error pattern as above
        error = sprintf(write_status_msg, "aborting bulk load");
        thd_proc_info(thd, write_status_msg);
        loader->abort(loader);
        loader = NULL;
    }
    abort_loader = false;
    memset(&lc, 0, sizeof(lc));
    if (error || loader_error) {
        my_errno = error ? error : loader_error;
        if (using_loader) {
            share->try_table_lock = true;
        }
    }
    trx->stmt_progress.using_loader = false;
    thd_proc_info(thd, 0);
    TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error);
}
// Public handler-API entry point for finishing a bulk insert.
int ha_tokudb::end_bulk_insert() {
    // Delegate to the internal variant; 'false' requests a normal close of
    // the loader rather than an abort.
    return end_bulk_insert(false);
}
//
// Verifies that every key value in dictionary db is unique by walking two
// cursors in lockstep (cursor2 positioned one row ahead of cursor1) and
// comparing each adjacent pair of keys.
// Parameters:
//      [out]   is_unique - true iff no two adjacent rows share a key value
//      [in]    txn - transaction under which the cursors operate
//      [in]    db - dictionary to scan
//      [in]    key_info - MySQL key descriptor used to (un)pack key values
//              lock_flags - extra flags OR'd into each c_get
//                           (e.g. DB_PRELOCKED_WRITE)
// Returns:
//      0 on success (then check *is_unique), error otherwise
//
int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags) {
    int error;
    DBC* tmp_cursor1 = NULL;
    DBC* tmp_cursor2 = NULL;
    DBT key1, key2, val, packed_key1, packed_key2;
    uint64_t cnt = 0;
    char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound.
    THD* thd = ha_thd();
    const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
    memset(&key1, 0, sizeof(key1));
    memset(&key2, 0, sizeof(key2));
    memset(&val, 0, sizeof(val));
    memset(&packed_key1, 0, sizeof(packed_key1));
    memset(&packed_key2, 0, sizeof(packed_key2));
    *is_unique = true;
    error = db->cursor(db, txn, &tmp_cursor1, DB_SERIALIZABLE);
    if (error) { goto cleanup; }
    error = db->cursor(db, txn, &tmp_cursor2, DB_SERIALIZABLE);
    if (error) { goto cleanup; }
    // position cursor1 on the first row; an empty dictionary is trivially unique
    error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags);
    if (error == DB_NOTFOUND) {
        *is_unique = true;
        error = 0;
        goto cleanup;
    }
    else if (error) { goto cleanup; }
    // advance cursor2 twice so it sits one row ahead of cursor1;
    // a single-row dictionary is also trivially unique
    error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags);
    if (error) { goto cleanup; }
    error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags);
    if (error == DB_NOTFOUND) {
        *is_unique = true;
        error = 0;
        goto cleanup;
    }
    else if (error) { goto cleanup; }
    // walk both cursors forward together, comparing each adjacent key pair
    while (error != DB_NOTFOUND) {
        bool has_null1;
        bool has_null2;
        int cmp;
        place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key1.data + 1);
        place_key_into_mysql_buff(key_info, table->record[1], (uchar *) key2.data + 1);
        create_dbt_key_for_lookup(&packed_key1, key_info, key_buff, table->record[0], &has_null1);
        create_dbt_key_for_lookup(&packed_key2, key_info, key_buff2, table->record[1], &has_null2);
        // keys containing NULLs never violate uniqueness, so only compare
        // the pair when both are NULL-free
        if (!has_null1 && !has_null2) {
            cmp = tokudb_prefix_cmp_dbt_key(db, &packed_key1, &packed_key2);
            if (cmp == 0) {
                // duplicate found; leave the offending key unpacked into
                // table->record[0] for error reporting
                memcpy(key_buff, key1.data, key1.size);
                place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key_buff + 1);
                *is_unique = false;
                break;
            }
        }
        error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags);
        if (error) { goto cleanup; }
        error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags);
        if (error && (error != DB_NOTFOUND)) { goto cleanup; }
        cnt++;
        // every 10000 rows, publish progress and honor a user KILL
        if ((cnt % 10000) == 0) {
            sprintf(
                status_msg,
                "Verifying index uniqueness: Checked %llu of %llu rows in key-%s.",
                (long long unsigned) cnt,
                share->rows,
                key_info->name);
            thd_proc_info(thd, status_msg);
            if (thd_killed(thd)) {
                my_error(ER_QUERY_INTERRUPTED, MYF(0));
                error = ER_QUERY_INTERRUPTED;
                goto cleanup;
            }
        }
    }
    error = 0;
cleanup:
    // restore the session's proc info and release both cursors
    thd_proc_info(thd, orig_proc_info);
    if (tmp_cursor1) {
        tmp_cursor1->c_close(tmp_cursor1);
        tmp_cursor1 = NULL;
    }
    if (tmp_cursor2) {
        tmp_cursor2->c_close(tmp_cursor2);
        tmp_cursor2 = NULL;
    }
    return error;
}
//
// Checks whether the key value packed from 'record' for key 'key_info'
// already exists in dictionary 'dict_index'.
// Parameters:
//      [out]   is_unique - true iff no row with an equal key prefix exists
//                          (keys containing NULL are treated as unique)
//      [in]    record - row in MySQL format to pack the key from
//      [in]    key_info - key descriptor for the packing
//              dict_index - index into share->key_file of the dictionary
//      [in]    txn - transaction for the lookup
// Returns:
//      0 on success (then check *is_unique), error otherwise
//
int ha_tokudb::is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn) {
    int error = 0;
    bool has_null;
    DBC* tmp_cursor = NULL;
    DBT key; memset((void *)&key, 0, sizeof(key));
    create_dbt_key_from_key(&key, key_info, key_buff2, record, &has_null, true, MAX_KEY_LENGTH, COL_NEG_INF);
    // per SQL semantics, a key containing NULL never conflicts
    if (has_null) {
        error = 0;
        *is_unique = true;
        goto cleanup;
    }
    error = share->key_file[dict_index]->cursor(share->key_file[dict_index], txn, &tmp_cursor, DB_SERIALIZABLE | DB_RMW);
    if (error) {
        goto cleanup;
    } else {
        // prelock (key,-inf),(key,+inf) so that the subsequent key lookup does not overlock
        uint flags = 0;
        DBT key_right; memset(&key_right, 0, sizeof key_right);
        create_dbt_key_from_key(&key_right, key_info, key_buff3, record, &has_null, true, MAX_KEY_LENGTH, COL_POS_INF);
        error = tmp_cursor->c_set_bounds(tmp_cursor, &key, &key_right, true, DB_NOTFOUND);
        if (error == 0) {
            // prelock succeeded, so the lookup can use the prelocked range
            flags = DB_PRELOCKED | DB_PRELOCKED_WRITE;
        }
        // lookup key and check unique prefix
        struct smart_dbt_info info;
        info.ha = this;
        info.buf = NULL;
        info.keynr = dict_index;
        struct index_read_info ir_info;
        ir_info.orig_key = &key;
        ir_info.smart_dbt_info = info;
        error = tmp_cursor->c_getf_set_range(tmp_cursor, flags, &key, smart_dbt_callback_lookup, &ir_info);
        if (error == DB_NOTFOUND) {
            // nothing at or after the key: the value is unique
            *is_unique = true;
            error = 0;
            goto cleanup;
        }
        else if (error) {
            error = map_to_handler_error(error);
            goto cleanup;
        }
        // ir_info.cmp was set by the lookup callback: non-zero means the
        // found key differs from ours, i.e. no duplicate
        if (ir_info.cmp) {
            *is_unique = true;
        }
        else {
            *is_unique = false;
        }
    }
    error = 0;
cleanup:
    if (tmp_cursor) {
        int r = tmp_cursor->c_close(tmp_cursor);
        assert(r==0);
        tmp_cursor = NULL;
    }
    return error;
}
// On a replication slave, optionally stall before a unique check so tests can
// widen the race window (rpl_unique_checks_delay session var, milliseconds).
static void maybe_do_unique_checks_delay(THD *thd) {
    if (!thd->slave_thread)
        return;
    uint64_t delay_ms = THDVAR(thd, rpl_unique_checks_delay);
    if (delay_ms != 0)
        usleep(delay_ms * 1000);
}
// True when the slave-side optimizations should apply: either the server is
// read-only, or readonly checking is disabled for this session.
static bool need_read_only(THD *thd) {
    if (opt_readonly)
        return true;
    return !THDVAR(thd, rpl_check_readonly);
}
// Decide whether unique-key checks must run for this statement.
// do_rpl_event marks a replication-applied row event that may skip checks.
static bool do_unique_checks(THD *thd, bool do_rpl_event) {
    const bool skip_for_replication =
        do_rpl_event && thd->slave_thread && need_read_only(thd) &&
        !THDVAR(thd, rpl_unique_checks);
    if (skip_for_replication)
        return false;
    // otherwise, honor the session's UNIQUE_CHECKS setting
    return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS);
}
// Run the uniqueness constraint check for every unique key of the table
// against 'record'. Returns 0 when all checks pass, DB_KEYEXIST (with
// last_dup_key set) on a duplicate, or another error code.
int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
    int error = 0;
    // Nothing to verify when the table has no unique keys, or the session
    // (possibly relaxed for replication) has unique checks disabled.
    if (share->has_unique_keys && do_unique_checks(thd, in_rpl_write_rows)) {
        for (uint i = 0; i < table_share->keys; i++) {
            const bool unique_key =
                (table->key_info[i].flags & HA_NOSAME) || (i == primary_key);
            // a primary key without string columns cannot produce a false
            // duplicate, so skip the check for it
            if (i == primary_key && !share->pk_has_string)
                continue;
            if (!unique_key)
                continue;
            maybe_do_unique_checks_delay(thd);
            // check the uniqueness constraint for this key; keys containing
            // NULLs are handled inside is_val_unique
            bool is_unique = false;
            error = is_val_unique(&is_unique, record, &table->key_info[i], i, txn);
            if (error)
                break;
            if (!is_unique) {
                error = DB_KEYEXIST;
                last_dup_key = i;
                break;
            }
        }
    }
    return error;
}
//
// Debug-only self-check (TOKUDB_DEBUG_CHECK_KEY): re-derives every secondary
// key (and clustering value) from the packed primary key/value via the
// dictionary descriptors and asserts the result matches what
// create_dbt_key_from_table / pack_row produce from the MySQL row.
// Parameters:
//      [in]     record - row in MySQL format
//      [in]     pk_key - packed primary key
//      [in/out] pk_val - packed primary value (re-packed at the end)
//
void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
    int error;
    DBT row, key;
    //
    // variables for testing key packing, only used in some debug modes
    //
    uchar* tmp_pk_key_data = NULL;
    uchar* tmp_pk_val_data = NULL;
    DBT tmp_pk_key;
    DBT tmp_pk_val;
    bool has_null;
    int cmp;
    memset(&tmp_pk_key, 0, sizeof(DBT));
    memset(&tmp_pk_val, 0, sizeof(DBT));
    //
    //use for testing the packing of keys
    //
    // work on private copies so descriptor-driven repacking cannot be
    // influenced by the caller's buffers
    tmp_pk_key_data = (uchar *)tokudb_my_malloc(pk_key->size, MYF(MY_WME));
    assert(tmp_pk_key_data);
    tmp_pk_val_data = (uchar *)tokudb_my_malloc(pk_val->size, MYF(MY_WME));
    assert(tmp_pk_val_data);
    memcpy(tmp_pk_key_data, pk_key->data, pk_key->size);
    memcpy(tmp_pk_val_data, pk_val->data, pk_val->size);
    tmp_pk_key.data = tmp_pk_key_data;
    tmp_pk_key.size = pk_key->size;
    tmp_pk_val.data = tmp_pk_val_data;
    tmp_pk_val.size = pk_val->size;
    for (uint keynr = 0; keynr < table_share->keys; keynr++) {
        uint32_t tmp_num_bytes = 0;
        uchar* row_desc = NULL;
        uint32_t desc_size = 0;
        // the primary key is the source of the repack, not a target
        if (keynr == primary_key) {
            continue;
        }
        create_dbt_key_from_table(&key, keynr, key_buff2, record, &has_null);
        //
        // TEST
        //
        // locate the key section of this dictionary's descriptor:
        // skip the first length-prefixed section, then strip the 4-byte
        // length prefix of the second
        row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data;
        row_desc += (*(uint32_t *)row_desc);
        desc_size = (*(uint32_t *)row_desc) - 4;
        row_desc += 4;
        tmp_num_bytes = pack_key_from_desc(
            key_buff3,
            row_desc,
            desc_size,
            &tmp_pk_key,
            &tmp_pk_val
            );
        // descriptor-driven packing must agree byte-for-byte with the
        // MySQL-row-driven packing
        assert(tmp_num_bytes == key.size);
        cmp = memcmp(key_buff3,key_buff2,tmp_num_bytes);
        assert(cmp == 0);
        //
        // test key packing of clustering keys
        //
        if (key_is_clustering(&table->key_info[keynr])) {
            error = pack_row(&row, (const uchar *) record, keynr);
            assert(error == 0);
            uchar* tmp_buff = NULL;
            tmp_buff = (uchar *)tokudb_my_malloc(alloced_rec_buff_length,MYF(MY_WME));
            assert(tmp_buff);
            // the clustering-value section is the third length-prefixed
            // section of the descriptor
            row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data;
            row_desc += (*(uint32_t *)row_desc);
            row_desc += (*(uint32_t *)row_desc);
            desc_size = (*(uint32_t *)row_desc) - 4;
            row_desc += 4;
            tmp_num_bytes = pack_clustering_val_from_desc(
                tmp_buff,
                row_desc,
                desc_size,
                &tmp_pk_val
                );
            assert(tmp_num_bytes == row.size);
            cmp = memcmp(tmp_buff,rec_buff,tmp_num_bytes);
            assert(cmp == 0);
            tokudb_my_free(tmp_buff);
        }
    }
    //
    // copy stuff back out
    //
    // re-pack the primary value (pack_row above may have clobbered rec_buff)
    // and verify it still matches the saved copy
    error = pack_row(pk_val, (const uchar *) record, primary_key);
    assert(pk_val->size == tmp_pk_val.size);
    cmp = memcmp(pk_val->data, tmp_pk_val_data, pk_val->size);
    assert( cmp == 0);
    tokudb_my_free(tmp_pk_key_data);
    tokudb_my_free(tmp_pk_val_data);
}
// Choose the put() flags for the main (primary) dictionary based on the
// statement type and the optimizations that currently apply.
void ha_tokudb::set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags) {
    uint32_t old_prelock_flags = 0;
    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
    bool in_hot_index = share->num_DBs > curr_num_DBs;
    bool using_ignore_flag_opt = do_ignore_flag_optimization(
        thd, table, share->replace_into_fast && !using_ignore_no_key);
    //
    // optimization for "REPLACE INTO..." (and "INSERT IGNORE") command:
    // when the main table is the only dictionary (or all indexes are a
    // subset of the pk), a plain overwriting put behaves exactly like
    // REPLACE INTO -- insert when absent, replace when present -- without
    // risking inconsistency between indexes.
    //
    if (hidden_primary_key) {
        // hidden pk values are generated and never collide
        *put_flags = old_prelock_flags;
    } else if (!do_unique_checks(thd, in_rpl_write_rows | in_rpl_update_rows) &&
               !is_replace_into(thd) && !is_insert_ignore(thd)) {
        // unique checks disabled: overwrite freely
        *put_flags = old_prelock_flags;
    } else if (using_ignore_flag_opt && is_replace_into(thd) && !in_hot_index) {
        // fast REPLACE INTO path
        *put_flags = old_prelock_flags;
    } else if (opt_eligible && using_ignore_flag_opt && is_insert_ignore(thd) &&
               !in_hot_index) {
        // fast INSERT IGNORE path: reject duplicates without raising an error
        *put_flags = DB_NOOVERWRITE_NO_ERROR | old_prelock_flags;
    } else {
        // default: duplicates are hard errors
        *put_flags = DB_NOOVERWRITE | old_prelock_flags;
    }
}
// Insert one packed row into the main dictionary. Only valid when the main
// dictionary is the table's sole dictionary.
int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) {
    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
    assert(curr_num_DBs == 1);
    uint32_t put_flags = mult_put_flags[primary_key];
    THD *thd = ha_thd();
    set_main_dict_put_flags(thd, true, &put_flags);
    // test hook: stretch the unique-check window when overwrite is forbidden
    if ((put_flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
        maybe_do_unique_checks_delay(thd);
    int error = share->file->put(share->file, txn, pk_key, pk_val, put_flags);
    if (error) {
        // any failure here can only concern the primary dictionary
        last_dup_key = primary_key;
    }
    return error;
}
//
// Insert one row into all of the table's dictionaries (primary + secondary),
// either with a single env->put_multiple, or one put() per dictionary when
// the INSERT IGNORE optimization flag forbids put_multiple.
// Parameters:
//      [in]    pk_key - packed primary key
//      [in]    pk_val - packed primary value
//      [in]    txn - transaction for the puts
//      [in]    thd - current session
// Returns:
//      0 on success; on error, last_dup_key is set to primary_key
//
int ha_tokudb::insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd) {
    int error = 0;
    uint curr_num_DBs = share->num_DBs;
    set_main_dict_put_flags(thd, true, &mult_put_flags[primary_key]);
    uint32_t flags = mult_put_flags[primary_key];
    // for test, make unique checks have a very long duration
    if ((flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
        maybe_do_unique_checks_delay(thd);
    // the insert ignore optimization uses DB_NOOVERWRITE_NO_ERROR,
    // which is not allowed with env->put_multiple.
    // we have to insert the rows one by one in this case.
    if (flags & DB_NOOVERWRITE_NO_ERROR) {
        DB * src_db = share->key_file[primary_key];
        for (uint32_t i = 0; i < curr_num_DBs; i++) {
            DB * db = share->key_file[i];
            if (i == primary_key) {
                // if it's the primary key, insert the rows
                // as they are.
                error = db->put(db, txn, pk_key, pk_val, flags);
            } else {
                // generate a row for secondary keys.
                // use our multi put key/rec buffers
                // just as the ydb layer would have in
                // env->put_multiple(), except that
                // we will just do a put() right away.
                error = tokudb_generate_row(db, src_db,
                        &mult_key_dbt_array[i].dbts[0], &mult_rec_dbt_array[i].dbts[0],
                        pk_key, pk_val);
                if (error != 0) {
                    goto out;
                }
                error = db->put(db, txn, &mult_key_dbt_array[i].dbts[0],
                        &mult_rec_dbt_array[i].dbts[0], flags);
            }
            if (error != 0) {
                goto out;
            }
        }
    } else {
        // not insert ignore, so we can use put multiple
        error = db_env->put_multiple(
            db_env,
            share->key_file[primary_key],
            txn,
            pk_key,
            pk_val,
            curr_num_DBs,
            share->key_file,
            mult_key_dbt_array,
            mult_rec_dbt_array,
            mult_put_flags
            );
    }
out:
    //
    // We break if we hit an error, unless it is a dup key error
    // and MySQL told us to ignore duplicate key errors
    //
    if (error) {
        last_dup_key = primary_key;
    }
    return error;
}
//
// Stores a row in the table, called when handling an INSERT query
// Parameters:
// [in] record - a row in MySQL format
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::write_row(uchar * record) {
    TOKUDB_HANDLER_DBUG_ENTER("%p", record);
    DBT row, prim_key;
    int error;
    THD *thd = ha_thd();
    bool has_null;
    DB_TXN* sub_trans = NULL;
    DB_TXN* txn = NULL;
    tokudb_trx_data *trx = NULL;
    uint curr_num_DBs;
    bool create_sub_trans = false;
    bool num_DBs_locked = false;
    //
    // some crap that needs to be done because MySQL does not properly abstract
    // this work away from us, namely filling in auto increment and setting auto timestamp
    //
    ha_statistic_increment(&SSV::ha_write_count);
#if MYSQL_VERSION_ID < 50600
    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) {
        table->timestamp_field->set_time();
    }
#endif
    // fill in the auto-increment column for this row if needed
    if (table->next_number_field && record == table->record[0]) {
        error = update_auto_increment();
        if (error)
            goto cleanup;
    }
    //
    // check to see if some value for the auto increment column that is bigger
    // than anything else til now is being used. If so, update the metadata to reflect it
    // the goal here is we never want to have a dup key error due to a bad increment
    // of the auto inc field.
    //
    if (share->has_auto_inc && record == table->record[0]) {
        tokudb_pthread_mutex_lock(&share->mutex);
        ulonglong curr_auto_inc = retrieve_auto_increment(
            table->field[share->ai_field_index]->key_type(), field_offset(table->field[share->ai_field_index], table), record);
        if (curr_auto_inc > share->last_auto_increment) {
            share->last_auto_increment = curr_auto_inc;
            if (delay_updating_ai_metadata) {
                // bulk insert in progress: persist the new max later, in one shot
                ai_metadata_update_required = true;
            }
            else {
                update_max_auto_inc(share->status_block, share->last_auto_increment);
            }
        }
        tokudb_pthread_mutex_unlock(&share->mutex);
    }
    //
    // grab reader lock on numDBs_lock
    //
    if (!num_DBs_locked_in_bulk) {
        rw_rdlock(&share->num_DBs_lock);
        num_DBs_locked = true;
    }
    else {
        // bulk load holds the lock across rows; periodically release and
        // reacquire it so a concurrent hot-index build can make progress
        lock_count++;
        if (lock_count >= 2000) {
            rw_unlock(&share->num_DBs_lock);
            rw_rdlock(&share->num_DBs_lock);
            lock_count = 0;
        }
    }
    curr_num_DBs = share->num_DBs;
    if (hidden_primary_key) {
        get_auto_primary_key(current_ident);
    }
    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(record))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }
    create_dbt_key_from_table(&prim_key, primary_key, primary_key_buff, record, &has_null);
    if ((error = pack_row(&row, (const uchar *) record, primary_key))){
        goto cleanup;
    }
    // a sub-transaction makes a single row abortable for IGNORE-style
    // statements, unless the ignore-flag optimization makes it unnecessary
    create_sub_trans = (using_ignore && !(do_ignore_flag_optimization(thd,table,share->replace_into_fast && !using_ignore_no_key)));
    if (create_sub_trans) {
        error = txn_begin(db_env, transaction, &sub_trans, DB_INHERIT_ISOLATION, thd);
        if (error) {
            goto cleanup;
        }
    }
    txn = create_sub_trans ? sub_trans : transaction;
    if (tokudb_debug & TOKUDB_DEBUG_TXN) {
        TOKUDB_HANDLER_TRACE("txn %p", txn);
    }
    if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY) {
        test_row_packing(record,&prim_key,&row);
    }
    if (loader) {
        // bulk-load path: hand the packed row to the loader
        error = loader->put(loader, &prim_key, &row);
        if (error) {
            abort_loader = true;
            goto cleanup;
        }
    }
    else {
        error = do_uniqueness_checks(record, txn, thd);
        if (error) {
            // for #4633
            // if we have a duplicate key error, let's check the primary key to see
            // if there is a duplicate there. If so, set last_dup_key to the pk
            if (error == DB_KEYEXIST && !tokudb_test(hidden_primary_key) && last_dup_key != primary_key) {
                int r = share->file->getf_set(share->file, txn, DB_SERIALIZABLE, &prim_key, smart_dbt_do_nothing, NULL);
                if (r == 0) {
                    // if we get no error, that means the row
                    // was found and this is a duplicate key,
                    // so we set last_dup_key
                    last_dup_key = primary_key;
                }
                else if (r != DB_NOTFOUND) {
                    // if some other error is returned, return that to the user.
                    error = r;
                }
            }
            goto cleanup;
        }
        if (curr_num_DBs == 1) {
            error = insert_row_to_main_dictionary(record,&prim_key, &row, txn);
            if (error) { goto cleanup; }
        }
        else {
            error = insert_rows_to_dictionaries_mult(&prim_key, &row, txn, thd);
            if (error) { goto cleanup; }
        }
        if (error == 0) {
            uint64_t full_row_size = prim_key.size + row.size;
            toku_hton_update_primary_key_bytes_inserted(full_row_size);
        }
    }
    trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    if (!error) {
        added_rows++;
        trx->stmt_progress.inserted++;
        //inserted increment
        increment_partitioned_counter(toku_row_status.inserted, 1);
        track_progress(thd);
    }
cleanup:
    if (num_DBs_locked) {
        rw_unlock(&share->num_DBs_lock);
    }
    if (error == DB_KEYEXIST) {
        error = HA_ERR_FOUND_DUPP_KEY;
    }
    if (sub_trans) {
        // no point in recording error value of abort.
        // nothing we can do about it anyway and it is not what
        // we want to return.
        if (error) {
            abort_txn(sub_trans);
        }
        else {
            commit_txn(sub_trans, DB_TXN_NOSYNC);
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
/* Compare if a key in a row has changed */
bool ha_tokudb::key_changed(uint keynr, const uchar * old_row, const uchar * new_row) {
    bool has_null;
    DBT new_key;
    DBT old_key;
    memset((void *) &new_key, 0, sizeof(new_key));
    memset((void *) &old_key, 0, sizeof(old_key));
    // pack both versions of the key and compare them with the dictionary's
    // own comparison function; non-zero means the key value differs
    create_dbt_key_from_table(&new_key, keynr, key_buff2, new_row, &has_null);
    create_dbt_key_for_lookup(&old_key, &table->key_info[keynr], key_buff3, old_row, &has_null);
    return tokudb_prefix_cmp_dbt_key(share->key_file[keynr], &old_key, &new_key);
}
//
// Updates a row in the table, called when handling an UPDATE query
// Parameters:
// [in] old_row - row to be updated, in MySQL format
// [in] new_row - new row, in MySQL format
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    DBT prim_key, old_prim_key, prim_row, old_prim_row;
    int error;
    bool has_null;
    THD* thd = ha_thd();
    DB_TXN* sub_trans = NULL;
    DB_TXN* txn = NULL;
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    uint curr_num_DBs;
    LINT_INIT(error);
    memset((void *) &prim_key, 0, sizeof(prim_key));
    memset((void *) &old_prim_key, 0, sizeof(old_prim_key));
    memset((void *) &prim_row, 0, sizeof(prim_row));
    memset((void *) &old_prim_row, 0, sizeof(old_prim_row));
    ha_statistic_increment(&SSV::ha_update_count);
#if MYSQL_VERSION_ID < 50600
    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) {
        table->timestamp_field->set_time();
    }
#endif
    //
    // check to see if some value for the auto increment column that is bigger
    // than anything else til now is being used. If so, update the metadata to reflect it
    // the goal here is we never want to have a dup key error due to a bad increment
    // of the auto inc field.
    //
    if (share->has_auto_inc && new_row == table->record[0]) {
        tokudb_pthread_mutex_lock(&share->mutex);
        ulonglong curr_auto_inc = retrieve_auto_increment(
            table->field[share->ai_field_index]->key_type(),
            field_offset(table->field[share->ai_field_index], table),
            new_row
            );
        if (curr_auto_inc > share->last_auto_increment) {
            error = update_max_auto_inc(share->status_block, curr_auto_inc);
            if (!error) {
                share->last_auto_increment = curr_auto_inc;
            }
        }
        tokudb_pthread_mutex_unlock(&share->mutex);
    }
    //
    // grab reader lock on numDBs_lock
    //
    bool num_DBs_locked = false;
    if (!num_DBs_locked_in_bulk) {
        rw_rdlock(&share->num_DBs_lock);
        num_DBs_locked = true;
    }
    curr_num_DBs = share->num_DBs;
    // for IGNORE-style statements, run the update under a sub-transaction so
    // a failed row can be rolled back individually
    if (using_ignore) {
        error = txn_begin(db_env, transaction, &sub_trans, DB_INHERIT_ISOLATION, thd);
        if (error) {
            goto cleanup;
        }
    }
    txn = using_ignore ? sub_trans : transaction;
    if (hidden_primary_key) {
        // hidden pk: the key is the generated ident and never changes
        memset((void *) &prim_key, 0, sizeof(prim_key));
        prim_key.data = (void *) current_ident;
        prim_key.size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        old_prim_key = prim_key;
    }
    else {
        create_dbt_key_from_table(&prim_key, primary_key, key_buff, new_row, &has_null);
        create_dbt_key_from_table(&old_prim_key, primary_key, primary_key_buff, old_row, &has_null);
    }
    // do uniqueness checks
    if (share->has_unique_keys && do_unique_checks(thd, in_rpl_update_rows)) {
        for (uint keynr = 0; keynr < table_share->keys; keynr++) {
            bool is_unique_key = (table->key_info[keynr].flags & HA_NOSAME) || (keynr == primary_key);
            // a primary key without string columns cannot produce a false duplicate
            if (keynr == primary_key && !share->pk_has_string) {
                continue;
            }
            if (is_unique_key) {
                // only re-check uniqueness for keys whose value actually changed
                bool key_ch = key_changed(keynr, old_row, new_row);
                if (key_ch) {
                    bool is_unique;
                    error = is_val_unique(&is_unique, new_row, &table->key_info[keynr], keynr, txn);
                    if (error) goto cleanup;
                    if (!is_unique) {
                        error = DB_KEYEXIST;
                        last_dup_key = keynr;
                        goto cleanup;
                    }
                }
            }
        }
    }
    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(new_row))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
        if (fix_rec_update_buff_for_blob(max_row_length(old_row))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }
    error = pack_row(&prim_row, new_row, primary_key);
    if (error) { goto cleanup; }
    error = pack_old_row_for_update(&old_prim_row, old_row, primary_key);
    if (error) { goto cleanup; }
    set_main_dict_put_flags(thd, false, &mult_put_flags[primary_key]);
    // for test, make unique checks have a very long duration
    if ((mult_put_flags[primary_key] & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
        maybe_do_unique_checks_delay(thd);
    // apply the delete-old/insert-new pair to every dictionary in one call
    error = db_env->update_multiple(
        db_env,
        share->key_file[primary_key],
        txn,
        &old_prim_key,
        &old_prim_row,
        &prim_key,
        &prim_row,
        curr_num_DBs,
        share->key_file,
        mult_put_flags,
        2*curr_num_DBs,
        mult_key_dbt_array,
        curr_num_DBs,
        mult_rec_dbt_array
        );
    if (error == DB_KEYEXIST) {
        last_dup_key = primary_key;
    }
    else if (!error) {
        trx->stmt_progress.updated++;
        //updated incrment
        increment_partitioned_counter(toku_row_status.updated, 1);
        track_progress(thd);
    }
cleanup:
    if (num_DBs_locked) {
        rw_unlock(&share->num_DBs_lock);
    }
    if (error == DB_KEYEXIST) {
        error = HA_ERR_FOUND_DUPP_KEY;
    }
    if (sub_trans) {
        // no point in recording error value of abort.
        // nothing we can do about it anyway and it is not what
        // we want to return.
        if (error) {
            abort_txn(sub_trans);
        }
        else {
            commit_txn(sub_trans, DB_TXN_NOSYNC);
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Deletes a row in the table, called when handling a DELETE query
// Parameters:
// [in] record - row to be deleted, in MySQL format
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::delete_row(const uchar * record) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = ENOSYS;
    DBT row, prim_key;
    bool has_null;
    THD* thd = ha_thd();
    uint curr_num_DBs;
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    ha_statistic_increment(&SSV::ha_delete_count);
    //
    // grab reader lock on numDBs_lock
    //
    bool num_DBs_locked = false;
    if (!num_DBs_locked_in_bulk) {
        rw_rdlock(&share->num_DBs_lock);
        num_DBs_locked = true;
    }
    curr_num_DBs = share->num_DBs;
    create_dbt_key_from_table(&prim_key, primary_key, key_buff, record, &has_null);
    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(record))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }
    // the packed row is needed so secondary keys can be derived for deletion
    if ((error = pack_row(&row, (const uchar *) record, primary_key))){
        goto cleanup;
    }
    if (tokudb_debug & TOKUDB_DEBUG_TXN) {
        TOKUDB_HANDLER_TRACE("all %p stmt %p sub_sp_level %p transaction %p", trx->all, trx->stmt, trx->sub_sp_level, transaction);
    }
    // delete the row from every dictionary in one call
    error = db_env->del_multiple(
        db_env,
        share->key_file[primary_key],
        transaction,
        &prim_key,
        &row,
        curr_num_DBs,
        share->key_file,
        mult_key_dbt_array,
        mult_del_flags
        );
    if (error) {
        DBUG_PRINT("error", ("Got error %d", error));
    }
    else {
        deleted_rows++;
        trx->stmt_progress.deleted++;
        //deleted increment
        increment_partitioned_counter(toku_row_status.deleted, 1);
        track_progress(thd);
    }
cleanup:
    if (num_DBs_locked) {
        rw_unlock(&share->num_DBs_lock);
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// takes as input table->read_set and table->write_set
// and puts list of field indexes that need to be read in
// unpack_row in the member variables fixed_cols_for_query
// and var_cols_for_query
//
// From table->read_set / table->write_set, record which field indexes
// unpack_row must materialize: fixed-length fields go into
// fixed_cols_for_query, varchar/varbinary into var_cols_for_query, and
// read_key / read_blobs flag key-resident and blob columns respectively.
void ha_tokudb::set_query_columns(uint keynr) {
    uint32_t fixed_idx = 0;
    uint32_t var_idx = 0;
    read_key = false;
    read_blobs = false;
    // Pick which dictionary's key filter applies: the primary dictionary,
    // or a clustering secondary (which carries the full row) when scanning it.
    uint key_index = primary_key;
    if (keynr != primary_key && keynr != MAX_KEY &&
        key_is_clustering(&table->key_info[keynr])) {
        key_index = keynr;
    }
    for (uint i = 0; i < table_share->fields; i++) {
        const bool needed = bitmap_is_set(table->read_set, i) ||
                            bitmap_is_set(table->write_set, i);
        if (!needed)
            continue;
        if (bitmap_is_set(&share->kc_info.key_filters[key_index], i)) {
            // the column's value lives inside the key itself
            read_key = true;
            continue;
        }
        if (is_fixed_field(&share->kc_info, i)) {
            // fixed field length: remember its index for unpack_row
            fixed_cols_for_query[fixed_idx] = i;
            fixed_idx++;
        }
        else if (is_variable_field(&share->kc_info, i)) {
            // varchar or varbinary
            var_cols_for_query[var_idx] = i;
            var_idx++;
        }
        else {
            // it is a blob
            read_blobs = true;
        }
    }
    num_fixed_cols_for_query = fixed_idx;
    num_var_cols_for_query = var_idx;
}
// Called by MySQL when the read/write column bitmaps change mid-statement.
void ha_tokudb::column_bitmaps_signal() {
    // With the maximum number of indexes, MAX_KEY coincides with primary_key,
    // hence the second half of this condition.
    const bool index_active = (tokudb_active_index != MAX_KEY) ||
                              (tokudb_active_index == primary_key);
    if (index_active) {
        set_query_columns(tokudb_active_index);
    }
}
//
// Notification that a scan of entire secondary table is about
// to take place. Will pre acquire table read lock
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::prepare_index_scan() {
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
HANDLE_INVALID_CURSOR();
error = prelock_range(NULL, NULL);
if (error) { last_cursor_error = error; goto cleanup; }
range_lock_grabbed = true;
error = 0;
cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Returns true when the supplied packed search key is NULL for a nullable
// index: some key part must be nullable, and the key's first byte (the null
// indicator in the packed format) must be non-zero.
static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint key_len) {
    KEY *key_info = &table->key_info[keynr];
    KEY_PART_INFO *part = key_info->key_part;
    KEY_PART_INFO *part_end = part + get_key_parts(key_info);
    bool nullable = false;
    while (part != part_end) {
        if (part->null_bit) {
            nullable = true;
            break;
        }
        part++;
    }
    return nullable && key_len > 0 && key[0] != 0;
}
// Return true if bulk fetch can be used
// Return true if bulk fetch can be used
static bool tokudb_do_bulk_fetch(THD *thd) {
    const int sql_command = thd_sql_command(thd);
    // only read-heavy statements benefit from bulk fetching
    const bool eligible = sql_command == SQLCOM_SELECT ||
                          sql_command == SQLCOM_CREATE_TABLE ||
                          sql_command == SQLCOM_INSERT_SELECT ||
                          sql_command == SQLCOM_REPLACE_SELECT ||
                          sql_command == SQLCOM_DELETE;
    return eligible && THDVAR(thd, bulk_fetch) != 0;
}
//
// Notification that a range query getting all elements that equal a key
// to take place. Will pre acquire read lock
// Returns:
// 0 on success
// error otherwise
//
//
// Pre-acquire a read lock on the range [key-inf, key+inf] ahead of a
// range query for all rows equal to 'key', and set up bulk-fetch state.
// Parameters:
//      [in]    key - packed MySQL search key
//              key_len - length of key in bytes
// Returns:
//      0 on success; on error the cursor is closed and
//      last_cursor_error is set
//
int ha_tokudb::prepare_index_key_scan(const uchar * key, uint key_len) {
    TOKUDB_HANDLER_DBUG_ENTER("%p %u", key, key_len);
    int error = 0;
    DBT start_key, end_key;
    THD* thd = ha_thd();
    HANDLE_INVALID_CURSOR();
    // bracket the key with -inf / +inf so the bounds cover every row equal
    // to the (possibly partial) key; remember both for later prelock reuse
    pack_key(&start_key, tokudb_active_index, prelocked_left_range, key, key_len, COL_NEG_INF);
    prelocked_left_range_size = start_key.size;
    pack_key(&end_key, tokudb_active_index, prelocked_right_range, key, key_len, COL_POS_INF);
    prelocked_right_range_size = end_key.size;
    error = cursor->c_set_bounds(
        cursor,
        &start_key,
        &end_key,
        true,
        (cursor_flags & DB_SERIALIZABLE) != 0 ? DB_NOTFOUND : 0
        );
    if (error){
        goto cleanup;
    }
    range_lock_grabbed = true;
    range_lock_grabbed_null = index_key_is_null(table, tokudb_active_index, key, key_len);
    doing_bulk_fetch = tokudb_do_bulk_fetch(thd);
    bulk_fetch_iteration = 0;
    rows_fetched_using_bulk_fetch = 0;
    error = 0;
cleanup:
    if (error) {
        error = map_to_handler_error(error);
        last_cursor_error = error;
        //
        // cursor should be initialized here, but in case it is not,
        // we still check
        //
        if (cursor) {
            int r = cursor->c_close(cursor);
            assert(r==0);
            cursor = NULL;
            remove_from_trx_handler_list();
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Discard any rows buffered by a previous bulk fetch.
void ha_tokudb::invalidate_bulk_fetch() {
    curr_range_query_buff_offset = 0;
    bytes_used_in_range_query_buff = 0;
    icp_went_out_of_range = false;
}
// Clear the pushed index-condition (ICP) state.
void ha_tokudb::invalidate_icp() {
    toku_pushed_idx_cond_keyno = MAX_KEY;
    toku_pushed_idx_cond = NULL;
    icp_went_out_of_range = false;
}
//
// Initializes local cursor on DB with index keynr
// Parameters:
// keynr - key (index) number
// sorted - 1 if result MUST be sorted according to index
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::index_init(uint keynr, bool sorted) {
    TOKUDB_HANDLER_DBUG_ENTER("%d %u txn %p", keynr, sorted, transaction);
    int error;
    THD* thd = ha_thd();
    DBUG_PRINT("enter", ("table: '%s' key: %d", table_share->table_name.str, keynr));
    /*
       Under some very rare conditions (like full joins) we may already have
       an active cursor at this point
     */
    if (cursor) {
        DBUG_PRINT("note", ("Closing active cursor"));
        int r = cursor->c_close(cursor);
        assert(r==0);
        remove_from_trx_handler_list();
    }
    active_index = keynr;
    // MAX_KEY means "no index chosen": fall back to the primary dictionary
    if (active_index < MAX_KEY) {
        DBUG_ASSERT(keynr <= table->s->keys);
    } else {
        DBUG_ASSERT(active_index == MAX_KEY);
        keynr = primary_key;
    }
    tokudb_active_index = keynr;
#if TOKU_CLUSTERING_IS_COVERING
    // a covering clustering key carries the full row, so key-only reads
    // are unnecessary
    if (keynr < table->s->keys && table->key_info[keynr].option_struct->clustering)
        key_read = false;
#endif
    last_cursor_error = 0;
    range_lock_grabbed = false;
    range_lock_grabbed_null = false;
    DBUG_ASSERT(share->key_file[keynr]);
    cursor_flags = get_cursor_isolation_flags(lock.type, thd);
    if (use_write_locks) {
        cursor_flags |= DB_RMW;
    }
    if (get_disable_prefetching(thd)) {
        cursor_flags |= DBC_DISABLE_PREFETCHING;
    }
    // open the cursor, mapping engine errors to handler errors on failure
    if ((error = share->key_file[keynr]->cursor(share->key_file[keynr], transaction, &cursor, cursor_flags))) {
        if (error == TOKUDB_MVCC_DICTIONARY_TOO_NEW) {
            error = HA_ERR_TABLE_DEF_CHANGED;
            my_error(ER_TABLE_DEF_CHANGED, MYF(0));
        }
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
            my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
        }
        table->status = STATUS_NOT_FOUND;
        error = map_to_handler_error(error);
        last_cursor_error = error;
        cursor = NULL;             // Safety
        goto exit;
    }
    cursor->c_set_check_interrupt_callback(cursor, tokudb_killed_thd_callback, thd);
    memset((void *) &last_key, 0, sizeof(last_key));
    add_to_trx_handler_list();
    // SELECTs only need the referenced columns; other statements unpack
    // the whole row
    if (thd_sql_command(thd) == SQLCOM_SELECT) {
        set_query_columns(keynr);
        unpack_entire_row = false;
    }
    else {
        unpack_entire_row = true;
    }
    invalidate_bulk_fetch();
    doing_bulk_fetch = false;
    maybe_index_scan = false;
    error = 0;
exit:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// closes the local cursor
//
// Close the local cursor (if open) and reset all per-query scan state.
// Always returns 0.
int ha_tokudb::index_end() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    range_lock_grabbed = false;
    range_lock_grabbed_null = false;
    if (cursor) {
        DBUG_PRINT("enter", ("table: '%s'", table_share->table_name.str));
        int rc = cursor->c_close(cursor);
        assert(rc == 0);
        cursor = NULL;
        remove_from_trx_handler_list();
        last_cursor_error = 0;
    }
    active_index = tokudb_active_index = MAX_KEY;
    // reset the query-column state back to "unpack everything"
    unpack_entire_row = true;
    read_key = true;
    read_blobs = true;
    num_fixed_cols_for_query = 0;
    num_var_cols_for_query = 0;
    invalidate_bulk_fetch();
    invalidate_icp();
    doing_bulk_fetch = false;
    close_dsmrr();
    TOKUDB_HANDLER_DBUG_RETURN(0);
}
// Translate a cursor error for the handler API: record it in
// last_cursor_error, mark the row as not found, and convert DB_NOTFOUND
// into the caller-supplied err_to_return. A zero error passes through.
int ha_tokudb::handle_cursor_error(int error, int err_to_return, uint keynr) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    if (error != 0) {
        error = map_to_handler_error(error);
        last_cursor_error = error;
        table->status = STATUS_NOT_FOUND;
        if (error == DB_NOTFOUND) {
            error = err_to_return;
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Helper function for read_row and smart_dbt_callback_xxx functions
// When using a hidden primary key, upon reading a row,
// we set the current_ident field to whatever the primary key we retrieved
// was
//
// Copy the hidden primary key out of found_key into current_ident.
// In the main dictionary the hidden pk is the whole key; in a secondary
// index it is appended at the tail of the key.
void ha_tokudb::extract_hidden_primary_key(uint keynr, DBT const *found_key) {
    if (!hidden_primary_key)
        return;
    const char* src = (const char *) found_key->data;
    if (keynr != primary_key) {
        src += found_key->size - TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
    }
    memcpy(current_ident, src, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
}
// Callback used for point queries against the primary dictionary:
// decodes the fetched (key, row) pair into buf in MySQL row format.
int ha_tokudb::read_row_callback (uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
    // only ever invoked for the primary dictionary
    assert(keynr == primary_key);
    return unpack_row(buf, row, found_key, keynr);
}
//
// Reads the contents of found_key, a DBT retrieved from the DB associated to keynr, into buf.
// This function is used when we have a covering index: only the key is unpacked, and no
// row value is ever read (so if keynr is the primary key of a hidden-pk table, nothing is done).
// Parameters:
//      [out]   buf - buffer for the row, in MySQL format
//      keynr - index into key_file that represents DB we are currently operating on.
//      [in]    found_key - key used to retrieve the row
//
// Unpacks only the key portion into buf (covering-index read). The single
// case where nothing is decoded is the main dictionary of a table with a
// hidden primary key, since that key carries no MySQL-visible columns.
void ha_tokudb::read_key_only(uchar * buf, uint keynr, DBT const *found_key) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    table->status = 0;
    // equivalent to !(hidden_primary_key && keynr == primary_key)
    if (!hidden_primary_key || keynr != primary_key) {
        unpack_key(buf, found_key, keynr);
    }
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
//
// Helper function used to try to retrieve the entire row
// If keynr is associated with the main table, reads contents of found_key and row into buf, otherwise,
// makes copy of primary key and saves it to last_key. This can later be used to retrieve the entire row
// Parameters:
// [out] buf - buffer for the row, in MySQL format
// keynr - index into key_file that represents DB we are currently operating on.
// [in] row - the row that has been read from the preceding DB call
// [in] found_key - key used to retrieve the row
//
// Tries to materialize as much of the row as possible from a single fetch.
// If keynr is the primary key or a clustering secondary, the full row is in
// `row` and is unpacked straight into buf. Otherwise only the key columns are
// unpacked and the primary key is rebuilt into last_key so a follow-up
// read_full_row() point query can fetch the rest.
// Returns 0 on success, or an unpack error.
int ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    table->status = 0;
    //
    // case where we read from secondary table that is not clustered
    //
    if (keynr != primary_key && !key_is_clustering(&table->key_info[keynr])) {
        bool has_null;
        //
        // create a DBT that has the same data as row, this is inefficient
        // extract_hidden_primary_key MUST have been called before this
        //
        memset((void *) &last_key, 0, sizeof(last_key));
        // with a hidden pk there are no MySQL columns in the key to unpack
        if (!hidden_primary_key) {
            unpack_key(buf, found_key, keynr);
        }
        create_dbt_key_from_table(
            &last_key,
            primary_key,
            key_buff,
            buf,
            &has_null
            );
    }
    //
    // else read from clustered/primary key
    //
    else {
        error = unpack_row(buf, row, found_key, keynr);
        if (error) { goto exit; }
    }
    if (found_key) { DBUG_DUMP("read row key", (uchar *) found_key->data, found_key->size); }
    error = 0;
exit:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// This function reads an entire row into buf. This function also assumes that
// the key needed to retrieve the row is stored in the member variable last_key
// Parameters:
// [out] buf - buffer for the row, in MySQL format
// Returns:
// 0 on success, error otherwise
//
// Fetches the full row for the primary key currently stored in last_key via
// a point query on the primary dictionary and unpacks it into buf.
// Returns 0 on success; DB_LOCK_NOTGRANTED maps to HA_ERR_LOCK_WAIT_TIMEOUT,
// and a missing row (DB_NOTFOUND) maps to HA_ERR_CRASHED, because last_key
// was just read from a secondary index so the pk must exist.
int ha_tokudb::read_full_row(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    struct smart_dbt_info info;
    info.ha = this;
    info.buf = buf;
    info.keynr = primary_key;
    //
    // assumes key is stored in this->last_key
    //
    error = share->file->getf_set(
        share->file,
        transaction,
        cursor_flags,
        &last_key,
        smart_dbt_callback_rowread_ptquery,
        &info
        );
    if (error) {
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
        }
        table->status = STATUS_NOT_FOUND;
        TOKUDB_HANDLER_DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error);
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Reads the next row matching to the key, on success, advances cursor
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// [in] key - key value
// keylen - length of key
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Reads the next row and verifies it still matches the given key prefix.
// The fetched row's key is re-packed and prefix-compared against the search
// key; any mismatch is reported as HA_ERR_END_OF_FILE.
int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    ha_statistic_increment(&SSV::ha_read_next_count);
    DBT curr_key;
    DBT found_key;
    bool has_null;
    int cmp;
    // create the key that will be used to compare with what is found
    // in order to figure out if we should return an error
    pack_key(&curr_key, tokudb_active_index, key_buff2, key, keylen, COL_ZERO);
    int error = get_next(buf, 1, &curr_key, key_read);
    if (error) {
        goto cleanup;
    }
    //
    // now do the comparison
    //
    create_dbt_key_from_table(&found_key,tokudb_active_index,key_buff3,buf,&has_null);
    cmp = tokudb_prefix_cmp_dbt_key(share->key_file[tokudb_active_index], &curr_key, &found_key);
    if (cmp) {
        error = HA_ERR_END_OF_FILE;
    }
cleanup:
    // normalize cursor errors; DB_NOTFOUND becomes HA_ERR_END_OF_FILE
    error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// According to InnoDB handlerton: Positions an index cursor to the index
// specified in keynr. Fetches the row if any
// Parameters:
// [out] buf - buffer for the returned row
// [in] key - key value, according to InnoDB, if NULL,
// position cursor at start or end of index,
// not sure if this is done now
// key_len - length of key
// find_flag - according to InnoDB, search flags from my_base.h
// Returns:
// 0 on success
// HA_ERR_KEY_NOT_FOUND if not found (per InnoDB),
// we seem to return HA_ERR_END_OF_FILE if find_flag != HA_READ_KEY_EXACT
// TODO: investigate this for correctness
// error otherwise
//
// Positions the index cursor according to find_flag and fetches the matching
// row into buf (see the handler API comment above for flag semantics).
// Fixes vs. previous revision: removed a stray empty statement (';;') and
// guarded the trx statistics update with a NULL check, matching
// index_first()/index_last() — thd_get_ha_data can return NULL when no
// TokuDB transaction is bound to this session yet.
int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) {
    TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x find=%u", key, key_len, key ? key[0] : 0, find_flag);
    invalidate_bulk_fetch();
    if (tokudb_debug & TOKUDB_DEBUG_INDEX_KEY) {
        TOKUDB_DBUG_DUMP("mysql key=", key, key_len);
    }
    DBT row;
    DBT lookup_key;
    int error = 0;
    uint32_t flags = 0;
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    struct smart_dbt_info info;
    struct index_read_info ir_info;
    HANDLE_INVALID_CURSOR();
    // if we locked a non-null key range and we now have a null key, then remove the bounds from the cursor
    if (range_lock_grabbed && !range_lock_grabbed_null && index_key_is_null(table, tokudb_active_index, key, key_len)) {
        range_lock_grabbed = range_lock_grabbed_null = false;
        cursor->c_remove_restriction(cursor);
    }
    ha_statistic_increment(&SSV::ha_read_key_count);
    memset((void *) &row, 0, sizeof(row));
    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;
    ir_info.smart_dbt_info = info;
    ir_info.cmp = 0;
    flags = SET_PRELOCK_FLAG(0);
    switch (find_flag) {
    case HA_READ_KEY_EXACT: /* Find first record else error */ {
        // bound the search to [key,COL_NEG_INF .. key,COL_POS_INF] and verify
        // the found key actually matches the prefix (ir_info.cmp)
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        DBT lookup_bound;
        pack_key(&lookup_bound, tokudb_active_index, key_buff4, key, key_len, COL_POS_INF);
        if (tokudb_debug & TOKUDB_DEBUG_INDEX_KEY) {
            TOKUDB_DBUG_DUMP("tokudb key=", lookup_key.data, lookup_key.size);
        }
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range_with_bound(cursor, flags, &lookup_key, &lookup_bound, SMART_DBT_IR_CALLBACK(key_read), &ir_info);
        if (ir_info.cmp) {
            error = DB_NOTFOUND;
        }
        break;
    }
    case HA_READ_AFTER_KEY: /* Find next rec. after key-record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        error = cursor->c_getf_set_range(cursor, flags, &lookup_key, SMART_DBT_CALLBACK(key_read), &info);
        break;
    case HA_READ_BEFORE_KEY: /* Find next rec. before key-record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        error = cursor->c_getf_set_range_reverse(cursor, flags, &lookup_key, SMART_DBT_CALLBACK(key_read), &info);
        break;
    case HA_READ_KEY_OR_NEXT: /* Record or next record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        error = cursor->c_getf_set_range(cursor, flags, &lookup_key, SMART_DBT_CALLBACK(key_read), &info);
        break;
    //
    // This case does not seem to ever be used, it is ok for it to be slow
    //
    case HA_READ_KEY_OR_PREV: /* Record or previous */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range(cursor, flags, &lookup_key, SMART_DBT_IR_CALLBACK(key_read), &ir_info);
        if (error == DB_NOTFOUND) {
            error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
        }
        else if (ir_info.cmp) {
            error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
        }
        break;
    case HA_READ_PREFIX_LAST_OR_PREV: /* Last or prev key with the same prefix */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        error = cursor->c_getf_set_range_reverse(cursor, flags, &lookup_key, SMART_DBT_CALLBACK(key_read), &info);
        break;
    case HA_READ_PREFIX_LAST:
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range_reverse(cursor, flags, &lookup_key, SMART_DBT_IR_CALLBACK(key_read), &ir_info);
        if (ir_info.cmp) {
            error = DB_NOTFOUND;
        }
        break;
    default:
        TOKUDB_HANDLER_TRACE("unsupported:%d", find_flag);
        error = HA_ERR_UNSUPPORTED;
        break;
    }
    error = handle_cursor_error(error, HA_ERR_KEY_NOT_FOUND, tokudb_active_index);
    // non-covering secondary index: must fetch the full row from the primary
    if (!error && !key_read && tokudb_active_index != primary_key && !key_is_clustering(&table->key_info[tokudb_active_index])) {
        error = read_full_row(buf);
    }
    if (error && (tokudb_debug & TOKUDB_DEBUG_ERROR)) {
        TOKUDB_HANDLER_TRACE("error:%d:%d", error, find_flag);
    }
    if (trx) {
        trx->stmt_progress.queried++;
        //read increment
        increment_partitioned_counter(toku_row_status.read, 1);
    }
    track_progress(thd);
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Decodes the next buffered record out of range_query_buff into buf.
// Buffer layout per record: [4-byte key size][key bytes] and, when a value
// was stored, either [4-byte val size][val bytes] (unpack_entire_row) or the
// projected columns (null bytes, fixed fields, length-prefixed var fields,
// optional length-prefixed blob region). Advances
// curr_range_query_buff_offset past the consumed record.
int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val, bool do_key_read) {
    // buffer has the next row, get it from there
    int error;
    uchar* curr_pos = range_query_buff+curr_range_query_buff_offset;
    DBT curr_key;
    memset((void *) &curr_key, 0, sizeof(curr_key));
    // get key info
    uint32_t key_size = *(uint32_t *)curr_pos;
    curr_pos += sizeof(key_size);
    uchar* curr_key_buff = curr_pos;
    curr_pos += key_size;
    curr_key.data = curr_key_buff;
    curr_key.size = key_size;
    // if this is a covering index, this is all we need
    if (do_key_read) {
        assert(!need_val);
        extract_hidden_primary_key(tokudb_active_index, &curr_key);
        read_key_only(buf, tokudb_active_index, &curr_key);
        error = 0;
    }
    // we need to get more data
    else {
        DBT curr_val;
        memset((void *) &curr_val, 0, sizeof(curr_val));
        uchar* curr_val_buff = NULL;
        uint32_t val_size = 0;
        // in this case, we don't have a val, we are simply extracting the pk
        if (!need_val) {
            curr_val.data = curr_val_buff;
            curr_val.size = val_size;
            extract_hidden_primary_key(tokudb_active_index, &curr_key);
            error = read_primary_key( buf, tokudb_active_index, &curr_val, &curr_key);
        }
        else {
            extract_hidden_primary_key(tokudb_active_index, &curr_key);
            // need to extract a val and place it into buf
            if (unpack_entire_row) {
                // get val info
                val_size = *(uint32_t *)curr_pos;
                curr_pos += sizeof(val_size);
                curr_val_buff = curr_pos;
                curr_pos += val_size;
                curr_val.data = curr_val_buff;
                curr_val.size = val_size;
                error = unpack_row(buf,&curr_val, &curr_key, tokudb_active_index);
            }
            else {
                // projected read: only the columns this query needs were stored
                if (!(hidden_primary_key && tokudb_active_index == primary_key)) {
                    unpack_key(buf,&curr_key,tokudb_active_index);
                }
                // read rows we care about
                // first the null bytes;
                memcpy(buf, curr_pos, table_share->null_bytes);
                curr_pos += table_share->null_bytes;
                // now the fixed sized rows
                for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
                    uint field_index = fixed_cols_for_query[i];
                    Field* field = table->field[field_index];
                    unpack_fixed_field(
                        buf + field_offset(field, table),
                        curr_pos,
                        share->kc_info.field_lengths[field_index]
                        );
                    curr_pos += share->kc_info.field_lengths[field_index];
                }
                // now the variable sized rows
                for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
                    uint field_index = var_cols_for_query[i];
                    Field* field = table->field[field_index];
                    uint32_t field_len = *(uint32_t *)curr_pos;
                    curr_pos += sizeof(field_len);
                    unpack_var_field(
                        buf + field_offset(field, table),
                        curr_pos,
                        field_len,
                        share->kc_info.length_bytes[field_index]
                        );
                    curr_pos += field_len;
                }
                // now the blobs
                if (read_blobs) {
                    uint32_t blob_size = *(uint32_t *)curr_pos;
                    curr_pos += sizeof(blob_size);
                    error = unpack_blobs(
                        buf,
                        curr_pos,
                        blob_size,
                        true
                        );
                    curr_pos += blob_size;
                    if (error) {
                        invalidate_bulk_fetch();
                        goto exit;
                    }
                }
                error = 0;
            }
        }
    }
    // record how far we consumed so the next call resumes here
    curr_range_query_buff_offset = curr_pos - range_query_buff;
exit:
    return error;
}
// Bulk-fetch cursor callback: recovers the handler from the opaque context
// pointer and lets it append this (key, row) pair to the range query buffer.
static int
smart_dbt_bf_callback(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_BF_INFO bf = (SMART_DBT_BF_INFO) context;
    return bf->ha->fill_range_query_buf(
        bf->need_val, key, row, bf->direction, bf->thd, bf->buf, bf->key_to_compare);
}
#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
// Evaluates the pushed-down index condition (ICP) for the current row.
// First checks the scan's end_range (via the server's key comparison, whose
// name differs between MariaDB and MySQL); past the end returns
// ICP_OUT_OF_RANGE, otherwise the condition item decides MATCH/NO_MATCH.
// NOTE(review): assumes the row's key columns were already unpacked into the
// record buffer before this is called — see fill_range_query_buf.
enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
{
    enum icp_result res;
    if (end_range ) {
        int cmp;
#ifdef MARIADB_BASE_VERSION
        cmp = compare_key2(end_range);
#else
        cmp = compare_key_icp(end_range);
#endif
        if (cmp > 0) {
            return ICP_OUT_OF_RANGE;
        }
    }
    res = pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
    return res;
}
#endif
// fill in the range query buf for bulk fetch
// Bulk-fetch producer: appends the current cursor row to range_query_buff
// (growing it if needed), then decides whether the cursor callback should
// keep streaming (returns TOKUDB_CURSOR_CONTINUE) or stop (returns 0).
// Stop conditions: prefix mismatch vs key_to_compare (index_next_same), ICP
// out-of-range or killed thread, per-iteration row quota reached, buffer
// nearly full, or the key passing the prelocked range endpoint.
int ha_tokudb::fill_range_query_buf(
    bool need_val,
    DBT const *key,
    DBT const *row,
    int direction,
    THD* thd,
    uchar* buf,
    DBT* key_to_compare
    ) {
    int error;
    //
    // first put the value into range_query_buf
    //
    uint32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff;
    uint32_t size_needed;
    uint32_t user_defined_size = get_tokudb_read_buf_size(thd);
    uchar* curr_pos = NULL;
    if (key_to_compare) {
        int cmp = tokudb_prefix_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key_to_compare,
            key
            );
        if (cmp) {
            icp_went_out_of_range = true;
            error = 0;
            goto cleanup;
        }
    }
#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
    // if we have an index condition pushed down, we check it
    if (toku_pushed_idx_cond && (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
        unpack_key(buf, key, tokudb_active_index);
        enum icp_result result = toku_handler_index_cond_check(toku_pushed_idx_cond);
        // If we have reason to stop, we set icp_went_out_of_range and get out
        if (result == ICP_OUT_OF_RANGE || thd_killed(thd)) {
            icp_went_out_of_range = true;
            error = 0;
            goto cleanup;
        }
        // otherwise, if we simply see that the current key is no match,
        // we tell the cursor to continue and don't store
        // the key locally
        else if (result == ICP_NO_MATCH) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
    }
#endif
    // at this point, if ICP is on, we have verified that the key is one
    // we are interested in, so we proceed with placing the data
    // into the range query buffer
    if (need_val) {
        if (unpack_entire_row) {
            size_needed = 2*sizeof(uint32_t) + key->size + row->size;
        }
        else {
            // this is an upper bound
            size_needed = sizeof(uint32_t) + // size of key length
                key->size + row->size + //key and row
                num_var_cols_for_query*(sizeof(uint32_t)) + //lengths of varchars stored
                sizeof(uint32_t); //length of blobs
        }
    }
    else {
        size_needed = sizeof(uint32_t) + key->size;
    }
    if (size_remaining < size_needed) {
        range_query_buff = (uchar *)tokudb_my_realloc(
            (void *)range_query_buff,
            bytes_used_in_range_query_buff+size_needed,
            MYF(MY_WME)
            );
        if (range_query_buff == NULL) {
            error = ENOMEM;
            invalidate_bulk_fetch();
            goto cleanup;
        }
        size_range_query_buff = bytes_used_in_range_query_buff+size_needed;
    }
    //
    // now we know we have the size, let's fill the buffer, starting with the key
    //
    curr_pos = range_query_buff + bytes_used_in_range_query_buff;
    *(uint32_t *)curr_pos = key->size;
    curr_pos += sizeof(uint32_t);
    memcpy(curr_pos, key->data, key->size);
    curr_pos += key->size;
    if (need_val) {
        if (unpack_entire_row) {
            *(uint32_t *)curr_pos = row->size;
            curr_pos += sizeof(uint32_t);
            memcpy(curr_pos, row->data, row->size);
            curr_pos += row->size;
        }
        else {
            // need to unpack just the data we care about
            const uchar* fixed_field_ptr = (const uchar *) row->data;
            fixed_field_ptr += table_share->null_bytes;
            const uchar* var_field_offset_ptr = NULL;
            const uchar* var_field_data_ptr = NULL;
            var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
            var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;
            // first the null bytes
            memcpy(curr_pos, row->data, table_share->null_bytes);
            curr_pos += table_share->null_bytes;
            // now the fixed fields
            //
            // first the fixed fields
            //
            for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
                uint field_index = fixed_cols_for_query[i];
                memcpy(
                    curr_pos,
                    fixed_field_ptr + share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val,
                    share->kc_info.field_lengths[field_index]
                    );
                curr_pos += share->kc_info.field_lengths[field_index];
            }
            //
            // now the var fields
            //
            for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
                uint field_index = var_cols_for_query[i];
                uint32_t var_field_index = share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
                uint32_t data_start_offset;
                uint32_t field_len;
                get_var_field_info(
                    &field_len,
                    &data_start_offset,
                    var_field_index,
                    var_field_offset_ptr,
                    share->kc_info.num_offset_bytes
                    );
                memcpy(curr_pos, &field_len, sizeof(field_len));
                curr_pos += sizeof(field_len);
                memcpy(curr_pos, var_field_data_ptr + data_start_offset, field_len);
                curr_pos += field_len;
            }
            if (read_blobs) {
                uint32_t blob_offset = 0;
                uint32_t data_size = 0;
                //
                // now the blobs
                //
                get_blob_field_info(
                    &blob_offset,
                    share->kc_info.mcp_info[tokudb_active_index].len_of_offsets,
                    var_field_data_ptr,
                    share->kc_info.num_offset_bytes
                    );
                data_size = row->size - blob_offset - (uint32_t)(var_field_data_ptr - (const uchar *)row->data);
                memcpy(curr_pos, &data_size, sizeof(data_size));
                curr_pos += sizeof(data_size);
                memcpy(curr_pos, var_field_data_ptr + blob_offset, data_size);
                curr_pos += data_size;
            }
        }
    }
    bytes_used_in_range_query_buff = curr_pos - range_query_buff;
    assert(bytes_used_in_range_query_buff <= size_range_query_buff);
    //
    // now determine if we should continue with the bulk fetch
    // we want to stop under these conditions:
    //  - we overran the prelocked range
    //  - we are close to the end of the buffer
    //  - we have fetched an exponential amount of rows with
    //  respect to the bulk fetch iteration, which is initialized
    //  to 0 in index_init() and prelock_range().
    rows_fetched_using_bulk_fetch++;
    // if the iteration is less than the number of possible shifts on
    // a 64 bit integer, check that we haven't exceeded this iterations
    // row fetch upper bound.
    if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) {
        uint64_t row_fetch_upper_bound = 1LLU << bulk_fetch_iteration;
        assert(row_fetch_upper_bound > 0);
        if (rows_fetched_using_bulk_fetch >= row_fetch_upper_bound) {
            error = 0;
            goto cleanup;
        }
    }
    if (bytes_used_in_range_query_buff + table_share->rec_buff_length > user_defined_size) {
        error = 0;
        goto cleanup;
    }
    if (direction > 0) {
        // compare what we got to the right endpoint of prelocked range
        // because we are searching keys in ascending order
        if (prelocked_right_range_size == 0) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
        DBT right_range;
        memset(&right_range, 0, sizeof(right_range));
        right_range.size = prelocked_right_range_size;
        right_range.data = prelocked_right_range;
        int cmp = tokudb_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key,
            &right_range
            );
        error = (cmp > 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
    }
    else {
        // compare what we got to the left endpoint of prelocked range
        // because we are searching keys in descending order
        if (prelocked_left_range_size == 0) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
        DBT left_range;
        memset(&left_range, 0, sizeof(left_range));
        left_range.size = prelocked_left_range_size;
        left_range.data = prelocked_left_range;
        int cmp = tokudb_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key,
            &left_range
            );
        error = (cmp < 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
    }
cleanup:
    return error;
}
// Core row-advance routine shared by index_next/index_prev/rnd_next.
// Serves rows from the bulk-fetch buffer when available; otherwise advances
// the cursor (bulk-fetch refill via smart_dbt_bf_callback, or a single
// c_getf_next/prev). Afterwards, performs the primary-key point query for
// non-covering secondary reads and updates per-statement progress counters.
// direction > 0 scans forward, otherwise backward; key_to_compare (may be
// NULL) bounds bulk fetch for index_next_same.
int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read) {
    int error = 0;
    HANDLE_INVALID_CURSOR();
    if (maybe_index_scan) {
        maybe_index_scan = false;
        if (!range_lock_grabbed) {
            error = prepare_index_scan();
        }
    }
    if (!error) {
        uint32_t flags = SET_PRELOCK_FLAG(0);
        // we need to read the val of what we retrieve if
        // we do NOT have a covering index AND we are using a clustering secondary
        // key
        bool need_val = (do_key_read == 0) &&
            (tokudb_active_index == primary_key || key_is_clustering(&table->key_info[tokudb_active_index]));
        if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
            error = read_data_from_range_query_buff(buf, need_val, do_key_read);
        }
        else if (icp_went_out_of_range) {
            icp_went_out_of_range = false;
            error = HA_ERR_END_OF_FILE;
        }
        else {
            invalidate_bulk_fetch();
            if (doing_bulk_fetch) {
                struct smart_dbt_bf_info bf_info;
                bf_info.ha = this;
                // you need the val if you have a clustering index and key_read is not 0;
                bf_info.direction = direction;
                bf_info.thd = ha_thd();
                bf_info.need_val = need_val;
                bf_info.buf = buf;
                bf_info.key_to_compare = key_to_compare;
                //
                // call c_getf_next with purpose of filling in range_query_buff
                //
                rows_fetched_using_bulk_fetch = 0;
                // it is expected that we can do ICP in the smart_dbt_bf_callback
                // as a result, it's possible we don't return any data because
                // none of the rows matched the index condition. Therefore, we need
                // this while loop. icp_out_of_range will be set if we hit a row that
                // the index condition states is out of our range. When that hits,
                // we know all the data in the buffer is the last data we will retrieve
                while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) {
                    if (direction > 0) {
                        error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info);
                    } else {
                        error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info);
                    }
                }
                // if there is no data set and we went out of range,
                // then there is nothing to return
                if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) {
                    icp_went_out_of_range = false;
                    error = HA_ERR_END_OF_FILE;
                }
                if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) {
                    bulk_fetch_iteration++;
                }
                error = handle_cursor_error(error, HA_ERR_END_OF_FILE,tokudb_active_index);
                if (error) { goto cleanup; }
                //
                // now that range_query_buff is filled, read an element
                //
                error = read_data_from_range_query_buff(buf, need_val, do_key_read);
            }
            else {
                struct smart_dbt_info info;
                info.ha = this;
                info.buf = buf;
                info.keynr = tokudb_active_index;
                if (direction > 0) {
                    error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
                } else {
                    error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
                }
                error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
            }
        }
    }
    //
    // at this point, one of two things has happened
    // either we have unpacked the data into buf, and we
    // are done, or we have unpacked the primary key
    // into last_key, and we use the code below to
    // read the full row by doing a point query into the
    // main table.
    //
    if (!error && !do_key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
        error = read_full_row(buf);
    }
    if (!error) {
        THD *thd = ha_thd();
        tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
        trx->stmt_progress.queried++;
        //read increment
        increment_partitioned_counter(toku_row_status.read, 1);
        track_progress(thd);
        if (thd_killed(thd))
            error = ER_ABORTING_CONNECTION;
    }
cleanup:
    return error;
}
//
// Reads the next row from the active index (cursor) into buf, and advances cursor
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Advances the active-index cursor forward one row into buf.
// Returns 0, HA_ERR_END_OF_FILE at the end of the index, or an error code.
int ha_tokudb::index_next(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    ha_statistic_increment(&SSV::ha_read_next_count);
    const int ret = get_next(buf, 1, NULL, key_read);
    TOKUDB_HANDLER_DBUG_RETURN(ret);
}
// Fetches the last row matching the given key prefix by delegating to
// index_read with HA_READ_PREFIX_LAST.
int ha_tokudb::index_read_last(uchar * buf, const uchar * key, uint key_len) {
    return index_read(buf, key, key_len, HA_READ_PREFIX_LAST);
}
//
// Reads the previous row from the active index (cursor) into buf, and advances cursor
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Moves the active-index cursor backward one row into buf.
// Returns 0, HA_ERR_END_OF_FILE when the start of the index is passed,
// or an error code.
int ha_tokudb::index_prev(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    ha_statistic_increment(&SSV::ha_read_prev_count);
    const int ret = get_next(buf, -1, NULL, key_read);
    TOKUDB_HANDLER_DBUG_RETURN(ret);
}
//
// Reads the first row from the active index (cursor) into buf, and advances cursor
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Positions the cursor at the first row of the active index and reads it
// into buf. Returns 0, HA_ERR_END_OF_FILE for an empty index, or an error.
// Fix: removed a stray empty statement (double ';') after the trx lookup.
int ha_tokudb::index_first(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    invalidate_bulk_fetch();
    int error = 0;
    struct smart_dbt_info info;
    uint32_t flags = SET_PRELOCK_FLAG(0);
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    HANDLE_INVALID_CURSOR();
    ha_statistic_increment(&SSV::ha_read_first_count);
    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;
    error = cursor->c_getf_first(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
    error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
    //
    // still need to get entire contents of the row if operation done on
    // secondary DB and it was NOT a covering index
    //
    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
        error = read_full_row(buf);
    }
    if (trx) {
        trx->stmt_progress.queried++;
        //read increment
        increment_partitioned_counter(toku_row_status.read, 1);
    }
    track_progress(thd);
    // a full scan may follow; let get_next() decide whether to prelock
    maybe_index_scan = true;
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Reads the last row from the active index (cursor) into buf, and advances cursor
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Positions the cursor at the last row of the active index and reads it
// into buf. Returns 0, HA_ERR_END_OF_FILE for an empty index, or an error.
// Fix: removed a stray empty statement (double ';') after the trx lookup.
int ha_tokudb::index_last(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    invalidate_bulk_fetch();
    int error = 0;
    struct smart_dbt_info info;
    uint32_t flags = SET_PRELOCK_FLAG(0);
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    HANDLE_INVALID_CURSOR();
    ha_statistic_increment(&SSV::ha_read_last_count);
    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;
    error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
    error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
    //
    // still need to get entire contents of the row if operation done on
    // secondary DB and it was NOT a covering index
    //
    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
        error = read_full_row(buf);
    }
    if (trx) {
        trx->stmt_progress.queried++;
        //read increment
        increment_partitioned_counter(toku_row_status.read, 1);
    }
    track_progress(thd);
    // a reverse scan may follow; let get_next() decide whether to prelock
    maybe_index_scan = true;
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Initialize a scan of the table (which is why index_init is called on primary_key)
// Parameters:
// scan - unused
// Returns:
// 0 on success
// error otherwise
//
// Starts a table scan by opening a cursor on the primary key (index_init
// with MAX_KEY selects the main dictionary) and, for real scans, prelocking
// the whole key range. On failure, the cursor is closed and the error is
// remembered in last_cursor_error.
int ha_tokudb::rnd_init(bool scan) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    range_lock_grabbed = false;
    error = index_init(MAX_KEY, 0);
    if (error) { goto cleanup;}
    if (scan) {
        // prelock the entire range (NULL endpoints = -inf .. +inf)
        error = prelock_range(NULL, NULL);
        if (error) { goto cleanup; }
        // only want to set range_lock_grabbed to true after index_init
        // successfully executed for two reasons:
        // 1) index_init will reset it to false anyway
        // 2) if it fails, we don't want prelocking on,
        range_lock_grabbed = true;
    }
    error = 0;
cleanup:
    if (error) {
        index_end();
        last_cursor_error = error;
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// End a scan of the table
//
// Finishes a table scan: drops the prelocked-range flag and closes the
// cursor via index_end(), returning its result.
int ha_tokudb::rnd_end() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    range_lock_grabbed = false;
    TOKUDB_HANDLER_DBUG_RETURN(index_end());
}
//
// Read the next row in a table scan
// Parameters:
// [out] buf - buffer for the next row, in MySQL format
// Returns:
// 0 on success
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
// Reads the next row of a table scan into buf (forward direction, no
// key-only optimization since the whole row is wanted).
int ha_tokudb::rnd_next(uchar * buf) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    ha_statistic_increment(&SSV::ha_read_rnd_next_count);
    const int ret = get_next(buf, 1, NULL, false);
    TOKUDB_HANDLER_DBUG_RETURN(ret);
}
// Periodically publishes per-statement progress ("Queried about N rows,
// Inserted about M rows, ...") to the processlist via thd_proc_info. Only
// fires every tokudb_read_status_frequency queried rows or every
// tokudb_write_status_frequency written rows.
// NOTE(review): the sprintf chain assumes write_status_msg is large enough
// for all four clauses — no bounds checking here; confirm buffer size at its
// declaration.
void ha_tokudb::track_progress(THD* thd) {
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    if (trx) {
        ulonglong num_written = trx->stmt_progress.inserted + trx->stmt_progress.updated + trx->stmt_progress.deleted;
        bool update_status =
            (trx->stmt_progress.queried && tokudb_read_status_frequency && (trx->stmt_progress.queried % tokudb_read_status_frequency) == 0) ||
            (num_written && tokudb_write_status_frequency && (num_written % tokudb_write_status_frequency) == 0);
        if (update_status) {
            char *next_status = write_status_msg;
            bool first = true;
            int r;
            if (trx->stmt_progress.queried) {
                r = sprintf(next_status, "Queried about %llu row%s", trx->stmt_progress.queried, trx->stmt_progress.queried == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.inserted) {
                if (trx->stmt_progress.using_loader) {
                    r = sprintf(next_status, "%sFetched about %llu row%s, loading data still remains", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
                }
                else {
                    r = sprintf(next_status, "%sInserted about %llu row%s", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
                }
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.updated) {
                r = sprintf(next_status, "%sUpdated about %llu row%s", first ? "" : ", ", trx->stmt_progress.updated, trx->stmt_progress.updated == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.deleted) {
                r = sprintf(next_status, "%sDeleted about %llu row%s", first ? "" : ", ", trx->stmt_progress.deleted, trx->stmt_progress.deleted == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            // only publish if at least one clause was written
            if (!first)
                thd_proc_info(thd, write_status_msg);
        }
    }
}
// Rebuilds a key DBT from a saved position. pos is length-prefixed:
// a 4-byte size followed by the raw key bytes (as stored by position()).
DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    /* We don't need to set app_data here */
    memset((void *) to, 0, sizeof(*to));
    to->size = *(uint32_t *)pos;
    to->data = pos + sizeof(uint32_t);
    DBUG_DUMP("key", (const uchar *) to->data, to->size);
    DBUG_RETURN(to);
}
// Retrieves a row with based on the primary key saved in pos
// Returns:
// 0 on success
// HA_ERR_KEY_NOT_FOUND if not found
// error otherwise
// Fetches the row identified by the primary key previously saved in pos
// (see get_pos for the encoding) via a point query on the main dictionary.
// Temporarily forces unpack_entire_row since the caller wants every column.
// Returns 0, HA_ERR_KEY_NOT_FOUND, or another engine error.
int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    DBT db_pos;
    int error = 0;
    struct smart_dbt_info info;
    bool old_unpack_entire_row = unpack_entire_row;
    DBT* key = get_pos(&db_pos, pos);
    unpack_entire_row = true;
    ha_statistic_increment(&SSV::ha_read_rnd_count);
    tokudb_active_index = MAX_KEY;
    // test rpl slave by inducing a delay before the point query
    THD *thd = ha_thd();
    if (thd->slave_thread && (in_rpl_delete_rows || in_rpl_update_rows)) {
        uint64_t delay_ms = THDVAR(thd, rpl_lookup_rows_delay);
        if (delay_ms)
            usleep(delay_ms * 1000);
    }
    info.ha = this;
    info.buf = buf;
    info.keynr = primary_key;
    error = share->file->getf_set(share->file, transaction,
            get_cursor_isolation_flags(lock.type, thd),
            key, smart_dbt_callback_rowread_ptquery, &info);
    if (error == DB_NOTFOUND) {
        error = HA_ERR_KEY_NOT_FOUND;
        goto cleanup;
    }
cleanup:
    // restore the caller's projection setting in every exit path
    unpack_entire_row = old_unpack_entire_row;
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Acquires a range lock on [start_key, end_key] of the active index and sets
// cursor bounds; NULL endpoints extend to -infinity/+infinity. Also records
// the packed endpoints in prelocked_left/right_range so bulk fetch can tell
// when a scan passes the locked range, and decides whether bulk fetch will
// be used for the upcoming scan. On failure the cursor is closed.
int ha_tokudb::prelock_range(const key_range *start_key, const key_range *end_key) {
    TOKUDB_HANDLER_DBUG_ENTER("%p %p", start_key, end_key);
    THD* thd = ha_thd();
    int error = 0;
    DBT start_dbt_key;
    DBT end_dbt_key;
    uchar* start_key_buff  = prelocked_left_range;
    uchar* end_key_buff = prelocked_right_range;
    memset((void *) &start_dbt_key, 0, sizeof(start_dbt_key));
    memset((void *) &end_dbt_key, 0, sizeof(end_dbt_key));
    HANDLE_INVALID_CURSOR();
    if (start_key) {
        // exclusive start packs with +inf so equal keys fall outside the range
        switch (start_key->flag) {
        case HA_READ_AFTER_KEY:
            pack_key(&start_dbt_key, tokudb_active_index, start_key_buff, start_key->key, start_key->length, COL_POS_INF);
            break;
        default:
            pack_key(&start_dbt_key, tokudb_active_index, start_key_buff, start_key->key, start_key->length, COL_NEG_INF);
            break;
        }
        prelocked_left_range_size = start_dbt_key.size;
    }
    else {
        prelocked_left_range_size = 0;
    }
    if (end_key) {
        // exclusive end packs with -inf so equal keys fall outside the range
        switch (end_key->flag) {
        case HA_READ_BEFORE_KEY:
            pack_key(&end_dbt_key, tokudb_active_index, end_key_buff, end_key->key, end_key->length, COL_NEG_INF);
            break;
        default:
            pack_key(&end_dbt_key, tokudb_active_index, end_key_buff, end_key->key, end_key->length, COL_POS_INF);
            break;
        }
        prelocked_right_range_size = end_dbt_key.size;
    }
    else {
        prelocked_right_range_size = 0;
    }
    error = cursor->c_set_bounds(
        cursor,
        start_key ? &start_dbt_key : share->key_file[tokudb_active_index]->dbt_neg_infty(),
        end_key ? &end_dbt_key : share->key_file[tokudb_active_index]->dbt_pos_infty(),
        true,
        (cursor_flags & DB_SERIALIZABLE) != 0 ? DB_NOTFOUND : 0
        );
    if (error) {
        error = map_to_handler_error(error);
        last_cursor_error = error;
        //
        // cursor should be initialized here, but in case it is not, we still check
        //
        if (cursor) {
            int r = cursor->c_close(cursor);
            assert(r==0);
            cursor = NULL;
            remove_from_trx_handler_list();
        }
        goto cleanup;
    }
    // at this point, determine if we will be doing bulk fetch
    doing_bulk_fetch = tokudb_do_bulk_fetch(thd);
    bulk_fetch_iteration = 0;
    rows_fetched_using_bulk_fetch = 0;
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Prelock range if possible, start_key is leftmost, end_key is rightmost
// whether scanning forward or backward. This function is called by MySQL
// for backward range queries (in QUICK_SELECT_DESC::get_next).
// Forward scans use read_range_first()/read_range_next().
//
int ha_tokudb::prepare_range_scan( const key_range *start_key, const key_range *end_key) {
TOKUDB_HANDLER_DBUG_ENTER("%p %p", start_key, end_key);
int error = prelock_range(start_key, end_key);
if (!error) {
range_lock_grabbed = true;
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Start a forward range scan: grab the range lock first, and only on
// success hand off to the default handler implementation.
//
int ha_tokudb::read_range_first(
    const key_range *start_key,
    const key_range *end_key,
    bool eq_range,
    bool sorted)
{
    TOKUDB_HANDLER_DBUG_ENTER("%p %p %u %u", start_key, end_key, eq_range, sorted);
    int error = prelock_range(start_key, end_key);
    if (error == 0) {
        range_lock_grabbed = true;
        error = handler::read_range_first(start_key, end_key, eq_range, sorted);
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
int ha_tokudb::read_range_next()
{
TOKUDB_HANDLER_DBUG_ENTER("");
int error;
error = handler::read_range_next();
if (error) {
range_lock_grabbed = false;
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
/*
    Set a reference to the current record in (ref, ref_length).

    SYNOPSIS
    ha_tokudb::position()
    record                      The current record buffer

    DESCRIPTION
    Stores the primary key — either the explicit one or the implicit
    (hidden) one — into 'ref'. The layout is a 4-byte length prefix
    followed by the key bytes; the prefix tells rnd_pos()/get_pos() how
    much of the (maximum-sized) buffer is meaningful.

    RETURN
    nothing
*/
void ha_tokudb::position(const uchar * record) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    DBT key;
    if (hidden_primary_key) {
        DBUG_ASSERT(ref_length == (TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t)));
        // fixed-size hidden key: write the length prefix, then the key bytes
        *(uint32_t *)ref = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        memcpy(ref + sizeof(uint32_t), current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
    }
    else {
        bool has_null;
        // pack the real primary key directly after the length prefix ...
        create_dbt_key_from_table(&key, primary_key, ref + sizeof(uint32_t), record, &has_null);
        // ... then record its actual size in the first four bytes of ref
        memcpy(ref, &key.size, sizeof(uint32_t));
    }
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
//
// Per InnoDB: Returns statistics information of the table to the MySQL interpreter,
// in various fields of the handle object.
// Return:
// 0, always success
//
int ha_tokudb::info(uint flag) {
    TOKUDB_HANDLER_DBUG_ENTER("%d", flag);
    int error = 0;
#if TOKU_CLUSTERING_IS_COVERING
    // clustering keys carry the row, so advertise them as covering indexes
    for (uint i=0; i < table->s->keys; i++)
        if (key_is_clustering(&table->key_info[i]))
            table->covering_keys.set_bit(i);
#endif
    DB_TXN* txn = NULL;
    if (flag & HA_STATUS_VARIABLE) {
        // Just to get optimizations right
        stats.records = share->rows + share->rows_from_locked_table;
        if (stats.records == 0) {
            // never report exactly 0 rows to the optimizer
            stats.records++;
        }
        stats.deleted = 0;
        if (!(flag & HA_STATUS_NO_LOCK)) {
            uint64_t num_rows = 0;
            TOKU_DB_FRAGMENTATION_S frag_info;
            memset(&frag_info, 0, sizeof frag_info);
            // dirty-read txn: stats don't need a consistent snapshot
            error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
            if (error) { goto cleanup; }
            // we should always have a primary key
            assert(share->file != NULL);
            error = estimate_num_rows(share->file,&num_rows, txn);
            if (error == 0) {
                share->rows = num_rows;
                stats.records = num_rows;
                if (stats.records == 0) {
                    stats.records++;
                }
            }
            else {
                goto cleanup;
            }
            error = share->file->get_fragmentation(share->file, &frag_info);
            if (error) { goto cleanup; }
            stats.delete_length = frag_info.unused_bytes;
            DB_BTREE_STAT64 dict_stats;
            error = share->file->stat64(share->file, txn, &dict_stats);
            if (error) { goto cleanup; }
            stats.create_time = dict_stats.bt_create_time_sec;
            stats.update_time = dict_stats.bt_modify_time_sec;
            stats.check_time = dict_stats.bt_verify_time_sec;
            stats.data_file_length = dict_stats.bt_dsize;
            if (hidden_primary_key) {
                //
                // in this case, we have a hidden primary key, do not
                // want to report space taken up by the hidden primary key to the user
                //
                uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
                stats.data_file_length = (hpk_space > stats.data_file_length) ? 0 : stats.data_file_length - hpk_space;
            }
            else {
                //
                // one infinity byte per key needs to be subtracted
                //
                uint64_t inf_byte_space = dict_stats.bt_ndata;
                stats.data_file_length = (inf_byte_space > stats.data_file_length) ? 0 : stats.data_file_length - inf_byte_space;
            }
            stats.mean_rec_length = stats.records ? (ulong)(stats.data_file_length/stats.records) : 0;
            stats.index_file_length = 0;
            // curr_num_DBs is the number of keys we have, according
            // to the mysql layer. if drop index is running concurrently
            // with info() (it can, because info does not take table locks),
            // then it could be the case that one of the dbs was dropped
            // and set to NULL before mysql was able to set table->s->keys
            // accordingly.
            //
            // we should just ignore any DB * that is NULL.
            //
            // this solution is much simpler than trying to maintain an
            // accurate number of valid keys at the handlerton layer.
            uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
            for (uint i = 0; i < curr_num_DBs; i++) {
                // skip the primary key, skip dropped indexes
                if (i == primary_key || share->key_file[i] == NULL) {
                    continue;
                }
                error = share->key_file[i]->stat64(
                    share->key_file[i],
                    txn,
                    &dict_stats
                );
                if (error) { goto cleanup; }
                stats.index_file_length += dict_stats.bt_dsize;
                // BUGFIX: query fragmentation of *this* secondary index's
                // dictionary. The previous code re-queried the primary
                // (share->file) on every iteration, double-counting the
                // primary's unused bytes and never measuring the secondaries.
                error = share->key_file[i]->get_fragmentation(
                    share->key_file[i],
                    &frag_info
                );
                if (error) { goto cleanup; }
                stats.delete_length += frag_info.unused_bytes;
            }
        }
    }
    if ((flag & HA_STATUS_CONST)) {
        stats.max_data_file_length = 9223372036854775807ULL;
        tokudb::set_card_in_key_info(table, share->n_rec_per_key, share->rec_per_key);
    }
    /* Don't return key if we got an error for the internal primary key */
    if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys) {
        errkey = last_dup_key;
    }
    if (flag & HA_STATUS_AUTO && table->found_next_number_field) {
        THD *thd= table->in_use;
        struct system_variables *variables= &thd->variables;
        stats.auto_increment_value = share->last_auto_increment + variables->auto_increment_increment;
    }
    error = 0;
cleanup:
    if (txn != NULL) {
        // read-only stats transaction; commit without forcing a log sync
        commit_txn(txn, DB_TXN_NOSYNC);
        txn = NULL;
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Per InnoDB: Tells something additional to the handler about how to do things.
// Toggles per-statement handler flags; always returns 0.
//
int ha_tokudb::extra(enum ha_extra_function operation) {
    TOKUDB_HANDLER_DBUG_ENTER("%d", operation);
    switch (operation) {
    case HA_EXTRA_RESET_STATE:
        reset();
        break;
    case HA_EXTRA_KEYREAD:
    case HA_EXTRA_NO_KEYREAD:
        // whether the query can be satisfied from the key alone
        key_read = (operation == HA_EXTRA_KEYREAD);
        break;
    case HA_EXTRA_IGNORE_DUP_KEY:
    case HA_EXTRA_NO_IGNORE_DUP_KEY:
        using_ignore = (operation == HA_EXTRA_IGNORE_DUP_KEY);
        break;
    case HA_EXTRA_IGNORE_NO_KEY:
    case HA_EXTRA_NO_IGNORE_NO_KEY:
        using_ignore_no_key = (operation == HA_EXTRA_IGNORE_NO_KEY);
        break;
    case HA_EXTRA_NOT_USED:
    case HA_EXTRA_PREPARE_FOR_RENAME:
        break;  // must do nothing and return 0
    default:
        break;
    }
    TOKUDB_HANDLER_DBUG_RETURN(0);
}
//
// Clear all per-statement state so the handler is ready for reuse.
//
int ha_tokudb::reset(void) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    // drop the flags toggled through extra() ...
    using_ignore = false;
    using_ignore_no_key = false;
    key_read = false;
    // ... and reset MRR and index-condition-pushdown state
    reset_dsmrr();
    invalidate_icp();
    TOKUDB_HANDLER_DBUG_RETURN(0);
}
//
// helper function that iterates through all DB's
// and grabs a lock (either read or write, but not both)
// Parameters:
//      [in]    trans - transaction to be used to pre acquire the lock
//              lt - type of lock to get, either lock_read or lock_write
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
    TOKUDB_HANDLER_DBUG_ENTER("%p %s", trans, lt == lock_read ? "r" : "w");
    int error = ENOSYS;
    // pin share->num_DBs against concurrent index create/drop, unless a bulk
    // operation already holds the lock for us
    if (!num_DBs_locked_in_bulk) {
        rw_rdlock(&share->num_DBs_lock);
    }
    uint curr_num_DBs = share->num_DBs;
    if (lt == lock_read) {
        // read locks are not pre-acquired; succeed without touching the DBs
        error = 0;
        goto cleanup;
    }
    else if (lt == lock_write) {
        // grab a table-wide write lock on every dictionary (primary + secondaries)
        for (uint i = 0; i < curr_num_DBs; i++) {
            DB* db = share->key_file[i];
            error = db->pre_acquire_table_lock(db, trans);
            if (error == EINVAL)
                TOKUDB_HANDLER_TRACE("%d db=%p trans=%p", i, db, trans);
            if (error) break;
        }
        if (tokudb_debug & TOKUDB_DEBUG_LOCK)
            TOKUDB_HANDLER_TRACE("error=%d", error);
        if (error) goto cleanup;
    }
    else {
        // unknown lock type requested
        error = ENOSYS;
        goto cleanup;
    }
    error = 0;
cleanup:
    if (!num_DBs_locked_in_bulk) {
        rw_unlock(&share->num_DBs_lock);
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Begin the transactions this statement needs: an optional "master" (all)
// transaction when running under BEGIN / autocommit-off, plus a statement
// transaction nested inside it. Both are registered with the MySQL trans
// layer. Returns 0 on success; on failure any already-started master
// transaction is deliberately left open for the server to resolve.
//
int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) {
    int error;
    ulong tx_isolation = thd_tx_isolation(thd);
    HA_TOKU_ISO_LEVEL toku_iso_level = tx_to_toku_iso(tx_isolation);
    bool is_autocommit = !thd_test_options(
            thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN);
    /* First table lock, start transaction */
    // DDL statements are excluded: they manage their own transactions
    if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) &&
         !trx->all &&
         (thd_sql_command(thd) != SQLCOM_CREATE_TABLE) &&
         (thd_sql_command(thd) != SQLCOM_DROP_TABLE) &&
         (thd_sql_command(thd) != SQLCOM_DROP_INDEX) &&
         (thd_sql_command(thd) != SQLCOM_CREATE_INDEX) &&
         (thd_sql_command(thd) != SQLCOM_ALTER_TABLE)) {
        /* QQQ We have to start a master transaction */
        // DBUG_PRINT("trans", ("starting transaction all "));
        uint32_t txn_begin_flags = toku_iso_to_txn_flag(toku_iso_level);
#if 50614 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
        // MySQL 5.6.14+: honor START TRANSACTION READ ONLY
        if (thd_tx_is_read_only(thd)) {
            txn_begin_flags |= DB_TXN_READ_ONLY;
        }
#endif
        if ((error = txn_begin(db_env, NULL, &trx->all, txn_begin_flags, thd))) {
            goto cleanup;
        }
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_HANDLER_TRACE("created master %p", trx->all);
        }
        trx->sp_level = trx->all;
        // register with the server as participating in the whole transaction
        trans_register_ha(thd, true, tokudb_hton);
    }
    DBUG_PRINT("trans", ("starting transaction stmt"));
    if (trx->stmt) {
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_HANDLER_TRACE("warning:stmt=%p", trx->stmt);
        }
    }
    uint32_t txn_begin_flags;
    if (trx->all == NULL) {
        txn_begin_flags = toku_iso_to_txn_flag(toku_iso_level);
        //
        // if the isolation level that the user has set is serializable,
        // but autocommit is on and this is just a select,
        // then we can go ahead and set the isolation level to
        // be a snapshot read, because we can serialize
        // the transaction to be the point in time at which the snapshot began.
        //
        if (txn_begin_flags == 0 && is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT) {
            txn_begin_flags = DB_TXN_SNAPSHOT;
        }
        // autocommitted top-level read-only SELECTs can use a read-only txn
        if (is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT && !thd->in_sub_stmt && lock.type <= TL_READ_NO_INSERT && !thd->lex->uses_stored_routines()) {
            txn_begin_flags |= DB_TXN_READ_ONLY;
        }
    }
    else {
        // nested inside the master txn: inherit its isolation level
        txn_begin_flags = DB_INHERIT_ISOLATION;
    }
    if ((error = txn_begin(db_env, trx->sp_level, &trx->stmt, txn_begin_flags, thd))) {
        /* We leave the possible master transaction open */
        goto cleanup;
    }
    trx->sub_sp_level = trx->stmt;
    if (tokudb_debug & TOKUDB_DEBUG_TXN) {
        TOKUDB_HANDLER_TRACE("created stmt %p sp_level %p", trx->sp_level, trx->stmt);
    }
    reset_stmt_progress(&trx->stmt_progress);
    // register for statement-level commit/rollback as well
    trans_register_ha(thd, false, tokudb_hton);
cleanup:
    return error;
}
// Human-readable name for an fcntl-style lock type, used in trace output.
// Unknown values map to "?".
static const char *lock_type_str(int lock_type) {
    switch (lock_type) {
    case F_RDLCK: return "F_RDLCK";
    case F_WRLCK: return "F_WRLCK";
    case F_UNLCK: return "F_UNLCK";
    default:      return "?";
    }
}
/*
  As MySQL will execute an external lock for every new table it uses
  we can use this to start the transactions.
  If we are in auto_commit mode we just need to start a transaction
  for the statement to be able to rollback the statement.
  If not, we have to start a master transaction if there doesn't exist
  one from before.
*/
//
// Parameters:
//      [in]    thd - handle to the user thread
//              lock_type - F_RDLCK / F_WRLCK to lock, F_UNLCK to unlock
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::external_lock(THD * thd, int lock_type) {
    TOKUDB_HANDLER_DBUG_ENTER("cmd %d lock %d %s %s", thd_sql_command(thd), lock_type, lock_type_str(lock_type), share->table_name);
    if (!(tokudb_debug & TOKUDB_DEBUG_ENTER) && (tokudb_debug & TOKUDB_DEBUG_LOCK)) {
        TOKUDB_HANDLER_TRACE("cmd %d lock %d %s %s", thd_sql_command(thd), lock_type, lock_type_str(lock_type), share->table_name);
    }
    if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
        TOKUDB_HANDLER_TRACE("q %s", thd->query());
    }
    int error = 0;
    // lazily create this connection's per-hton transaction bookkeeping
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    if (!trx) {
        error = create_tokudb_trx_data_instance(&trx);
        if (error) { goto cleanup; }
        thd_set_ha_data(thd, tokudb_hton, trx);
    }
    if (trx->all == NULL) {
        // no master transaction: savepoint level starts empty
        trx->sp_level = NULL;
    }
    if (lock_type != F_UNLCK) {
        use_write_locks = false;
        if (lock_type == F_WRLCK) {
            use_write_locks = true;
        }
        // first table locked by this statement starts the stmt transaction
        if (!trx->tokudb_lock_count++) {
            if (trx->stmt) {
                if (tokudb_debug & TOKUDB_DEBUG_TXN) {
                    TOKUDB_HANDLER_TRACE("stmt already set %p %p %p %p", trx->all, trx->stmt, trx->sp_level, trx->sub_sp_level);
                }
            } else {
                assert(trx->stmt == 0);
                transaction = NULL; // Safety
                error = create_txn(thd, trx);
                if (error) {
                    trx->tokudb_lock_count--;  // We didn't get the lock
                    goto cleanup;
                }
            }
        }
        transaction = trx->sub_sp_level;
    }
    else {
        // unlock path: fold this statement's row-count delta into the share
        tokudb_pthread_mutex_lock(&share->mutex);
        // hate dealing with comparison of signed vs unsigned, so doing this
        if (deleted_rows > added_rows && share->rows < (deleted_rows - added_rows)) {
            share->rows = 0;
        }
        else {
            share->rows += (added_rows - deleted_rows);
        }
        tokudb_pthread_mutex_unlock(&share->mutex);
        added_rows = 0;
        deleted_rows = 0;
        share->rows_from_locked_table = 0;
        // last table unlocked by this statement ends the stmt transaction
        if (trx->tokudb_lock_count > 0 && !--trx->tokudb_lock_count) {
            if (trx->stmt) {
                /*
                  F_UNLCK is done without a transaction commit / rollback.
                  This happens if the thread didn't update any rows
                  We must in this case commit the work to keep the row locks
                */
                DBUG_PRINT("trans", ("commiting non-updating transaction"));
                reset_stmt_progress(&trx->stmt_progress);
                commit_txn(trx->stmt, 0);
                trx->stmt = NULL;
                trx->sub_sp_level = NULL;
            }
        }
        transaction = NULL;
    }
cleanup:
    if (tokudb_debug & TOKUDB_DEBUG_LOCK)
        TOKUDB_HANDLER_TRACE("error=%d", error);
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
/*
  When using LOCK TABLES, external_lock is only called when the actual
  TABLE LOCK is done.
  Under LOCK TABLES, each used table will force a call to start_stmt.
*/
int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
    TOKUDB_HANDLER_DBUG_ENTER("cmd %d lock %d %s", thd_sql_command(thd), lock_type, share->table_name);
    // debug trace, deliberately compiled out
    if (0)
        TOKUDB_HANDLER_TRACE("q %s", thd->query());
    int error = 0;
    // lazily create this connection's per-hton transaction bookkeeping
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    if (!trx) {
        error = create_tokudb_trx_data_instance(&trx);
        if (error) { goto cleanup; }
        thd_set_ha_data(thd, tokudb_hton, trx);
    }
    /*
      note that trx->stmt may have been already initialized as start_stmt()
      is called for *each table* not for each storage engine,
      and there could be many bdb tables referenced in the query
    */
    if (!trx->stmt) {
        error = create_txn(thd, trx);
        if (error) {
            goto cleanup;
        }
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_HANDLER_TRACE("%p %p %p %p %u", trx->all, trx->stmt, trx->sp_level, trx->sub_sp_level, trx->tokudb_lock_count);
        }
    }
    else {
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_HANDLER_TRACE("trx->stmt %p already existed", trx->stmt);
        }
    }
    // expose rows inserted-so-far by this locked table to info()/stats
    if (added_rows > deleted_rows) {
        share->rows_from_locked_table = added_rows - deleted_rows;
    }
    transaction = trx->sub_sp_level;
    // register for statement-level commit/rollback
    trans_register_ha(thd, false, tokudb_hton);
cleanup:
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Decide whether a cursor for this statement needs DB_SERIALIZABLE
// semantics (locking read) or can use a plain/snapshot read (returns 0).
// The decision tree mirrors InnoDB's.
//
uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd) {
    const uint sql_command = thd_sql_command(thd);
    const bool in_lock_tables = thd_in_lock_tables(thd);
    //
    // following InnoDB's lead and having checksum command use a snapshot read if told
    //
    if (sql_command == SQLCOM_CHECKSUM) {
        return 0;
    }
    // candidates for a locking read: reads under LOCK TABLES, any
    // non-SELECT statement, and SELECT ... FOR UPDATE
    const bool locking_read_candidate =
        (in_lock_tables && (lock_type == TL_READ || lock_type == TL_READ_HIGH_PRIORITY)) ||
        sql_command != SQLCOM_SELECT ||
        (sql_command == SQLCOM_SELECT && lock_type >= TL_WRITE_ALLOW_WRITE); // select for update
    if (!locking_read_candidate) {
        return 0;
    }
    const ulong tx_isolation = thd_tx_isolation(thd);
    // pattern matched from InnoDB: under READ COMMITTED / READ UNCOMMITTED
    // the reading side of these statements may skip serializable locking
    const bool relaxed_read =
        (tx_isolation == ISO_READ_COMMITTED || tx_isolation == ISO_READ_UNCOMMITTED) &&
        (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) &&
        (sql_command == SQLCOM_INSERT_SELECT ||
         sql_command == SQLCOM_REPLACE_SELECT ||
         sql_command == SQLCOM_UPDATE ||
         sql_command == SQLCOM_CREATE_TABLE);
    return relaxed_read ? 0 : DB_SERIALIZABLE;
}
/*
  The idea with handler::store_lock() is the following:

  The statement decides which locks we should need for the table:
  for updates/deletes/inserts we get WRITE locks, for SELECT... we get
  read locks.

  Before adding the lock into the table lock handler (see thr_lock.c)
  mysqld calls store_lock with the requested locks. store_lock can
  modify a write lock to a read lock (or some other lock), ignore the
  lock (if we don't want to use MySQL table locks at all) or add locks
  for many tables (like we do when we are using a MERGE handler).

  TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
  signals that we are doing WRITES, but we are still allowing other
  readers and writers).

  When releasing locks, store_lock() is also called. In this case one
  usually doesn't have to do anything.

  In some exceptional cases MySQL may send a request for a TL_IGNORE;
  This means that we are requesting the same lock as last time and this
  should also be ignored. (This may happen when someone does a flush
  table when we have opened a part of the tables, in which case mysqld
  closes and reopens the tables and tries to get the same locks as last
  time). In the future we will probably try to remove this.
*/
THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type) {
    TOKUDB_HANDLER_DBUG_ENTER("lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
    if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
        TOKUDB_HANDLER_TRACE("lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
    }
    if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
        enum_sql_command sql_command = (enum_sql_command) thd_sql_command(thd);
        if (!thd->in_lock_tables) {
            if (sql_command == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
                // hot indexing
                // only downgrade if no index build is already in flight
                rw_rdlock(&share->num_DBs_lock);
                if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) {
                    lock_type = TL_WRITE_ALLOW_WRITE;
                }
                rw_unlock(&share->num_DBs_lock);
            } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
                        sql_command != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
                // allow concurrent writes
                lock_type = TL_WRITE_ALLOW_WRITE;
            } else if (sql_command == SQLCOM_OPTIMIZE && lock_type == TL_READ_NO_INSERT) {
                // hot optimize table
                lock_type = TL_READ;
            }
        }
        lock.type = lock_type;
    }
    *to++ = &lock;
    if (tokudb_debug & TOKUDB_DEBUG_LOCK)
        TOKUDB_HANDLER_TRACE("lock_type=%d", lock_type);
    DBUG_RETURN(to);
}
// Fetch the compression method currently configured on a dictionary.
// The underlying call is expected to always succeed.
static toku_compression_method get_compression_method(DB *file) {
    enum toku_compression_method result;
    int rc = file->get_compression_method(file, &result);
    assert(rc == 0);
    return result;
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
// Report the MySQL row format corresponding to the primary dictionary's
// on-disk compression method.
enum row_type ha_tokudb::get_row_type(void) const {
    return toku_compression_method_to_row_type(get_compression_method(share->file));
}
#endif
//
// Create and open one dictionary (fractal tree file) named table_name,
// configure its page sizes / compression, and install row_descriptor as
// its comparison descriptor — all inside txn. The handle is closed again
// before returning; callers re-open it later.
// Returns 0 on success, error otherwise (my_errno set on db_create failure).
//
static int create_sub_table(
    const char *table_name,
    DBT* row_descriptor,
    DB_TXN* txn,
    uint32_t block_size,
    uint32_t read_block_size,
    toku_compression_method compression_method,
    bool is_hot_index
    )
{
    TOKUDB_DBUG_ENTER("");
    int error;
    DB *file = NULL;
    uint32_t create_flags;
    error = db_create(&file, db_env, 0);
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when creating table", error));
        my_errno = error;
        goto exit;
    }
    // page sizes and compression must be configured before open()
    if (block_size != 0) {
        error = file->set_pagesize(file, block_size);
        if (error != 0) {
            DBUG_PRINT("error", ("Got error: %d when setting block size %u for table '%s'", error, block_size, table_name));
            goto exit;
        }
    }
    if (read_block_size != 0) {
        error = file->set_readpagesize(file, read_block_size);
        if (error != 0) {
            DBUG_PRINT("error", ("Got error: %d when setting read block size %u for table '%s'", error, read_block_size, table_name));
            goto exit;
        }
    }
    error = file->set_compression_method(file, compression_method);
    if (error != 0) {
        DBUG_PRINT("error", ("Got error: %d when setting compression type %u for table '%s'", error, compression_method, table_name));
        goto exit;
    }
    // DB_EXCL: fail if the dictionary already exists
    create_flags = DB_THREAD | DB_CREATE | DB_EXCL | (is_hot_index ? DB_IS_HOT_INDEX : 0);
    error = file->open(file, txn, table_name, NULL, DB_BTREE, create_flags, my_umask);
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when opening table '%s'", error, table_name));
        goto exit;
    }
    error = file->change_descriptor(file, txn, row_descriptor, (is_hot_index ? DB_IS_HOT_INDEX | DB_UPDATE_CMP_DESCRIPTOR : DB_UPDATE_CMP_DESCRIPTOR));
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when setting row descriptor for table '%s'", error, table_name));
        goto exit;
    }
    error = 0;
exit:
    if (file) {
        // close the handle whether or not creation succeeded
        int r = file->close(file, 0);
        assert(r==0);
    }
    TOKUDB_DBUG_RETURN(error);
}
//
// Fill in table-level defaults for SHOW CREATE TABLE / ALTER: the current
// auto-increment counter and (optionally) the row format implied by the
// dictionary's compression method.
//
void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
    if (share->has_auto_inc) {
        // refresh stats.auto_increment_value before reporting it
        info(HA_STATUS_AUTO);
        if (!(create_info->used_fields & HA_CREATE_USED_AUTO) ||
            create_info->auto_increment_value < stats.auto_increment_value) {
            create_info->auto_increment_value = stats.auto_increment_value;
        }
    }
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
    if (!(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
        // show create table asks us to update this create_info, this makes it
        // so we'll always show what compression type we're using
        create_info->row_type = get_row_type();
        if (create_info->row_type == ROW_TYPE_TOKU_ZLIB && THDVAR(ha_thd(), hide_default_row_format) != 0) {
            create_info->row_type = ROW_TYPE_DEFAULT;
        }
    }
#endif
}
//
// removes key name from status.tokudb.
// needed for when we are dropping indexes, so that
// during drop table, we do not attempt to remove already dropped
// indexes because we did not keep status.tokudb in sync with list of indexes.
//
int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn) {
    uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
    // metadata entries are keyed by a HA_METADATA_KEY tag followed by the
    // NUL-terminated index name
    HA_METADATA_KEY md_key = hatoku_key_name;
    memcpy(status_key_info, &md_key, sizeof(HA_METADATA_KEY));
    memcpy(
        status_key_info + sizeof(HA_METADATA_KEY),
        key_name,
        strlen(key_name) + 1
        );
    return remove_metadata(
        status_block,
        status_key_info,
        sizeof(HA_METADATA_KEY) + strlen(key_name) + 1,
        txn
        );
}
//
// writes the key name in status.tokudb, so that we may later delete or rename
// the dictionary associated with key_name
//
int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn) {
    uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
    // metadata entries are keyed by a HA_METADATA_KEY tag followed by the
    // NUL-terminated index name; the value stored is empty
    HA_METADATA_KEY md_key = hatoku_key_name;
    memcpy(status_key_info, &md_key, sizeof(HA_METADATA_KEY));
    memcpy(
        status_key_info + sizeof(HA_METADATA_KEY),
        key_name,
        strlen(key_name) + 1
        );
    return write_metadata(
        status_block,
        status_key_info,
        sizeof(HA_METADATA_KEY) + strlen(key_name) + 1,
        NULL,
        0,
        txn
        );
}
//
// some tracing moved out of ha_tokudb::create, because ::create was getting cluttered
//
void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
    // Dump the field and key layout of the table being created, but only
    // when TOKUDB_DEBUG_OPEN tracing is enabled.
    if (!(tokudb_debug & TOKUDB_DEBUG_OPEN))
        return;
    for (uint i = 0; i < form->s->fields; i++) {
        Field *field = form->s->field[i];
        TOKUDB_HANDLER_TRACE("field:%d:%s:type=%d:flags=%x", i, field->field_name, field->type(), field->flags);
    }
    for (uint i = 0; i < form->s->keys; i++) {
        KEY *key = &form->s->key_info[i];
        TOKUDB_HANDLER_TRACE("key:%d:%s:%d", i, key->name, get_key_parts(key));
        for (uint p = 0; p < get_key_parts(key); p++) {
            KEY_PART_INFO *key_part = &key->key_part[p];
            Field *field = key_part->field;
            TOKUDB_HANDLER_TRACE("key:%d:%d:length=%d:%s:type=%d:flags=%x",
                                 i, p, key_part->length, field->field_name, field->type(), field->flags);
        }
    }
}
// Upper bound on the size of a row descriptor for this table: the key
// comparison part plus the secondary-key and clustering-value packing parts.
static uint32_t get_max_desc_size(KEY_AND_COL_INFO* kc_info, TABLE* form) {
    uint32_t bound = 2*(form->s->fields * 6)+10;                        // key comparison descriptor
    bound += get_max_secondary_key_pack_desc_size(kc_info);             // secondary key part
    bound += get_max_clustering_val_pack_desc_size(form->s);            // clustering value part
    return bound;
}
//
// Serialize the full row descriptor of a secondary index into buf: three
// variable-length sections written back to back. Returns the number of
// bytes written.
//
static uint32_t create_secondary_key_descriptor(
    uchar* buf,
    KEY* key_info,
    KEY* prim_key,
    uint hpk,
    TABLE* form,
    uint primary_key,
    uint32_t keynr,
    KEY_AND_COL_INFO* kc_info
    )
{
    uchar* pos = buf;
    // 1) key comparison descriptor
    pos += create_toku_key_descriptor(pos, false, key_info, hpk, prim_key);
    // 2) secondary key packing descriptor
    pos += create_toku_secondary_key_pack_descriptor(
        pos, hpk, primary_key, form->s, form, kc_info, key_info, prim_key);
    // 3) clustering value packing descriptor
    pos += create_toku_clustering_val_pack_descriptor(
        pos, primary_key, form->s, kc_info, keynr, key_is_clustering(key_info));
    return pos - buf;
}
//
// Create the dictionary for one secondary index (key description key_info)
// of table 'name', all inside txn. The dictionary is named
// "<table path>/key-<index name>" and its row descriptor is built from
// kc_info / the table definition.
// Returns 0 on success, ENOMEM or a storage error otherwise.
//
int ha_tokudb::create_secondary_dictionary(
    const char* name, TABLE* form,
    KEY* key_info,
    DB_TXN* txn,
    KEY_AND_COL_INFO* kc_info,
    uint32_t keynr,
    bool is_hot_index,
    toku_compression_method compression_method
    )
{
    int error;
    DBT row_descriptor;
    uchar* row_desc_buff = NULL;
    char* newname = NULL;
    KEY* prim_key = NULL;
    char dict_name[MAX_DICT_NAME_LEN];
    uint32_t max_row_desc_buff_size;
    // tables without an explicit primary key use the hidden one
    uint hpk= (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
    uint32_t block_size;
    uint32_t read_block_size;
    THD* thd = ha_thd();
    memset(&row_descriptor, 0, sizeof(row_descriptor));
    max_row_desc_buff_size = get_max_desc_size(kc_info,form);
    row_desc_buff = (uchar *)tokudb_my_malloc(max_row_desc_buff_size, MYF(MY_WME));
    if (row_desc_buff == NULL){ error = ENOMEM; goto cleanup;}
    newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
    if (newname == NULL){ error = ENOMEM; goto cleanup;}
    // Dictionary is named "key-<index name>". Use snprintf so an overlong
    // index name cannot overrun the fixed-size buffer (it is truncated
    // instead; in-range names are formatted identically to sprintf).
    snprintf(dict_name, sizeof(dict_name), "key-%s", key_info->name);
    make_name(newname, name, dict_name);
    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
    //
    // setup the row descriptor
    //
    row_descriptor.data = row_desc_buff;
    //
    // save data necessary for key comparisons
    //
    row_descriptor.size = create_secondary_key_descriptor(
        row_desc_buff,
        key_info,
        prim_key,
        hpk,
        form,
        primary_key,
        keynr,
        kc_info
        );
    assert(row_descriptor.size <= max_row_desc_buff_size);
    block_size = get_tokudb_block_size(thd);
    read_block_size = get_tokudb_read_block_size(thd);
    error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, compression_method, is_hot_index);
cleanup:
    tokudb_my_free(newname);
    tokudb_my_free(row_desc_buff);
    return error;
}
//
// Serialize the row descriptor of the main (primary) dictionary into buf:
// key comparison section, main-key packing section, and clustering value
// section, written back to back. Returns the number of bytes written.
//
static uint32_t create_main_key_descriptor(
    uchar* buf,
    KEY* prim_key,
    uint hpk,
    uint primary_key,
    TABLE* form,
    KEY_AND_COL_INFO* kc_info
    )
{
    uchar* pos = buf;
    // 1) key comparison descriptor
    pos += create_toku_key_descriptor(pos, hpk, prim_key, false, NULL);
    // 2) main key packing descriptor
    pos += create_toku_main_key_pack_descriptor(pos);
    // 3) clustering value packing descriptor (never clustering for the main dict)
    pos += create_toku_clustering_val_pack_descriptor(
        pos, primary_key, form->s, kc_info, primary_key, false);
    return pos - buf;
}
//
// create and close the main dictionary with name of "name" using table form,
// all within transaction txn. The dictionary holds the real rows and is
// stored as "<table path>/main".
// Returns 0 on success, ENOMEM or a storage error otherwise.
//
int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method) {
    int error;
    DBT row_descriptor;
    uchar* row_desc_buff = NULL;
    char* newname = NULL;
    KEY* prim_key = NULL;
    uint32_t max_row_desc_buff_size;
    // tables without an explicit primary key use the hidden one
    uint hpk= (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
    uint32_t block_size;
    uint32_t read_block_size;
    THD* thd = ha_thd();
    memset(&row_descriptor, 0, sizeof(row_descriptor));
    max_row_desc_buff_size = get_max_desc_size(kc_info, form);
    row_desc_buff = (uchar *)tokudb_my_malloc(max_row_desc_buff_size, MYF(MY_WME));
    if (row_desc_buff == NULL){ error = ENOMEM; goto cleanup;}
    newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
    if (newname == NULL){ error = ENOMEM; goto cleanup;}
    make_name(newname, name, "main");
    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
    //
    // setup the row descriptor
    //
    row_descriptor.data = row_desc_buff;
    //
    // save data necessary for key comparisons
    //
    row_descriptor.size = create_main_key_descriptor(
        row_desc_buff,
        prim_key,
        hpk,
        primary_key,
        form,
        kc_info
        );
    assert(row_descriptor.size <= max_row_desc_buff_size);
    block_size = get_tokudb_block_size(thd);
    read_block_size = get_tokudb_read_block_size(thd);
    /* Create the main table that will hold the real rows */
    error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, compression_method, false);
cleanup:
    tokudb_my_free(newname);
    tokudb_my_free(row_desc_buff);
    return error;
}
//
// Creates a new table
// Parameters:
// [in] name - table name
// [in] form - info on table, columns and indexes
// [in] create_info - more info on table, CURRENTLY UNUSED
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_info) {
TOKUDB_HANDLER_DBUG_ENTER("%s", name);
int error;
DB *status_block = NULL;
uint version;
uint capabilities;
DB_TXN* txn = NULL;
bool do_commit = false;
char* newname = NULL;
KEY_AND_COL_INFO kc_info;
tokudb_trx_data *trx = NULL;
THD* thd = ha_thd();
memset(&kc_info, 0, sizeof(kc_info));
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100999
// TokuDB does not support discover_table_names() and writes no files
// in the database directory, so automatic filename-based
// discover_table_names() doesn't work either. So, it must force .frm
// file to disk.
form->s->write_frm_image();
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
const srv_row_format_t row_format = (srv_row_format_t) form->s->option_struct->row_format;
#else
const srv_row_format_t row_format = (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
? row_type_to_row_format(create_info->row_type)
: get_row_format(thd);
#endif
const toku_compression_method compression_method = row_format_to_toku_compression_method(row_format);
bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
if (create_from_engine) {
// table already exists, nothing to do
error = 0;
goto cleanup;
}
// validate the fields in the table. If the table has fields
// we do not support that came from an old version of MySQL,
// gracefully return an error
for (uint32_t i = 0; i < form->s->fields; i++) {
Field* field = table_share->field[i];
if (!field_valid_for_tokudb_table(field)) {
sql_print_error("Table %s has an invalid field %s, that was created "
"with an old version of MySQL. This field is no longer supported. "
"This is probably due to an alter table engine=TokuDB. To load this "
"table, do a dump and load",
name,
field->field_name
);
error = HA_ERR_UNSUPPORTED;
goto cleanup;
}
}
newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
if (newname == NULL){ error = ENOMEM; goto cleanup;}
trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
if (trx && trx->sub_sp_level && thd_sql_command(thd) == SQLCOM_CREATE_TABLE) {
txn = trx->sub_sp_level;
}
else {
do_commit = true;
error = txn_begin(db_env, 0, &txn, 0, thd);
if (error) { goto cleanup; }
}
primary_key = form->s->primary_key;
hidden_primary_key = (primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
if (hidden_primary_key) {
primary_key = form->s->keys;
}
/* do some tracing */
trace_create_table_info(name,form);
/* Create status.tokudb and save relevant metadata */
make_name(newname, name, "status");
error = tokudb::create_status(db_env, &status_block, newname, txn);
if (error) { goto cleanup; }
version = HA_TOKU_VERSION;
error = write_to_status(status_block, hatoku_new_version,&version,sizeof(version), txn);
if (error) { goto cleanup; }
capabilities = HA_TOKU_CAP;
error = write_to_status(status_block, hatoku_capabilities,&capabilities,sizeof(capabilities), txn);
if (error) { goto cleanup; }
error = write_auto_inc_create(status_block, create_info->auto_increment_value, txn);
if (error) { goto cleanup; }
#if WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || form->part_info == NULL) {
error = write_frm_data(status_block, txn, form->s->path.str);
if (error) { goto cleanup; }
}
#else
error = write_frm_data(status_block, txn, form->s->path.str);
if (error) { goto cleanup; }
#endif
error = allocate_key_and_col_info(form->s, &kc_info);
if (error) { goto cleanup; }
error = initialize_key_and_col_info(
form->s,
form,
&kc_info,
hidden_primary_key,
primary_key
);
if (error) { goto cleanup; }
error = create_main_dictionary(name, form, txn, &kc_info, compression_method);
if (error) {
goto cleanup;
}
for (uint i = 0; i < form->s->keys; i++) {
if (i != primary_key) {
error = create_secondary_dictionary(name, form, &form->key_info[i], txn, &kc_info, i, false, compression_method);
if (error) {
goto cleanup;
}
error = write_key_name_to_status(status_block, form->s->key_info[i].name, txn);
if (error) { goto cleanup; }
}
}
error = 0;
cleanup:
if (status_block != NULL) {
int r = tokudb::close_status(&status_block);
assert(r==0);
}
free_key_and_col_info(&kc_info);
if (do_commit && txn) {
if (error) {
abort_txn(txn);
}
else {
commit_txn(txn,0);
}
}
tokudb_my_free(newname);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
// DISCARD/IMPORT TABLESPACE is not supported by TokuDB. Report
// HA_ERR_WRONG_COMMAND via my_errno, which callers inspect.
int ha_tokudb::discard_or_import_tablespace(my_bool discard) {
    my_errno = HA_ERR_WRONG_COMMAND;
    return my_errno;
}
//
// deletes from_name or renames from_name to to_name, all using transaction txn.
// is_delete specifies which we are doing
// is_key specifies if it is a secondary index (and hence a "key-" needs to be prepended) or
// if it is not a secondary index
//
// Deletes from_name or renames from_name to to_name within transaction txn.
// is_delete selects the operation; is_key marks a secondary-index
// dictionary, whose stored name carries a "key-" prefix.
int ha_tokudb::delete_or_rename_dictionary( const char* from_name, const char* to_name, const char* secondary_name, bool is_key, DB_TXN* txn, bool is_delete) {
    int error;
    char dict_name[MAX_DICT_NAME_LEN];
    char* new_from_name = NULL;
    char* new_to_name = NULL;
    assert(txn);

    // Compute the dictionary suffix once: "key-<name>" for secondary
    // indexes, the raw name (e.g. "main", "status") otherwise.
    const char* suffix = secondary_name;
    if (is_key) {
        sprintf(dict_name, "key-%s", secondary_name);
        suffix = dict_name;
    }

    new_from_name = (char *)tokudb_my_malloc(
        get_max_dict_name_path_length(from_name),
        MYF(MY_WME)
        );
    if (new_from_name == NULL) {
        error = ENOMEM;
        goto cleanup;
    }
    make_name(new_from_name, from_name, suffix);

    if (is_delete) {
        error = db_env->dbremove(db_env, txn, new_from_name, NULL, 0);
    }
    else {
        // A rename additionally needs the destination path.
        assert(to_name);
        new_to_name = (char *)tokudb_my_malloc(
            get_max_dict_name_path_length(to_name),
            MYF(MY_WME)
            );
        if (new_to_name == NULL) {
            error = ENOMEM;
            goto cleanup;
        }
        make_name(new_to_name, to_name, suffix);
        error = db_env->dbrename(db_env, txn, new_from_name, NULL, new_to_name, 0);
    }

cleanup:
    tokudb_my_free(new_from_name);
    tokudb_my_free(new_to_name);
    return error;
}
//
// deletes or renames a table. if is_delete is true, then we delete, and to_name can be NULL
// if is_delete is false, then to_name must be non-NULL, as we are renaming the table.
//
// Deletes (is_delete == true, to_name may be NULL) or renames
// (is_delete == false, to_name required) every dictionary belonging to a
// table: each secondary index listed in status.tokudb, then main.tokudb,
// then status.tokudb itself -- all inside one transaction.
int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_name, bool is_delete) {
    THD *thd = ha_thd();
    int error;
    DB* status_db = NULL;
    DBC* status_cursor = NULL;
    DB_TXN* txn = NULL;
    DBT curr_key;
    DBT curr_val;
    memset(&curr_key, 0, sizeof(curr_key));
    memset(&curr_val, 0, sizeof(curr_val));
    // When running inside CREATE TABLE (e.g. CREATE ... SELECT), nest under
    // the statement's sub-transaction so the whole statement stays atomic.
    DB_TXN *parent_txn = NULL;
    tokudb_trx_data *trx = NULL;
    trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
    if (thd_sql_command(ha_thd()) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
        parent_txn = trx->sub_sp_level;
    }
    error = txn_begin(db_env, parent_txn, &txn, 0, thd);
    if (error) { goto cleanup; }
    //
    // open status db,
    // create cursor,
    // for each name read out of there, create a db and delete or rename it
    //
    error = open_status_dictionary(&status_db, from_name, txn);
    if (error) { goto cleanup; }
    error = status_db->cursor(status_db, txn, &status_cursor, 0);
    if (error) { goto cleanup; }
    status_cursor->c_set_check_interrupt_callback(status_cursor, tokudb_killed_thd_callback, thd);
    while (error != DB_NOTFOUND) {
        error = status_cursor->c_get(status_cursor, &curr_key, &curr_val, DB_NEXT);
        if (error && error != DB_NOTFOUND) {
            error = map_to_handler_error(error);
            goto cleanup;
        }
        if (error == DB_NOTFOUND) {
            break;
        }
        // Only hatoku_key_name rows name secondary-index dictionaries;
        // skip all other status metadata rows.
        HA_METADATA_KEY mk = *(HA_METADATA_KEY *)curr_key.data;
        if (mk != hatoku_key_name) {
            continue;
        }
        // The index name is stored immediately after the metadata-key tag.
        error = delete_or_rename_dictionary(from_name, to_name, (char *)((char *)curr_key.data + sizeof(HA_METADATA_KEY)), true, txn, is_delete);
        if (error) { goto cleanup; }
    }
    //
    // delete or rename main.tokudb
    //
    error = delete_or_rename_dictionary(from_name, to_name, "main", false, txn, is_delete);
    if (error) { goto cleanup; }
    error = status_cursor->c_close(status_cursor);
    assert(error==0);
    status_cursor = NULL;
    // NOTE(review): dead check -- the assert above guarantees error == 0 here.
    if (error) { goto cleanup; }
    error = status_db->close(status_db, 0);
    assert(error == 0);
    status_db = NULL;
    //
    // delete or rename status.tokudb
    //
    error = delete_or_rename_dictionary(from_name, to_name, "status", false, txn, is_delete);
    if (error) { goto cleanup; }
    // NOTE(review): error is necessarily 0 here, so this only clears my_errno.
    my_errno = error;
cleanup:
    if (status_cursor) {
        int r = status_cursor->c_close(status_cursor);
        assert(r==0);
    }
    if (status_db) {
        int r = status_db->close(status_db, 0);
        assert(r==0);
    }
    // Commit only if every step succeeded; otherwise roll the whole
    // delete/rename back.
    if (txn) {
        if (error) {
            abort_txn(txn);
        }
        else {
            commit_txn(txn, 0);
        }
    }
    return error;
}
//
// Drops table
// Parameters:
// [in] name - name of table to be deleted
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::delete_table(const char *name) {
TOKUDB_HANDLER_DBUG_ENTER("%s", name);
int error;
error = delete_or_rename_table(name, NULL, true);
if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
sql_print_error("Could not delete table %s because \
another transaction has accessed the table. \
To drop the table, make sure no transactions touch the table.", name);
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// renames table from "from" to "to"
// Parameters:
// [in] name - old name of table
// [in] to - new name of table
// Returns:
// 0 on success
// error otherwise
//
int ha_tokudb::rename_table(const char *from, const char *to) {
TOKUDB_HANDLER_DBUG_ENTER("%s %s", from, to);
int error;
error = delete_or_rename_table(from, to, false);
if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
sql_print_error("Could not rename table from %s to %s because \
another transaction has accessed the table. \
To rename the table, make sure no transactions touch the table.", from, to);
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
/*
Returns estimate on number of seeks it will take to read through the table
This is to be comparable to the number returned by records_in_range so
that we can decide if we should scan the table or use keys.
*/
/// QQQ why divide by 3
// Estimated cost (in "seeks") of a full table scan, comparable with
// records_in_range() so the optimizer can choose scan vs. index.
double ha_tokudb::scan_time() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    // Heuristic: roughly one seek per three rows.
    double cost = (double)stats.records / 3;
    if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
        TOKUDB_HANDLER_TRACE("return %" PRIu64 " %f", (uint64_t) stats.records, cost);
    }
    DBUG_RETURN(cost);
}
// Estimated cost of reading `rows` rows over `ranges` ranges using only the
// keys of index `index` (no row lookups). Primary/clustering keys store
// whole rows, so for them this degrades to read_time().
double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
{
    TOKUDB_HANDLER_DBUG_ENTER("%u %u %" PRIu64, index, ranges, (uint64_t) rows);
    double ret_val;
    if (index == primary_key || key_is_clustering(&table->key_info[index])) {
        ret_val = read_time(index, ranges, rows);
        DBUG_RETURN(ret_val);
    }
    /*
      It is assumed that we will read through the whole key range and that all
      key blocks are half full (normally things are much better). It is also
      assumed that each time we read the next key from the index, the handler
      performs a random seek, thus the cost is proportional to the number of
      blocks read. This model does not take into account clustered indexes -
      engines that support that (e.g. InnoDB) may want to overwrite this method.
    */
    double keys_per_block= (stats.block_size/2.0/
                        (table->key_info[index].key_length +
                         ref_length) + 1);
    // Number of half-full blocks needed to hold `rows` keys, rounded up.
    ret_val = (rows + keys_per_block - 1)/ keys_per_block;
    if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
        TOKUDB_HANDLER_TRACE("return %f", ret_val);
    }
    DBUG_RETURN(ret_val);
}
//
// Calculate the time it takes to read a set of ranges through an index
// This enables us to optimize reads for clustered indexes.
// Implementation pulled from InnoDB
// Parameters:
// index - index to use
// ranges - number of ranges
// rows - estimated number of rows in the range
// Returns:
// estimated time measured in disk seeks
//
double ha_tokudb::read_time(
    uint index,
    uint ranges,
    ha_rows rows
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("%u %u %" PRIu64, index, ranges, (uint64_t) rows);
    double total_scan;
    double ret_val;
    bool is_primary = (index == primary_key);
    bool is_clustering;
    //
    // in case for hidden primary key, this is called
    //
    if (index >= table_share->keys) {
        ret_val = handler::read_time(index, ranges, rows);
        goto cleanup;
    }
    is_clustering = key_is_clustering(&table->key_info[index]);
    //
    // if it is not the primary key, and it is not a clustering key, then return handler::read_time
    //
    if (!(is_primary || is_clustering)) {
        ret_val = handler::read_time(index, ranges, rows);
        goto cleanup;
    }
    //
    // for primary key and for clustered keys, return a fraction of scan_time()
    //
    total_scan = scan_time();
    // More rows requested than exist: cap at a full scan. The tiny epsilon
    // makes a clustering key look marginally more expensive than the
    // primary key so the optimizer prefers the primary on ties.
    if (stats.records < rows) {
        ret_val = is_clustering ? total_scan + 0.00001 : total_scan;
        goto cleanup;
    }
    //
    // one disk seek per range plus the proportional scan time of the rows
    //
    ret_val = (ranges + (double) rows / (double) stats.records * total_scan);
    ret_val = is_clustering ? ret_val + 0.00001 : ret_val;
cleanup:
    if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
        TOKUDB_HANDLER_TRACE("return %f", ret_val);
    }
    DBUG_RETURN(ret_val);
}
// Cost of an index-only read of `records` rows: modeled as a single-range
// key read over index `keynr`.
double ha_tokudb::index_only_read_time(uint keynr, double records) {
    TOKUDB_HANDLER_DBUG_ENTER("%u %f", keynr, records);
    double cost = keyread_time(keynr, 1, (ha_rows)records);
    if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
        TOKUDB_HANDLER_TRACE("return %f", cost);
    }
    DBUG_RETURN(cost);
}
//
// Estimates the number of index records in a range. In case of errors, return
// HA_TOKUDB_RANGE_COUNT instead of HA_POS_ERROR. This was behavior
// when we got the handlerton from MySQL.
// Parameters:
// keynr -index to use
// [in] start_key - low end of the range
// [in] end_key - high end of the range
// Returns:
// 0 - There are no matching keys in the given range
// number > 0 - There are approximately number matching rows in the range
// HA_POS_ERROR - Something is wrong with the index tree
//
ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* end_key) {
    TOKUDB_HANDLER_DBUG_ENTER("%d %p %p", keynr, start_key, end_key);
    DBT *pleft_key, *pright_key;
    DBT left_key, right_key;
    ha_rows ret_val = HA_TOKUDB_RANGE_COUNT;
    DB *kfile = share->key_file[keynr];
    uint64_t rows = 0;
    int error;
    // get start_rows and end_rows values so that we can estimate range
    // when calling key_range64, the only value we can trust is the value for less
    // The reason is that the key being passed in may be a prefix of keys in the DB
    // As a result, equal may be 0 and greater may actually be equal+greater
    // So, we call key_range64 on the key, and the key that is after it.
    //
    // Unbounded on both sides: fall back to an estimate of the whole index.
    if (!start_key && !end_key) {
        error = estimate_num_rows(kfile, &rows, transaction);
        if (error) {
            ret_val = HA_TOKUDB_RANGE_COUNT;
            goto cleanup;
        }
        ret_val = (rows <= 1) ? 1 : rows;
        goto cleanup;
    }
    // Pack the MySQL range endpoints into TokuDB keys; the infinity byte
    // controls whether a prefix bound is treated as inclusive or exclusive.
    if (start_key) {
        uchar inf_byte = (start_key->flag == HA_READ_KEY_EXACT) ? COL_NEG_INF : COL_POS_INF;
        pack_key(&left_key, keynr, key_buff, start_key->key, start_key->length, inf_byte);
        pleft_key = &left_key;
    } else {
        pleft_key = NULL;
    }
    if (end_key) {
        uchar inf_byte = (end_key->flag == HA_READ_BEFORE_KEY) ? COL_NEG_INF : COL_POS_INF;
        pack_key(&right_key, keynr, key_buff2, end_key->key, end_key->length, inf_byte);
        pright_key = &right_key;
    } else {
        pright_key = NULL;
    }
    // keys_range64 can not handle a degenerate range (left_key > right_key), so we filter here
    if (pleft_key && pright_key && tokudb_cmp_dbt_key(kfile, pleft_key, pright_key) > 0) {
        rows = 0;
    } else {
        uint64_t less, equal1, middle, equal2, greater;
        bool is_exact;
        error = kfile->keys_range64(kfile, transaction, pleft_key, pright_key,
                                    &less, &equal1, &middle, &equal2, &greater, &is_exact);
        if (error) {
            ret_val = HA_TOKUDB_RANGE_COUNT;
            goto cleanup;
        }
        // `middle` is the count strictly between the two endpoints, which is
        // what the optimizer wants for this range.
        rows = middle;
    }
    // MySQL thinks a return value of 0 means there are exactly 0 rows
    // Therefore, always return non-zero so this assumption is not made
    ret_val = (ha_rows) (rows <= 1 ? 1 : rows);
cleanup:
    if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
        TOKUDB_HANDLER_TRACE("return %" PRIu64 " %" PRIu64, (uint64_t) ret_val, rows);
    }
    DBUG_RETURN(ret_val);
}
//
// Initializes the auto-increment data in the local "share" object to the
// greater of two values: what's stored in the metadata or the last inserted
// auto-increment field (if auto-increment field is the first field of a key).
//
void ha_tokudb::init_auto_increment() {
    int error;
    DB_TXN* txn = NULL;
    error = txn_begin(db_env, 0, &txn, 0, ha_thd());
    if (error) {
        // Cannot read the status dictionary; start counting from scratch.
        share->last_auto_increment = 0;
    } else {
        HA_METADATA_KEY key_val;
        DBT key;
        memset(&key, 0, sizeof(key));
        key.data = &key_val;
        key.size = sizeof(key_val);
        DBT value;
        memset(&value, 0, sizeof(value));
        // DB_DBT_USERMEM: the get() below writes directly into the share
        // fields pointed to by value.data.
        value.flags = DB_DBT_USERMEM;
        // Retrieve the initial auto increment value, as specified by create table
        // so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
        // then the value 100 should be stored here
        key_val = hatoku_ai_create_value;
        value.ulen = sizeof(share->auto_inc_create_value);
        value.data = &share->auto_inc_create_value;
        error = share->status_block->get(share->status_block, txn, &key, &value, 0);
        if (error || value.size != sizeof(share->auto_inc_create_value)) {
            share->auto_inc_create_value = 0;
        }
        // Retrieve hatoku_max_ai, which is max value used by auto increment
        // column so far, the max value could have been auto generated (e.g. insert (NULL))
        // or it could have been manually inserted by user (e.g. insert (345))
        key_val = hatoku_max_ai;
        value.ulen = sizeof(share->last_auto_increment);
        value.data = &share->last_auto_increment;
        error = share->status_block->get(share->status_block, txn, &key, &value, 0);
        if (error || value.size != sizeof(share->last_auto_increment)) {
            // No max recorded yet: seed from the CREATE value (minus one,
            // since last_auto_increment is the last value handed out).
            if (share->auto_inc_create_value)
                share->last_auto_increment = share->auto_inc_create_value - 1;
            else
                share->last_auto_increment = 0;
        }
        commit_txn(txn, 0);
    }
    if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
        TOKUDB_HANDLER_TRACE("init auto increment:%lld", share->last_auto_increment);
    }
}
// Reserves a batch of nb_desired_values auto-increment values under the
// share mutex. Returns the first value and the count reserved via the two
// out-parameters (handler API contract).
void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong * first_value, ulonglong * nb_reserved_values) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    ulonglong nr;
    bool over;
    tokudb_pthread_mutex_lock(&share->mutex);
    // If AUTO_INCREMENT=<n> from CREATE TABLE has not been reached yet,
    // jump straight to it.
    if (share->auto_inc_create_value > share->last_auto_increment) {
        nr = share->auto_inc_create_value;
        over = false;
        share->last_auto_increment = share->auto_inc_create_value;
    }
    else {
        nr = share->last_auto_increment + increment;
        // Unsigned wrap-around means the counter is exhausted.
        over = nr < share->last_auto_increment;
        if (over)
            nr = ULONGLONG_MAX;
    }
    if (!over) {
        share->last_auto_increment = nr + (nb_desired_values - 1)*increment;
        if (delay_updating_ai_metadata) {
            // Bulk-load path: persist the new max lazily, once, at the end.
            ai_metadata_update_required = true;
        }
        else {
            update_max_auto_inc(share->status_block, share->last_auto_increment);
        }
    }
    if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
        // NOTE(review): trace prints nb_desired_values twice (harmless here,
        // since nb_reserved_values is set to the same value below) and uses
        // %lld for ulonglong -- confirm if this trace output ever matters.
        TOKUDB_HANDLER_TRACE("get_auto_increment(%lld,%lld,%lld):got:%lld:%lld",
                             offset, increment, nb_desired_values, nr, nb_desired_values);
    }
    *first_value = nr;
    *nb_reserved_values = nb_desired_values;
    tokudb_pthread_mutex_unlock(&share->mutex);
    TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
// Whether OPTIMIZE TABLE must block concurrent access; TokuDB reports no.
bool ha_tokudb::is_optimize_blocking() {
    return false;
}
// Whether auto-increment values are handed out strictly one at a time;
// TokuDB reserves batches, so this reports no.
bool ha_tokudb::is_auto_inc_singleton(){
    return false;
}
// Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2
// With a transaction, drops dictionaries associated with indexes in key_num
//
//
// Adds indexes to the table. Takes the array of KEY passed in key_info, and creates
// DB's that will go at the end of share->key_file. THE IMPLICIT ASSUMPTION HERE is
// that the table will be modified and that these added keys will be appended to the end
// of the array table->key_info
// Parameters:
// [in] table_arg - table that is being modified, seems to be identical to this->table
// [in] key_info - array of KEY's to be added
// num_of_keys - number of keys to be added, number of elements in key_info
// Returns:
// 0 on success, error otherwise
//
// Builds num_of_keys new secondary indexes and appends their DBs at the end
// of share->key_file. Uses a background "hot" indexer when the table lock
// permits concurrent writes and exactly one non-unique key is added;
// otherwise scans the primary dictionary and feeds a bulk loader.
// Out-params report whether share->num_DBs was bumped (inc_num_DBs) and
// whether any dictionaries were created/opened (modified_DBs) so the caller
// can undo via restore_add_index on failure.
int ha_tokudb::tokudb_add_index(
    TABLE *table_arg,
    KEY *key_info,
    uint num_of_keys,
    DB_TXN* txn,
    bool* inc_num_DBs,
    bool* modified_DBs
    )
{
    TOKUDB_HANDLER_DBUG_ENTER("");
    assert(txn);
    int error;
    uint curr_index = 0;
    DBC* tmp_cursor = NULL;
    int cursor_ret_val = 0;
    DBT curr_pk_key, curr_pk_val;
    THD* thd = ha_thd();
    DB_LOADER* loader = NULL;
    DB_INDEXER* indexer = NULL;
    bool loader_save_space = get_load_save_space(thd);
    bool use_hot_index = (lock.type == TL_WRITE_ALLOW_WRITE);
    uint32_t loader_flags = loader_save_space ? LOADER_COMPRESS_INTERMEDIATES : 0;
    uint32_t indexer_flags = 0;
    uint32_t mult_db_flags[MAX_KEY + 1] = {0};
    uint32_t mult_put_flags[MAX_KEY + 1];
    uint32_t mult_dbt_flags[MAX_KEY + 1];
    bool creating_hot_index = false;
    struct loader_context lc;
    memset(&lc, 0, sizeof lc);
    lc.thd = thd;
    lc.ha = this;
    loader_error = 0;
    bool rw_lock_taken = false;
    *inc_num_DBs = false;
    *modified_DBs = false;
    invalidate_bulk_fetch();
    unpack_entire_row = true; // for bulk fetching rows
    for (uint32_t i = 0; i < MAX_KEY+1; i++) {
        mult_put_flags[i] = 0;
        mult_dbt_flags[i] = DB_DBT_REALLOC;
    }
    //
    // number of DB files we have open currently, before add_index is executed
    //
    uint curr_num_DBs = table_arg->s->keys + tokudb_test(hidden_primary_key);
    //
    // get the row type to use for the indexes we're adding
    //
    toku_compression_method compression_method = get_compression_method(share->file);
    //
    // status message to be shown in "show process list"
    //
    const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
    char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound.
    ulonglong num_processed = 0; //variable that stores number of elements inserted thus far
    thd_proc_info(thd, "Adding indexes");
    //
    // in unpack_row, MySQL passes a buffer that is this long,
    // so this length should be good enough for us as well
    //
    memset((void *) &curr_pk_key, 0, sizeof(curr_pk_key));
    memset((void *) &curr_pk_val, 0, sizeof(curr_pk_val));
    //
    // The files for secondary tables are derived from the name of keys
    // If we try to add a key with the same name as an already existing key,
    // We can crash. So here we check if any of the keys added has the same
    // name of an existing key, and if so, we fail gracefully
    //
    for (uint i = 0; i < num_of_keys; i++) {
        for (uint j = 0; j < table_arg->s->keys; j++) {
            if (strcmp(key_info[i].name, table_arg->s->key_info[j].name) == 0) {
                error = HA_ERR_WRONG_COMMAND;
                goto cleanup;
            }
        }
    }
    // Take the num_DBs write lock so nobody observes a half-updated
    // share->key_file / share->num_DBs while we append the new DBs.
    rw_wrlock(&share->num_DBs_lock);
    rw_lock_taken = true;
    //
    // open all the DB files and set the appropriate variables in share
    // they go to the end of share->key_file
    //
    creating_hot_index = use_hot_index && num_of_keys == 1 && (key_info[0].flags & HA_NOSAME) == 0;
    if (use_hot_index && (share->num_DBs > curr_num_DBs)) {
        //
        // already have hot index in progress, get out
        //
        error = HA_ERR_INTERNAL_ERROR;
        goto cleanup;
    }
    curr_index = curr_num_DBs;
    *modified_DBs = true;
    for (uint i = 0; i < num_of_keys; i++, curr_index++) {
        if (key_is_clustering(&key_info[i])) {
            // Clustering keys carry row columns, so they need the same
            // column filters/pack info as the primary dictionary.
            set_key_filter(
                &share->kc_info.key_filters[curr_index],
                &key_info[i],
                table_arg,
                false
                );
            if (!hidden_primary_key) {
                set_key_filter(
                    &share->kc_info.key_filters[curr_index],
                    &table_arg->key_info[primary_key],
                    table_arg,
                    false
                    );
            }
            error = initialize_col_pack_info(&share->kc_info,table_arg->s,curr_index);
            if (error) {
                goto cleanup;
            }
        }
        error = create_secondary_dictionary(share->table_name, table_arg, &key_info[i], txn, &share->kc_info, curr_index, creating_hot_index, compression_method);
        if (error) { goto cleanup; }
        error = open_secondary_dictionary(
            &share->key_file[curr_index],
            &key_info[i],
            share->table_name,
            false,
            txn
            );
        if (error) { goto cleanup; }
    }
    if (creating_hot_index) {
        // Hot path: background indexer builds the key while concurrent
        // writes continue; num_DBs must be bumped so writers maintain it.
        share->num_DBs++;
        *inc_num_DBs = true;
        error = db_env->create_indexer(
            db_env,
            txn,
            &indexer,
            share->file,
            num_of_keys,
            &share->key_file[curr_num_DBs],
            mult_db_flags,
            indexer_flags
            );
        if (error) { goto cleanup; }
        error = indexer->set_poll_function(indexer, ai_poll_fun, &lc);
        if (error) { goto cleanup; }
        error = indexer->set_error_callback(indexer, loader_ai_err_fun, &lc);
        if (error) { goto cleanup; }
        rw_unlock(&share->num_DBs_lock);
        rw_lock_taken = false;
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
        // initialize a one phase progress report.
        // incremental reports are done in the indexer's callback function.
        thd_progress_init(thd, 1);
#endif
        error = indexer->build(indexer);
        if (error) { goto cleanup; }
        // close() must run under the num_DBs write lock so it does not race
        // with concurrent writers walking share->key_file.
        rw_wrlock(&share->num_DBs_lock);
        error = indexer->close(indexer);
        rw_unlock(&share->num_DBs_lock);
        if (error) { goto cleanup; }
        indexer = NULL;
    }
    else {
        DBUG_ASSERT(table->mdl_ticket->get_type() >= MDL_SHARED_NO_WRITE);
        rw_unlock(&share->num_DBs_lock);
        rw_lock_taken = false;
        prelocked_right_range_size = 0;
        prelocked_left_range_size = 0;
        struct smart_dbt_bf_info bf_info;
        bf_info.ha = this;
        // you need the val if you have a clustering index and key_read is not 0;
        bf_info.direction = 1;
        bf_info.thd = ha_thd();
        bf_info.need_val = true;
        bf_info.key_to_compare = NULL;
        error = db_env->create_loader(
            db_env,
            txn,
            &loader,
            NULL, // no src_db needed
            num_of_keys,
            &share->key_file[curr_num_DBs],
            mult_put_flags,
            mult_dbt_flags,
            loader_flags
            );
        if (error) { goto cleanup; }
        error = loader->set_poll_function(loader, loader_poll_fun, &lc);
        if (error) { goto cleanup; }
        error = loader->set_error_callback(loader, loader_ai_err_fun, &lc);
        if (error) { goto cleanup; }
        //
        // scan primary table, create each secondary key, add to each DB
        //
        if ((error = share->file->cursor(share->file, txn, &tmp_cursor, DB_SERIALIZABLE))) {
            tmp_cursor = NULL;             // Safety
            goto cleanup;
        }
        //
        // grab some locks to make this go faster
        // first a global read lock on the main DB, because
        // we intend to scan the entire thing
        //
        error = tmp_cursor->c_set_bounds(
            tmp_cursor,
            share->file->dbt_neg_infty(),
            share->file->dbt_pos_infty(),
            true,
            0
            );
        if (error) { goto cleanup; }
        // set the bulk fetch iteration to its max so that adding an
        // index fills the bulk fetch buffer every time. we do not
        // want it to grow exponentially fast.
        rows_fetched_using_bulk_fetch = 0;
        bulk_fetch_iteration = HA_TOKU_BULK_FETCH_ITERATION_MAX;
        cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED,smart_dbt_bf_callback, &bf_info);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
        // initialize a two phase progress report.
        // first phase: putting rows into the loader
        thd_progress_init(thd, 2);
#endif
        // Drain the bulk-fetch buffer one key/val pair at a time, refilling
        // from the cursor whenever it runs dry, until the scan is exhausted.
        while (cursor_ret_val != DB_NOTFOUND || ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0)) {
            if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
                invalidate_bulk_fetch(); // reset the buffers
                cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_bf_callback, &bf_info);
                if (cursor_ret_val != DB_NOTFOUND && cursor_ret_val != 0) {
                    error = cursor_ret_val;
                    goto cleanup;
                }
            }
            // do this check in case the the c_getf_next did not put anything into the buffer because
            // there was no more data
            if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
                break;
            }
            // at this point, we know the range query buffer has at least one key/val pair
            uchar* curr_pos = range_query_buff+curr_range_query_buff_offset;
            uint32_t key_size = *(uint32_t *)curr_pos;
            curr_pos += sizeof(key_size);
            uchar* curr_key_buff = curr_pos;
            curr_pos += key_size;
            curr_pk_key.data = curr_key_buff;
            curr_pk_key.size = key_size;
            uint32_t val_size = *(uint32_t *)curr_pos;
            curr_pos += sizeof(val_size);
            uchar* curr_val_buff = curr_pos;
            curr_pos += val_size;
            curr_pk_val.data = curr_val_buff;
            curr_pk_val.size = val_size;
            curr_range_query_buff_offset = curr_pos - range_query_buff;
            error = loader->put(loader, &curr_pk_key, &curr_pk_val);
            if (error) { goto cleanup; }
            num_processed++;
            // Every 1000 rows: refresh the status line, report progress,
            // and honor KILL.
            if ((num_processed % 1000) == 0) {
                sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.",
                        num_processed, (long long unsigned) share->rows);
                thd_proc_info(thd, status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
                thd_progress_report(thd, num_processed, (long long unsigned) share->rows);
#endif
                if (thd_killed(thd)) {
                    error = ER_ABORTING_CONNECTION;
                    goto cleanup;
                }
            }
        }
        error = tmp_cursor->c_close(tmp_cursor);
        assert(error==0);
        tmp_cursor = NULL;
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
        // next progress report phase: closing the loader.
        // incremental reports are done in the loader's callback function.
        thd_progress_next_stage(thd);
#endif
        error = loader->close(loader);
        loader = NULL;
        if (error) goto cleanup;
    }
    // Verify uniqueness for any HA_NOSAME key now that it is fully built.
    curr_index = curr_num_DBs;
    for (uint i = 0; i < num_of_keys; i++, curr_index++) {
        if (key_info[i].flags & HA_NOSAME) {
            bool is_unique;
            error = is_index_unique(&is_unique, txn, share->key_file[curr_index], &key_info[i],
                                    creating_hot_index ? 0 : DB_PRELOCKED_WRITE);
            if (error) goto cleanup;
            if (!is_unique) {
                error = HA_ERR_FOUND_DUPP_KEY;
                last_dup_key = i;
                goto cleanup;
            }
        }
    }
    //
    // We have an accurate row count, might as well update share->rows
    //
    if(!creating_hot_index) {
        tokudb_pthread_mutex_lock(&share->mutex);
        share->rows = num_processed;
        tokudb_pthread_mutex_unlock(&share->mutex);
    }
    //
    // now write stuff to status.tokudb
    //
    tokudb_pthread_mutex_lock(&share->mutex);
    for (uint i = 0; i < num_of_keys; i++) {
        write_key_name_to_status(share->status_block, key_info[i].name, txn);
    }
    tokudb_pthread_mutex_unlock(&share->mutex);
    error = 0;
cleanup:
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_end(thd);
#endif
    if (rw_lock_taken) {
        rw_unlock(&share->num_DBs_lock);
        rw_lock_taken = false;
    }
    if (tmp_cursor) {
        int r = tmp_cursor->c_close(tmp_cursor);
        assert(r==0);
        tmp_cursor = NULL;
    }
    if (loader != NULL) {
        sprintf(status_msg, "aborting creation of indexes.");
        thd_proc_info(thd, status_msg);
        loader->abort(loader);
    }
    if (indexer != NULL) {
        sprintf(status_msg, "aborting creation of indexes.");
        thd_proc_info(thd, status_msg);
        // Indexer abort also manipulates share state, so it needs the
        // num_DBs write lock.
        rw_wrlock(&share->num_DBs_lock);
        indexer->abort(indexer);
        rw_unlock(&share->num_DBs_lock);
    }
    if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
        sql_print_error("Could not add indexes to table %s because \
another transaction has accessed the table. \
To add indexes, make sure no transactions touch the table.", share->table_name);
    }
    thd_proc_info(thd, orig_proc_info);
    TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error);
}
//
// Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2
// Closes added indexes in case of error in error path of add_index and alter_table_phase2
//
// Error path of add_index/alter_table_phase2: closes the partially-created
// index dictionaries and, if the hot-index path bumped it, restores
// share->num_DBs.
void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs) {
    uint curr_num_DBs = table_arg->s->keys + tokudb_test(hidden_primary_key);
    //
    // Undo the num_DBs bump first, under the write lock and before the
    // dictionaries are closed, so no reader ever sees a num_DBs that
    // points at a closed DB.
    //
    if (incremented_numDBs) {
        rw_wrlock(&share->num_DBs_lock);
        share->num_DBs--;
    }
    if (modified_DBs) {
        // First pass: wipe the key/column metadata set up for the new keys.
        for (uint i = 0; i < num_of_keys; i++) {
            reset_key_and_col_info(&share->kc_info, curr_num_DBs + i);
        }
        // Second pass: close and forget each opened dictionary.
        for (uint i = 0; i < num_of_keys; i++) {
            uint slot = curr_num_DBs + i;
            DB* db = share->key_file[slot];
            if (db != NULL) {
                int r = db->close(db, 0);
                assert(r==0);
                share->key_file[slot] = NULL;
            }
        }
    }
    if (incremented_numDBs) {
        rw_unlock(&share->num_DBs_lock);
    }
}
//
// Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
// With a transaction, drops dictionaries associated with indexes in key_num
//
int ha_tokudb::drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, KEY *key_info, DB_TXN* txn) {
    TOKUDB_HANDLER_DBUG_ENTER("");
    assert(txn);
    int error = 0;
    // Phase 1: acquire fileops locks on every affected dictionary up front,
    // so we either get all of them or fail before modifying anything.
    for (uint i = 0; i < num_of_keys; i++) {
        uint curr_index = key_num[i];
        error = share->key_file[curr_index]->pre_acquire_fileops_lock(share->key_file[curr_index],txn);
        if (error != 0) {
            goto cleanup;
        }
    }
    // Phase 2: close each dictionary, drop its name from status.tokudb,
    // and remove the dictionary file itself -- all under txn.
    for (uint i = 0; i < num_of_keys; i++) {
        uint curr_index = key_num[i];
        int r = share->key_file[curr_index]->close(share->key_file[curr_index],0);
        assert(r==0);
        share->key_file[curr_index] = NULL;
        error = remove_key_name_from_status(share->status_block, key_info[curr_index].name, txn);
        if (error) { goto cleanup; }
        error = delete_or_rename_dictionary(share->table_name, NULL, key_info[curr_index].name, true, txn, true);
        if (error) { goto cleanup; }
    }
cleanup:
    if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
        sql_print_error("Could not drop indexes from table %s because \
another transaction has accessed the table. \
To drop indexes, make sure no transactions touch the table.", share->table_name);
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
// Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
// Restores dropped indexes in case of error in error path of prepare_drop_index and alter_table_phase2
//
// Error path of prepare_drop_index/alter_table_phase2: the dictionaries
// were closed but never removed, so reopen any slot that is now NULL.
void ha_tokudb::restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys) {
    for (uint i = 0; i < num_of_keys; i++) {
        uint slot = key_num[i];
        if (share->key_file[slot] != NULL) {
            continue;
        }
        int r = open_secondary_dictionary(
            &share->key_file[slot],
            &table_share->key_info[slot],
            share->table_name,
            false,
            NULL
            );
        assert(!r);
    }
}
// Translates TokuDB/BDB-style status codes into the MySQL handler error
// space; unrecognized codes pass through unchanged.
int ha_tokudb::map_to_handler_error(int error) {
    switch (error) {
    case DB_LOCK_DEADLOCK:
        return HA_ERR_LOCK_DEADLOCK;
    case DB_LOCK_NOTGRANTED:
        return HA_ERR_LOCK_WAIT_TIMEOUT;
#if defined(HA_ERR_DISK_FULL)
    case ENOSPC:
        return HA_ERR_DISK_FULL;
#endif
    case DB_KEYEXIST:
        return HA_ERR_FOUND_DUPP_KEY;
#if defined(HA_ALTER_ERROR)
    case HA_ALTER_ERROR:
        return HA_ERR_UNSUPPORTED;
#endif
    case TOKUDB_INTERRUPTED:
        return ER_QUERY_INTERRUPTED;
    case TOKUDB_OUT_OF_LOCKS:
        return HA_ERR_LOCK_TABLE_FULL;
    default:
        return error;
    }
}
// Translates storage-engine error codes to handler error codes before
// delegating to the default handler error reporting.
void ha_tokudb::print_error(int error, myf errflag) {
    error = map_to_handler_error(error);
    handler::print_error(error, errflag);
}
//
// Truncates the dictionary associated with index keynr, inside txn.
// Does so by closing the dictionary, deleting it, and then recreating
// an empty one with the same compression method, all within the
// transaction so the truncate is atomic.
//
int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
    int error;
    bool is_pk = (keynr == primary_key);
    // Remember the compression method before closing, so the recreated
    // dictionary matches the original.
    toku_compression_method compression_method = get_compression_method(share->key_file[keynr]);
    error = share->key_file[keynr]->close(share->key_file[keynr], 0);
    assert(error == 0);
    share->key_file[keynr] = NULL;
    // The main (primary-key) dictionary is also aliased by share->file.
    if (is_pk) { share->file = NULL; }
    if (is_pk) {
        error = delete_or_rename_dictionary(
            share->table_name,
            NULL,
            "main",
            false, //is_key
            txn,
            true // is a delete
            );
        if (error) { goto cleanup; }
    }
    else {
        error = delete_or_rename_dictionary(
            share->table_name,
            NULL,
            table_share->key_info[keynr].name,
            true, //is_key
            txn,
            true // is a delete
            );
        if (error) { goto cleanup; }
    }
    // Recreate an empty dictionary of the same kind.
    if (is_pk) {
        error = create_main_dictionary(share->table_name, table, txn, &share->kc_info, compression_method);
    }
    else {
        error = create_secondary_dictionary(
            share->table_name,
            table,
            &table_share->key_info[keynr],
            txn,
            &share->kc_info,
            keynr,
            false,
            compression_method
            );
    }
    // NOTE(review): this jump is redundant (cleanup immediately follows),
    // but it is kept for symmetry with the other error checks.
    if (error) { goto cleanup; }
cleanup:
    return error;
}
// MySQL 5.5+ handler entry point for TRUNCATE TABLE; the real work is
// done by delete_all_rows_internal().
int ha_tokudb::truncate() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = delete_all_rows_internal();
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// delete all rows from a table
//
// effects: delete all of the rows in the main dictionary and all of the
// indices. this must be atomic, so we use the statement transaction
// for all of the truncate operations.
// locks: if we have an exclusive table write lock, all of the concurrency
// issues go away.
// returns: 0 if success
int ha_tokudb::delete_all_rows() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    // Only a genuine TRUNCATE TABLE may take this bulk path; other
    // statements (e.g. DELETE without WHERE) get HA_ERR_WRONG_COMMAND so
    // the server falls back to row-by-row deletion.
    if (thd_sql_command(ha_thd()) != SQLCOM_TRUNCATE) {
        share->try_table_lock = true;
        error = HA_ERR_WRONG_COMMAND;
    }
    if (error == 0)
        error = delete_all_rows_internal();
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Implements TRUNCATE: inside a single transaction, locks every
// dictionary (main + secondaries), truncates each one, and resets the
// cached row count and auto-increment value. Regardless of the outcome,
// every dictionary handle that was closed is reopened before returning,
// so the handler stays usable.
int ha_tokudb::delete_all_rows_internal() {
    TOKUDB_HANDLER_DBUG_ENTER("");
    int error = 0;
    uint curr_num_DBs = 0;
    DB_TXN* txn = NULL;
    error = txn_begin(db_env, 0, &txn, 0, ha_thd());
    if (error) { goto cleanup; }
    // Number of dictionaries: one per key, plus one if a hidden primary
    // key is in use.
    curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
    // Lock everything up front so the whole truncate fails fast if any
    // dictionary is in use by another transaction.
    for (uint i = 0; i < curr_num_DBs; i++) {
        error = share->key_file[i]->pre_acquire_fileops_lock(
            share->key_file[i],
            txn
            );
        if (error) { goto cleanup; }
        error = share->key_file[i]->pre_acquire_table_lock(
            share->key_file[i],
            txn
            );
        if (error) { goto cleanup; }
    }
    for (uint i = 0; i < curr_num_DBs; i++) {
        error = truncate_dictionary(i, txn);
        if (error) { goto cleanup; }
    }
    // zap the row count
    // (error is necessarily 0 here -- every failure above jumps to
    // cleanup -- so this check is belt-and-braces.)
    if (error == 0) {
        share->rows = 0;
        // update auto increment
        share->last_auto_increment = 0;
        // calling write_to_status directly because we need to use txn
        write_to_status(
            share->status_block,
            hatoku_max_ai,
            &share->last_auto_increment,
            sizeof(share->last_auto_increment),
            txn
            );
    }
    share->try_table_lock = true;
cleanup:
    if (txn) {
        if (error) {
            abort_txn(txn);
        }
        else {
            commit_txn(txn,0);
        }
    }
    if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
        // The continuation line below is inside the string literal, so it
        // must stay unindented to keep the message text unchanged.
        sql_print_error("Could not truncate table %s because another transaction has accessed the \
table. To truncate the table, make sure no transactions touch the table.",
            share->table_name);
    }
    //
    // regardless of errors, need to reopen the DB's
    //
    for (uint i = 0; i < curr_num_DBs; i++) {
        int r = 0;
        if (share->key_file[i] == NULL) {
            if (i != primary_key) {
                r = open_secondary_dictionary(
                    &share->key_file[i],
                    &table_share->key_info[i],
                    share->table_name,
                    false, // NOTE(review): flag meaning not visible here
                    NULL
                    );
                assert(!r);
            }
            else {
                r = open_main_dictionary(
                    share->table_name,
                    false,
                    NULL
                    );
                assert(!r);
            }
        }
    }
    TOKUDB_HANDLER_DBUG_RETURN(error);
}
// Records an error reported by the bulk loader so it can be surfaced on
// a later handler call.
void ha_tokudb::set_loader_error(int err) {
    loader_error = err;
}
// On a duplicate-key conflict against the primary key, unpacks the
// conflicting key into record[0] (so the server can report the duplicate
// row) and remembers the primary key as the last duplicate index.
// Must not be called when a hidden primary key is in use.
void ha_tokudb::set_dup_value_for_pk(DBT* key) {
    assert(!hidden_primary_key);
    unpack_key(table->record[0],key,primary_key);
    last_dup_key = primary_key;
}
// Closes the disk-sweep MRR helper. Only compiled in for MariaDB and
// MySQL 5.6.x, which are the servers exposing the DsMrr_impl interface;
// on other versions this is a no-op.
void ha_tokudb::close_dsmrr() {
#ifdef MARIADB_BASE_VERSION
    ds_mrr.dsmrr_close();
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
    ds_mrr.dsmrr_close();
#endif
}
// Resets the disk-sweep MRR helper between statements.
// NOTE(review): the MariaDB branch calls dsmrr_close() while the MySQL
// 5.6 branch calls reset() -- presumably MariaDB's DsMrr_impl lacks a
// reset(); confirm against the respective server headers.
void ha_tokudb::reset_dsmrr() {
#ifdef MARIADB_BASE_VERSION
    ds_mrr.dsmrr_close();
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
    ds_mrr.reset();
#endif
}
// Index-condition pushdown: we cache the condition so we can do the
// filtering ourselves, but as far as MySQL knows, we are not doing any
// filtering -- so if we happen to miss filtering a row that does not
// match idx_cond_arg, MySQL will catch it. This lets us handle only
// index_next/index_prev without worrying about other index_XXX paths.
// Returning idx_cond_arg (rather than NULL) tells the server the engine
// did not fully take over evaluation of the condition.
Item* ha_tokudb::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) {
    toku_pushed_idx_cond_keyno = keyno_arg;
    toku_pushed_idx_cond = idx_cond_arg;
    return idx_cond_arg;
}
// Called when txn is going away: if our open cursor belongs to that
// transaction, close it now so it does not dangle once the transaction
// is aborted/committed.
void ha_tokudb::cleanup_txn(DB_TXN *txn) {
    if (transaction == txn && cursor) {
        int r = cursor->c_close(cursor);
        assert(r == 0);
        cursor = NULL;
    }
}
// Registers this handler with the session's TokuDB transaction data so
// the transaction can notify all participating handlers.
// NOTE(review): trx is dereferenced without a NULL check; callers
// apparently guarantee the ha_data slot is populated -- confirm.
void ha_tokudb::add_to_trx_handler_list() {
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
    trx->handlers = list_add(trx->handlers, &trx_handler_list);
}
// Unregisters this handler from the session's TokuDB transaction data;
// inverse of add_to_trx_handler_list().
void ha_tokudb::remove_from_trx_handler_list() {
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
    trx->handlers = list_delete(trx->handlers, &trx_handler_list);
}
// Marks that replication is about to apply write-row events.
void ha_tokudb::rpl_before_write_rows() {
    in_rpl_write_rows = true;
}
// Clears the write-row replication marker set by rpl_before_write_rows().
void ha_tokudb::rpl_after_write_rows() {
    in_rpl_write_rows = false;
}
// Marks that replication is about to apply delete-row events.
void ha_tokudb::rpl_before_delete_rows() {
    in_rpl_delete_rows = true;
}
// Clears the delete-row replication marker set by rpl_before_delete_rows().
void ha_tokudb::rpl_after_delete_rows() {
    in_rpl_delete_rows = false;
}
// Marks that replication is about to apply update-row events.
void ha_tokudb::rpl_before_update_rows() {
    in_rpl_update_rows = true;
}
// Clears the update-row replication marker set by rpl_before_update_rows().
void ha_tokudb::rpl_after_update_rows() {
    in_rpl_update_rows = false;
}
// Returns whether row lookups should be performed while applying
// replication row events. Outside of delete/update row application this
// is always true; inside, it is controlled by the session variable
// tokudb_rpl_lookup_rows.
bool ha_tokudb::rpl_lookup_rows() {
    if (!in_rpl_delete_rows && !in_rpl_update_rows)
        return true;
    else
        return THDVAR(ha_thd(), rpl_lookup_rows);
}
// table admin
#include "ha_tokudb_admin.cc"
// update functions
#include "tokudb_update_fun.cc"
// fast updates
#include "ha_tokudb_update.cc"
// alter table code for various mysql distros
#include "ha_tokudb_alter_55.cc"
#include "ha_tokudb_alter_56.cc"
// mrr
#ifdef MARIADB_BASE_VERSION
#include "ha_tokudb_mrr_maria.cc"
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
#include "ha_tokudb_mrr_mysql.cc"
#endif
// key comparisons
#include "hatoku_cmp.cc"
// handlerton
#include "hatoku_hton.cc"
// generate template functions: explicit instantiations of the
// variable-length-quantity (VLQ) integer codec for the 32- and 64-bit
// unsigned widths used by the rest of the engine.
namespace tokudb {
    template size_t vlq_encode_ui(uint32_t n, void *p, size_t s);
    template size_t vlq_decode_ui(uint32_t *np, void *p, size_t s);
    template size_t vlq_encode_ui(uint64_t n, void *p, size_t s);
    template size_t vlq_decode_ui(uint64_t *np, void *p, size_t s);
};
```
|
Blood Brothers: The International Recording is a 1995 studio cast recording of the Willy Russell musical Blood Brothers. The album features David Cassidy, Shaun Cassidy, and Petula Clark, with Russell himself as the Narrator.
Background
The album was the third release of a Blood Brothers cast recording; the earlier versions featured the original 1983 cast (including Barbara Dickson as Mrs Johnstone) and 1988 cast (with Kiki Dee as Mrs Johnstone).
In 1995, a recording of the then London cast, including Stephanie Lawrence as Mrs Johnstone and Warwick Evans as the Narrator was also produced. Many of the instrumental tracks appeared on both the International and London recordings.
Cast biographies
Blood Brothers marked the first time Shaun Cassidy and David Cassidy appeared together on an album, having both previously had successful careers as pop artists. (The next occasion came only a year later, when they sang in a trio with their brother Patrick on "You Could Drive a Person Crazy", released on the live recording "Sondheim – A Celebration".)
Blood Brothers was the first time Shaun Cassidy had appeared on an album in 15 years. His previous appearance had been in 1980, during his teen-idol career, when he released his fifth solo album, Wasp. To date, Blood Brothers was the sixth time Shaun released new material on an album.
David Cassidy (Shaun's half-brother), however, had released solo albums throughout the 1990s, and Blood Brothers represented his first musical-theatre album. Following Blood Brothers, David (unlike Shaun) continued to perform as a headliner of musical theatre throughout the 1990s, including the Las Vegas shows EFX and At The Copa.
Petula Clark also continued to perform as a headliner of musical theatre, including an extensive tour of the musical Sunset Boulevard.
Track listing
"Overture"
"Marilyn Monroe"
"My Child"
"Easy Terms"
"Shoes upon the Table"
"July 18"
"Kids' Game"
"Gypsies in the Wood"
"Long Sunday Afternoon/My Friend"
"Bright New Day"
"Entr'acte/Marilyn Monroe (2)"
"Secrets"
"That Guy"
"Summer Sequence"
"I'm Not Saying a Word"
"One Day in October"
"Take a Letter Miss Jones"
"Robbery"
"Marilyn Monroe (3)"
"Light Romance/Madman"
"Council Chamber"
"Tell Me It's Not True"
David Cassidy albums
Shaun Cassidy albums
Cast recordings
1995 soundtrack albums
Theatre soundtracks
|
```xml
import React from 'react';
import TestRenderer from 'react-test-renderer';
import { SC_ATTR, SC_ATTR_VERSION } from '../constants';
import { getRenderedCSS, rehydrateTestStyles, resetStyled, seedNextClassnames } from './utils';
// Injected at build time; used to stamp styled-components <style> tags.
declare const __VERSION__: string;
/* NOTE:
Sometimes we add an empty function interpolation into some
styled-components to skip the static optimisation in
ComponentStyle. This will look like this:
${() => ''}
*/
// Re-assigned fresh in each beforeEach so every test starts from a clean
// module state; the two constructors are typed as the modules' default
// exports.
let styled: ReturnType<typeof resetStyled>;
let createGlobalStyle: Awaited<typeof import('../constructors/createGlobalStyle')>['default'];
let keyframes: Awaited<typeof import('../constructors/keyframes')>['default'];
describe('rehydration', () => {
/**
 * Make sure the setup is the same for every test.
 *
 * The variables are declared as the modules' *default* exports
 * (`['default']` above), but `require()` returns the whole CJS module
 * namespace, which is not callable as a tagged template -- so the
 * `.default` unwrap is required here.
 */
beforeEach(() => {
createGlobalStyle = require('../constructors/createGlobalStyle').default;
keyframes = require('../constructors/keyframes').default;
styled = resetStyled();
});
// Server-rendered markup already contains componentId "TWO" mapped to
// class "b"; these tests verify rehydration keeps that mapping intact.
describe('with existing styled components', () => {
beforeEach(() => {
// The .g1[id="TWO"] marker line maps style group 1 / componentId TWO
// to generated class "b".
document.head.innerHTML = `
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
.b { color: red; }/*!sc*/
${SC_ATTR}.g1[id="TWO"]{content: "b,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should preserve the styles', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}"
`);
});
// A previously-unseen componentId gets a fresh class appended after the
// rehydrated rules.
it('should append a new component like normal', () => {
const Comp = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
${() => ''}
`;
TestRenderer.create(<Comp />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}
.a {
color: blue;
}"
`);
});
// Rendering a component whose id was rehydrated must not emit new CSS.
it('should reuse a componentId', () => {
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
${() => ''}
`;
TestRenderer.create(<A />);
const B = styled.div.withConfig({ componentId: 'TWO' })``;
TestRenderer.create(<B />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}
.a {
color: blue;
}"
`);
});
// Same rule text as the rehydrated one -> the generated class is reused.
it('should reuse a componentId and generated class', () => {
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
${() => ''}
`;
TestRenderer.create(<A />);
const B = styled.div.withConfig({ componentId: 'TWO' })`
color: red;
${() => ''}
`;
TestRenderer.create(<B />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}
.a {
color: blue;
}"
`);
});
// A different computed style under a rehydrated id gets a new class,
// inserted into that id's group (before later-created groups).
it('should reuse a componentId and inject new classes', () => {
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
${() => ''}
`;
TestRenderer.create(<A />);
const B = styled.div.withConfig({ componentId: 'TWO' })`
color: ${() => 'red'};
`;
TestRenderer.create(<B />);
const C = styled.div.withConfig({ componentId: 'TWO' })`
color: ${() => 'green'};
`;
TestRenderer.create(<C />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}
.c {
color: green;
}
.a {
color: blue;
}"
`);
});
});
// Two componentIds (ONE -> "a", TWO -> "b") are rehydrated; tests cover
// prop-driven styles matching and diverging from the rehydrated CSS.
describe('with styled components with props', () => {
beforeEach(() => {
/* Hash 1323611362 is based on name TWO and contents color: red.
* Change either and this will break.
* NOTE(review): that hash does not appear in the fixture below --
* this comment may be stale. */
document.head.innerHTML = `
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
.a { color: blue; }/*!sc*/
${SC_ATTR}.g1[id="ONE"]{content: "a,"}/*!sc*/
.b { color: red; }/*!sc*/
${SC_ATTR}.g2[id="TWO"]{content: "b,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should preserve the styles', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".a {
color: blue;
}
.b {
color: red;
}"
`);
});
// Computed style identical to the rehydrated rule -> no new CSS emitted.
it('should not inject new styles for a component already rendered', () => {
const Comp = styled.div.withConfig({ componentId: 'ONE' })`
color: ${props => props.color};
`;
TestRenderer.create(<Comp color="blue" />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".a {
color: blue;
}
.b {
color: red;
}"
`);
});
// A new computed value gets a new class ("x"), inserted in ONE's group.
it('should inject new styles for a new computed style of a component', () => {
seedNextClassnames(['x']);
const Comp = styled.div.withConfig({ componentId: 'ONE' })`
color: ${props => props.color};
`;
TestRenderer.create(<Comp color="green" />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".a {
color: blue;
}
.x {
color: green;
}
.b {
color: red;
}"
`);
});
});
// A <style> tag without the SC_ATTR data attribute is not ours, so
// rehydration must leave it alone (the marker line is then treated as a
// plain CSS rule rather than bookkeeping).
describe('with inline styles that werent rendered by us', () => {
beforeEach(() => {
/* Same css as before, but without the data attributes we ignore it */
document.head.innerHTML = `
<style>
.b { color: red; }/*!sc*/
${SC_ATTR}.g2[id="TWO"]{content: "b,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should leave the existing styles there', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
".b {
color: red;
}
data-styled.g2[id="TWO"] {
content: "b,"
}"
`);
});
});
// Rehydration of a global style plus a component style, and how newly
// registered styles interleave with them.
describe('with global styles', () => {
beforeEach(() => {
/* Adding a non-local stylesheet with a hash 557410406 which is
* derived from "body { background: papayawhip; }" so be careful
* changing it. */
document.head.innerHTML = `
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
body { background: papayawhip; }/*!sc*/
${SC_ATTR}.g1[id="sc-global-557410406"]{content: "sc-global-557410406,"}/*!sc*/
</style>
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
.a { color: red; }/*!sc*/
${SC_ATTR}.g2[id="TWO"]{content: "a,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should leave the existing styles there', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"body {
background: papayawhip;
}
.a {
color: red;
}"
`);
});
it('should inject new global styles at the end', () => {
const Component = createGlobalStyle`
body { color: tomato; }
`;
TestRenderer.create(<Component />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"body {
background: papayawhip;
}
.a {
color: red;
}
body {
color: tomato;
}"
`);
});
it('should interleave global and local styles', () => {
const Component = createGlobalStyle`
body { color: tomato; }
`;
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
${() => ''}
`;
TestRenderer.create(<Component />);
TestRenderer.create(<A />);
// although `<Component />` is rendered before `<A />`, the global style isn't registered until render time
// compared to typical component styles which are registered at creation time
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"body {
background: papayawhip;
}
.a {
color: red;
}
body {
color: tomato;
}
.b {
color: blue;
}"
`);
});
});
// Everything (two globals, two components) is already in the rehydrated
// sheet; re-registering the same styles in any order must be a no-op.
describe('with all styles already rendered', () => {
beforeEach(() => {
document.head.innerHTML = `
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
html { font-size: 16px; }/*!sc*/
${SC_ATTR}.g1[id="sc-global-a1"]{content: "sc-global-a1,"}/*!sc*/
body { background: papayawhip; }/*!sc*/
${SC_ATTR}.g2[id="sc-global-b1"]{content: "sc-global-b1,"}/*!sc*/
.c { color: blue; }/*!sc*/
${SC_ATTR}.g3[id="ONE"]{content: "c,"}/*!sc*/
.d { color: red; }/*!sc*/
${SC_ATTR}.g4[id="TWO"]{content: "d,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should not touch existing styles', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"html {
font-size: 16px;
}
body {
background: papayawhip;
}
.c {
color: blue;
}
.d {
color: red;
}"
`);
});
it('should not change styles if rendered in the same order they were created with', () => {
const Component1 = createGlobalStyle`
html { font-size: 16px; }
`;
TestRenderer.create(<Component1 />);
const Component2 = createGlobalStyle`
body { background: papayawhip; }
`;
TestRenderer.create(<Component2 />);
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
`;
TestRenderer.create(<A />);
const B = styled.div.withConfig({ componentId: 'TWO' })`
color: red;
`;
TestRenderer.create(<B />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"html {
font-size: 16px;
}
body {
background: papayawhip;
}
.c {
color: blue;
}
.d {
color: red;
}"
`);
});
// Output order is dictated by the rehydrated group numbers, not by the
// order components happen to render in.
it('should still not change styles if rendered in a different order', () => {
seedNextClassnames(['d', 'a', 'b', 'c']);
const B = styled.div.withConfig({ componentId: 'TWO' })`
color: red;
`;
TestRenderer.create(<B />);
const Component1 = createGlobalStyle`
html { font-size: 16px; }
`;
TestRenderer.create(<Component1 />);
const Component2 = createGlobalStyle`
body { background: papayawhip; }
`;
TestRenderer.create(<Component2 />);
const A = styled.div.withConfig({ componentId: 'ONE' })`
color: blue;
`;
TestRenderer.create(<A />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"html {
font-size: 16px;
}
body {
background: papayawhip;
}
.c {
color: blue;
}
.d {
color: red;
}"
`);
});
});
// A keyframes block (keyframe_880) is rehydrated; tests cover reuse of
// the rehydrated animation, injection of new ones, and name resolution
// both via direct interpolation and via props.
describe('with keyframes', () => {
beforeEach(() => {
document.head.innerHTML = `
<style ${SC_ATTR} ${SC_ATTR_VERSION}="${__VERSION__}">
@-webkit-keyframes keyframe_880 {from {opacity: 0;}}@keyframes keyframe_880 {from {opacity: 0;}}/*!sc*/
${SC_ATTR}.g1[id="sc-keyframes-keyframe_880"]{content: "keyframe_880,"}/*!sc*/
</style>
`;
rehydrateTestStyles();
});
it('should not touch existing styles', () => {
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"@-webkit-keyframes keyframe_880 {
from {
opacity: 0;
}
}
@keyframes keyframe_880 {
from {
opacity: 0;
}
}"
`);
});
// Seeding the same name as the rehydrated keyframes means the animation
// body must not be emitted a second time.
it('should not regenerate keyframes', () => {
seedNextClassnames(['keyframe_880']);
const fadeIn = keyframes`
from { opacity: 0; }
`;
const A = styled.div`
animation: ${fadeIn} 1s both;
${() => ''}
`;
TestRenderer.create(<A />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"@-webkit-keyframes keyframe_880 {
from {
opacity: 0;
}
}
@keyframes keyframe_880 {
from {
opacity: 0;
}
}
.b {
animation: keyframe_880 1s both;
}"
`);
});
// A keyframes name not present in the rehydrated sheet is appended.
it('should still inject new keyframes', () => {
seedNextClassnames(['keyframe_144']);
const fadeOut = keyframes`
from { opacity: 1; }
`;
const A = styled.div`
animation: ${fadeOut} 1s both;
${() => ''}
`;
TestRenderer.create(<A />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"@-webkit-keyframes keyframe_880 {
from {
opacity: 0;
}
}
@keyframes keyframe_880 {
from {
opacity: 0;
}
}
.b {
animation: keyframe_144 1s both;
}
@keyframes keyframe_144 {
from {
opacity: 1;
}
}"
`);
});
it('should pass the keyframes name along as well', () => {
seedNextClassnames(['keyframe_880', 'keyframe_144']);
const fadeIn = keyframes`
from { opacity: 0; }
`;
const fadeOut = keyframes`
from { opacity: 1; }
`;
const A = styled.div`
animation: ${fadeIn} 1s both;
${() => ''}
`;
const B = styled.div`
animation: ${fadeOut} 1s both;
${() => ''}
`;
/* Purposely rendering out of order to make sure the output looks right */
TestRenderer.create(<B />);
TestRenderer.create(<A />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"@-webkit-keyframes keyframe_880 {
from {
opacity: 0;
}
}
@keyframes keyframe_880 {
from {
opacity: 0;
}
}
.d {
animation: keyframe_880 1s both;
}
.c {
animation: keyframe_144 1s both;
}
@keyframes keyframe_144 {
from {
opacity: 1;
}
}"
`);
});
// Same as the previous test, but the keyframes object arrives through a
// prop-based interpolation instead of a static one.
it('should pass the keyframes name through props along as well', () => {
seedNextClassnames(['keyframe_880', 'keyframe_144']);
const fadeIn = keyframes`
from { opacity: 0; }
`;
const fadeOut = keyframes`
from { opacity: 1; }
`;
const A = styled.div<{ animation: any }>`
animation: ${props => props.animation} 1s both;
`;
const B = styled.div<{ animation: any }>`
animation: ${props => props.animation} 1s both;
`;
/* Purposely rendering out of order to make sure the output looks right */
TestRenderer.create(<B animation={fadeOut} />);
TestRenderer.create(<A animation={fadeIn} />);
expect(getRenderedCSS()).toMatchInlineSnapshot(`
"@-webkit-keyframes keyframe_880 {
from {
opacity: 0;
}
}
@keyframes keyframe_880 {
from {
opacity: 0;
}
}
.d {
animation: keyframe_880 1s both;
}
.c {
animation: keyframe_144 1s both;
}
@keyframes keyframe_144 {
from {
opacity: 1;
}
}"
`);
});
});
});
```
|
Pleurobema flavidulum, the yellow pigtoe, was a species of freshwater mussel, an aquatic bivalve mollusk in the family Unionidae. It was endemic to the United States. Its natural habitat was rivers. It is now extinct.
References
flavidulum
Bivalves described in 1861
Taxonomy articles created by Polbot
|
The men's tournament of football at the 2017 Summer Universiade was held from August 18 to 29 in Taipei, Taiwan.
Teams
Preliminary round
All times are Taiwan Standard Time (UTC+08:00).
Tiebreakers
The ranking of each team in each group was determined as follows:
Greatest number of points obtained in group matches;
Goal difference in all group matches;
Greatest number of goals scored in all group matches;
Greatest number of points obtained in group matches between the teams concerned;
Greatest number of goals scored in the group matches between the teams concerned;
Fair play points system taking into account the number of yellow and red cards in all group matches;
Drawing of lots by the Technical Committee.
Group A
Group B
Group C
Group D
Classification round
9th–16th place quarterfinals
13th–16th place semifinals
9th–12th place semifinals
15th place match
13th place match
11th place match
9th place match
Elimination round
Quarterfinals
5th–8th place semifinals
Semifinals
7th place match
5th place match
Bronze medal match
Gold medal match
Final standings
References
External links
2017 Summer Universiade – Football – Men's tournament
Men's
|
Iwase Station is a passenger railway station in the city of Sakuragawa, Ibaraki, Japan, operated by East Japan Railway Company (JR East).
Lines
Iwase Station is served by the Mito Line, and is located 29.6 km from the official starting point of the line at Oyama Station.
Station layout
The station consists of one side platform and one island platform, connected to the station building by a footbridge. The station is staffed.
Platforms
History
Iwase Station was opened on 16 January 1889. From 1918 to 1987 it was also a terminal station for the now defunct Tsukuba Railway Tsukuba Line. The station was absorbed into the JR East network upon the privatization of the Japanese National Railways (JNR) on 1 April 1987.
Passenger statistics
In fiscal 2019, the station was used by an average of 918 passengers daily (boarding passengers only).
Surrounding area
former Iwase Town Hall
Iwase Post Office
See also
List of railway stations in Japan
References
External links
JR East Station Information
Railway stations in Ibaraki Prefecture
Mito Line
Railway stations in Japan opened in 1889
Sakuragawa, Ibaraki
|
```java
/*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.google.android.material.loadingindicator;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.ColorFilter;
import android.graphics.Paint;
import android.graphics.PixelFormat;
import android.graphics.Rect;
import android.graphics.drawable.Drawable;
import androidx.annotation.IntRange;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import androidx.annotation.RestrictTo.Scope;
import androidx.annotation.VisibleForTesting;
import androidx.core.graphics.drawable.DrawableCompat;
import com.google.android.material.progressindicator.AnimatorDurationScaleProvider;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
/**
 * This class draws the graphics for a loading indicator. Geometry is delegated to a
 * {@link LoadingIndicatorDrawingDelegate} and motion to a {@link LoadingIndicatorAnimatorDelegate};
 * when system animations are disabled (duration scale == 0), a static placeholder drawable is shown
 * instead of the animated indicator.
 */
public final class LoadingIndicatorDrawable extends Drawable implements Drawable.Callback {
  // Supplies the system animator duration scale; a scale of 0 means animations are disabled.
  AnimatorDurationScaleProvider animatorDurationScaleProvider;

  @NonNull private final Context context;
  @NonNull private final LoadingIndicatorSpec specs;
  @NonNull private LoadingIndicatorDrawingDelegate drawingDelegate;
  @NonNull private LoadingIndicatorAnimatorDelegate animatorDelegate;

  @NonNull Paint paint;

  // Overall drawable alpha in [0, 255]; applied by the drawing delegate.
  @IntRange(from = 0, to = 255)
  int alpha;

  // Drawn instead of the animated indicator while system animators are disabled.
  private Drawable staticDummyDrawable;

  /** Creates a drawable wired up with the default drawing and animator delegates for specs. */
  @NonNull
  public static LoadingIndicatorDrawable create(
      @NonNull Context context, @NonNull LoadingIndicatorSpec specs) {
    return new LoadingIndicatorDrawable(
        context,
        specs,
        new LoadingIndicatorDrawingDelegate(specs),
        new LoadingIndicatorAnimatorDelegate(specs));
  }

  LoadingIndicatorDrawable(
      @NonNull Context context,
      @NonNull LoadingIndicatorSpec specs,
      @NonNull LoadingIndicatorDrawingDelegate drawingDelegate,
      @NonNull LoadingIndicatorAnimatorDelegate animatorDelegate) {
    this.context = context;
    this.specs = specs;
    this.drawingDelegate = drawingDelegate;
    this.animatorDelegate = animatorDelegate;
    this.paint = new Paint();
    // The animator delegate drives frame updates by invalidating this drawable.
    animatorDelegate.registerDrawable(this);
    setAlpha(255);
  }

  // ******************* Overridden methods *******************

  @Override
  public int getIntrinsicWidth() {
    return drawingDelegate.getPreferredWidth();
  }

  @Override
  public int getIntrinsicHeight() {
    return drawingDelegate.getPreferredHeight();
  }

  @Override
  public void draw(@NonNull Canvas canvas) {
    Rect clipBounds = new Rect();
    Rect bounds = getBounds();
    if (bounds.isEmpty() || !isVisible() || !canvas.getClipBounds(clipBounds)) {
      // Escape if bounds are empty, clip bounds are empty, or currently hidden.
      return;
    }
    // With animations disabled, draw the static placeholder tinted with the first
    // indicator color instead of running the animated delegates.
    if (isSystemAnimatorDisabled() && staticDummyDrawable != null) {
      staticDummyDrawable.setBounds(bounds);
      DrawableCompat.setTint(staticDummyDrawable, specs.indicatorColors[0]);
      staticDummyDrawable.draw(canvas);
      return;
    }
    canvas.save();
    // Container first, then the indicator on top, both honoring the current alpha.
    drawingDelegate.adjustCanvas(canvas, bounds);
    drawingDelegate.drawContainer(canvas, paint, specs.containerColor, getAlpha());
    drawingDelegate.drawIndicator(canvas, paint, animatorDelegate.indicatorState, getAlpha());
    canvas.restore();
  }

  /**
   * Starts the animator when becoming visible (and animations are enabled); cancels it
   * immediately otherwise.
   */
  @CanIgnoreReturnValue
  @Override
  public boolean setVisible(boolean visible, boolean restart) {
    boolean changed = super.setVisible(visible, restart);
    if (visible && !isSystemAnimatorDisabled()) {
      animatorDelegate.startAnimator();
    } else {
      animatorDelegate.cancelAnimatorImmediately();
    }
    return changed;
  }

  @Override
  public void setAlpha(int alpha) {
    // Only invalidate on an actual change to avoid redundant redraws.
    if (this.alpha != alpha) {
      this.alpha = alpha;
      invalidateSelf();
    }
  }

  @Override
  public int getAlpha() {
    return alpha;
  }

  @Override
  public void setColorFilter(@Nullable ColorFilter colorFilter) {
    paint.setColorFilter(colorFilter);
    invalidateSelf();
  }

  @Override
  public int getOpacity() {
    return PixelFormat.TRANSLUCENT;
  }

  // Drawable.Callback implementation: forward scheduling/invalidation from the static
  // placeholder drawable to this drawable's own callback.

  @Override
  public void invalidateDrawable(@NonNull Drawable drawable) {
    Drawable.Callback callback = getCallback();
    if (callback != null) {
      callback.invalidateDrawable(this);
    }
  }

  @Override
  public void scheduleDrawable(@NonNull Drawable who, @NonNull Runnable what, long when) {
    Drawable.Callback callback = getCallback();
    if (callback != null) {
      callback.scheduleDrawable(this, what, when);
    }
  }

  @Override
  public void unscheduleDrawable(@NonNull Drawable who, @NonNull Runnable what) {
    Drawable.Callback callback = getCallback();
    if (callback != null) {
      callback.unscheduleDrawable(this, what);
    }
  }

  // ******************* Utility functions *******************

  /** Returns true if the system-wide animator duration scale is 0 (animations disabled). */
  private boolean isSystemAnimatorDisabled() {
    if (animatorDurationScaleProvider != null) {
      float systemAnimatorDurationScale =
          animatorDurationScaleProvider.getSystemAnimatorDurationScale(
              context.getContentResolver());
      return systemAnimatorDurationScale == 0;
    }
    return false;
  }

  // ******************* Setter and getter *******************

  /**
   * Returns the drawable that will be used when the system animator is disabled.
   *
   * @hide
   */
  @RestrictTo(Scope.LIBRARY_GROUP)
  @Nullable
  public Drawable getStaticDummyDrawable() {
    return staticDummyDrawable;
  }

  /**
   * Sets the drawable that will be used when the system animator is disabled.
   *
   * @hide
   */
  @RestrictTo(Scope.LIBRARY_GROUP)
  @VisibleForTesting
  public void setStaticDummyDrawable(@Nullable Drawable staticDummyDrawable) {
    this.staticDummyDrawable = staticDummyDrawable;
  }

  @NonNull
  LoadingIndicatorAnimatorDelegate getAnimatorDelegate() {
    return animatorDelegate;
  }

  /** Swaps the animator delegate and re-registers this drawable with the new one. */
  void setAnimatorDelegate(@NonNull LoadingIndicatorAnimatorDelegate animatorDelegate) {
    this.animatorDelegate = animatorDelegate;
    animatorDelegate.registerDrawable(this);
  }

  @NonNull
  LoadingIndicatorDrawingDelegate getDrawingDelegate() {
    return drawingDelegate;
  }

  void setDrawingDelegate(@NonNull LoadingIndicatorDrawingDelegate drawingDelegate) {
    this.drawingDelegate = drawingDelegate;
  }
}
```
|
The 1st Illinois General Assembly, consisting of the Illinois Senate and the Illinois House of Representatives, met from October 4, 1818, to March 31, 1819, during the first two years of Shadrach Bond's governorship, at The Kaskaskia State House. The apportionment of seats in the House of Representatives was based on the provisions of the First Illinois Constitution. Political parties were not established in the State at the time.
It was succeeded by the 2nd Illinois General Assembly.
Members
This list is arranged by chamber, then by county. Senators and Representatives were both allotted to counties roughly by population and elected at-large within their districts. Two counties shared one senator.
Senate
Bond County
Martin Jones
Crawford County
Joseph Kitchell
Edwards County
Guy W. Smith
Gallatin County
Michael Jones
Jackson County
Conrad Will
Johnson and Franklin Counties
Thomas Roberts
Madison County
George Cadwell
Monroe County
Alexander Jamison
Pope County
Lewis Barker
St. Clair County
William Kinney
Randolph County
John McFerron, resigned July 8, 1819
Union County
Thomas Cox
Washington County
Zariah Maddux
White County
Willis Hargrave
House of Representatives
Bond County
Francis Kirkpatrick
Crawford County
David Porter
Scott Riggs
Edwards County
Levi Compton
Henry Utter
Franklin County
Elijah Ewing
Gallatin County
John G. Daimwood
Adolphus F. Hubbard
John Marshall, resigned
Jackson County
Jesse Griggs
Johnson County
Isaac D. Wilcox
Madison County
John Howard
Abraham Prickett
Samuel Whiteside
Monroe County
William Alexander
Pope County
Green B. Field
Robert Hamilton
St. Clair County
John Messinger
Risdon Moore
James D. Thomas
Randolph County
Edward Humphreys
Samuel Walker
Union County
Jesse Echols
John Whitaker
Washington County
Daniel S. Swearengen
White County
William McHenry
William Nash
Alexander Phillips
Employees
Senate
Secretary: William C. Greenup
Doorkeeper: Ezra Owen
House of Representatives
Clerk: Thomas Reynolds
Enrolling and Engrossing Clerk: Timothy Davis
Assistant Enrolling and Engrossing Clerk: Milton Ladd
Doorkeeper: Charles McNabb
See also
List of Illinois state legislatures
References
Illinois legislative sessions
Randolph County, Illinois
Illinois
Illinois
1818 in Illinois
1819 in Illinois
|
```javascript
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --noalways-opt
// Handle to the V8 debug API and a counter of break events, shared with
// the debug listener defined below.
var Debug = debug.Debug;
var breakPointCount = 0;
// Debug-event listener driven by the `debugger` statements in test() below.
// Each break inspects variables via debug-evaluate on the paused frames:
//   break 1: before the first await -- letInner/constInner/withVar are in
//            their TDZ and read as undefined.
//   break 2: inside `with (withVar)` -- checks x/y, then writes x, y and z
//            back through the with-scope.
//   break 3: after letInner/constInner are initialized.
//   break 4: inside the catch block -- `error` and `bun` are visible, `cow`
//            is still in its TDZ.
// Any assertion failure aborts the run with a nonzero exit code.
function listener(event, exec_state, event_data, data) {
  if (event != Debug.DebugEvent.Break) return;
  ++breakPointCount;
  try {
    if (breakPointCount === 1) {
      assertEquals(
          "inner", exec_state.frame(0).evaluate("inner").value());
      // Variables in TDZ have 'undefined' as their values.
      assertEquals(undefined, exec_state.frame(0).evaluate("letInner").value());
      assertEquals(undefined, exec_state.frame(0).evaluate("constInner").value());
      assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
      assertEquals(
          "const outer", exec_state.frame(0).evaluate("constOuter").value());
      assertEquals(
          "let outer", exec_state.frame(0).evaluate("letOuter").value());
      // The script-scope bindings are also visible from the parent frame.
      assertEquals("outer", exec_state.frame(1).evaluate("outer").value());
      assertEquals(
          "const outer", exec_state.frame(1).evaluate("constOuter").value());
      assertEquals(
          "let outer", exec_state.frame(1).evaluate("letOuter").value());
      // Variables in TDZ have 'undefined' as their values.
      assertEquals(undefined, exec_state.frame(0).evaluate("withVar").value());
    } else if (breakPointCount === 2) {
      assertEquals(
          "inner", exec_state.frame(0).evaluate("inner").value());
      // Variables in TDZ have 'undefined' as their values.
      assertEquals(undefined, exec_state.frame(0).evaluate("letInner").value());
      assertEquals(undefined, exec_state.frame(0).evaluate("constInner").value());
      // x and y resolve through the with-scope to withVar's properties.
      assertEquals(57, exec_state.frame(0).evaluate("x").value());
      assertEquals(100, exec_state.frame(0).evaluate("y").value());
      // From breakPointCount === 1 and later, it's not possible to access
      // earlier framestates.
      assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
      assertEquals(
          "const outer", exec_state.frame(0).evaluate("constOuter").value());
      assertEquals(
          "let outer", exec_state.frame(0).evaluate("letOuter").value());
      // Mutate through debug evaluation; test() asserts these writes after
      // the with-block finishes.
      exec_state.frame(0).evaluate("x = `x later(${x})`");
      exec_state.frame(0).evaluate("y = `y later(${y})`");
      exec_state.frame(0).evaluate("z = `ZEE`");
    } else if (breakPointCount === 3) {
      assertEquals(
          "inner", exec_state.frame(0).evaluate("inner").value());
      assertEquals(
          "let inner", exec_state.frame(0).evaluate("letInner").value());
      assertEquals(
          "const inner", exec_state.frame(0).evaluate("constInner").value());
    } else if (breakPointCount === 4) {
      assertEquals(
          "oop", exec_state.frame(0).evaluate("error.message").value());
      assertEquals(
          "Error",
          exec_state.frame(0).evaluate("error.constructor.name").value());
      assertEquals("floof", exec_state.frame(0).evaluate("bun").value());
      // Variables in TDZ have 'undefined' as their values.
      // (Fixed: statement was missing its terminating semicolon.)
      assertEquals(undefined, exec_state.frame(0).evaluate("cow").value());
      assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
      assertEquals(
          "const outer", exec_state.frame(0).evaluate("constOuter").value());
      assertEquals(
          "let outer", exec_state.frame(0).evaluate("letOuter").value());
    }
  } catch (e) {
    // Print and hard-exit: exceptions thrown inside a debug listener would
    // otherwise be swallowed and let the test pass vacuously.
    print(e.stack);
    quit(1);
  }
}
// Install the listener before any `debugger` statement can fire.
Debug.setListener(listener);
// Script-scope bindings observed from the breakpoints above; the test
// deliberately mixes var/const/let to cover each binding kind.
var outer = "outer";
const constOuter = "const outer";
let letOuter = "let outer";  // fixed: missing semicolon
// Produces a promise that rejects with Error("oop"); awaited inside the
// try/catch of test() to drive the fourth breakpoint.
async function thrower() {
  const failure = new Error("oop");
  return Promise.reject(failure);
}
// Resolves with the plain object that test() uses as its `with` scope.
async function testLater() {
  const scopeObject = { x: 57, y: 100 };
  return scopeObject;
}
// Drives the four breakpoints checked by listener(). The body is sloppy
// mode, so the `with` statement below is legal.
async function test() {
  var inner = "inner";
  // Break 1: letInner/constInner/withVar are all still in their TDZ here.
  debugger;
  let withVar = await testLater();
  with (withVar) {
    // Break 2: the listener rewrites x and y and assigns z via debug
    // evaluation while paused inside this with-scope.
    debugger;
  }
  // The debug-evaluate writes to x and y resolved to properties of withVar...
  assertEquals("x later(57)", withVar.x);
  assertEquals("y later(100)", withVar.y);
  // ...but z was not a property of withVar, so that assignment fell through
  // to an outer scope: absent on the object, yet visible as `z` here.
  assertEquals(undefined, withVar.z);
  assertEquals("ZEE", z);
  let letInner = "let inner";
  const constInner = "const inner";
  // Break 3: letInner/constInner are initialized by now.
  debugger;
  try {
    await thrower();
  } catch (error) {
    const bun = "floof";
    // Break 4: `cow` below is still in its TDZ at this point.
    debugger;
    let cow = "moo";
  }
}
// Run the test; tear the listener down on success. On failure, detach the
// listener BEFORE quitting -- the original placed Debug.setListener(null)
// after quit(1), making it unreachable -- then exit nonzero so the harness
// records the failure.
test()
    .then(() => {
      Debug.setListener(null);
    })
    .catch((error) => {
      print(error.stack);
      Debug.setListener(null);
      quit(1);
    });
```
|
Slebech was a community (prior to 1974, a civil parish) in Pembrokeshire, Wales, which is now part of the combined community of Uzmaston and Boulston and Slebech, a sparsely populated community on the northern shore of the Eastern River Cleddau. The community shares boundaries with the communities of Wiston and Llawhaden and mainly consists of farmland and woodland. Much of the community is within the Pembrokeshire Coast National Park and Picton Castle's stable block loft is an important breeding roost for the rare Greater Horseshoe Bat.
History
Slebech is situated on the upper Eastern Cleddau and was once part of the Barony of Daugleddau. In the Middle Ages Slebech belonged to the Knights Hospitallers of the Order of St John and the original church on the bank of the river was established in 1161, together with a commandery which became the headquarters of the order in West Wales. After the Dissolution of the Monasteries by Henry VIII the lands passed to the Barlow family.
Roger Barlow (c.1483-1553) was born in Essex, in or near Colchester, where his father was a customs official. After becoming a merchant in Seville, Barlow joined Sebastian Cabot's 1526 voyage to South America. He accompanied Cabot up the Rio de la Plata (River Plate) river system. He returned to England in 1530 and lived in Bristol, where he married Julyan Dawes. He moved to Pembrokeshire in 1535. In 1542 he presented a cosmography to Henry VIII, based on a translation of Enciso's Spanish Suma de Geographie. This included Barlow's descriptions of his travels - the first account of the New World in English. Roger Barlow had three younger brothers, William Barlow (successively bishop of St David's, Bath and Wells, and Chichester), John Barlow (dean of Worcester), and Thomas Barlow (a cleric in Norfolk). After renting the dissolved commandery of the hospitallers of St John of Jerusalem at Slebech, Roger and Thomas Barlow bought Slebech in 1546, and then Roger became the sole owner in 1553. Barlow had at least 10 children, including John who inherited Slebech.
Picton Castle
The estates, gardens and parkland of Picton Castle was once part of the larger Manor of Wiston, but had become a separate holding, replacing Wiston Castle by the 13th century. Picton Castle began as a motte castle and was reconstructed in stone by the Wogan family during the 13th century. In 1405 French troops supporting Owain Glyndŵr attacked and held the Castle, and it was seized again during the English Civil War in 1645 by Parliamentary forces.
The Picton Castle estate was acquired by the Phillips family when Sir Thomas ap Philip of Cilsant married Jane, daughter and heiress of Sir Henry Dwnn, of Picton in the 1490s. Sir John Philipps, who inherited the castle in the 15th century, remodelled the building and created a new entrance which remained until the 1820s when a new entrance was designed by Thomas Rowlands (who also designed Slebech Church).
The estate remained with the Phillips family until the death of Lord Milford, in 1823, when it was inherited by his cousin (through female lines) Richard Grant, who assumed the surname Philipps and was created a Baronet in 1828 and Baron Milford in 1847. His heir was his half-brother, the Reverend James Henry Alexander Philipps (formerly Gwyther), who assumed by royal licence the surname and arms of Philipps. On his death the estate passed to his son-in-law, Charles Edward Gregg Philipps, who was created a Baronet in 1887 (see Philipps Baronets), then to Sir Richard Foley Foley-Philipps, cousin of Sir John Erasmus, and grandson of Charles Edward Gregg Philipps. Now run by the Picton Castle Trust, the present owner, Jeremy Philipps, lives in a lodge in the grounds.
Slebech Park Estate
Slebech Park developed from estates belonging to the Knights Hospitaller and their Commandery at Slebech Church. After the dissolution the Barlow family built Slebech Mansion near the site of the Commandery, and established Slebech Park. The Hall is a grade II* listed building and its stable block is grade II listed.
After the death of George Barlow in 1757, having no son the land passed to his daughter Anne, who married William Trevanion of Cornwall and after his death, John Symmons of Llanstinan. Symmons sold the estate to William Knox of London, High Sheriff of Pembrokeshire for 1786, who in turn sold it to Nathaniel Phillips (High Sheriff for 1796).
Nathaniel Phillips was born in England in 1733, the illegitimate son of a merchant trading between London and Kingston, Jamaica. Following his father he arrived at Kingston in April 1759 and used his father's connections to join a partnership with the Kingston merchants who owned sugar plantations which supported the slave trade to obtain workers. Over twenty-five years he built a fortune, and his Jamaican properties were valued at £160,000 in Jamaican currency, as well as ownership of 706 slaves valued at £50,000. In 1793, he bought the estate at Slebech from a bankrupt slaver. As well as Slebech Hall, which he had re-modelled by Anthony Keck, Phillips bought extensive park land and woodland. In 1796 he married Mary, a Philipps forty years younger than him, and had two sons (Nathaniel and Edward Augustus) and two daughters (Mary Dorothea and Louisa Catharine). After his death, Phillips' heirs continued to operate the Jamaican estates but they became unprofitable after the end of slavery in 1834.
After the death of Edward, the estate passed to Mary Dorothea and her sister, Louisa Catherine, the Countess of Lichfield, as co-heiress. In 1821 Mary Dorothea met Charles Frederick Baron de Rutzen of Germany in Rome. A Polish nobleman and descendant of Field Marshal Potemkin, they married in 1822 and became Lords of the Manors of Slebech. Their eldest son, Baron Frederick Leopold Sapieha Manteuffel (High Sheriff of Pembrokeshire for 1871), died and the estate passed to his younger brother, Baron Rudolph William Henry Ehrard (High Sheriff for 1895), who was succeeded by his nephew, Alan Frederick James. Their third son, Albert Richard Francis Maximilien married Horatia Augusta Stepney Gulston, of Carmarthenshire and their eldest son, Alan Frederick James married Eleanor Etna Audley Thursby Pelham, in 1908. Lieutenant-Colonel Augustus Henry Archibald Anson VC MP, (5 March 1835 – 17 November 1877), recipient of the Victoria Cross during the Crimean War, was born at Slebech Hall. John Frederick Foley de Rutzen married Sheila Victoria Katrin Philipps, of Picton Castle, and their only child, Victoria Anne Elizabeth Gwynne de Rutzen, married Sir Francis Dashwood of West Wycombe Park.
Their descendants managed both estates and in 2003 Geoffrey and Georgina Philipps developed the large stable block into a luxury hotel. It was the first project in Pembrokeshire to attract European Objective One funding and was also funded by the Welsh Development Agency (WDA) and the Wales Tourist Board. The Phillips family left the estate in 2014, which continues to operate as a hotel. Slebech Hall, however, is largely abandoned.
Geology
On the northern bank of the Eastern Cleddau, the foreshore is of mud, marsh and rocks. Slebech has extensive deciduous woodland and open farmland. Fields are large and regular and are divided by earth banks topped with hedges. Agriculture land use is improved pasture with a small proportion of arable crops.
The Rhos village
The only settlement of any size is the small hamlet of The Rhos, with a population of 25 people. Community services and facilities are limited, with a small church hall. The Rhos village is located along one street which once included a primary school and post office. The old school building is now a private residence. Originally known as 'Slebech and Picton Castle School', it later became Slebech Voluntary Controlled School and operated from 1866 with support from the Philipps family of Picton Castle. It was taken over by the Education Authority in the 1930s and closed in 1985, with only 11 pupils on the register.
Listing designations
Grade II listed Slebech Park is one of a total of 25 listed buildings in Slebech with the main ones as follows:
The Church of St John the Baptist is a Grade II listed building which was consecrated in 1848 as Slebech Parish Church in place of the older Parish Church. It was designed by Thomas Rowlands of Haverfordwest and paid for by Baron de Rutzen with contributions from Queen Adelaide. The church was deconsecrated in 1990 due to subsidence.
The Stable Block at Slebech Park is Grade II listed. A rectangular block 40m by 60m built of local rubble stone masonry with quoins of limestone. An unusual feature is that crenellated parapets hide slate roofs. The stables have an octagonal clock-tower with a weathervane. The stable block loft is also an important breeding roost for the rare Greater Horseshoe Bat (Rhinolophus ferrumequinum) and numbers have been recorded at Slebech since 1983.
Blackpool Bridge is Grade II listed and located to the east of Blackpool Mill to cross the River Cleddau. A single-span bridge, it was built about 1825 for the de Rutzen family of coursed, undressed stone, with two carved external panels on either side and dressed stone edge on the rim of the arch. To the south of the bridge are stone piers topped by ball finials.
The park is designated Grade II* on the Cadw/ICOMOS Register of Parks and Gardens of Special Historic Interest in Wales.
Scheduled Ancient Monuments
Remains of the old church of St John the Baptist (PEM 275). Located between the mansion of Slebech and the river, this is a ruin with only the main walls surviving. Records show that in 1766 the ceiling fell down and workmen were paid for the job of `stripping the church.' The owner, Baron de Rutzen, built a replacement church and stripped the rest of the roof in 1844, partly to stop worshipers coming on to his land. Burial mounds on the island to the east of the church (PEM 276) are also scheduled ancient monuments, one is known locally as Dog Island because it is where Slebech Park owners have buried their pets over the past hundred years.
References
External links
Plans of St Johns Church
Photos of interior of old derelict Slebech church
Historical information and sources on GENUKI
Villages in Pembrokeshire
Former communities in Pembrokeshire
Registered historic parks and gardens in Pembrokeshire
|
Yung Pi-hock (, born 4 November 1930) is a Taiwanese former basketball player. He competed as part of the Republic of China's squad at the 1956 Summer Olympics.
References
External links
1930 births
Possibly living people
Taiwanese men's basketball players
Olympic basketball players for Taiwan
Basketball players at the 1956 Summer Olympics
Basketball players at the 1954 Asian Games
Asian Games medalists in basketball
Asian Games silver medalists for Chinese Taipei
Medalists at the 1954 Asian Games
Republic of China men's national basketball team players
|
```java
/*
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package org.apache.shardingsphere.sharding.route.engine.condition.generator.impl;
import com.google.common.collect.Range;
import org.apache.shardingsphere.sharding.route.engine.condition.Column;
import org.apache.shardingsphere.sharding.route.engine.condition.value.ListShardingConditionValue;
import org.apache.shardingsphere.sharding.route.engine.condition.value.RangeShardingConditionValue;
import org.apache.shardingsphere.sharding.route.engine.condition.value.ShardingConditionValue;
import org.apache.shardingsphere.sql.parser.statement.core.segment.dml.column.ColumnSegment;
import org.apache.shardingsphere.sql.parser.statement.core.segment.dml.expr.BinaryOperationExpression;
import org.apache.shardingsphere.sql.parser.statement.core.segment.dml.expr.complex.CommonExpressionSegment;
import org.apache.shardingsphere.sql.parser.statement.core.segment.dml.expr.simple.LiteralExpressionSegment;
import org.apache.shardingsphere.sql.parser.statement.core.segment.dml.expr.simple.ParameterMarkerExpressionSegment;
import org.apache.shardingsphere.sql.parser.statement.core.value.identifier.IdentifierValue;
import org.apache.shardingsphere.timeservice.core.rule.TimestampServiceRule;
import org.junit.jupiter.api.Test;
import java.util.Collections;
import java.util.LinkedList;
import java.util.Optional;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
class ConditionValueCompareOperatorGeneratorTest {
    
    private final ConditionValueCompareOperatorGenerator generator = new ConditionValueCompareOperatorGenerator();
    
    private final Column column = new Column("id", "tbl");
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValue() {
        // `=` with a literal right operand yields a list condition holding that literal.
        int literal = 1;
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, literal), "=", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(((ListShardingConditionValue<Integer>) actual.get()).getValues().contains(literal));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateNullConditionValue() {
        // `= NULL` still produces a list condition, containing a null value.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, null), "=", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(((ListShardingConditionValue<Integer>) actual.get()).getValues().contains(null));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithLessThanOperator() {
        // `<` maps onto an open upper-bounded range.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), "<", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(Range.lessThan(1).encloses(((RangeShardingConditionValue<Integer>) actual.get()).getValueRange()));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @Test
    void assertGenerateNullConditionValueWithLessThanOperator() {
        // A null operand cannot form a range, so no condition is generated.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, null), "<", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertFalse(actual.isPresent());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithGreaterThanOperator() {
        // `>` maps onto an open lower-bounded range.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), ">", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(Range.greaterThan(1).encloses(((RangeShardingConditionValue<Integer>) actual.get()).getValueRange()));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithAtMostOperator() {
        // `<=` maps onto a closed upper-bounded range.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), "<=", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(Range.atMost(1).encloses(((RangeShardingConditionValue<Integer>) actual.get()).getValueRange()));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithAtLeastOperator() {
        // `>=` maps onto a closed lower-bounded range.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), ">=", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertTrue(Range.atLeast(1).encloses(((RangeShardingConditionValue<Integer>) actual.get()).getValueRange()));
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @Test
    void assertGenerateConditionValueWithErrorOperator() {
        // Unsupported operators such as `!=` produce no sharding condition.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), "!=", null);
        assertFalse(generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class)).isPresent());
    }
    
    @Test
    void assertGenerateConditionValueWithoutNowExpression() {
        // A non-literal common expression cannot be turned into a condition value.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new CommonExpressionSegment(0, 0, "value"), "=", null);
        assertFalse(generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class)).isPresent());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithNowExpression() {
        // The literal "now()" is resolved through the timestamp service into a concrete value.
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, "now()"), "=", null);
        Optional<ShardingConditionValue> actual = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertTrue(actual.isPresent());
        assertFalse(((ListShardingConditionValue<Integer>) actual.get()).getValues().isEmpty());
        assertTrue(actual.get().getParameterMarkerIndexes().isEmpty());
    }
    
    @SuppressWarnings("unchecked")
    @Test
    void assertGenerateConditionValueWithParameter() {
        // A parameter marker pulls its value from the parameter list and records its index.
        ColumnSegment columnSegment = new ColumnSegment(0, 0, new IdentifierValue("id"));
        ParameterMarkerExpressionSegment parameterMarker = new ParameterMarkerExpressionSegment(0, 0, 0);
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, columnSegment, parameterMarker, "=", "id = ?");
        Optional<ShardingConditionValue> generated = generator.generate(expression, column, Collections.singletonList(1), mock(TimestampServiceRule.class));
        assertTrue(generated.isPresent());
        assertThat(generated.get(), instanceOf(ListShardingConditionValue.class));
        ListShardingConditionValue<Integer> listConditionValue = (ListShardingConditionValue<Integer>) generated.get();
        assertThat(listConditionValue.getTableName(), is("tbl"));
        assertThat(listConditionValue.getColumnName(), is("id"));
        assertThat(listConditionValue.getValues(), is(Collections.singletonList(1)));
        assertThat(listConditionValue.getParameterMarkerIndexes(), is(Collections.singletonList(0)));
    }
    
    @Test
    void assertGenerateConditionValueWithoutParameter() {
        // A parameter marker with no matching parameter yields no condition.
        ColumnSegment columnSegment = new ColumnSegment(0, 0, new IdentifierValue("order_id"));
        ParameterMarkerExpressionSegment parameterMarker = new ParameterMarkerExpressionSegment(0, 0, 0);
        BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, columnSegment, parameterMarker, "=", "order_id = ?");
        Optional<ShardingConditionValue> generated = generator.generate(expression, column, new LinkedList<>(), mock(TimestampServiceRule.class));
        assertFalse(generated.isPresent());
    }
}
```
|
La Thébaïde (The Thebaid, The Thebans or The Theban Brothers) is a tragedy in five acts (with respectively 6, 4, 6, 3 and 6 scenes) in verse by Jean Racine, first presented, without much success, on June 20, 1664, at the Palais-Royal in Paris. The twin brothers Eteocles and Polynices, along with their sister Antigone, were children born of the incestuous marriage of the Theban king Oedipus and his mother Jocasta. The play depicts the struggle and death of the young sons of Oedipus, as well as that of Antigone. This subject had already occupied many authors before Racine. Thus, the young playwright, still fairly inexperienced, drew particularly from the Antigone of Sophocles, the Phoenician Women of Euripides, but especially the Antigone of Jean Rotrou and the tragedies of Pierre Corneille.
This ancient Theban drama attracted great interest among 17th century French writers. The young Racine drew principally upon sources from Sophocles and Euripides, as well as the Antigone of Rotrou, and the Oedipus of Pierre Corneille. Molière may also have assisted in the play's composition.
Plot
The plot is the same as the rest of the Theban plays and poems, in which Eteocles and Polynices, the two warring brothers, fight fiercely, despite the entreaties of their mother, Jocasta and Antigone, their sister, and their two cousins, Menoeceus and Haemon son of Creon. All these characters without exception are killed. Some kill themselves or die of grief. Their characters are quite weakly drawn, Eteocles and Polynices are monotonously violent, Jocasta tired by their declamations, and Creon is a cynical traitor.
Analysis
Traditional scholarship saw limited merit in the play, deeming it an only partially successful work of a still maturing dramatist. In his groundbreaking work On Racine, however, Roland Barthes treats the play as seriously as Racine's greatest Greek dramas (including Phèdre and Iphigénie). Since Barthes, recent scholarship has shown greater interest, exploring, for example, power relationships driving the action, and, more broadly, fundamental problems of political philosophy that arise with respect to the legitimacy of the modern state.
References
Further reading
Hochman, Stanley, editor (1984). McGraw-Hill Encyclopedia of World Drama (second edition, 5 volumes). New York: McGraw-Hill. .
External links
The Thebaïd; or, The Brothers at War (translated by Robert Bruce Boswell, 1890) at Internet Archive
Plays by Jean Racine
1664 plays
Plays set in ancient Greece
Tragedy plays
Plays based on Antigone (Sophocles play)
Plays based on works by Euripides
|
```java
/*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package org.apache.beam.sdk.io.gcp.bigquery;
import com.google.api.services.bigquery.model.TableRow;
import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;
import javax.annotation.Nullable;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionTuple;
import org.apache.beam.sdk.values.TupleTag;
import org.apache.beam.sdk.values.TupleTagList;
/**
* A transform to write sharded records to BigQuery using the Storage API. This transform uses the
* default stream to write the records. Records written will show up in BigQuery immediately,
* however exactly once is not guaranteed - duplicates may appear in the output. For exactly-once
* writes, use {@link StorageApiWritesShardedRecords} or {@link StorageApiWriteUnshardedRecords}.
*/
@SuppressWarnings("FutureReturnValueIgnored")
public class StorageApiWriteRecordsInconsistent<DestinationT, ElementT>
extends PTransform<PCollection<KV<DestinationT, StorageApiWritePayload>>, PCollectionTuple> {
// Resolves the target table/schema for each element at write time.
private final StorageApiDynamicDestinations<ElementT, DestinationT> dynamicDestinations;
// Provides the BigQuery API clients used by the write DoFn.
private final BigQueryServices bqServices;
// Receives rows that failed to append.
private final TupleTag<BigQueryStorageApiInsertError> failedRowsTag;
// Optional output for successfully written rows; may be null when the
// caller did not request them.
private final @Nullable TupleTag<TableRow> successfulRowsTag;
// Main output tag of the write DoFn; not consumed anywhere in this
// transform (the default stream needs no explicit finalization here).
private final TupleTag<KV<String, String>> finalizeTag = new TupleTag<>("finalizeTag");
private final Coder<BigQueryStorageApiInsertError> failedRowsCoder;
private final Coder<TableRow> successfulRowsCoder;
private final boolean autoUpdateSchema;
private final boolean ignoreUnknownValues;
private final BigQueryIO.Write.CreateDisposition createDisposition;
// NOTE(review): presumably the Cloud KMS key applied when tables are
// created -- confirm against WriteRecordsDoFn; it is only forwarded here.
private final @Nullable String kmsKey;
// Whether change-data-capture style writes are in use.
private final boolean usesCdc;
private final AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation;
/**
* Creates the transform. All arguments are stored verbatim and forwarded to
* {@link StorageApiWriteUnshardedRecords.WriteRecordsDoFn} in {@link #expand}.
*/
public StorageApiWriteRecordsInconsistent(
StorageApiDynamicDestinations<ElementT, DestinationT> dynamicDestinations,
BigQueryServices bqServices,
TupleTag<BigQueryStorageApiInsertError> failedRowsTag,
@Nullable TupleTag<TableRow> successfulRowsTag,
Coder<BigQueryStorageApiInsertError> failedRowsCoder,
Coder<TableRow> successfulRowsCoder,
boolean autoUpdateSchema,
boolean ignoreUnknownValues,
BigQueryIO.Write.CreateDisposition createDisposition,
@Nullable String kmsKey,
boolean usesCdc,
AppendRowsRequest.MissingValueInterpretation defaultMissingValueInterpretation) {
this.dynamicDestinations = dynamicDestinations;
this.bqServices = bqServices;
this.failedRowsTag = failedRowsTag;
this.failedRowsCoder = failedRowsCoder;
this.successfulRowsCoder = successfulRowsCoder;
this.successfulRowsTag = successfulRowsTag;
this.autoUpdateSchema = autoUpdateSchema;
this.ignoreUnknownValues = ignoreUnknownValues;
this.createDisposition = createDisposition;
this.kmsKey = kmsKey;
this.usesCdc = usesCdc;
this.defaultMissingValueInterpretation = defaultMissingValueInterpretation;
}
@Override
public PCollectionTuple expand(PCollection<KV<DestinationT, StorageApiWritePayload>> input) {
String operationName = input.getName() + "/" + getName();
// Thresholds and client counts come from pipeline options rather than
// construction-time parameters.
BigQueryOptions bigQueryOptions = input.getPipeline().getOptions().as(BigQueryOptions.class);
// Append records to the Storage API streams.
// The successful-rows output is only wired up when a tag was provided.
TupleTagList tupleTagList = TupleTagList.of(failedRowsTag);
if (successfulRowsTag != null) {
tupleTagList = tupleTagList.and(successfulRowsTag);
}
// NOTE(review): the bare `true` below is positional -- verify its meaning
// against the WriteRecordsDoFn constructor before changing argument order.
PCollectionTuple result =
input.apply(
"Write Records",
ParDo.of(
new StorageApiWriteUnshardedRecords.WriteRecordsDoFn<>(
operationName,
dynamicDestinations,
bqServices,
true,
bigQueryOptions.getStorageApiAppendThresholdBytes(),
bigQueryOptions.getStorageApiAppendThresholdRecordCount(),
bigQueryOptions.getNumStorageWriteApiStreamAppendClients(),
finalizeTag,
failedRowsTag,
successfulRowsTag,
autoUpdateSchema,
ignoreUnknownValues,
createDisposition,
kmsKey,
usesCdc,
defaultMissingValueInterpretation,
bigQueryOptions.getStorageWriteApiMaxRetries()))
.withOutputTags(finalizeTag, tupleTagList)
.withSideInputs(dynamicDestinations.getSideInputs()));
// Coders cannot be inferred for the additional outputs, so set them here.
result.get(failedRowsTag).setCoder(failedRowsCoder);
if (successfulRowsTag != null) {
result.get(successfulRowsTag).setCoder(successfulRowsCoder);
}
return result;
}
}
```
|
```matlab
function two_d_grad_wrapper_hw()
% two_d_grad_wrapper_hw.m is a toy wrapper to illustrate the path
% taken by gradient descent depending on the learning rate (alpha) chosen.
% Here alpha is kept fixed and chosen by the user. The corresponding
% gradient steps, evaluated at the objective, are then plotted. The plotted
% points on the objective turn from green to red as the algorithm converges
% (or reaches a maximum iteration count, preset to 50).
%
% (nonconvex) function here is
% g(w) = -cos(2*pi*w'*w) + 2*w'*w
%
% This file is associated with the book
% "Machine Learning Refined", Cambridge University Press, 2016.
% by Jeremy Watt, Reza Borhani, and Aggelos Katsaggelos.

%%% runs everything %%%
run_all()

%%%%%%%%%%%% subfunctions %%%%%%%%%%%%

%%% performs gradient descent steps %%%%
function [w,in,out] = gradient_descent(alpha,w)
    % initializations
    grad_stop = 10^-5;   % stop once the gradient norm falls below this
    max_its = 50;        % hard cap on iteration count
    iter = 1;
    grad = 1;            % dummy value so the loop body runs at least once
    in = [w];            % history of iterates
    out = [-cos(2*pi*(w'*w)) + 2*(w'*w)];   % objective value at each iterate

    % main loop
    while norm(grad) > grad_stop && iter <= max_its
        % gradient of g(w) = -cos(2*pi*w'*w) + 2*w'*w via the chain rule:
        %   grad g(w) = (4*pi*sin(2*pi*w'*w) + 4)*w
        % (this fills in the "----> grad =" placeholder of the original)
        grad = (4*pi*sin(2*pi*(w'*w)) + 4)*w;

        % take gradient step
        w = w - alpha*grad;

        % update containers
        in = [in, w];
        out = [out, -cos(2*pi*(w'*w)) + 2*(w'*w)];

        % update stoppers
        iter = iter + 1;
    end
end

function run_all()
    % dials for the toy
    alpha = 10^-2;    % step length/learning rate (for gradient descent)
    for j = 1:2
        x0 = [-.7;0];    % initial point (for gradient descent)
        if j == 2
            % second run: different start point and smaller step length
            x0 = [.85;.85];
            alpha = 3*10^-3;
        end
        %%% perform gradient descent %%%
        [x,in,out] = gradient_descent(alpha,x0);
        %%% plot function with grad descent objective evaluations %%%
        hold on
        plot_it_all(in,out)
    end
end

%%% plots everything %%%
function plot_it_all(in,out)
    % draw the objective surface
    [A,b] = make_fun();
    % draw the iterates on top of the surface
    plot_steps(in,out,3)
    set(gcf,'color','w');
end

%%% builds and renders the objective surface %%%
function [A,b] = make_fun()
    range = 1.15;    % range over which to view surfaces
    [a1,a2] = meshgrid(-range:0.04:range);
    a1 = reshape(a1,numel(a1),1);
    a2 = reshape(a2,numel(a2),1);
    A = [a1, a2];
    A = (A.*A)*ones(2,1);        % A now holds w'*w for each grid point
    b = -cos(2*pi*A) + 2*A;      % objective values on the grid
    r = sqrt(size(b,1));
    a1 = reshape(a1,r,r);
    a2 = reshape(a2,r,r);
    b = reshape(b,r,r);
    h = surf(a1,a2,b);           % semicolon added: suppress handle echo
    az = 35;
    el = 60;
    view(az, el);
    shading interp
    xlabel('w_1','Fontsize',18,'FontName','cmmi9')
    ylabel('w_2','Fontsize',18,'FontName','cmmi9')
    zlabel('g','Fontsize',18,'FontName','cmmi9')
    set(get(gca,'ZLabel'),'Rotation',0)
    set(gca,'FontSize',12);
    box on
    colormap gray
end

% plot descent steps on function surface (color runs green -> red over iterations)
function plot_steps(in,out,dim)
    s = (1/length(out):1/length(out):1)';
    colorspec = [s.^(1),flipud(s) ,zeros(length(out),1)];
    width = (1 + s)*5;           % marker size grows over iterations
    if dim == 2
        for i = 1:length(out)
            hold on
            plot(in(1,i),in(2,i),'o','Color',colorspec(i,:),'MarkerFaceColor',colorspec(i,:),'MarkerSize',width(i));
        end
    else % dim == 3
        for i = 1:length(out)
            hold on
            plot3(in(1,i),in(2,i),out(i),'o','Color',colorspec(i,:),'MarkerFaceColor',colorspec(i,:),'MarkerSize',width(i));
        end
    end
end
end
```
|
```c
/*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* specific language governing permissions and limitations
*/
#ifndef GUAC_VNC_VNC_H
#define GUAC_VNC_VNC_H
#include "config.h"
#include "common/clipboard.h"
#include "common/display.h"
#include "common/iconv.h"
#include "common/surface.h"
#include "display.h"
#include "settings.h"
#include <guacamole/client.h>
#include <guacamole/layer.h>
#include <rfb/rfbclient.h>
#ifdef ENABLE_PULSE
#include "pulse/pulse.h"
#endif
#ifdef ENABLE_COMMON_SSH
#include "common-ssh/sftp.h"
#include "common-ssh/ssh.h"
#include "common-ssh/user.h"
#endif
#include <guacamole/recording.h>
#include <pthread.h>
/**
 * VNC-specific client data.
 */
typedef struct guac_vnc_client {
/**
 * The VNC client thread.
 */
pthread_t client_thread;
#ifdef ENABLE_VNC_TLS_LOCKING
/**
 * The TLS mutex lock for the client. Present only when built with
 * ENABLE_VNC_TLS_LOCKING.
 */
pthread_mutex_t tls_lock;
#endif
/**
 * Lock which synchronizes messages sent to VNC server.
 */
pthread_mutex_t message_lock;
/**
 * The underlying VNC client.
 */
rfbClient* rfb_client;
/**
 * The original framebuffer malloc procedure provided by the initialized
 * rfbClient.
 */
MallocFrameBufferProc rfb_MallocFrameBuffer;
/**
 * Whether copyrect was used to produce the latest update received
 * from the VNC server.
 */
int copy_rect_used;
/**
 * Client settings, parsed from args.
 */
guac_vnc_settings* settings;
/**
 * The current display state.
 */
guac_common_display* display;
/**
 * Internal clipboard.
 */
guac_common_clipboard* clipboard;
#ifdef ENABLE_PULSE
/**
 * PulseAudio output, if any.
 */
guac_pa_stream* audio;
#endif
#ifdef ENABLE_COMMON_SSH
/**
 * The user and credentials used to authenticate for SFTP.
 */
guac_common_ssh_user* sftp_user;
/**
 * The SSH session used for SFTP.
 */
guac_common_ssh_session* sftp_session;
/**
 * An SFTP-based filesystem.
 */
guac_common_ssh_sftp_filesystem* sftp_filesystem;
#endif
/**
 * The in-progress session recording, or NULL if no recording is in
 * progress.
 */
guac_recording* recording;
/**
 * Clipboard encoding-specific reader.
 */
guac_iconv_read* clipboard_reader;
/**
 * Clipboard encoding-specific writer.
 */
guac_iconv_write* clipboard_writer;
} guac_vnc_client;
/**
 * Allocates a new rfbClient instance given the parameters stored within the
 * client, returning NULL on failure.
 *
 * @param client
 *     The guac_client associated with the settings of the desired VNC
 *     connection.
 *
 * @return
 *     A new rfbClient instance allocated and connected according to the
 *     parameters stored within the given client, or NULL if connecting to the
 *     VNC server fails.
 */
rfbClient* guac_vnc_get_client(guac_client* client);
/**
 * VNC client thread. This thread initiates the VNC connection and ultimately
 * runs throughout the duration of the client, existing as a single instance,
 * shared by all users.
 *
 * @param data
 *     The guac_client instance associated with the requested VNC connection.
 *
 * @return
 *     Always NULL.
 */
void* guac_vnc_client_thread(void* data);
/**
 * Key which can be used with the rfbClientGetClientData function to return
 * the associated guac_client.
 */
extern char* GUAC_VNC_CLIENT_KEY;
#endif
```
|
```c++
//===your_sha256_hash------===//
//
// See path_to_url for license information.
//
//
// This file implements the functionality associated with the terminate_handler,
// unexpected_handler, and new_handler.
//===your_sha256_hash------===//
#include <stdexcept>
#include <new>
#include <exception>
#include "abort_message.h"
#include "cxxabi.h"
#include "cxa_handlers.h"
#include "cxa_exception.h"
#include "private_typeinfo.h"
#include "include/atomic_support.h" // from libc++
namespace std
{
// Returns the currently installed unexpected_handler. The load uses acquire
// ordering so a handler published by another thread is fully visible here.
// __cxa_unexpected_handler is defined elsewhere in libc++abi.
unexpected_handler
get_unexpected() noexcept
{
return __libcpp_atomic_load(&__cxa_unexpected_handler, _AO_Acquire);
}
// Invokes the given unexpected handler. A conforming handler must not
// return; if it does, the process is aborted with a diagnostic.
void
__unexpected(unexpected_handler func)
{
func();
// unexpected handler should not return
abort_message("unexpected_handler unexpectedly returned");
}
// std::unexpected(): dispatches to the currently installed handler.
__attribute__((noreturn))
void
unexpected()
{
__unexpected(get_unexpected());
}
// Returns the currently installed terminate_handler (acquire load, as above).
terminate_handler
get_terminate() noexcept
{
return __libcpp_atomic_load(&__cxa_terminate_handler, _AO_Acquire);
}
// Invokes the given terminate handler. The handler must neither return nor
// throw; either event aborts the process with an explanatory message. The
// try/catch wrapper is compiled out when exceptions are disabled.
void
__terminate(terminate_handler func) noexcept
{
#ifndef _LIBCXXABI_NO_EXCEPTIONS
try
{
#endif // _LIBCXXABI_NO_EXCEPTIONS
func();
// handler should not return
abort_message("terminate_handler unexpectedly returned");
#ifndef _LIBCXXABI_NO_EXCEPTIONS
}
catch (...)
{
// handler should not throw exception
abort_message("terminate_handler unexpectedly threw an exception");
}
#endif // _LIBCXXABI_NO_EXCEPTIONS
}
// std::terminate(): if termination happens while an exception of our ABI is
// in flight, prefer the terminate handler captured in that exception's
// header; otherwise fall back to the globally installed handler.
__attribute__((noreturn))
void
terminate() noexcept
{
#ifndef _LIBCXXABI_NO_EXCEPTIONS
// If there might be an uncaught exception
using namespace __cxxabiv1;
__cxa_eh_globals* globals = __cxa_get_globals_fast();
if (globals)
{
__cxa_exception* exception_header = globals->caughtExceptions;
if (exception_header)
{
// Recover the _Unwind_Exception embedded at the end of the
// __cxa_exception header: advance past the header, then step back
// one _Unwind_Exception.
_Unwind_Exception* unwind_exception =
reinterpret_cast<_Unwind_Exception*>(exception_header + 1) - 1;
if (__isOurExceptionClass(unwind_exception))
__terminate(exception_header->terminateHandler);
}
}
#endif
__terminate(get_terminate());
}
// Returns the currently installed new_handler (acquire load, as above).
new_handler
get_new_handler() noexcept
{
return __libcpp_atomic_load(&__cxa_new_handler, _AO_Acquire);
}
} // std
```
|
Clement Stephenson (6 February 1890 – 24 October 1961) was an English footballer whose 20-year career at Aston Villa and Huddersfield Town included success in both the FA Cup and League Championship. Stephenson's place in history as an inside forward was assured when Herbert Chapman targeted him as the man to lead Huddersfield Town's challenge for three consecutive Football League titles in the 1920s, he also made a single appearance for England in that period.
Playing career
Aston Villa
Born in Blyth, Northumberland, Stephenson originally played for Aston Villa in 1910 as an inside forward; his career at Villa Park totalling 216 matches, from which he scored a reasonable 85 goals. He was brought into Villa Park as Villa legend Harry Hampton's career was nearing its end and soon acquired a reputation for intuitive play: his passes were said to be "as sweet as stolen kisses". His first silverware came in the 1913 FA Cup Final alongside Hampton and England national team player Joe Bache in the victory over League Champions Sunderland; the only occasion in English football history when the FA Cup Final was contested between the top two clubs in The Football League. Stephenson claimed before the 1913 FA Cup final to have dreamt that Villa would beat Sunderland with a headed goal from Tommy Barber. Villa did indeed win that final with a headed goal from Tommy Barber. Stephenson was to play in the semi-final the following year and was a winner in the FA Cup final in 1920 in what proved a fitting swansong for the club.
During World War I Stephenson had guested for Leeds City, a club managed by Herbert Chapman, who, typically, saw qualities, beyond Stephenson's well-renowned lack of pace, that would serve the player in his later career. It was as a result of making irregular payments to such Wartime guest players as Stephenson (Chapman had secured the services of Charlie Buchan of Sunderland, Franny Walden of Tottenham Hotspur, and Billy Hampson of Newcastle United ) that Chapman's career at Leeds City came to a questionable end and led to the dissolution of the club in 1919.
Huddersfield Town
However, when Chapman took over the reins at Huddersfield Town (in August 1920) one of his first tasks was to secure Stephenson in a £4,000 move from Villa Park. This caused controversy at a time and at a club where money was scarce but within two seasons, Stephenson was playing inside-left in the narrow FA Cup victory over Preston North End at Stamford Bridge becoming the first player in the 20th century to win three winners' medals in the FA Cup and eventually took the captain's armband from Tommy Wilson. The Final was notable for the controversy surrounding the penalty decision that decided that match when Hamilton brought down Huddersfield's W.H. Smith. Ivan Sharpe, writing in the Sunday Chronicle, stated: "It was certainly a close thing. I had a fairly good view of the incident and my impression was 'Penalty!' The kick was to be taken by the victim, Smith. The goalkeeper Mitchell decided on the manoeuvre of dervish leaps in the air while the act was brewing, the intention obviously to put the Huddersfield marksman off his shot. 'Ne'er mind the devil-dances' seemed to be the tenor of Town captain Clem Stephenson as he advised Smith: 'Just shove it in the net.'
In another two seasons Huddersfield had won the first of three consecutive League Championships and Chapman was moved to write to Stephenson: "I want to thank you personally for your play, your wholehearted efforts both on and off the field. I have never had such confidence in any captain of a team I have been associated with." It was later written: "Chapman bought perceptively, welded his assets together astutely and soon sent out one of the most successful League sides of all time. It was stubborn, disciplined and highly mobile with Clem Stephenson, once of Aston Villa, at the heart of everything. He was a stocky tactician without much pace but his passes were as sweet as stolen kisses."
Chapman brought in the free-scoring George Brown and Alex Jackson and between 1924 and 1925 led the side to two consecutive League titles before departing for fame and fortune with Arsenal. Without his guile the brio was gone and, although Stephenson led Cecil Potter's team to a third straight League title in 1926, Huddersfield have never since won either the League Championship or the FA Cup, instead finishing in second place in the League in 1927 and 1928 — becoming the first side to suffer the "Double Horror" of losing the 1928 FA Cup Final to Blackburn Rovers and finishing second to Everton by 2 points. They lost the semi-final to Bolton Wanderers the following year and Stephenson played his last game for the club that year, becoming manager in May 1929, taking over from Jack Chaplin.
Career statistics
Management career
Stephenson went on to become Huddersfield's longest-serving manager from 27 May 1929 until 8 June 1942 overseeing club records:
The record 10–1 victory over Blackpool in December 1930 at Leeds Road.
The record attendance at Leeds Road 67,037 attended in the FA Cup quarter-final versus a Herbert Chapman led Arsenal.
But his League and Cup exploits were fraught with irony leading Huddersfield to second place in 1933–34 losing to Arsenal by 3 points in the season Herbert Chapman died, and reaching two FA Cup Finals and losing both: the first to a Herbert Chapman led Arsenal and the other to Preston North End in an exact reversal of the 1922 final, and the semi-final to Portsmouth the following year. Huddersfield have never reached the FA Cup semi-final since 1939.
Honours
Aston Villa
First Division runners-up: 1910–11, 1912–13, 1913–14
FA Cup winners: 1913, 1920
Huddersfield Town (Player & manager)
First Division champions: 1923–24, 1924–25, 1925–26
Runners-up: 1926–27, 1927–28, 1933–34
FA Cup winners: 1922
Runners-up: 1928, 1930, 1938
FA Charity Shield winners: 1922
References
External links
Aston Villa career summary at Aston Villa Player Database
Hall of Fame
Added to Hall of Fame
1890 births
1961 deaths
English men's footballers
Men's association football midfielders
English Football League players
Aston Villa F.C. players
Leeds City F.C. wartime guest players
Huddersfield Town A.F.C. players
England men's international footballers
English football managers
Huddersfield Town A.F.C. managers
English Football Hall of Fame inductees
English Football League representative players
West Stanley F.C. players
Blyth Spartans A.F.C. players
Durham City A.F.C. players
Royal Naval Volunteer Reserve personnel of World War I
Footballers from Northumberland
|
Charles Hancock FRCO (4 January 1852 – 6 February 1927) was an organist and composer based in England.
Life
His early musical education was as a chorister in the choir of St George's Chapel, Windsor. He was awarded his FRCO in 1872 and graduated from Oxford University in 1874.
In Leicester he was the conductor of the Leicester New Musical Society.
He died on 6 February 1927, a few weeks before the church was upgraded to cathedral status.
Appointments
Organist of St. Mary's Church, Datchet, Windsor
Organist of St. Andrew's Church, Uxbridge
Assistant organist of St George's Chapel, Windsor
Organist of St. Martin's Church, Leicester 1875 - 1927
Compositions
He composed works for choir and organ.
References
1852 births
1927 deaths
English organists
British male organists
English composers
Fellows of the Royal College of Organists
|
```c++
#include "vtuneapi.h"
#ifdef _MSC_VER // for msvc
#include <cstdlib>
#endif
// Registry of all live domains, keyed by domain name.
std::map<std::string, std::shared_ptr<VTuneDomain>> VTuneDomain::domains_;
// Cache of ITT string handles, keyed by the original narrow string.
std::map<std::string, __itt_string_handle*> VTuneDomain::string_handlers_;
/**
 * Returns the domain registered under domain_name, creating and registering
 * it first if it does not yet exist. Returns nullptr when the underlying
 * __itt_domain_create() call fails.
 */
std::shared_ptr<VTuneDomain> VTuneDomain::createDomain(
const char* domain_name) {
auto domain = getDomain(domain_name);
if (domain == nullptr) {
#ifdef _MSC_VER // for msvc
// MSVC builds pass a wide-character name to the ITT API, so convert first.
// NOTE(review): mbstowcs() does not null-terminate when the converted name
// needs 255 or more wide chars -- confirm domain names are always shorter.
wchar_t buffer[255];
mbstowcs(buffer, domain_name, 255);
__itt_domain* itt_domain = __itt_domain_create(buffer); // call api
#else // for clang and gcc
__itt_domain* itt_domain = __itt_domain_create(domain_name); // call api
#endif
if (itt_domain != NULL) {
std::string key(domain_name);
std::shared_ptr<VTuneDomain> value(new VTuneDomain(itt_domain));
domain = value;
domains_.insert(std::make_pair(key, value));
}
}
return domain;
}
/**
 * Removes the domain registered under domain_name, if any. Only the registry
 * entry is dropped; the VTuneDomain itself is freed once its last
 * shared_ptr reference goes away.
 */
void VTuneDomain::destroyDomain(const char* domain_name) {
  // map::erase(key) is a no-op when the key is absent, so no lookup needed.
  domains_.erase(domain_name);
}
std::shared_ptr<VTuneDomain> VTuneDomain::getDomain(const char* domain_name) {
std::shared_ptr<VTuneDomain> result(nullptr);
auto it = domains_.find(domain_name);
if (it != domains_.end()) {
result = it->second;
}
return result;
}
/**
 * Returns the cached ITT string handle for str, creating and caching one on
 * first use. May return NULL if __itt_string_handle_create() fails.
 */
__itt_string_handle* VTuneDomain::getString(const char* str) {
__itt_string_handle* result = NULL;
auto it = string_handlers_.find(str);
if (it != string_handlers_.end()) {
result = it->second;
} else {
#ifdef _MSC_VER // for msvc
// NOTE(review): as in createDomain(), mbstowcs() does not null-terminate
// when the converted string needs 255 or more wide chars.
wchar_t buffer[255];
mbstowcs(buffer, str, 255);
result = __itt_string_handle_create(buffer); // call api
#else // for clang and gcc
result = __itt_string_handle_create(str);
#endif
std::string key(str);
string_handlers_.insert(std::make_pair(key, result));
}
return result;
}
bool VTuneDomain::beginTask(const char* task_name) {
bool result = false;
__itt_string_handle* name = getString(task_name);
if (name != NULL) {
__itt_task_begin(domain_, __itt_null, __itt_null, name);
result = true;
}
return result;
}
// Ends the current ITT task on this domain (pairs with beginTask()).
void VTuneDomain::endTask() { __itt_task_end(domain_); }
```
|
```javascript
import React, { Component } from 'react'
export default class Index extends Component {
static getInitialProps() {
return { color: 'aquamarine' }
}
render() {
return (
<div>
{[1, 2].map(idx => (
<div key={idx}>
{[3, 4].map(idx2 => (
<div key={idx2}>{this.props.color}</div>
))}
</div>
))}
{[1, 2].map(idx => (
<div key={idx}>
<div>
{this.props.color}
<div className="something">
<React.Fragment>
<div>
<div>{this.props.color} hello there</div>
</div>
</React.Fragment>
</div>
</div>
</div>
))}
<style jsx>{`
div {
background: ${this.props.color};
}
`}</style>
</div>
)
}
}
```
|
Michael K. Wirth (born October 15, 1960) is an American businessman who has been the chairman and CEO of Chevron Corporation since 2018. Previously, he was the vice chairman of the company.
Early life
After graduating from high school, Wirth attended the University of Colorado, where he earned a bachelor's degree in chemical engineering in 1982. Wirth played football and basketball at school.
Career
Wirth joined Chevron as a design engineer in 1982. He worked in multiple sectors including engineering, construction, and operations. In 2001 he became president of marketing for Chevron's Asia, Africa and Middle East region.
He has been on the board of directors for Caltex Australia and GS Caltex. In 2018, Wirth was widely speculated and eventually confirmed to succeed John S. Watson as CEO of Chevron.
Wirth is a member of the board of the American Petroleum Institute, a trade group in the oil industry.
Climate Change Inaction
Wirth's company, Chevron, is responsible for one of the highest total carbon emissions of any private company worldwide. Despite these practices, Wirth's company has been involved in several greenwashing tactics, and was found by a Federal Trade Commission to be misleading its customers on its efforts to reduce greenhouse gas emissions. In response to CNBC host Jim Cramer asking if Chevron had considered calls to invest in alternative energy, Wirth responded that Chevron would "go back to our shareholders and let them plant trees".
In 2022, the Guardian newspaper named Wirth one of the US' top 'climate villains' due to Chevron's "greenwashing tactics to downplay the company’s environmental impact".
Personal life
Wirth and his wife have four adult children.
Wirth sleeps less than 6 hours per night, waking at 3.45am to do a 90 minute gym session.
References
1960 births
Living people
Directors of Chevron Corporation
Businesspeople from Colorado
University of Colorado alumni
American chief executives of Fortune 500 companies
|
Jhosep Ylarde Lopez (born February 8, 1963) is an associate justice of the Supreme Court of the Philippines. He was appointed by President Rodrigo Duterte to replace Associate Justice Priscilla Baltazar-Padilla who retired on November 3, 2020.
Education
Lopez graduated cum laude with a degree in Political Science from the University of the Philippines Diliman. He proceeded to take up his Law studies at the same University where he became a brother of the Sigma Rho Fraternity, the oldest and most distinguished law-based fraternity in Asia. He passed the Bar examinations in 1989 with an average of 84.55%.
Career
Legal
Lopez worked as legal counsel of the University of the Philippines for a year before being promoted as chief legal officer of the Philippine General Hospital. In 1991, he worked under former Senate president Jovito Salonga and was later appointed as chief legal counsel of the Senate of the Philippines. He was appointed as Chief City Prosecutor of Manila in February 2006.
City councilor
He served as a city councilor of the 3rd District Manila from 1992 to 1998 and 2001 to 2006.
Justice of court of appeals
Lopez was appointed as associate justice of the Court of Appeals on May 17, 2012 and served for more than eight years until his appointment to the Supreme Court.
Associate justice of the Supreme Court
On January 26, 2021, President Rodrigo Duterte appointed Lopez as associate justice of the Supreme Court of the Philippines. Lopez fills the post vacated by Justice Priscilla Baltazar-Padilla who retired on November 3, 2020.
References
1963 births
Living people
Associate Justices of the Supreme Court of the Philippines
20th-century Filipino lawyers
Justices of the Court of Appeals of the Philippines
Manila City Council members
People from Pangasinan
University of the Philippines Diliman alumni
21st-century Filipino judges
|
```vue
<template>
<section class="chart-container">
<el-row>
<el-col :span="12">
<div id="chartColumn" style="width:100%; height:400px;"></div>
</el-col>
<el-col :span="12">
<div id="chartBar" style="width:100%; height:400px;"></div>
</el-col>
<el-col :span="12">
<div id="chartLine" style="width:100%; height:400px;"></div>
</el-col>
<el-col :span="12">
<div id="chartPie" style="width:100%; height:400px;"></div>
</el-col>
<el-col :span="24">
<a href="path_to_url" target="_blank" style="float: right;">more>></a>
</el-col>
</el-row>
</section>
</template>
<script>
import echarts from 'echarts'
export default {
  data() {
    return {
      // echarts instances, created lazily by drawCharts()
      chartColumn: null,
      chartBar: null,
      chartLine: null,
      chartPie: null
    }
  },
  methods: {
    // Renders the column chart into #chartColumn.
    drawColumnChart() {
      this.chartColumn = echarts.init(document.getElementById('chartColumn'));
      this.chartColumn.setOption({
        title: { text: 'Column Chart' },
        tooltip: {},
        xAxis: {
          data: ["", "", "", "", "", ""]
        },
        yAxis: {},
        series: [{
          name: '',
          type: 'bar',
          data: [5, 20, 36, 10, 10, 20]
        }]
      });
    },
    // Renders the horizontal bar chart into #chartBar.
    drawBarChart() {
      this.chartBar = echarts.init(document.getElementById('chartBar'));
      this.chartBar.setOption({
        title: {
          text: 'Bar Chart',
          subtext: ''
        },
        tooltip: {
          trigger: 'axis',
          axisPointer: {
            type: 'shadow'
          }
        },
        legend: {
          data: ['2011', '2012']
        },
        grid: {
          left: '3%',
          right: '4%',
          bottom: '3%',
          containLabel: true
        },
        xAxis: {
          type: 'value',
          boundaryGap: [0, 0.01]
        },
        yAxis: {
          type: 'category',
          data: ['', '', '', '', '', '()']
        },
        series: [
          {
            name: '2011',
            type: 'bar',
            data: [18203, 23489, 29034, 104970, 131744, 630230]
          },
          {
            name: '2012',
            type: 'bar',
            data: [19325, 23438, 31000, 121594, 134141, 681807]
          }
        ]
      });
    },
    // Renders the line chart into #chartLine.
    drawLineChart() {
      this.chartLine = echarts.init(document.getElementById('chartLine'));
      this.chartLine.setOption({
        title: {
          text: 'Line Chart'
        },
        tooltip: {
          trigger: 'axis'
        },
        legend: {
          data: ['', '', '']
        },
        grid: {
          left: '3%',
          right: '4%',
          bottom: '3%',
          containLabel: true
        },
        xAxis: {
          type: 'category',
          boundaryGap: false,
          data: ['', '', '', '', '', '', '']
        },
        yAxis: {
          type: 'value'
        },
        series: [
          {
            name: '',
            type: 'line',
            stack: '',
            data: [120, 132, 101, 134, 90, 230, 210]
          },
          {
            name: '',
            type: 'line',
            stack: '',
            data: [220, 182, 191, 234, 290, 330, 310]
          },
          {
            name: '',
            type: 'line',
            stack: '',
            data: [820, 932, 901, 934, 1290, 1330, 1320]
          }
        ]
      });
    },
    // Renders the pie chart into #chartPie.
    drawPieChart() {
      this.chartPie = echarts.init(document.getElementById('chartPie'));
      this.chartPie.setOption({
        title: {
          text: 'Pie Chart',
          subtext: '',
          x: 'center'
        },
        tooltip: {
          trigger: 'item',
          formatter: "{a} <br/>{b} : {c} ({d}%)"
        },
        legend: {
          orient: 'vertical',
          left: 'left',
          data: ['', '', '', '', '']
        },
        series: [
          {
            name: '',
            type: 'pie',
            radius: '55%',
            center: ['50%', '60%'],
            data: [
              { value: 335, name: '' },
              { value: 310, name: '' },
              { value: 234, name: '' },
              { value: 135, name: '' },
              { value: 1548, name: '' }
            ],
            itemStyle: {
              emphasis: {
                shadowBlur: 10,
                shadowOffsetX: 0,
                shadowColor: 'rgba(0, 0, 0, 0.5)'
              }
            }
          }
        ]
      });
    },
    // (Re)draws all four charts. Any existing instances are disposed first:
    // the `updated` hook re-runs this method, and calling echarts.init() on a
    // DOM node that already hosts a chart logs a warning and leaks the
    // previous instance.
    drawCharts() {
      const existing = [this.chartColumn, this.chartBar, this.chartLine, this.chartPie]
      existing.forEach(chart => {
        if (chart) {
          chart.dispose()
        }
      })
      this.drawColumnChart()
      this.drawBarChart()
      this.drawLineChart()
      this.drawPieChart()
    },
  },
  // Initial render once the container <div>s exist in the DOM.
  mounted: function () {
    this.drawCharts()
  },
  // Re-render after reactive updates; safe because drawCharts() disposes
  // stale instances before re-initializing.
  updated: function () {
    this.drawCharts()
  }
}
</script>
<style scoped>
.chart-container {
width: 100%;
float: left;
}
/*.chart div {
height: 400px;
float: left;
}*/
.el-col {
padding: 30px 20px;
}
</style>
```
|
Antonio Arbiol y Díez (Torrellas (Zaragoza), 1651 – Zaragoza, January 31 1726) was a Spanish Franciscan and moralistic writer. His works include topics such as the task of comforting the sick or the education of children and offer moral advice.
Works
Manuale sacerdotum. 1693. Manual para que sacerdotes aprendan a predicar.
La Venerable y esclarecida Orden Tercera de San Francisco. 1697. Historia de la orden de San Francisco, de la que evalúa los "principios, leyes, reglas, ejercicios y vidas de sus principales santos."
Desengaños místicos. 1706. Sobre los errores cometidos durante la oración, esquema de teología, y errores en la espiritualidad.
El cristiano reformado. 1714. Sobre los ejercicios y devociones de la Tercera Orden.
La familia regulada con doctrina de la Sagrada Escritura. 1715
La religiosa instruida. 1717.
Visita de enfermos y exercicio santo de ayudar a bien morir. 1722.
Estragos de la lujuria y sus remedios conforme a las Divinas Escrituras. 1726.
References
External links
Los terceros hijos de el humano serafin: la venerable, y esclarecida Orden Tercera de nuestro serafico Patriarca San Francisco..., Zaragaça, 1724, at the National Library of Portugal
17th-century Spanish writers
18th-century Spanish writers
18th-century male writers
1651 births
1726 deaths
|
Golden Bay (officially Golden Bay / Mohua) is a shallow, paraboloid-shaped bay in New Zealand's Tasman District, near the northern tip of the South Island. An arm of the Tasman Sea, the bay lies northwest of Tasman Bay and Cook Strait. It is protected in the north by Farewell Spit, a 26 km long arm of fine golden sand that is the country's longest sandspit. The Aorere and Tākaka rivers are the major waterways to flow into the bay from the south and the west.
The bay was once a resting area for migrating whales and dolphins such as southern right whales and humpback whales, and pygmy blue whales may be observed off the bay as well.
The west and northern regions of the bay are largely unpopulated. Along its southern coast are the towns of Tākaka and Collingwood, and the Abel Tasman National Park. Separation Point, the natural boundary between Golden and Tasman Bays, is in the park. North-eastern parts of Kahurangi National Park are in Golden Bay.
It is known for being a popular tourist destination, because of its good weather and relaxed, friendly lifestyle. Beaches such as Tata Beach are popular locations for retirees and holiday homes.
Name
In 1642, Abel Tasman named the bay Moordenaar's Bay, meaning "Killers' Bay" or "Murderers' Bay", after four of his crew were killed there in a clash with Māori.
In 1770, James Cook included it as part of Tasman Bay, which he called "Blind Bay". Fifty years later, Dumont d'Urville named it Massacre Bay, but following the discovery of coal in Takaka in 1842 it was renamed Coal Bay.
In 1857, gold was found inland from Parapara, prompting another change, this time to Golden Bay.
In 2014 the bay was given the official name, Golden Bay / Mohua, incorporating the Māori name for the area, Mohua, in reference to the Māori name for the yellowhead.
History
Māori lived along the shores of Golden Bay from at least 1450, which is the earliest dated archaeological evidence (from carbon dating) yet found. In 2010 an extensive scientific study was made of Golden Bay by a team from Otago University led by Associate Professor Ian Barber. They accurately plotted and investigated a large number of early Māori sites ranging from pā to kāinga to probable kumara gardens that stretch along the coastal arc from the base of Farewell Spit at Triangle Flat, 60 km eastwards to a pā site 10 km east of Separation Point. Some of the original inhabitants of the area were Waitaha, Ngāi Tara and Ngāti Wairangi (Hauāuru Māori from Whanganui), who were displaced by Ngāti Tūmatakōkiri in the early 1600s.
Dutch explorer Abel Tasman anchored in this bay in 1642. Ngāti Tūmatakōkiri rammed the Dutch ship's cockboat with a waka and four Dutch seamen were killed by Māori, prompting Tasman to name it Moordenaar's Bay ('Murderers Bay'). Archeological research has shown the Dutch had tried to land at a major agricultural area, which the Māori may have been trying to protect. Tasman saw at least 22 waka. He recorded that of the 11 waka that chased his ship, most had 17 men on board. This gives a total of about 200 men, with a likely population of about 500 people. Tasman had already been in the bay five days when attacked giving the Māori time to assemble an attack force. Archaeological evidence has not shown any large settlements so it is likely that the iwi normally lived in whanau based groups scattered along the coast but mainly in the eastern bay at Ligar Beach, Tata Beach and Wainui Bay where there are 20 known archaeological sites in a 10 km zone.
In 1770, during his first voyage, English explorer James Cook included the bay as part of Blind Bay, but upon his second voyage to the bay in 1773 realised that it was in fact the location of Murderers Bay. The French explorer Jules Dumont d'Urville appears to have changed the name to Massacre Bay.
After Ngāti Tūmatakōkiri's defeat in the 1810s, Golden Bay became a part of the rohe of Ngāti Apa ki te Rā Tō.
European settlement commenced in October 1842 with the Lovell family settling at Motupipi near the then existing Māori pā site. Earlier, in March of that year, Frederick Tuckett had discovered coal on the beach near the Motupipi pā. There was a report from May 1841, which also stated there was coal in the area. In the 1840s, following the discoveries, the local population unsuccessfully sought to have it renamed Coal Bay.
In 1846, Charles Heaphy and Thomas Brunner with their Māori guide Kehu, passed through Golden Bay on their journey to the West Coast. In 1850, Packard, Robinson and Lovell started the first sawmill in Tākaka and between 1852 and 1856 land was sold to various European immigrants in Golden Bay by some members of the local iwi but without the consent of the entire iwi. In 1855 William Gibbs bought of land from local Māori and established the town of Gibbstown which later was renamed Collingwood.
In the late 1850s, with the discovery of gold at Aorere, its name was changed to Golden Bay. In the Great Depression, miners returned to search for any remaining gold in a government-subsidised prospecting scheme for the unemployed, and about 40 miners lived in a dozen huts around Waingaro Forks.
The road over Tākaka Hill was completed in 1888. Prior to this, the usual method of access to Golden Bay was by sea.
A coal mining lease was granted to Joseph Taylor and James Walker in 1895 to a piece of land at Pūponga on the coast between Farewell Spit and Collingwood. They subsequently discovered a seam of coal that was between three and seven feet in depth. Work on developing a mine progressed with a tramline built and a wharf built, and dredging took place to allow ships to berth and be loaded with coal. By 1910, 73 men were employed at the mine and over 30,000 tons of coal had been mined. The mine was run by various companies until 1974 when it became uneconomic.
Deposits of limonite and coal lead to the development of an iron works at Onekaka. The Onekaka ironworks started operating in 1924. A hydroelectric scheme was built to power the ironworks and a wharf and tramway were built to move supplies and product in and out of the factory. The ironworks fell victim to the great depression, a saturated local market for iron and Australian tariffs limiting the export potential. The iron works closed in 1935. The iron works were nationalised but the grand plans to revitalise the iron works never succeeded and it was finally closed for good in 1954.
The Abel Tasman National Park was established on 16 December 1942 which was 300 years exactly after Abel Tasman had visited Golden Bay. It was established thanks to the determined efforts of Pérrine Moncrieff, who was concerned about both a proposal to mill the trees around Totaranui in 1937 and a plan to build a road through the area. Home to beech forests, red tussock, penguin colonies, wading birds and seals, the park has rich ecological systems.
During the 1960s and the early 1970s, the Ministry of Works surveyed the land where the Heaphy Track now exists for a proposed road to link Golden Bay with the Karamea. This was encouraged by local authorities both in Golden Bay and on the West Coast. The project never progressed beyond this due to public opposition and a lack of funding from the government.
The Northwest Nelson Forest Park was created in 1970 by amalgamating eight state forest parks. The Tasman Wilderness area was established in 1988 and this entire area was given the highest level of conservation protection in 1996 when it became the Kahurangi National Park. It is the second largest of New Zealand's national parks and forms the majority of Golden Bay's interior. The primary reason for its establishment was a new emphasis on protecting the rich biodiversity of the park. It has the largest number of endemic plants of any national park. The park includes the great spotted kiwi, wētās, 29 species of carnivorous snails and native cave spiders.
In 1974, the Milnethorpe Park regeneration project was started. of land overlooking the beach which had very poor soils was chosen for the project. Native species would not grow on the land initially. A variety of Australian gum trees and acacias were planted. As they grew and the soil conditions improved, natives were established amongst them. By 2020, the park had a forest like appearance with many kilometres of walking tracks built.
In December 2011 Golden Bay, as well as much of the Nelson and Tasman regions, were hit by heavy rain and flooding. It was described as a 1 in 500 year downpour for Tākaka. This affected many homes around the Pohara, Ligar Bay, Tata Beach and Wainui area. State Highway 60 between Tākaka and Collingwood was severely damaged at Bird's Hill. The road to Totaranui, a popular isolated tourist destination in Tasman Bay, was badly damaged and was reopened on 29 June 2012.
In August 2014, the name of the bay was officially altered to Golden Bay / Mohua.
Ex Cyclone Gita hit Golden Bay in February 2018 and damaged state highway 60 over the Tākaka Hill isolating Golden Bay from the rest of the South Island. The road over Tākaka Hill was closed by 16 landslides. Tākaka lost electricity and roads and bridges were damaged making them unusable. Barges were required to bring in food supplies and keep the Fonterra dairy factory in operation in Tākaka. It took a number of days for the NZ Transport Agency to reopen the road over Tākaka Hill to essential vehicles and those most urgently needing to leave the region. The road has taken substantial work and time to repair and was fully repaired by the end of 2021.
Demographics
Golden Bay/Mohua, which includes Collingwood, Parapara, Tākaka, Pōhara and Tata Beach, covers It had an estimated population of as of with a population density of people per km2.
Golden Bay/Mohua had a population of 5,226 at the 2018 New Zealand census, an increase of 237 people (4.8%) since the 2013 census, and an increase of 396 people (8.2%) since the 2006 census. There were 2,124 households, comprising 2,598 males and 2,628 females, giving a sex ratio of 0.99 males per female, with 882 people (16.9%) aged under 15 years, 624 (11.9%) aged 15 to 29, 2,556 (48.9%) aged 30 to 64, and 1,167 (22.3%) aged 65 or older.
Ethnicities were 94.9% European/Pākehā, 8.0% Māori, 0.9% Pasifika, 2.1% Asian, and 2.2% other ethnicities. People may identify with more than one ethnicity.
The percentage of people born overseas was 21.6, compared with 27.1% nationally.
Although some people chose not to answer the census's question about religious affiliation, 65.9% had no religion, 21.9% were Christian, 0.1% had Māori religious beliefs, 0.3% were Hindu, 0.1% were Muslim, 1.0% were Buddhist and 2.6% had other religions.
Of those at least 15 years old, 909 (20.9%) people had a bachelor's or higher degree, and 732 (16.9%) people had no formal qualifications. 381 people (8.8%) earned over $70,000 compared to 17.2% nationally. The employment status of those at least 15 was that 1,767 (40.7%) people were employed full-time, 993 (22.9%) were part-time, and 93 (2.1%) were unemployed.
Over the summer months, the population of Golden Bay increases significantly with holiday makers taking holidays near the Golden Bay beaches. Numbers of people staying in Golden Bay have been reported as swelling the population up to 25,000 people during the peak holiday season.
Industry
Hydroelectricity
The Cobb Valley is the location of the Cobb Hydroelectric Power Station, built between 1936 and 1956. The reservoir sits at above sea level at the confluence of the Tākaka and Cobb rivers. The power station is situated 600 vertical metres below and provides 32 megawatts of power. The average annual output is 192 GWh.
The power station's construction was difficult due to the local weather with an annual rainfall of over and snow and heavy frosts in winter. The dam was originally planned to be concrete but this was deemed to be not suitable and an earth dam was constructed instead. It first produced power in 1944.
Asbestos mining
Asbestos was discovered in Golden Bay in 1882 in the mountains behind Takaka. Several attempts were made to obtain commercial quantities in 1896 and 1908 but miners struggled with the isolated mountainous location. In 1917, 100 tons of asbestos was brought down by packhorse. With the development of the Cobb Valley hydroelectricity scheme, and in particular, the access road, asbestos mining became viable. Forty tons were extracted each month until the mine closed in 1945. The mine reopened in 1949 with government assistance and mining continued until 1964.
Golden Bay Cement
The components of Portland cement were found to be all available in Golden Bay and in the early 1880s a cement works was built near Collingwood but was never completed due to a lack of financing. In 1909 a cement works plant was built at Tarakohe where there was plenty of suitable limestone to quarry close to a safe anchorage. The end product was then shipped to the North Island where plenty of demand existed. A wharf was built in 1910 and then a few years later a road was built from the cement works round the bays to Pōhara. By 1928, 50,000 tons of cement was produced annually. To provide bulk shipment of cement by sea, the ship MV Golden Bay was acquired in 1955.
In 1988, the new owners, Fletcher Challenge, closed the cement works and transferred the name Golden Bay Cement to their other plant in Whangārei. In 1994, the harbour facilities owned by the cement works were sold to the Tasman District Council.
Dairy farming
In 2009, there were 83 dairy farms which supplied the Fonterra factory in Tākaka. The factory turned about 525,000 litres of milk each day into skim milk powder.
References
Bays of the Tasman District
Maritime history of the Dutch East India Company
|
```java
/*
*
* at path_to_url
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package jsyntaxpane.syntaxkits;
import jsyntaxpane.DefaultSyntaxKit;
import jsyntaxpane.lexers.DOSBatchLexer;
/**
 * Editor kit for DOS batch (.bat/.cmd) files: pairs the default syntax-kit
 * behaviour with a {@link DOSBatchLexer} that tokenises batch scripts for
 * highlighting.
 *
 * @author Ayman Al-Sairafi
 */
public class DOSBatchSyntaxKit extends DefaultSyntaxKit {
// Install the DOS batch lexer into the default kit's highlighting machinery.
public DOSBatchSyntaxKit() {
super(new DOSBatchLexer());
}
}
```
|
```ruby
# frozen_string_literal: true
require "rubocops/class"
# Specs for the FormulaAudit/ClassName cop: each deprecated Formula
# superclass (ScriptFileFormula, GithubGistFormula, AmazonWebServicesFormula)
# must be reported as an offense and auto-corrected to plain `Formula`.
RSpec.describe RuboCop::Cop::FormulaAudit::ClassName do
subject(:cop) { described_class.new }
# Expected source after autocorrection; shared by every example below.
corrected_source = <<~RUBY
class Foo < Formula
url 'path_to_url
end
RUBY
# NOTE: the ^^^ carets in each expect_offense heredoc mark the exact
# character range the cop must report; their alignment is significant.
it "reports and corrects an offense when using ScriptFileFormula" do
expect_offense(<<~RUBY)
class Foo < ScriptFileFormula
^^^^^^^^^^^^^^^^^ FormulaAudit/ClassName: ScriptFileFormula is deprecated, use Formula instead
url 'path_to_url
end
RUBY
expect_correction(corrected_source)
end
it "reports and corrects an offense when using GithubGistFormula" do
expect_offense(<<~RUBY)
class Foo < GithubGistFormula
^^^^^^^^^^^^^^^^^ FormulaAudit/ClassName: GithubGistFormula is deprecated, use Formula instead
url 'path_to_url
end
RUBY
expect_correction(corrected_source)
end
it "reports and corrects an offense when using AmazonWebServicesFormula" do
expect_offense(<<~RUBY)
class Foo < AmazonWebServicesFormula
^^^^^^^^^^^^^^^^^^^^^^^^ FormulaAudit/ClassName: AmazonWebServicesFormula is deprecated, use Formula instead
url 'path_to_url
end
RUBY
expect_correction(corrected_source)
end
end
```
|
Gurab (, also Romanized as Gūrāb; also known as Gerdāb) is a village in Tayebi-ye Sarhadi-ye Sharqi Rural District, Charusa District, Kohgiluyeh County, Kohgiluyeh and Boyer-Ahmad Province, Iran. At the 2006 census, its population was 163, in 34 families.
References
Populated places in Kohgiluyeh County
|
The 2007 Major League Baseball postseason was the playoff tournament of Major League Baseball for the 2007 season. The winners of the Division Series would move on to the League Championship Series to determine the pennant winners that face each other in the World Series.
In the American League, the New York Yankees made their thirteenth straight postseason appearance, the Boston Red Sox returned for the fourth time in five years, the Cleveland Indians returned for the first time since 2001, and the Los Angeles Angels of Anaheim returned for the fourth time in six years.
In the National League, the Arizona Diamondbacks made their third postseason appearance in the last eight years, the Chicago Cubs made their second appearance in five years, the Colorado Rockies made their second postseason appearance in franchise history and first since 1995, and the Philadelphia Phillies returned to the postseason for the first time since 1993. This would be the first of five consecutive postseason appearances for the Phillies, a streak which lasted until 2011.
The postseason began on October 3, 2007, and ended on October 28, 2007, with the Red Sox sweeping the Rockies in the 2007 World Series. It was the seventh title won by the Red Sox franchise.
Playoff seeds
The following teams qualified for the postseason:
American League
Boston Red Sox – AL East champions, 96–66 (5–2 head-to-head record vs. CLE)
Cleveland Indians – AL Central champions, 96–66 (2–5 head-to-head record vs. BOS)
Los Angeles Angels of Anaheim – AL West champions, 94–68
New York Yankees – 94–68
National League
Arizona Diamondbacks – NL West champions, 90–72
Philadelphia Phillies – NL East champions, 89–73
Chicago Cubs – NL Central champions, 85–77
Colorado Rockies – NL Wild Card qualifier, 90–73
Playoff bracket
Note: Two teams in the same division could not meet in the division series.
American League Division Series
(1) Boston Red Sox vs. (3) Los Angeles Angels of Anaheim
This was the third postseason meeting between the Angels and Red Sox. The Red Sox once again defeated the Angels to advance to the ALCS for the third time in five years. The series was not close — the Red Sox shut out the Angels in Game 1, took Game 2 by a 6–3 score on a Manny Ramirez walk-off three-run home run, and then blew out the Angels in Anaheim in Game 3 to advance to the next round.
(2) Cleveland Indians vs. (4) New York Yankees
This was the third postseason meeting between the Yankees and Indians. The Indians defeated the Yankees in four games to advance to the ALCS for the first time since 1998. This was the last postseason series ever played at the original Yankee Stadium.
The Indians blew out the Yankees in Game 1 by 9 runs. Game 2 was a pitchers' duel between both teams bullpens which lasted 11 innings, and again the Indians prevailed as Travis Hafner drove in the winning run on a bases-loaded single off Luis Vizcaíno. Game 2 was referred to as "The Bug Game", as a swarm of tiny insects circled the mound in the late innings of the game. Play was stopped temporarily out of concern for the players' safety, including Joba Chamberlain, who threw only 12 of his 25 pitches for strikes in suffering a blown save without surrendering a hit. Yankees manager Joe Torre would later say that his decision not to remove his team from the field was one of his biggest regrets as a manager. When the series shifted to the Bronx for Game 3, the Yankees overcame a 3–1 Indians lead to win 8–4 to stave off elimination. The Yankees put up another rally late in Game 4, but the Indians held on to win by a 6–4 score to advance to the ALCS.
The loss to the Indians marked the end of the Yankees' thirteen-year postseason streak, which started in 1995. The Yankees' thirteen-year postseason appearance streak is the longest of any American League team, and second only to the Atlanta Braves, who made fourteen straight appearances from 1991 to 2005.
National League Division Series
(1) Arizona Diamondbacks vs. (3) Chicago Cubs
The Diamondbacks swept the Cubs to return to the NLCS for the first time since 2001.
In Game 1, the Diamondbacks prevailed by a 3–1 score thanks to a solid pitching performance from Brandon Webb. The Diamondbacks would put up a big lead early in Game 2 and didn't relinquish it, winning 8–4 to take a 2–0 series lead headed to Chicago. Liván Hernández kept the Cubs' offense at bay in Game 3 as the Diamondbacks won 5–1 to return to the NLCS.
This was the most recent playoff series win by the Diamondbacks until 2023. They would, however, win the NL Wild Card Game in 2017.
(2) Philadelphia Phillies vs. (4) Colorado Rockies
This was the first postseason meeting between the Phillies and Rockies. The Rockies swept the Phillies to advance to the NLCS for the first time in franchise history. These two teams would meet once more in the 2009 NLDS, which the Phillies won in four games.
American League Championship Series
(1) Boston Red Sox vs. (2) Cleveland Indians
This was the fourth postseason meeting between the Indians and Red Sox (1995, 1998, 1999). The Red Sox came back from a 3-1 series deficit to return to the World Series for the second time in four years.
Similar to 2004, the Red Sox once again found themselves trailing in the series. The Red Sox blew out the Indians in Game 1, while the Indians evened the series in a 13-6 rout thanks to help from Jhonny Peralta, Grady Sizemore, and Franklin Gutiérrez. When the series shifted to Cleveland for Game 3, Jake Westbrook out-dueled Boston ace Daisuke Matsuzaka as the Indians prevailed by a 4-2 score to take the series lead. The Indians' offense would chase Tim Wakefield from the mound in Game 4 thanks to a seven run fifth inning to take a 3-1 series lead, and were now one win away from their first World Series berth in a decade. However, things then unraveled for the Indians fast. In Game 5, Josh Beckett out-dueled CC Sabathia as the Red Sox blew out the Indians to send the series back to Fenway Park. Curt Schilling and the Red Sox bullpen would shut down the Indians offense in Game 6 as the Red Sox blew out the Indians by a 12-2 score to force a seventh game. The Red Sox's ten run margin of victory was the largest in an LCS game since Game 4 of the 1974 NLCS. In Game 7, the Red Sox again blew out the Indians, this time by nine runs, to clinch the pennant.
The Red Sox would win their next AL pennant in 2013, against the Detroit Tigers in six games. The Indians wouldn't return to the ALCS again until 2016, where they defeated the Toronto Blue Jays in five games before falling in the World Series that year.
The Indians would not return to the postseason again until 2013. Both teams would meet again in the 2016 ALDS, which the Indians won en route to the World Series.
National League Championship Series
(1) Arizona Diamondbacks vs. (4) Colorado Rockies
This was the first postseason meeting between the Diamondbacks and Rockies. The Rockies swept the top-seeded Diamondbacks to advance to the World Series for the first time in franchise history.
Jeff Francis pitched over six solid innings as the Rockies stole Game 1 in Phoenix. Game 1 was marred by a controversial call in the bottom of the seventh inning - Francis allowed a leadoff double to Chris Snyder and hit Justin Upton with a pitch, but the Diamondbacks were taken out of a potential rally when a disputed interference call resulted in a double-play groundout for Augie Ojeda. Diamondbacks fans responded by throwing objects onto the field, which stopped play. The Rockies would then prevail in a long 11-inning affair in Game 2 to go up 2-0 in the series headed home to Denver. Home runs from Matt Holliday and Yorvit Torrealba would carry the Rockies to victory in Game 3. Holliday would again hit another three-run home run in Game 4 to give the Rockies a big lead that they would hold onto, as they clinched the pennant in front of their home fans.
By sweeping the series, the Rockies became the first team since the 1976 Cincinnati Reds to start 7–0 in the postseason. The Diamondbacks would not return to the postseason again until 2011, and this was their last appearance in the NLCS until 2023, where they defeated the Philadelphia Phillies in seven games to return to the World Series.
Both these teams would meet again in the 2017 NL Wild Card Game, which was won by the Diamondbacks.
2007 World Series
(AL1) Boston Red Sox vs. (NL4) Colorado Rockies
This was the first World Series ever played in the state of Colorado. It was also the sixth World Series in a row to feature a Wild Card team. The Red Sox handily swept the Rockies to win their second title in four years and seventh overall.
The series was heavily lopsided in favor of the Red Sox - Josh Beckett pitched seven solid innings and gave up only one run as the Red Sox blew out the Rockies by a 13-1 score in Game 1. The Rockies took an early lead in Game 2, but the Red Sox put up two unanswered runs across the fourth and fifth innings to take the lead, and Curt Schilling and the Red Sox bullpen would keep the Rockies' offense at bay the rest of the game to go up 2-0 in the series headed to Denver. The Red Sox offense would once again explode for 10 runs in Game 3 as they won 10-5 to take a 3-0 series lead. The Sox would then clinch the title in Game 4 as Jonathan Papelbon fended off a late rally by the Rockies.
The Rockies would return to the postseason again in 2009, but fell to the Philadelphia Phillies in the NLDS. The Red Sox attempted to defend their title the next year, but fell to the Tampa Bay Rays in seven games in the ALCS. They would win the World Series twice more afterward - in 2013, where they defeated the St. Louis Cardinals in six games, as well as 2018 where they defeated the Los Angeles Dodgers in five games.
Broadcasting
This was the first postseason under a seven-year U.S. rights agreement with Fox and TBS. TBS was awarded all Division Series games, the National League Championship Series in odd-numbered years starting in 2007, and the American League Championship Series in even-numbered years starting in 2008. Fox was awarded the American League Championship Series in odd-numbered years starting in 2007, and the National League Championship Series in even-numbered years starting in 2008. The deal also maintained Fox's streak of airing consecutive World Series since 2000.
References
External links
League Baseball Standings & Expanded Standings – 2007
Major League Baseball postseason
|
```go
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/mount"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
//
// The error return is named so the deferred cleanup below can propagate
// unmount/remove failures that occur after a successful pivot.
func chroot(path string) (err error) {
	// if the engine is running in a user namespace we need to use actual chroot
	if rsystem.RunningInUserNS() {
		return realChroot(path)
	}
	// Run the pivot inside a fresh mount namespace so it does not disturb
	// the host's mount table.
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
	}
	// Make everything in new ns slave.
	// Don't use `private` here as this could race where the mountns gets a
	// reference to a mount and an unmount from the host does not propagate,
	// which could potentially cause transient errors for other operations,
	// even though this should be relatively small window here `slave` should
	// not cause any problems.
	if err := mount.MakeRSlave("/"); err != nil {
		return err
	}
	// pivot_root needs the new root to be a mount point; bind-mount the path
	// onto itself if it is not one already. If that fails, fall back to a
	// plain chroot.
	if mounted, _ := mount.Mounted(path); !mounted {
		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
			return realChroot(path)
		}
	}
	// setup oldRoot for pivot_root
	// NOTE(review): ioutil.TempDir is deprecated in modern Go in favour of
	// os.MkdirTemp; kept as-is to match the file's vintage.
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return fmt.Errorf("Error setting up pivot dir: %v", err)
	}
	var mounted bool
	defer func() {
		if mounted {
			// make sure pivotDir is not mounted before we try to remove it
			if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
				if err == nil {
					err = errCleanup
				}
				return
			}
		}
		errCleanup := os.Remove(pivotDir)
		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
		// because we already cleaned it up on failed pivot_root
		if errCleanup != nil && !os.IsNotExist(errCleanup) {
			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
			if err == nil {
				err = errCleanup
			}
		}
	}()
	if err := unix.PivotRoot(path, pivotDir); err != nil {
		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
		if err := os.Remove(pivotDir); err != nil {
			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
		}
		return realChroot(path)
	}
	mounted = true
	// This is the new path for where the old root (prior to the pivot) has been moved to
	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root: %v", err)
	}
	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("Error making old root private after pivot: %v", err)
	}
	// Now unmount the old root so it's no longer visible from the new root
	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
	}
	mounted = false
	return nil
}
// realChroot performs a plain chroot(2) into path followed by a chdir to
// the new root. It is the fallback used when pivot_root cannot be used.
func realChroot(path string) error {
	if chrootErr := unix.Chroot(path); chrootErr != nil {
		return fmt.Errorf("Error after fallback to chroot: %v", chrootErr)
	}
	chdirErr := unix.Chdir("/")
	if chdirErr != nil {
		return fmt.Errorf("Error changing to new root after chroot: %v", chdirErr)
	}
	return nil
}
```
|
Bashkim Muhedini (born 3 January 1949) is an Albanian retired football player. He was a goalkeeper for Partizani Tirana and the Albania national team in the 1970s.
Club career
Just like the famous Albanian sculptor Odhise Paskali, Muhedini hails from the village Kozarë. He played basketball in school, but Bejkush Birçe persuaded him to play football and he subsequently joined the Partizani academy led by Birçe. He replaced Mikel Janku as Partizani's first choice goalkeeper, but he was forced to quit his career due to political reasons in 1976.
International career
He made his debut for Albania in a June 1971 European Championship qualification match away against West Germany and earned a total of 7 caps, scoring no goals. His final international was a November 1973 friendly match against China.
Personal life
Muhedini was a son-in-law of Albanian general Beqir Balluku, who was executed during the Hoxha regime. Family of Balluku were persecuted and Muhedini was forced to retire from playing for being a relative as well and he was 'relocated' and became a physical education teacher. In 1979 he was allowed to leave for Përmet and coached the local team. Nowadays he lives in New York.
Honours
Albanian Superliga: 1
1971
References
External links
1949 births
Living people
People from Berat County
Men's association football goalkeepers
Albanian men's footballers
Albania men's international footballers
FK Partizani Tirana players
Kategoria Superiore players
Albanian football managers
|
The Lonesome Death of Buck McCoy is an album by American rock band The Minus 5. Their final release of new material for Hollywood Records, it was released in 1997. The album was met with positive reception from critics.
Recording and release
The musicians making up The Minus 5 overlapped with the auxiliary touring members of R.E.M. and Tuatara; the musicians recorded The Lonesome Death of Buck McCoy alongside Mark Eitzel's West. The opportunity to record and tour with the material spurred Scott McCaughey to finish off songs that Peter Buck had started writing two years prior. The revolving door of performers supported all their releases (including Tuatara's Breaking the Ethers) with The Magnificent Seven Vs. the United States tour in 1997. This album was the first release from Hollywood Records imprint Malt Records.
Critical reception
The album has received positive reviews from critics, with Album of the Year characterizing consensus as a 90 out of 100, based on two reviews. The editorial staff of AllMusic Guide gave the album four out of five stars, with reviewer Stephen Thomas Erlewine writing that it was an improvement over their first full-length, Old Liquidator, with catchy songwriting and "charmingly ragged pop-rock". The review in No Depression concurred with the charming nature of the songwriting, tying it stylistically to 1960s pop music and concluding that McCaughey's first major label album is deserved. In Entertainment Weekly, Steven Mirkin compared the sound to Merseybeat acts and gave the album an A for "casually perfect crystalline pop".
Commercial performance
The album reached 58 on CMJ New Music Monthlys Alternative Radio Airplay in August 1997. In 2003, McCaughey claimed in an interview that the album had only sold 5,000 copies, compared to the greater commercial success of some of the musicians who played on the album.
Track listing
All songs written by Peter Buck and Scott McCaughey, except where noted:
"The Rest of the World" – 3:10
"Cross Every Line" – 4:01
"Empty Room" – 3:42
"Wasted Bandage" – 3:04
"Boeing Spacearium" (Buck, McCaughey, and Robert Pollard) – 3:27
"My Mummy's Dead" (John Lennon) – 2:30
"Moonshine Girl" – 3:20
"Popsycle Shoppe" – 2:47
"Wouldn't Want to Care" – 2:56
"Spidery Moon" – 2:42
"Bullfight" – 4:15
"Hate Me More" – 5:08
Personnel
The Minus 5
Jon Auer ("Admiral Boot")
Chris Ballew ("Mr. MacAfee")
Peter Buck ("Streetsinger") – guitar, associate production
John Crist ("Calliope Bird")
Dave Dederer ("Daughter")
Jason Finn ("The Constable")
John Keane ("Mr. Dawes, Jr.") – recording at John Keane's Studio
Barrett Martin ("Santouri")
Scott McCaughey ("Buck McCoy") – guitar, vocals, production, mixing, mastering, recording at LowBeat
Mike McCready ("Winthrop")
Christy McWilson ("Miss Polly")
Robert Pollard ("Ancient Roomer") – vocals on "Boeing Spacearium", recording in Dayton
Pony ("Nigel")
Ken Stringfellow ("Martha")
Additional personnel
Matt Bayles – engineering at Litho
Art Chantry – artwork
Ed Brooks – recording at Litho, mixing, executive production, associate production
Mark Downey – choreography
Rob Grenoble – recording at Water Music
Mark Guenther – mastering
Hammi Hammerschmidt – layout
Nadine McCaughey – artwork
Floyd Reitsma – assistant engineering
Jason Rowe – engineering at Ironwood
Todd Rundgren – recording at Location, Neverland
Robert Seidenberg – choreography
Kevin Suggs – recording at John & Stu's
Conrad Uno – recording at Egg
Kevin Wilson – painting
References
External links
The Lonesome Death of Buck McCoy at Rate Your Music
1997 albums
Hollywood Records albums
The Minus 5 albums
The Presidents of the United States of America (band)
|
```c++
/****************************************************************************
** All rights reserved.
** See license at path_to_url
****************************************************************************/
#include "dialog_about.h"
#include "ui_dialog_about.h"
#include "../qtcommon/qstring_conv.h"
#include <common/mayo_version.h>
#include <Standard_Version.hxx>
namespace Mayo {
// Builds the About dialog: fills in the application/organization names, the
// full version string (version + commit id + revision number), the build
// date/time, and the versions of the Qt and OpenCASCADE libraries in use.
DialogAbout::DialogAbout(QWidget* parent)
    : QDialog(parent),
      m_ui(new Ui_DialogAbout)
{
    m_ui->setupUi(this);
    m_ui->label_AppByOrg->setText(
        tr("%1 By %2").arg(QApplication::applicationName(), QApplication::organizationName())
    );
    // strVersion/strVersionCommitId/versionRevisionNumber presumably come
    // from <common/mayo_version.h> — not visible in this file; confirm.
    const QString strVersionFull =
        QString("%1 commit:%2 revnum:%3")
        .arg(strVersion).arg(strVersionCommitId).arg(versionRevisionNumber);
    // QT_POINTER_SIZE * 8 yields the target word size (32/64) for display.
    m_ui->label_Version->setText(m_ui->label_Version->text().arg(strVersionFull).arg(QT_POINTER_SIZE * 8));
    // __DATE__/__TIME__ capture the compile time of this translation unit.
    m_ui->label_BuildDateTime->setText(m_ui->label_BuildDateTime->text().arg(__DATE__, __TIME__));
    m_ui->label_Qt->setText(m_ui->label_Qt->text().arg(QT_VERSION_STR));
    m_ui->label_Occ->setText(m_ui->label_Occ->text().arg(OCC_VERSION_COMPLETE));
}
// Releases the generated UI object; child widgets are destroyed by Qt's
// parent-child ownership when the dialog itself is deleted.
DialogAbout::~DialogAbout()
{
    delete m_ui;
}
// Appends a "<name> <version>" label to the dialog's library-info layout.
// Called once per third-party library the application wants to credit.
void DialogAbout::addLibraryInfo(std::string_view libName, std::string_view libVersion)
{
    // The label is parented to the dialog, so Qt owns and frees it.
    auto label = new QLabel(this);
    label->setText(tr("%1 %2").arg(to_QString(libName), to_QString(libVersion)));
    m_ui->layout_Infos->addWidget(label); // fixed: removed stray ';;' empty statement
}
} // namespace Mayo
```
|
Piero Scotti (November 11, 1909 – February 14, 1976) was a racing driver from Italy. He participated in one Formula One World Championship Grand Prix, on June 3, 1956. He scored no championship points.
Complete Formula One World Championship results
(key)
Italian racing drivers
Italian Formula One drivers
1909 births
1976 deaths
Sportspeople from Florence
|
```c++
#include "ExportProcessor.h"
using namespace mega;
using namespace std;
// Builds a processor that resolves local file paths to MEGA nodes before
// requesting public links for them (MODE_PATHS).
ExportProcessor::ExportProcessor(MegaApi *megaApi, QStringList fileList) : QObject()
{
    this->megaApi = megaApi;
    this->fileList = fileList;
    this->mode = MODE_PATHS;
    currentIndex = 0;
    // One onRequestFinish() callback is expected per input entry.
    remainingNodes = fileList.size();
    importSuccess = 0;
    importFailed = 0;
    // Delegate listener bridges SDK callbacks to this QObject; freed in dtor.
    delegateListener = new QTMegaRequestListener(megaApi, this);
}
// Builds a processor that exports nodes identified directly by their MEGA
// handles (MODE_HANDLES). The explicit QObject() base initializer is added
// for consistency with the path-based constructor (behavior unchanged —
// the default base constructor runs either way).
ExportProcessor::ExportProcessor(MegaApi *megaApi, QList<MegaHandle> handleList) : QObject()
{
    this->megaApi = megaApi;
    this->handleList = handleList;
    this->mode = MODE_HANDLES;
    currentIndex = 0;
    // One onRequestFinish() callback is expected per input handle.
    remainingNodes = handleList.size();
    importSuccess = 0;
    importFailed = 0;
    // Delegate listener bridges SDK callbacks to this QObject; freed in dtor.
    delegateListener = new QTMegaRequestListener(megaApi, this);
}
// Releases the request listener created in the constructors.
ExportProcessor::~ExportProcessor()
{
    delete delegateListener;
}
// Issues one exportNode() request per input entry. In MODE_PATHS each local
// path is first resolved to a MEGA node (via the sync engine, then by file
// fingerprint); in MODE_HANDLES nodes are looked up directly by handle.
// Results arrive asynchronously in onRequestFinish(); if there is nothing
// to do, the finished signal is emitted immediately.
void ExportProcessor::requestLinks()
{
    int size = (mode == MODE_PATHS) ? fileList.size() : handleList.size();
    if (!size)
    {
        emit onRequestLinksFinished();
        return;
    }
    for (int i = 0; i < size; i++)
    {
        MegaNode *node = NULL;
        if (mode == MODE_PATHS)
        {
#ifdef WIN32
            // Prefix with \\?\ to lift the MAX_PATH limit, unless the path
            // already starts with \\ (UNC / already-prefixed path).
            if (!fileList[i].startsWith(QString::fromLatin1("\\\\")))
            {
                fileList[i].insert(0, QString::fromLatin1("\\\\?\\"));
            }
            // On Windows the raw UTF-16 bytes of the path are passed through.
            string tmpPath((const char*)fileList[i].utf16(), fileList[i].size()*sizeof(wchar_t));
#else
            string tmpPath((const char*)fileList[i].toUtf8().constData());
#endif
            node = megaApi->getSyncedNode(&tmpPath);
            if (!node)
            {
                // Not a synced file: try to match a remote node by fingerprint.
                const char *fpLocal = megaApi->getFingerprint(tmpPath.c_str());
                node = megaApi->getNodeByFingerprint(fpLocal);
                delete [] fpLocal;
            }
        }
        else
        {
            node = megaApi->getNodeByHandle(handleList[i]);
        }
        // node may still be NULL here; presumably exportNode() then reports
        // the failure through onRequestFinish() — TODO confirm SDK behavior.
        megaApi->exportNode(node, delegateListener);
        delete node;
    }
}
// Returns only the links that were successfully created; failed entries
// (stored as empty strings in publicLinks) are excluded.
QStringList ExportProcessor::getValidLinks()
{
    return validPublicLinks;
}
// Listener callback invoked once per exportNode() request. Tracks
// progress, collects the produced link (or an empty placeholder on
// failure), and emits onRequestLinksFinished after the last request.
void ExportProcessor::onRequestFinish(MegaApi *, MegaRequest *request, MegaError *e)
{
    currentIndex++;
    remainingNodes--;
    if (e->getErrorCode() == MegaError::API_OK)
    {
        const QString link = QString::fromLatin1(request->getLink());
        publicLinks.append(link);
        validPublicLinks.append(link);
        importSuccess++;
    }
    else
    {
        // Keep publicLinks positionally aligned with the requested items.
        publicLinks.append(QString());
        importFailed++;
    }
    if (remainingNodes == 0)
    {
        emit onRequestLinksFinished();
    }
}
```
|
Benjamin Crémieux (1888–1944) was a French author, critic and literary historian.
Early life
Crémieux was born to a Jewish family in Narbonne, France in 1888. His family had long ties in the region, having 'settled in France as early as the 14th century'.:452
Military service
He fought in World War I during his obligatory military service in the French Army and was severely wounded during battle.:452 After the war he focused on studying Italian literature and history.:452
Career
Crémieux contributed to a variety of literary magazines and journals, including La Gazette du Franc,:270 and the influential literary journal Nouvelle Revue Française (NRF). He started writing for the NRF in 1920 and Jean Paulhan invited him to be a member of the journal's editorial committee as early as 1926.:22
In 1928 he defended his doctoral thesis Essai Sur l'évolution littéraire de l'Italie de 1870 á nos jours at the Sorbonne, which was published later that year.:41 He published one of his most important texts in 1931, Inquiétude et Reconstruction, which provided a survey of French literature since the turn of the century.:139
He also served in a variety of service roles. He was 'chief of the Italian bureau of the French Ministry of Foreign Affairs':41 and the permanent secretary of the French section of the PEN Club.:139
In 1940, Crémieux joined the French underground and became a leader of the Maquis.
Death
In April 1943, two Gestapo agents detained Crémieux in Marseilles.:458 He was arrested, imprisoned, and deported to Nazi Germany, where, in April 1944, he was executed in the Buchenwald concentration camp.:458
Legacy
Crémieux introduced a number of important literary figures to the French public through his translations, including Luigi Pirandello and Italo Svevo;:138 he was also an early champion of the works of Marcel Proust.
References
1888 births
1944 deaths
French magazine editors
French male essayists
20th-century French essayists
20th-century French male writers
Jewish French history
French people who died in Buchenwald concentration camp
French Jews who died in the Holocaust
French military personnel of World War I
French people executed in Nazi concentration camps
|
Hallie Rubenhold (born 1971) is an American-born British historian and author. Her work specializes in 18th and 19th century social history and women's history. Her 2019 book The Five, about the lives of the women murdered by Jack the Ripper, was shortlisted for the Wolfson History Prize and won the Baillie Gifford Prize for Non-fiction. Rubenhold's focus on the victims of murder (frequently women), rather than on the identity or the acts of the perpetrator, has been credited with changing attitudes to the proper commemoration of such crimes and to the appeal and function of the true crime genre.
Early life
Rubenhold was born in Los Angeles to a British father and American mother and undertook a BA in History at the University of Massachusetts, Amherst. She then gained an MA in British History and History of Art and an MPhil in History from the University of Leeds, on the subject of marriage and child-rearing in the eighteenth century. Rubenhold has also worked in the commercial art world for Philip Mould and as an assistant curator for the National Portrait Gallery.
Career
In 2005, she wrote an accessible history of Harris's List of Covent Garden Ladies and its author in her book The Covent Garden Ladies: Pimp General Jack and the Extraordinary Story of Harris's List, and, in 2008, she published The Harlot's Handbook: Harris's List, a selection of the directories' "funniest, rudest and most surreal entries". The BBC later adapted the material for a documentary, presented by Rubenhold herself called The Harlot's Handbook.
Rubenhold appears regularly as an expert contributor on history documentaries for British and US networks. In the past she has appeared on BBC 2's Balderdash and Piffle, discussing the origins of merkins with burlesque star Immodesty Blaize and on BBC 4's Age of Excess. She has contributed to the BBC series The Beauty of Maps and to History Cold Case and to Channel 4's Titanic: The Mission, as well as the Travel Channel's Mysteries at the Museum and Private Lives of the Monarchs. She also works as a historical consultant for period dramas, including Jonathan Strange & Mr Norrell (BBC) and Harlots (Hulu / Amazon).
Her book, Lady Worsley's Whim, published in November 2008, is an account of one of the eighteenth century's most sensational sex scandals, the criminal conversation case of Sir Richard Worsley against Maurice George Bisset for having committed adultery with Seymour Fleming, a member of The New Female Coterie established by Caroline Stanhope, Countess of Harrington. It featured as BBC Radio 4's Book of the Week from 3 November 2008 and was adapted into a 90-minute drama for BBC 2 entitled The Scandalous Lady W, broadcast on 17 August 2015, and starring Natalie Dormer.
Rubenhold has written two novels, both set during the eighteenth century. The French Lesson is set during the Terror in Revolutionary Paris. It follows on from her first novel, Mistress of My Fate, the first book in the Confessions of Henrietta Lightfoot series. Both books are written as an hommage to classic works of eighteenth and early nineteenth century literature.
Her most recent book is The Five, a biography of the five victims of Jack the Ripper. It won the £50,000 Baillie Gifford Prize in 2019 and was named the Hay Festival Book of the Year. It was also shortlisted for the 2020 Wolfson History Prize.
Rubenhold is married and lives in London.
Bibliography
(2005:a) The Covent Garden Ladies: Pimp General Jack and the extraordinary story of "Harris' List" . Stroud: Tempus
(ed.) (2005:b) "Harris's List of Covent-Garden Ladies": sex in the city in Georgian Britain. Stroud: Tempus
(2008:a) Lady Worsley’s Whim; An Eighteenth Century Tale of Sex, Scandal and Divorce. Chatto & Windus. US title: The Lady in Red
(2007:b) The Harlot's Handbook: Harris's List. Tempus
(2011) Mistress of My Fate; The Confessions of Henrietta Lightfoot Transworld
(2015) The French Lesson Transworld
(2019) The Five: The Untold Lives of the Women Killed by Jack the Ripper Doubleday
References
External links
Rubenhold's personal website
1971 births
21st-century American historians
21st-century American women writers
21st-century British historians
21st-century British women writers
Alumni of the University of Leeds
American emigrants to England
American non-fiction crime writers
American people of British descent
American women historians
British people of American descent
British non-fiction crime writers
British women historians
Historians from California
Historians of Jack the Ripper
Living people
Social historians
University of Massachusetts Amherst alumni
Writers from Los Angeles
|
İsmet Kür (born Zorluhankızı, 29 September 1916 – 21 January 2013) was a Turkish educator, journalist, columnist and writer of mainly children's literature. Her writings included children's stories, novels, memoirs, short story, poems, and non-fiction. As a journalist, she worked at the BBC World Service, Cumhuriyet, Barış, and Yeni İstanbul. She also provided programming at Ankara Radio, TRT, and Bayrak.
Personal life
İsmet Kür was born on 29 September 1916 in Göztepe, Kadıköy, in Constantinople, Ottoman Empire (now Istanbul, Turkey), in a mansion frequented by writers and poets. Her father Avnullah Kazim was a journalist, writer and politician, and her mother Ayşe Nazlı, an intellectual woman. Her sister Halide Nusret Zorlutuna (1901–1984) was a poet and writer. After the Surname Law went into effect in 1934, she took the family name "Zorluhankızı", meaning the "daughter of Zorluhan" because her name "İsmet" is mainly used for males, and caused confusion. Her father's ancestry of 6th to 7th generations was a Bey, a chieftain of Zorluhan in Erzurum Province, eastern Turkey.
After graduation from the Girls' Teacher School in Edirne, she studied in the Literature Department of Gazi Institute for Education () in Ankara obtaining a teacher's degree in 1938.
She was married to a mathematician, who admired classical music and painting. By this marriage, she took the surname "Kür". She gave birth to two daughters, journalist and writer Pınar Kür (born 1943) and sculptor Işılar Kür. She has a grandson Emrah Kolukısa (born 1972), who is a program producer at the NTV television channel, the son of Pınar Kür and the father of her great-grandson Cem.
Kür went with her two young daughters to London to learn the English language on a scholarship. In 1953, she attended the Kent School of Drama. She lived also from 1956 to 1960 in New York City with her children. There, she attended in 1960 courses at New York University on children's and youth psychology, adult education, human relations, history of education and Russian literature of the 19th century.
During her time in the U.S., she founded the "Women's Association" with the spouse of the Turkish Ambassador.
She said in an interview that she owed her very old age to her practicing tennis, volleyball, skiing, and gymnastics, and exercising every morning. In September 2012, she contracted cerebral infarction, and became bedridden. She died at home on 21 January 2013 at the age of 96. She was interred at Ayazağa Cemetery following a religious funeral service held at Teşvikiye Mosque on 23 January.
Professional career
Educator and diplomat
Kür served as a school teacher of Turkish language and Literature for 21 years. In the 1950s, she worked at the BBC World Service in London. In 1956, she was appointed Deputy Student inspector for the United States territory, and was subsequently promoted to the post of Student Inspector in New York City. She also served as the Turkish Attaché of Culture there until 1960.
Journalist
She was a long-time journalist at Cumhuriyet. She worked as a columnist at the newspapers Barış and Yeni İstanbul.
Writer
Kür was a friend of many authors, particularly Kemal Tahir (1910–1973). She was the author of 27 books in the genres of research, essay, short story, poem, novel, and memoir. She also wrote sketches for radio and plays. She published her first poem in the periodical Çocuk Dünyası ("Child's World") in 1927, and her first short story Mutlu Tahayüller in the periodical Muhit in August 1931.
She wrote more than 100 sketches for the "Çocuk Saati" ( "Children's Hour") program at Ankara Radio. She also did children's programs for the radio stations TRT in Turkey and Bayrak in the Turkish Republic of Northern Cyprus.
She published her memoirs in Yarısı Roman (1995) and Yıllara mı Çarptı Hızımız (2008). Osmanlıca Çocuk Dergileri is a research work of hers on the children's magazines in Ottoman Turkish language, which made her well-known. She continued writing until her death.
Kür was a member of the "People of the Letters Association" and the "Turkish Union of Writers".
Works
Children's
Novels
Memoirs
Short story
Non-fiction
Almanya'daki Çocuklarımızın Başarısızlık Nedenleri
Anneler Sizin İçin (1964)
Poems
Yaşamak'' (1945)
References
External links
Her autobiography on the TRT archive video
1916 births
People from Kadıköy
Writers from Istanbul
Gazi Eğitim Enstitüsü alumni
Turkish schoolteachers
Turkish women civil servants
Turkish civil servants
Turkish expatriates in the United States
Turkish women diplomats
New York University alumni
Turkish women journalists
Turkish columnists
Cumhuriyet people
Turkish women children's writers
Turkish children's writers
Turkish dramatists and playwrights
Turkish women dramatists and playwrights
Turkish autobiographers
Turkish non-fiction writers
2013 deaths
20th-century Turkish women writers
20th-century Turkish writers
21st-century Turkish women writers
Women autobiographers
Turkish women columnists
|
```javascript
// Re-export the Sider layout component as this module's default export.
export { default } from '../layout/sider.vue';
```
|
Timberhouse is a historic plantation house located at Newberry, Newberry County, South Carolina. It was built about 1858 by Jacob Kibler, and is a two-story, weatherboarded Greek Revival style dwelling. It features double-tiered full-width porches supported by six square wood pillars and exterior end chimneys.
It was listed on the National Register of Historic Places in 1980.
Kibler, who built the residence, owned 68 slaves in 1850, eight years before Timberhouse was constructed.
References
Plantation houses in South Carolina
Houses on the National Register of Historic Places in South Carolina
Greek Revival houses in South Carolina
Houses completed in 1858
Houses in Newberry County, South Carolina
National Register of Historic Places in Newberry County, South Carolina
Newberry, South Carolina
|
```objective-c
/*
* Workspace window manager
*
*
* This program is free software; you can redistribute it and/or modify
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __WORKSPACE_WM_MENU__
#define __WORKSPACE_WM_MENU__
#include <CoreFoundation/CFRunLoop.h>
#include "wcore.h"
/* Indicator glyph types that can be shown at the left of a menu item */
#define MI_DIAMOND 0
#define MI_CHECK 1
#define MI_MINIWINDOW 2
#define MI_HIDDEN 3
#define MI_SHADED 4
/* Full on-screen size of a menu: core frame size plus the screen's
 * frame border on both sides */
#define MENU_WIDTH(m) ((m)->frame->core->width + 2 * (m)->frame->screen_ptr->frame_border_width)
#define MENU_HEIGHT(m) ((m)->frame->core->height + 2 * (m)->frame->screen_ptr->frame_border_width)
/* A single entry of a menu, possibly cascading into a submenu. */
typedef struct WMenuItem {
    struct WMenu *menu;               /* menu this item belongs to */
    struct WShortKey *shortcut;       /* keyboard shortcut bound to this item, if any */
    int index;                        /* index of item in menu */
    char *text;                       /* entry text */
    char *rtext;                      /* text to show in the right part */
    /* invoked when the item is activated */
    void (*callback)(struct WMenu *menu, struct WMenuItem *item);
    void (*free_cdata)(void *data);   /* proc to be used to free clientdata */
    void *clientdata;                 /* data to pass to callback */
    int submenu_index;                /* cascade menu index */
    struct {
        unsigned int enabled : 1;     /* item is selectable */
        unsigned int selected : 1;    /* item is highlighted */
        unsigned int indicator : 1;   /* left indicator */
        unsigned int indicator_on : 1;   /* whether the left indicator is lit */
        unsigned int indicator_type : 3; /* presumably one of the MI_* values — confirm */
    } flags;
} WMenuItem;
/* A menu window: a frame holding an array of items and optional submenus. */
typedef struct WMenu {
    struct WMenu *parent;             /* menu that cascades into this one, if any — confirm */
    struct WMenu *brother;            /* the copy of this menu (see flags.brother) */
    struct WApplication *app;         /* owning application, when this is an app menu — confirm */
    /* decorations */
    struct WFrameWindow *frame;
    WCoreWindow *menu;                /* the menu window */
    Pixmap menu_texture_data;         /* pixmap with the menu background texture — confirm */
    int frame_x, frame_y;             /* position of the frame in root window */
    int old_frame_x, old_frame_y;     /* position of the frame before slide */
    WMenuItem **items;                /* array of items. shared by the menu and it's "brother" */
    short allocated_items;            /* number of items allocated in `items` array */
    short items_count;                /* number of items in `items` array */
    short selected_item_index;        /* index of item in `items` array */
    short item_height;                /* height of each item */
    struct WMenu **submenus;          /* array of submenus attached to items */
    short submenus_count;
    CFRunLoopTimerRef timer;          /* timer for the autoscroll */
    /* to be called when some item is edited */
    void (*on_edit)(struct WMenu *menu, struct WMenuItem *item);
    /* to be called when destroyed */
    void (*on_destroy)(struct WMenu *menu);
    struct {
        unsigned int titled : 1;
        unsigned int realized : 1;    /* whether the window was configured */
        unsigned int restored : 1;    /* whether the menu was restored from saved state */
        unsigned int app_menu : 1;    /* this is a application or root menu */
        unsigned int mapped : 1;      /* if menu is already mapped on screen*/
        unsigned int hidden : 1;      /* if menu was hidden on app deactivation */
        unsigned int tornoff : 1;     /* if the close button is visible (menu was torn off) */
        unsigned int lowered : 1;     /* if the menu window is kept below others — confirm */
        unsigned int brother : 1;     /* if this is a copy of the menu */
    } flags;
} WMenu;
/* Painting and lifecycle */
void wMenuPaint(WMenu *menu);
void wMenuDestroy(WMenu *menu, int recurse);
void wMenuRealize(WMenu *menu);

/* Submenu management */
WMenuItem *wMenuInsertSubmenu(WMenu *menu, int index, const char *text, WMenu *submenu);
void wMenuItemSetSubmenu(WMenu *menu, WMenuItem *item, WMenu *submenu);
void wMenuItemRemoveSubmenu(WMenu *menu, WMenuItem *item);

/* Item management. The callback receives the menu and the activated item
 * (same signature as WMenuItem.callback); index == -1 appends. */
WMenuItem *wMenuItemInsert(WMenu *menu, int index, const char *text,
                           void (*callback)(WMenu *menu, WMenuItem *item), void *clientdata);
#define wMenuAddItem(menu, text, callback, data) wMenuItemInsert(menu, -1, text, callback, data)
void wMenuItemRemove(WMenu *menu, int index);
void wMenuItemSetShortcut(WMenuItem *item, const char *shortcut);
void wMenuItemPaint(WMenu *menu, int item_index, int selected);
void wMenuItemSetEnabled(WMenu *menu, WMenuItem *item, Bool enable);
void wMenuItemSelect(WMenu *menu, int item_index);

/* Creation and mapping */
WMenu *wMenuCreate(WScreen *screen, const char *title, int main_menu);
WMenu *wMenuCreateForApp(WScreen *screen, const char *title, int main_menu);
void wMenuMap(WMenu *menu);
void wMenuMapAt(WMenu *menu, int x, int y, int keyboard);
#define wMenuMapCopyAt(menu, x, y) wMenuMapAt((menu)->brother, (x), (y), False)
void wMenuUnmap(WMenu *menu);
void wMenuSetEnabled(WMenu *menu, int index, int enable);
void wMenuMove(WMenu *menu, int x, int y, int submenus);
void wMenuSlideIfNeeded(WMenu *menu);
WMenu *wMenuUnderPointer(WScreen *screen);

/* Session state persistence */
void wMenuSaveState(WScreen *scr);
void wMenuRestoreState(WScreen *scr);
#endif /* __WORKSPACE_WM_MENU__ */
```
|
```python
#
#
# path_to_url
#
# Unless required by applicable law or agreed to in writing, software
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import paddle
def static_model(x, y):
    """Static-graph forward pass: element-wise x raised to the power y."""
    return paddle.pow(x, y)
def main():
    # Exercise the static-graph NaN/Inf checker on pow() and its gradient.
    paddle.enable_static()
    # check_nan_inf_level=0 makes the checker act on any NaN/Inf produced.
    paddle.set_flags({"FLAGS_check_nan_inf": 1, "FLAGS_check_nan_inf_level": 0})
    x_static = paddle.static.data(name='x_static', shape=[3], dtype='float32')
    y_static = paddle.static.data(name='y_static', shape=[3], dtype='float32')
    # Gradients are only constructed w.r.t. tensors that require them.
    x_static.stop_gradient = False
    z_static = static_model(x_static, y_static)
    # NOTE(review): paddle.static.gradients(targets, inputs, target_gradients=...)
    # — y_static lands in the third (target_gradients) positional slot here;
    # confirm it is not meant to be part of the inputs list instead.
    grads_static = paddle.static.gradients(z_static, x_static, y_static)
    exe_static = paddle.static.Executor(paddle.CPUPlace())
    exe_static.run(paddle.static.default_startup_program())
    # x=0 with y=0 makes the pow gradient ill-defined, which should trip the
    # checker. NOTE(review): feed uses plain Python lists — presumably they
    # are converted to float32 ndarrays by the executor; verify against the
    # paddle version in use.
    grads_val_static = exe_static.run(
        paddle.static.default_main_program(),
        feed={'x_static': [1, 0, 3], 'y_static': [0, 0, 0]},
        fetch_list=[grads_static],
    )

if __name__ == "__main__":
    main()
```
|
```php
<?php
/*
* This file is part of SwiftMailer.
* (c) 2004-2009 Chris Corbyn
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
* A MIME entity, in a multipart message.
*
* @author Chris Corbyn
*/
class Swift_Mime_SimpleMimeEntity implements Swift_Mime_MimeEntity
{
    /** A collection of Headers for this mime entity */
    private $_headers;

    /** The body as a string, or a stream */
    private $_body;

    /** The encoder that encodes the body into a streamable format */
    private $_encoder;

    /** The grammar to use for id validation */
    private $_grammar;

    /** A mime boundary, if any is used */
    private $_boundary;

    /** Mime types to be used based on the nesting level */
    private $_compositeRanges = array(
        'multipart/mixed' => array(self::LEVEL_TOP, self::LEVEL_MIXED),
        'multipart/alternative' => array(self::LEVEL_MIXED, self::LEVEL_ALTERNATIVE),
        'multipart/related' => array(self::LEVEL_ALTERNATIVE, self::LEVEL_RELATED),
    );

    /** A set of filter rules to define what level an entity should be nested at */
    private $_compoundLevelFilters = array();

    /** The nesting level of this entity */
    private $_nestingLevel = self::LEVEL_ALTERNATIVE;

    /** A KeyCache instance used during encoding and streaming */
    private $_cache;

    /** Direct descendants of this entity */
    private $_immediateChildren = array();

    /** All descendants of this entity */
    private $_children = array();

    /** The maximum line length of the body of this entity */
    private $_maxLineLength = 78;

    /** The order in which alternative mime types should appear */
    private $_alternativePartOrder = array(
        'text/plain' => 1,
        'text/html' => 2,
        'multipart/related' => 3,
    );

    /** The CID of this entity */
    private $_id;

    /** The key used for accessing the cache */
    private $_cacheKey;

    /** The Content-Type explicitly set by the caller, kept so it can be
     * restored after child entities force a composite type (see setContentType()) */
    protected $_userContentType;
/**
 * Create a new SimpleMimeEntity with $headers, $encoder and $cache.
 *
 * @param Swift_Mime_HeaderSet $headers
 * @param Swift_Mime_ContentEncoder $encoder
 * @param Swift_KeyCache $cache
 * @param Swift_Mime_Grammar $grammar
 */
public function __construct(Swift_Mime_HeaderSet $headers, Swift_Mime_ContentEncoder $encoder, Swift_KeyCache $cache, Swift_Mime_Grammar $grammar)
{
    // Per-entity cache key so concurrent entities never share cached bodies.
    $this->_cacheKey = md5(uniqid(getmypid().mt_rand(), true));
    $this->_cache = $cache;
    $this->_headers = $headers;
    $this->_grammar = $grammar;
    $this->setEncoder($encoder);
    // Content-Type must be rendered before Content-Transfer-Encoding.
    $this->_headers->defineOrdering(array('Content-Type', 'Content-Transfer-Encoding'));
    // This array specifies that, when the entire MIME document contains
    // $compoundLevel, then for each child within $level, if its Content-Type
    // is $contentType then it should be treated as if it's level is
    // $neededLevel instead. I tried to write that unambiguously! :-\
    // Data Structure:
    // array (
    //   $compoundLevel => array(
    //     $level => array(
    //       $contentType => $neededLevel
    //     )
    //   )
    // )
    $this->_compoundLevelFilters = array(
        (self::LEVEL_ALTERNATIVE + self::LEVEL_RELATED) => array(
            self::LEVEL_ALTERNATIVE => array(
                'text/plain' => self::LEVEL_ALTERNATIVE,
                'text/html' => self::LEVEL_RELATED,
            ),
        ),
    );
    $this->_id = $this->getRandomId();
}
/**
 * Assign a freshly generated random Content-ID/Message-ID to this entity.
 *
 * @return string the newly assigned id
 */
public function generateId()
{
    $newId = $this->getRandomId();
    $this->setId($newId);

    return $this->_id;
}
/**
 * Access the header set describing this entity.
 *
 * @return Swift_Mime_HeaderSet
 */
public function getHeaders()
{
    return $this->_headers;
}
/**
 * Nesting level of this entity within a MIME tree.
 *
 * @see LEVEL_TOP, LEVEL_MIXED, LEVEL_RELATED, LEVEL_ALTERNATIVE
 *
 * @return int
 */
public function getNestingLevel()
{
    return $this->_nestingLevel;
}
/**
 * Read the Content-Type currently recorded in this entity's headers.
 *
 * @return string
 */
public function getContentType()
{
    return $this->_getHeaderFieldModel('Content-Type');
}
/**
 * Record $type as both the header value and the caller's preferred type.
 *
 * @param string $type
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setContentType($type)
{
    $this->_setContentTypeInHeaders($type);
    // Remember the caller's choice: adding child entities may force a
    // composite type later, and the original must be restorable when
    // they are removed again.
    $this->_userContentType = $type;

    return $this;
}
/**
 * Get the CID of this entity.
 *
 * Prefers the value stored in the id header when one exists; otherwise
 * falls back to the internally generated id.
 *
 * @return string
 */
public function getId()
{
    $idField = $this->_getIdField();
    if ($this->_headers->has($idField)) {
        $model = (array) $this->_getHeaderFieldModel($idField);

        return current($model);
    }

    return $this->_id;
}
/**
 * Set the CID of this entity.
 *
 * @param string $id
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setId($id)
{
    $idField = $this->_getIdField();
    // Update the existing id header when present; otherwise create one.
    if (!$this->_setHeaderFieldModel($idField, $id)) {
        $this->_headers->addIdHeader($idField, $id);
    }
    $this->_id = $id;

    return $this;
}
/**
 * Read the Content-Description header value, if one was set.
 *
 * @return string
 */
public function getDescription()
{
    return $this->_getHeaderFieldModel('Content-Description');
}
/**
 * Set the description of this entity.
 *
 * This method sets a value in the Content-Description header.
 *
 * @param string $description
 *
 * @return Swift_Mime_SimpleMimeEntity
 */
public function setDescription($description)
{
    if (!$this->_setHeaderFieldModel('Content-Description', $description)) {
        $this->_headers->addTextHeader('Content-Description', $description);
    }
    return $this;
}
/**
 * Maximum line length applied when encoding this entity's body.
 *
 * @return int
 */
public function getMaxLineLength()
{
    return $this->_maxLineLength;
}
/**
 * Cap the length of encoded body lines.
 *
 * Not enforced by the library; lines should stay under 1000 characters.
 *
 * @param int $length
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setMaxLineLength($length)
{
    $this->_maxLineLength = $length;

    return $this;
}
/**
 * All descendant entities previously registered via setChildren().
 *
 * @return Swift_Mime_MimeEntity[]
 */
public function getChildren()
{
    return $this->_children;
}
/**
 * Set all children of this entity.
 *
 * Children are partitioned into immediate children (those at the
 * shallowest needed level present) and grandchildren (anything deeper),
 * and this entity's composite Content-Type is adjusted to suit the
 * immediate children.
 *
 * @param Swift_Mime_MimeEntity[] $children
 * @param int $compoundLevel For internal use only
 *
 * @return Swift_Mime_SimpleMimeEntity
 */
public function setChildren(array $children, $compoundLevel = null)
{
    // TODO: Try to refactor this logic
    $compoundLevel = isset($compoundLevel)
        ? $compoundLevel
        : $this->_getCompoundLevel($children)
    ;
    $immediateChildren = array();
    $grandchildren = array();
    $newContentType = $this->_userContentType;
    foreach ($children as $child) {
        $level = $this->_getNeededChildLevel($child, $compoundLevel);
        if (empty($immediateChildren)) {
            //first iteration
            $immediateChildren = array($child);
        } else {
            $nextLevel = $this->_getNeededChildLevel($immediateChildren[0], $compoundLevel);
            if ($nextLevel == $level) {
                $immediateChildren[] = $child;
            } elseif ($level < $nextLevel) {
                // A shallower level wins: demote the current immediate
                // children and restart with $child.
                // Re-assign immediateChildren to grandchildren
                $grandchildren = array_merge($grandchildren, $immediateChildren);
                // Set new children
                $immediateChildren = array($child);
            } else {
                $grandchildren[] = $child;
            }
        }
    }
    if (!empty($immediateChildren)) {
        $lowestLevel = $this->_getNeededChildLevel($immediateChildren[0], $compoundLevel);
        // Determine which composite media type is needed to accommodate the
        // immediate children
        foreach ($this->_compositeRanges as $mediaType => $range) {
            if ($lowestLevel > $range[0]
                && $lowestLevel <= $range[1]) {
                $newContentType = $mediaType;
                break;
            }
        }
        // Put any grandchildren in a subpart
        if (!empty($grandchildren)) {
            $subentity = $this->_createChild();
            $subentity->_setNestingLevel($lowestLevel);
            $subentity->setChildren($grandchildren, $compoundLevel);
            array_unshift($immediateChildren, $subentity);
        }
    }
    $this->_immediateChildren = $immediateChildren;
    $this->_children = $children;
    $this->_setContentTypeInHeaders($newContentType);
    $this->_fixHeaders();
    $this->_sortChildren();
    return $this;
}
/**
 * Get the body of this entity as a string.
 *
 * Stream bodies are drained into a string and then rewound.
 *
 * @return string
 */
public function getBody()
{
    if ($this->_body instanceof Swift_OutputByteStream) {
        return $this->_readStream($this->_body);
    }

    return $this->_body;
}
/**
 * Set the body of this entity, either as a string, or as an instance of
 * {@link Swift_OutputByteStream}.
 *
 * @param mixed $body
 * @param string $contentType optional
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setBody($body, $contentType = null)
{
    // A new body invalidates any cached encoded copy.
    if ($body !== $this->_body) {
        $this->_clearCache();
    }
    $this->_body = $body;
    if (null !== $contentType) {
        $this->setContentType($contentType);
    }

    return $this;
}
/**
 * The encoder currently responsible for this entity's body.
 *
 * @return Swift_Mime_ContentEncoder
 */
public function getEncoder()
{
    return $this->_encoder;
}
/**
 * Replace the body encoder, refresh the transfer-encoding header, and
 * broadcast the change.
 *
 * @param Swift_Mime_ContentEncoder $encoder
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setEncoder(Swift_Mime_ContentEncoder $encoder)
{
    // A different encoder invalidates the cached encoded body.
    if ($encoder !== $this->_encoder) {
        $this->_clearCache();
    }
    $this->_encoder = $encoder;
    $this->_setEncoding($encoder->getName());
    $this->_notifyEncoderChanged($encoder);

    return $this;
}
/**
 * Get the boundary used to separate children in this entity,
 * lazily generating one on first use.
 *
 * @return string
 */
public function getBoundary()
{
    if (null === $this->_boundary) {
        // Built from pid + time + random data to avoid colliding with
        // body content.
        $this->_boundary = '_=_swift_v4_'.time().'_'.md5(getmypid().mt_rand().uniqid('', true)).'_=_';
    }

    return $this->_boundary;
}
/**
 * Use $boundary to delimit child parts of this entity.
 *
 * @param string $boundary
 *
 * @throws Swift_RfcComplianceException if the boundary fails validation
 *
 * @return Swift_Mime_SimpleMimeEntity this instance, for chaining
 */
public function setBoundary($boundary)
{
    $this->_assertValidBoundary($boundary);
    $this->_boundary = $boundary;

    return $this;
}
/**
 * Notification hook: the charset of this entity, or of a parent entity,
 * has changed. The notification is propagated internally.
 *
 * @param string $charset
 */
public function charsetChanged($charset)
{
    $this->_notifyCharsetChanged($charset);
}
/**
 * Notification hook: the encoder of this entity, or of a parent entity,
 * has changed. The notification is propagated internally.
 *
 * @param Swift_Mime_ContentEncoder $encoder
 */
public function encoderChanged(Swift_Mime_ContentEncoder $encoder)
{
    $this->_notifyEncoderChanged($encoder);
}
/**
 * Render this entire entity (headers followed by body) as a string.
 *
 * @return string
 */
public function toString()
{
    return $this->_headers->toString().$this->_bodyToString();
}
/**
 * Render this entity's body as a string.
 *
 * Leaf entities render their (possibly cached) encoded body; composite
 * entities render each immediate child wrapped in MIME boundary markers.
 *
 * @return string
 */
protected function _bodyToString()
{
    $string = '';
    if (isset($this->_body) && empty($this->_immediateChildren)) {
        if ($this->_cache->hasKey($this->_cacheKey, 'body')) {
            // Reuse the previously encoded body.
            $body = $this->_cache->getString($this->_cacheKey, 'body');
        } else {
            // Leading CRLF separates the headers from the body.
            $body = "\r\n".$this->_encoder->encodeString($this->getBody(), 0,
                $this->getMaxLineLength()
            );
            $this->_cache->setString($this->_cacheKey, 'body', $body,
                Swift_KeyCache::MODE_WRITE
            );
        }
        $string .= $body;
    }
    if (!empty($this->_immediateChildren)) {
        foreach ($this->_immediateChildren as $child) {
            $string .= "\r\n\r\n--".$this->getBoundary()."\r\n";
            $string .= $child->toString();
        }
        // The closing boundary ("--" suffix) terminates the multipart.
        $string .= "\r\n\r\n--".$this->getBoundary()."--\r\n";
    }
    return $string;
}
/**
 * Magic string conversion; delegates to toString().
 *
 * @see toString()
 *
 * @return string
 */
public function __toString()
{
    return $this->toString();
}
/**
 * Stream this entire entity (headers followed by body) into $is.
 *
 * @param Swift_InputByteStream $is
 */
public function toByteStream(Swift_InputByteStream $is)
{
    // Headers are written and committed before the body is streamed.
    $is->write($this->_headers->toString());
    $is->commit();
    $this->_bodyToByteStream($is);
}
/**
 * Stream this entity's body into $is.
 *
 * Leaf entities stream their encoded body (binding the cache stream so
 * the encoded bytes are captured for reuse); composite entities stream
 * each immediate child wrapped in MIME boundary markers.
 *
 * @param Swift_InputByteStream
 */
protected function _bodyToByteStream(Swift_InputByteStream $is)
{
    if (empty($this->_immediateChildren)) {
        if (isset($this->_body)) {
            if ($this->_cache->hasKey($this->_cacheKey, 'body')) {
                // Replay the previously encoded body.
                $this->_cache->exportToByteStream($this->_cacheKey, 'body', $is);
            } else {
                // Bind the cache stream so encoded bytes are captured
                // while being written out.
                $cacheIs = $this->_cache->getInputByteStream($this->_cacheKey, 'body');
                if ($cacheIs) {
                    $is->bind($cacheIs);
                }
                // CRLF separates the headers from the body.
                $is->write("\r\n");
                if ($this->_body instanceof Swift_OutputByteStream) {
                    $this->_body->setReadPointer(0);
                    $this->_encoder->encodeByteStream($this->_body, $is, 0, $this->getMaxLineLength());
                } else {
                    $is->write($this->_encoder->encodeString($this->getBody(), 0, $this->getMaxLineLength()));
                }
                if ($cacheIs) {
                    $is->unbind($cacheIs);
                }
            }
        }
    }
    if (!empty($this->_immediateChildren)) {
        foreach ($this->_immediateChildren as $child) {
            $is->write("\r\n\r\n--".$this->getBoundary()."\r\n");
            $child->toByteStream($is);
        }
        // The closing boundary ("--" suffix) terminates the multipart.
        $is->write("\r\n\r\n--".$this->getBoundary()."--\r\n");
    }
}
/**
 * Name of the header carrying this entity's id.
 *
 * @return string
 */
protected function _getIdField()
{
    return 'Content-ID';
}
/**
 * Get the model data (usually an array or a string) for $field, or null
 * when the header does not exist.
 */
protected function _getHeaderFieldModel($field)
{
    if (!$this->_headers->has($field)) {
        return null;
    }

    return $this->_headers->get($field)->getFieldBodyModel();
}
/**
 * Set the model data for $field.
 *
 * @return bool false when the header does not exist (nothing written)
 */
protected function _setHeaderFieldModel($field, $model)
{
    if (!$this->_headers->has($field)) {
        return false;
    }
    $this->_headers->get($field)->setFieldBodyModel($model);

    return true;
}
/**
 * Get the value of $parameter on the $field header, or null when the
 * header does not exist.
 */
protected function _getHeaderParameter($field, $parameter)
{
    if (!$this->_headers->has($field)) {
        return null;
    }

    return $this->_headers->get($field)->getParameter($parameter);
}
/**
 * Set the parameter value of $parameter on $field header.
 *
 * @param string $field
 * @param string $parameter
 * @param mixed  $value
 *
 * @return bool true if the header exists and was updated, false otherwise
 */
protected function _setHeaderParameter($field, $parameter, $value)
{
    if (!$this->_headers->has($field)) {
        return false;
    }
    $this->_headers->get($field)->setParameter($parameter, $value);
    return true;
}
/**
 * Re-evaluate what content type and encoding should be used on this entity.
 *
 * With children present this becomes a multipart container: the boundary
 * parameter is set and Content-Transfer-Encoding is dropped. Without
 * children the boundary is cleared and the encoder's encoding re-applied.
 */
protected function _fixHeaders()
{
    if (count($this->_immediateChildren)) {
        $this->_setHeaderParameter('Content-Type', 'boundary',
            $this->getBoundary()
        );
        $this->_headers->remove('Content-Transfer-Encoding');
    } else {
        $this->_setHeaderParameter('Content-Type', 'boundary', null);
        $this->_setEncoding($this->_encoder->getName());
    }
}
/**
 * Get the KeyCache used in this entity.
 *
 * @return Swift_KeyCache
 */
protected function _getCache()
{
    return $this->_cache;
}
/**
 * Get the grammar used for ID validation.
 *
 * @return Swift_Mime_Grammar
 */
protected function _getGrammar()
{
    return $this->_grammar;
}
/**
 * Empty the KeyCache for this entity.
 *
 * Only the cached 'body' item is cleared, not other cache entries.
 */
protected function _clearCache()
{
    $this->_cache->clearKey($this->_cacheKey, 'body');
}
/**
 * Returns a random Content-ID or Message-ID.
 *
 * The right-hand side uses SERVER_NAME when available so the ID resembles
 * a real address; if the generated ID fails RFC validation the fallback
 * domain 'swift.generated' is used instead.
 *
 * NOTE(review): md5/uniqid/mt_rand are not cryptographically strong; this
 * looks acceptable for message IDs, but confirm nothing security-sensitive
 * relies on these IDs being unpredictable.
 *
 * @return string
 */
protected function getRandomId()
{
    $idLeft = md5(getmypid().'.'.time().'.'.uniqid(mt_rand(), true));
    $idRight = !empty($_SERVER['SERVER_NAME']) ? $_SERVER['SERVER_NAME'] : 'swift.generated';
    $id = $idLeft.'@'.$idRight;
    try {
        $this->_assertValidId($id);
    } catch (Swift_RfcComplianceException $e) {
        // Fall back to a domain that is guaranteed to validate.
        $id = $idLeft.'@swift.generated';
    }
    return $id;
}
/**
 * Drain $os into a string in 8 KiB chunks, then rewind the stream so it
 * can be read again by the caller.
 *
 * @param Swift_OutputByteStream $os
 *
 * @return string
 */
private function _readStream(Swift_OutputByteStream $os)
{
    $contents = '';
    while (($bytes = $os->read(8192)) !== false) {
        $contents .= $bytes;
    }
    $os->setReadPointer(0);
    return $contents;
}
/**
 * Record $encoding in the Content-Transfer-Encoding header, creating the
 * header if it does not exist yet.
 *
 * @param string $encoding
 */
private function _setEncoding($encoding)
{
    if (!$this->_setHeaderFieldModel('Content-Transfer-Encoding', $encoding)) {
        $this->_headers->addTextHeader('Content-Transfer-Encoding', $encoding);
    }
}
/**
 * Assert that $boundary is RFC 2046 compliant: 1-70 characters from the
 * allowed set, and not ending with a space.
 *
 * @param string $boundary
 *
 * @throws Swift_RfcComplianceException
 */
private function _assertValidBoundary($boundary)
{
    if (!preg_match(
        '/^[a-z0-9\'\(\)\+_\-,\.\/:=\?\ ]{0,69}[a-z0-9\'\(\)\+_\-,\.\/:=\?]$/Di',
        $boundary)) {
        throw new Swift_RfcComplianceException('Mime boundary set is not RFC 2046 compliant.');
    }
}
/**
 * Record $type in the Content-Type header, creating the (parameterized)
 * header if it does not exist yet.
 *
 * @param string $type
 */
private function _setContentTypeInHeaders($type)
{
    if (!$this->_setHeaderFieldModel('Content-Type', $type)) {
        $this->_headers->addParameterizedHeader('Content-Type', $type);
    }
}
/**
 * Set this entity's MIME nesting level (one of the LEVEL_* constants).
 *
 * @param int $level
 */
private function _setNestingLevel($level)
{
    $this->_nestingLevel = $level;
}
/**
 * OR together the nesting levels of all $children into a single bitmask.
 *
 * @param array $children
 *
 * @return int
 */
private function _getCompoundLevel($children)
{
    $combined = 0;
    foreach ($children as $child) {
        $combined |= $child->getNestingLevel();
    }
    return $combined;
}
/**
 * Determine the nesting level a child should actually be placed at.
 *
 * Rules from $this->_compoundLevelFilters whose bitmask is fully contained
 * in $compoundLevel are merged (later matches take precedence via array
 * union), and may override the child's real level for a specific content
 * type; otherwise the child's own nesting level is used.
 *
 * @param Swift_Mime_MimeEntity $child
 * @param int                   $compoundLevel
 *
 * @return int
 */
private function _getNeededChildLevel($child, $compoundLevel)
{
    $filter = array();
    foreach ($this->_compoundLevelFilters as $bitmask => $rules) {
        if (($compoundLevel & $bitmask) === $bitmask) {
            $filter = $rules + $filter;
        }
    }
    $realLevel = $child->getNestingLevel();
    $lowercaseType = strtolower($child->getContentType());
    if (isset($filter[$realLevel])
        && isset($filter[$realLevel][$lowercaseType])) {
        return $filter[$realLevel][$lowercaseType];
    } else {
        return $realLevel;
    }
}
/**
 * Create a new child entity sharing this entity's cache, encoder and
 * grammar, with a fresh header set.
 *
 * @return self
 */
private function _createChild()
{
    return new self($this->_headers->newInstance(),
        $this->_encoder, $this->_cache, $this->_grammar);
}
/**
 * Propagate an encoder change to all immediate children.
 *
 * @param Swift_Mime_ContentEncoder $encoder
 */
private function _notifyEncoderChanged(Swift_Mime_ContentEncoder $encoder)
{
    foreach ($this->_immediateChildren as $child) {
        $child->encoderChanged($encoder);
    }
}
/**
 * Propagate a charset change to the encoder, the headers and all
 * immediate children.
 *
 * @param string $charset
 */
private function _notifyCharsetChanged($charset)
{
    $this->_encoder->charsetChanged($charset);
    $this->_headers->charsetChanged($charset);
    foreach ($this->_immediateChildren as $child) {
        $child->charsetChanged($charset);
    }
}
/**
 * Sort immediate children into preference order, but only when at least
 * one alternative part is present.
 */
private function _sortChildren()
{
    $shouldSort = false;
    foreach ($this->_immediateChildren as $child) {
        // NOTE: This includes alternative parts moved into a related part
        if ($child->getNestingLevel() == self::LEVEL_ALTERNATIVE) {
            $shouldSort = true;
            break;
        }
    }
    // Sort in order of preference, if there is one
    if ($shouldSort) {
        usort($this->_immediateChildren, array($this, '_childSortAlgorithm'));
    }
}
/**
 * usort() comparator ordering children by their content type's position in
 * $this->_alternativePartOrder; unknown types sort after all known ones.
 *
 * Fix: the previous version returned 1 whenever the preferences were equal,
 * so cmp($a, $b) and cmp($b, $a) both returned 1 — an asymmetric comparator
 * that violates the usort() contract and can yield inconsistent orderings.
 * Equal preferences now correctly compare as 0.
 *
 * @param Swift_Mime_MimeEntity $a
 * @param Swift_Mime_MimeEntity $b
 *
 * @return int -1, 0 or 1
 */
private function _childSortAlgorithm($a, $b)
{
    $typePrefs = array();
    $types = array(
        strtolower($a->getContentType()),
        strtolower($b->getContentType()),
    );
    foreach ($types as $type) {
        $typePrefs[] = (array_key_exists($type, $this->_alternativePartOrder))
            ? $this->_alternativePartOrder[$type]
            : (max($this->_alternativePartOrder) + 1);
    }
    if ($typePrefs[0] == $typePrefs[1]) {
        return 0;
    }
    return ($typePrefs[0] > $typePrefs[1]) ? 1 : -1;
}
// -- Destructor
/**
 * Empties its own contents from the cache.
 */
public function __destruct()
{
    $this->_cache->clearAll($this->_cacheKey);
}
/**
 * Throws an Exception if the id passed does not comply with RFC 2822.
 *
 * The pattern is assembled from the grammar's 'id-left' and 'id-right'
 * productions joined by '@'.
 *
 * @param string $id
 *
 * @throws Swift_RfcComplianceException
 */
private function _assertValidId($id)
{
    if (!preg_match(
        '/^'.$this->_grammar->getDefinition('id-left').'@'.
        $this->_grammar->getDefinition('id-right').'$/D',
        $id
        )) {
        throw new Swift_RfcComplianceException(
            'Invalid ID given <'.$id.'>'
        );
    }
}
/**
 * Make a deep copy of object.
 *
 * Headers, encoder and every child are cloned, and a fresh cache key is
 * generated so the copy does not share cached body content with the
 * original. NOTE(review): _cache and _grammar are left shared between the
 * clones — presumably intentional; confirm.
 */
public function __clone()
{
    $this->_headers = clone $this->_headers;
    $this->_encoder = clone $this->_encoder;
    $this->_cacheKey = uniqid();
    $children = array();
    foreach ($this->_children as $pos => $child) {
        $children[$pos] = clone $child;
    }
    $this->setChildren($children);
}
}
```
|
```shell
Clear the terminal instantly
Terminal based browser
Useful aliasing in bash
Breaking out of a terminal when `ssh` locks
Conditional command execution
(`&&` operator)
```
|
Granite High School may refer to:
Granite High School (Montana), Philipsburg, Montana
Granite High School (Oklahoma), Granite, Oklahoma
Granite High School (Utah), South Salt Lake, Utah
Granite Bay High School, Granite Bay, California
Granite City High School, Granite City, Illinois
Yellow Medicine East High School, Granite Falls, Minnesota
Granite Falls High School, Granite Falls, Washington
Granite Hills High School (Porterville, California), Porterville, California
Granite Hills High School (Apple Valley, California), Apple Valley, California
Granite Hills High School (El Cajon, California), El Cajon, California
Raymond Granite High School, Raymond, California
|
```objective-c
/*
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <charconv>
#include <cstring>
#include <string>
#include <type_traits>
#include <utility>
#include "CppUtil.h"
#include "Debug.h"
#include "DexDefs.h"
#include "DexOpcode.h"
#include "Gatherable.h"
#include "IROpcode.h"
#define MAX_ARG_COUNT (4)
class DexIdx;
class DexOutputIdx;
class DexString;
// A DEX instruction: one opcode word plus up to MAX_ARG_COUNT argument
// words. Subclasses add symbolic references (strings, types, fields,
// methods, ...); which kind is present is recorded in m_ref_type.
class DexInstruction : public Gatherable {
 protected:
  // Kind of symbolic reference this instruction carries, if any.
  enum {
    REF_NONE,
    REF_STRING,
    REF_TYPE,
    REF_FIELD,
    REF_METHOD,
    REF_CALLSITE,
    REF_METHODHANDLE,
    REF_PROTO,
  } m_ref_type{REF_NONE};

 private:
  uint16_t m_opcode = OPCODE_NOP;
  uint16_t m_arg[MAX_ARG_COUNT] = {};

 protected:
  // Number of argument words actually used in m_arg.
  uint16_t m_count = 0;

  // use clone() instead
  DexInstruction(const DexInstruction&) = default;
  DexInstruction& operator=(const DexInstruction&) = default;

  // Ref-less opcodes, largest size is 5 insns.
  // If the constructor is called with a non-numeric
  // count, we'll have to add a assert here.
  // Holds formats:
  // 10x 11x 11n 12x 22x 21s 21h 31i 32x 51l
  DexInstruction(const uint16_t* opcodes, int count) {
    always_assert_log(count <= MAX_ARG_COUNT,
                      "arg count %d exceeded the limit of %d",
                      count,
                      MAX_ARG_COUNT);
    m_opcode = *opcodes++;
    m_count = count;
    for (int i = 0; i < count; i++) {
      m_arg[i] = opcodes[i];
    }
  }

 public:
  explicit DexInstruction(DexOpcode op)
      : m_opcode(op), m_count(count_from_opcode()) {}

  DexInstruction(DexOpcode opcode, uint16_t arg) : DexInstruction(opcode) {
    redex_assert(m_count == 1);
    m_arg[0] = arg;
  }

 protected:
  // Append the argument words to the output cursor, advancing it.
  void encode_args(uint16_t*& insns) const {
    for (int i = 0; i < m_count; i++) {
      *insns++ = m_arg[i];
    }
  }

  // Append the opcode word to the output cursor, advancing it.
  void encode_opcode(uint16_t*& insns) const { *insns++ = m_opcode; }

 public:
  // Decode one instruction starting at *insns_ptr (bounded by end),
  // advancing *insns_ptr past it.
  static DexInstruction* make_instruction(DexIdx* idx,
                                          const uint16_t** insns_ptr,
                                          const uint16_t* end);

  /* Creates the right subclass of DexInstruction for the given opcode */
  static DexInstruction* make_instruction(DexOpcode);

  virtual void encode(DexOutputIdx* dodx, uint16_t*& insns) const;
  virtual size_t size() const;
  virtual DexInstruction* clone() const { return new DexInstruction(*this); }
  bool operator==(const DexInstruction&) const;

  // Reference-kind queries (see m_ref_type).
  bool has_string() const { return m_ref_type == REF_STRING; }
  bool has_type() const { return m_ref_type == REF_TYPE; }
  bool has_field() const { return m_ref_type == REF_FIELD; }
  bool has_method() const { return m_ref_type == REF_METHOD; }
  bool has_callsite() const { return m_ref_type == REF_CALLSITE; }
  bool has_methodhandle() const { return m_ref_type == REF_METHODHANDLE; }
  bool has_proto() const { return m_ref_type == REF_PROTO; }

  // Format queries, derived from the opcode.
  bool has_range() const { return dex_opcode::has_range(opcode()); }
  bool has_literal() const { return dex_opcode::has_literal(opcode()); }
  bool has_offset() const { return dex_opcode::has_offset(opcode()); }

  /*
   * Number of registers used.
   */
  bool has_dest() const;
  unsigned srcs_size() const;

  /*
   * Accessors for logical parts of the instruction.
   */
  DexOpcode opcode() const;
  uint16_t dest() const;
  uint16_t src(int i) const;
  uint16_t arg_word_count() const;
  uint16_t range_base() const;
  uint16_t range_size() const;
  int64_t get_literal() const;
  int32_t offset() const;

  /*
   * Setters for logical parts of the instruction.
   */
  DexInstruction* set_opcode(DexOpcode);
  DexInstruction* set_dest(uint16_t vreg);
  DexInstruction* set_src(int i, uint16_t vreg);
  DexInstruction* set_srcs(const std::vector<uint16_t>& vregs);
  DexInstruction* set_arg_word_count(uint16_t count);
  DexInstruction* set_range_base(uint16_t base);
  DexInstruction* set_range_size(uint16_t size);
  DexInstruction* set_literal(int64_t literal);
  DexInstruction* set_offset(int32_t offset);

  /*
   * The number of shorts needed to encode the args.
   */
  uint16_t count() const { return m_count; }

  friend std::string show(const DexInstruction* insn);
  friend std::string show_deobfuscated(const DexInstruction* insn);

 private:
  unsigned count_from_opcode() const;
};
// Instruction carrying a string reference (e.g. const-string /
// const-string-jumbo).
class DexOpcodeString : public DexInstruction {
 private:
  const DexString* m_string;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_strings(std::vector<const DexString*>& lstring) const override;
  DexOpcodeString* clone() const override { return new DexOpcodeString(*this); }

  DexOpcodeString(DexOpcode opcode, const DexString* str)
      : DexInstruction(opcode) {
    m_string = str;
    m_ref_type = REF_STRING;
  }

  const DexString* get_string() const { return m_string; }
  // True when the wide (32-bit index) const-string form is used.
  bool jumbo() const { return opcode() == DOPCODE_CONST_STRING_JUMBO; }
  void set_string(const DexString* str) { m_string = str; }
};
// Instruction carrying a type reference.
class DexOpcodeType : public DexInstruction {
 private:
  DexType* m_type;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_types(std::vector<DexType*>& ltype) const override;
  DexOpcodeType* clone() const override { return new DexOpcodeType(*this); }

  DexOpcodeType(DexOpcode opcode, DexType* type) : DexInstruction(opcode) {
    m_type = type;
    m_ref_type = REF_TYPE;
  }

  DexOpcodeType(DexOpcode opcode, DexType* type, uint16_t arg)
      : DexInstruction(opcode, arg) {
    m_type = type;
    m_ref_type = REF_TYPE;
  }

  DexType* get_type() const { return m_type; }
  void set_type(DexType* type) { m_type = type; }
};
// Instruction carrying a field reference.
class DexOpcodeField : public DexInstruction {
 private:
  DexFieldRef* m_field;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_fields(std::vector<DexFieldRef*>& lfield) const override;
  DexOpcodeField* clone() const override { return new DexOpcodeField(*this); }

  DexOpcodeField(DexOpcode opcode, DexFieldRef* field)
      : DexInstruction(opcode) {
    m_field = field;
    m_ref_type = REF_FIELD;
  }

  DexFieldRef* get_field() const { return m_field; }
  void set_field(DexFieldRef* field) { m_field = field; }
};
// Instruction carrying a method reference.
class DexOpcodeMethod : public DexInstruction {
 private:
  DexMethodRef* m_method;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_methods(std::vector<DexMethodRef*>& lmethod) const override;
  DexOpcodeMethod* clone() const override { return new DexOpcodeMethod(*this); }

  DexOpcodeMethod(DexOpcode opcode, DexMethodRef* meth, uint16_t arg = 0)
      : DexInstruction(opcode, arg) {
    m_method = meth;
    m_ref_type = REF_METHOD;
  }

  DexMethodRef* get_method() const { return m_method; }
  void set_method(DexMethodRef* method) { m_method = method; }
};
// Instruction carrying a call-site reference; gathers everything the call
// site itself references (strings, method handles, methods, fields).
class DexOpcodeCallSite : public DexInstruction {
 private:
  DexCallSite* m_callsite;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_callsites(std::vector<DexCallSite*>& lcallsite) const override;
  void gather_strings(std::vector<const DexString*>& lstring) const override;
  void gather_methodhandles(
      std::vector<DexMethodHandle*>& lmethodhandle) const override;
  void gather_methods(std::vector<DexMethodRef*>& lmethod) const override;
  void gather_fields(std::vector<DexFieldRef*>& lfield) const override;
  DexOpcodeCallSite* clone() const override {
    return new DexOpcodeCallSite(*this);
  }

  DexOpcodeCallSite(DexOpcode opcode, DexCallSite* callsite, uint16_t arg = 0)
      : DexInstruction(opcode, arg) {
    m_callsite = callsite;
    m_ref_type = REF_CALLSITE;
  }

  DexCallSite* get_callsite() const { return m_callsite; }
  void set_callsite(DexCallSite* callsite) { m_callsite = callsite; }
};
// Instruction carrying a method-handle reference.
class DexOpcodeMethodHandle : public DexInstruction {
 private:
  DexMethodHandle* m_methodhandle;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_methodhandles(
      std::vector<DexMethodHandle*>& lmethodhandle) const override;
  void gather_methods(std::vector<DexMethodRef*>& lmethod) const override;
  void gather_fields(std::vector<DexFieldRef*>& lfield) const override;
  DexOpcodeMethodHandle* clone() const override {
    return new DexOpcodeMethodHandle(*this);
  }

  DexOpcodeMethodHandle(DexOpcode opcode, DexMethodHandle* methodhandle)
      : DexInstruction(opcode) {
    m_methodhandle = methodhandle;
    m_ref_type = REF_METHODHANDLE;
  }

  DexMethodHandle* get_methodhandle() const { return m_methodhandle; }
  void set_methodhandle(DexMethodHandle* methodhandle) {
    m_methodhandle = methodhandle;
  }
};
// Instruction holding an opaque payload of code units (e.g. packed-switch,
// sparse-switch, fill-array-data payloads). Owns a copy of the data that
// follows the opcode word.
class DexOpcodeData : public DexInstruction {
 private:
  std::unique_ptr<uint16_t[]> m_data;
  size_t m_data_count;

 public:
  // This size refers to the whole instruction, not just the data portion
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  DexOpcodeData* clone() const override { return new DexOpcodeData(*this); }
  std::unique_ptr<DexOpcodeData> clone_as_unique_ptr() const {
    return std::make_unique<DexOpcodeData>(*this);
  }

  // Copies `count` code units that follow the opcode word at opcodes[0].
  DexOpcodeData(const uint16_t* opcodes, size_t count)
      : DexInstruction(opcodes, 0),
        m_data(std::make_unique<uint16_t[]>(count)),
        m_data_count(count) {
    opcodes++;
    memcpy(m_data.get(), opcodes, count * sizeof(uint16_t));
  }

  // opcodes[0] is the opcode word; the remaining entries are the payload.
  explicit DexOpcodeData(const std::vector<uint16_t>& opcodes)
      : DexInstruction(&opcodes[0], 0),
        m_data(std::make_unique<uint16_t[]>(opcodes.size() - 1)),
        m_data_count(opcodes.size() - 1) {
    const uint16_t* data = opcodes.data() + 1;
    memcpy(m_data.get(), data, (opcodes.size() - 1) * sizeof(uint16_t));
  }

  DexOpcodeData(const DexOpcodeData& op)
      : DexInstruction(op),
        m_data(std::make_unique<uint16_t[]>(op.m_data_count)),
        m_data_count(op.m_data_count) {
    memcpy(m_data.get(), op.m_data.get(), m_data_count * sizeof(uint16_t));
  }

  // Copy-and-swap assignment.
  // Fix: the previous implementation swapped only m_data and left
  // m_data_count untouched, so assigning a payload of a different size
  // desynchronized the buffer from its recorded length (out-of-bounds
  // reads via data()/data_size()). m_data_count must travel with m_data.
  DexOpcodeData& operator=(DexOpcodeData op) {
    DexInstruction::operator=(op);
    std::swap(m_data, op.m_data);
    std::swap(m_data_count, op.m_data_count);
    return *this;
  }

  const uint16_t* data() const { return m_data.get(); }
  // This size refers to just the length of the data array
  size_t data_size() const { return m_data_count; }
};
// Instruction carrying a prototype (method signature) reference.
class DexOpcodeProto : public DexInstruction {
 private:
  DexProto* m_proto;

 public:
  size_t size() const override;
  void encode(DexOutputIdx* dodx, uint16_t*& insns) const override;
  void gather_strings(std::vector<const DexString*>& lstring) const override;
  DexOpcodeProto* clone() const override { return new DexOpcodeProto(*this); }

  DexOpcodeProto(DexOpcode opcode, DexProto* proto) : DexInstruction(opcode) {
    m_proto = proto;
    m_ref_type = REF_PROTO;
  }

  DexProto* get_proto() const { return m_proto; }
  void set_proto(DexProto* proto) { m_proto = proto; }
};
// Returns the element width (in bytes) of a fill-array-data payload.
// op_data->data() layout: [width, size_lo, size_hi, elements...], so the
// width is the first code unit.
inline uint16_t fill_array_data_payload_width(const DexOpcodeData* op_data) {
  always_assert_log(op_data->opcode() == FOPCODE_FILLED_ARRAY,
                    "DexOpcodeData is not an array payload");
  always_assert(op_data->data_size() >= 3);
  return *op_data->data();
}
// Returns the number of elements stored in a fill-array-data payload.
// op_data->data() layout: [width, size_lo, size_hi, elements...].
inline uint32_t fill_array_data_payload_element_count(
    const DexOpcodeData* op_data) {
  always_assert_log(op_data->opcode() == FOPCODE_FILLED_ARRAY,
                    "DexOpcodeData is not an array payload");
  always_assert(op_data->data_size() >= 3);
  // Read the 32-bit count with memcpy: the backing storage is a uint16_t
  // array, so the previous uint32_t* dereference violated strict aliasing
  // and relied on 4-byte alignment that uint16_t storage does not
  // guarantee on all platforms.
  uint32_t count;
  memcpy(&count, op_data->data() + 1, sizeof(count));
  return count;
}
// helper function to create fill-array-data-payload according to
// path_to_url#fill-array
// helper function to create fill-array-data-payload according to
// path_to_url#fill-array
// Payload layout (code units): [opcode, width, size_lo, size_hi, data...].
template <typename IntType>
std::unique_ptr<DexOpcodeData> encode_fill_array_data_payload(
    const std::vector<IntType>& vec) {
  static_assert(std::is_integral<IntType>::value,
                "fill-array-data-payload can only contain integral values.");
  int width = sizeof(IntType);
  size_t total_copy_size = vec.size() * width;
  // one "code unit" is a 2 byte word
  int total_used_code_units =
      (total_copy_size + 1 /* for rounding up int division */) / 2 + 4;
  std::vector<uint16_t> data(total_used_code_units);
  uint16_t* ptr = data.data();
  ptr[0] = FOPCODE_FILLED_ARRAY; // header
  ptr[1] = width;
  // Store the 32-bit element count with memcpy: ptr only has uint16_t
  // alignment, so the previous *(uint32_t*)(ptr + 2) store was a strict
  // aliasing violation and potentially misaligned.
  const uint32_t element_count = vec.size();
  memcpy(ptr + 2, &element_count, sizeof(element_count));
  uint8_t* data_bytes = (uint8_t*)(ptr + 4);
  memcpy(data_bytes, (void*)vec.data(), total_copy_size);
  return std::make_unique<DexOpcodeData>(data);
}
// Like above, but parse from a vector of hex string elements
template <typename IntType>
std::unique_ptr<DexOpcodeData> encode_fill_array_data_payload_from_string(
    const std::vector<std::string>& elements) {
  static_assert(std::is_integral<IntType>::value,
                "fill-array-data-payload can only contain integral values.");
  std::vector<IntType> vec;
  vec.reserve(elements.size());
  for (const auto& item : elements) {
    IntType val;
    auto trimmed = trim_whitespaces(item);
    auto result = std::from_chars(trimmed.data(),
                                  trimmed.data() + trimmed.size(), val, 16);
    // Require a fully successful parse. The previous check rejected only
    // std::errc::invalid_argument, so std::errc::result_out_of_range was
    // silently accepted while leaving `val` uninitialized (UB).
    always_assert_log(result.ec == std::errc(),
                      "Invalid payload: \"%s\"", item.c_str());
    vec.emplace_back(val);
  }
  return encode_fill_array_data_payload(vec);
}
// Decode a fill-array-data payload back into a vector of IntType.
// The payload's stored width must match sizeof(IntType).
template <typename IntType>
std::vector<IntType> get_fill_array_data_payload(const DexOpcodeData* op_data) {
  static_assert(std::is_integral<IntType>::value,
                "fill-array-data-payload can only contain integral values.");
  int width = sizeof(IntType);
  auto data = op_data->data();
  always_assert_log(*data++ == width, "Incorrect width");
  // Read the 32-bit element count with memcpy: `data` is only uint16_t
  // aligned, so the previous *(uint32_t*)data read was a strict aliasing
  // violation and potentially misaligned.
  uint32_t count;
  memcpy(&count, data, sizeof(count));
  data += 2;
  std::vector<IntType> vec;
  vec.reserve(count);
  // Elements may not be IntType-aligned within the payload; copy each one.
  auto element_data = (uint8_t*)data;
  for (size_t i = 0; i < count; i++) {
    IntType result = 0;
    memcpy(&result, element_data, width);
    vec.emplace_back(result);
    element_data += width;
  }
  return vec;
}
/**
* Return a copy of the instruction passed in.
*/
DexInstruction* copy_insn(DexInstruction* insn);
```
|
Scarlet paintbrush is a common name for several flowering plants and may refer to:
Castilleja coccinea
Castilleja indivisa
Castilleja miniata, native to western North America
|
The Shankar Vihar metro station is a stop on the Magenta Line of the Delhi Metro, built as part of the third phase of the network's development. It was opened to the public on 29 May 2018. It is the only station in the Delhi Metro network where the free movement of civilians is restricted, as it falls within the Delhi Cantonment area, "right in the heart of the defence zone".
Station layout
Structure
Shankar Vihar is an elevated metro station situated on the Magenta Line of the Delhi Metro.
Station layout
Special provision for defence area
As the station lies completely inside an army unit, commuters are required to carry ID cards in order to exit the station.
See also
Delhi
List of Delhi Metro stations
Transport in Delhi
Delhi Metro Rail Corporation
Delhi Suburban Railway
Delhi Monorail
Delhi Transport Corporation
South West Delhi
Shankar Vihar
National Capital Region (India)
List of rapid transit systems
List of metro systems
References
External links
Delhi Metro Rail Corporation Ltd. (Official site)
Delhi Metro Annual Reports
UrbanRail.Net – descriptions of all metro systems in the world, each with a schematic map showing all stations.
Delhi Metro stations
Railway stations in India opened in 2018
Railway stations in New Delhi district
|
The Codòzinho River is a river of Maranhão state in northeastern Brazil.
See also
List of rivers of Maranhão
References
Brazilian Ministry of Transport
Rivers of Maranhão
|
```python
import pytest
from test_data.constants import DICT_1to5, TRANS_DICT_134, DICT_NESTED_123, TRANS_DICT_NESTED_12, \
TRANS_DICT_NESTED_VAL_12, DICT_LST_AAB2B, TRANS_DICT_LST_A2B, DICT_LST_NESTED, TRANS_DICT_LST_NESTED
from FireEyeHelix import Client, build_search_groupby_result, list_alerts_command, get_alert_by_id_command, \
get_alert_notes_command, create_alert_note_command, get_events_by_alert_command, get_endpoints_by_alert_command, \
get_cases_by_alert_command, add_list_item_command, get_list_items_command, update_list_item_command, \
list_rules_command, edit_rule_command, search_command, archive_search_command, archive_search_status_command, \
archive_search_results_command, create_context_result, build_title_with_page_numbers
from test_data.response_constants import ALERT_RESP, ALERTS_RESP, SEARCH_AGGREGATIONS_SINGLE_RESP, \
SEARCH_AGGREGATIONS_MULTI_RESP, NOTES_GET_RESP, NOTES_CREATE_RESP, EVENTS_BY_ALERT_RESP, ENDPOINTS_BY_ALERT_RESP, \
CASES_BY_ALERT_RESP, LIST_ITEMS_RESP, LIST_SINGLE_ITEM_RESP, RULE_RESP, SEARCH_MULTI_RESP, SEARCH_ARCHIVE_RESP, \
SEARCH_ARCHIVE_RESULTS_RESP
from test_data.result_constants import EXPECTED_AGGREGATIONS_MULTI_RSLT, EXPECTED_AGGREGATIONS_SINGLE_RSLT, \
EXPECTED_ALERT_RSLT, EXPECTED_ALERTS_RSLT, EXPECTED_NOTES_GET_RSLT, EXPECTED_NOTES_CREATE_RSLT, \
EXPECTED_EVENTS_BY_ALERT_RSLT, EXPECTED_ENDPOINTS_BY_ALERT_RSLT, EXPECTED_CASES_NY_ALERT_RSLT, \
EXPECTED_SINGLE_LIST_ITEM_RSLT, EXPECTED_LIST_ITEMS_RSLT, EXPECTED_LIST_ITEMS_UPDATE_RSLT, EXPECTED_RULES_RSLT, \
EXPECTED_SEARCH_RSLT, EXPECTED_SEARCH_ARCHIVE_RSLT, EXPECTED_SEARCH_ARCHIVE_STATUS_RSLT, \
EXPECTED_SEARCH_ARCHIVE_RESULTS_RSLT, EXPECTED_RULE_RSLT
def test_create_context_result_basic():
    """Flat translation keeps only the mapped keys and does not inject
    translated keys back into the source dict."""
    assert create_context_result(DICT_1to5, TRANS_DICT_134) == {'one': 1, 'three': 3, 'four': 4}
    assert 'one' not in DICT_1to5
def test_create_context_result_nested_keys():
    """Translation descends into nested source keys."""
    assert create_context_result(DICT_NESTED_123, TRANS_DICT_NESTED_12) == {'one': 1, 'two': 2}
def test_create_context_result_nested_vals():
    """Translation can produce nested keys in the output."""
    assert create_context_result(DICT_1to5, TRANS_DICT_NESTED_VAL_12) == {'one': {'1': 1}, 'two': 2}
def test_create_context_result_list():
    """List values are translated element-wise, including nested lists."""
    assert create_context_result(DICT_LST_AAB2B, TRANS_DICT_LST_A2B) == {'AaB': [{'two': 2}, {'two': 3}], 'four': 4}
    assert create_context_result(DICT_LST_NESTED, TRANS_DICT_LST_NESTED) == {
        'Master': {'ID': 1, 'Assets': [{'ID': 1, 'Name': 'a'}, {'ID': 2, 'Name': 'b'}]}}
def test_build_search_groupby_result():
    """groupby aggregations are flattened for both single- and
    multi-aggregation responses using the given separator."""
    separator = '|%$,$%|'
    assert build_search_groupby_result(SEARCH_AGGREGATIONS_SINGLE_RESP, separator) == EXPECTED_AGGREGATIONS_SINGLE_RSLT
    assert build_search_groupby_result(SEARCH_AGGREGATIONS_MULTI_RESP, separator) == EXPECTED_AGGREGATIONS_MULTI_RSLT
def test_build_title_with_page_numbers_start():
    """Offsets anywhere inside the first page report 'Page 1/4'."""
    src_title = 'Title'
    expected_title = 'Title\n### Page 1/4'
    count = 100
    limit = 30
    offset = 0
    # start of first page
    assert expected_title == build_title_with_page_numbers(src_title, count, limit, offset)
    # end of first page
    offset = 24
    assert expected_title == build_title_with_page_numbers(src_title, count, limit, offset)
def test_build_title_with_page_numbers_last():
    """Offsets on (or beyond) the last page are clamped to 'Page 4/4'."""
    src_title = 'Title'
    expected_title = 'Title\n### Page 4/4'
    count = 100
    limit = 30
    offset = 75
    # start of last page
    assert expected_title == build_title_with_page_numbers(src_title, count, limit, offset)
    # offset past the total count still reports the last page
    offset = 101
    assert expected_title == build_title_with_page_numbers(src_title, count, limit, offset)
def test_build_title_with_page_numbers_wrong_type():
    """A non-int count, limit or offset leaves the title unchanged
    instead of raising."""
    src_title = 'Title'
    count = '100'
    limit = 30
    offset = 0
    # count of wrong type
    assert src_title == build_title_with_page_numbers(src_title, count, limit, offset)
    # limit of wrong type
    count = 100
    limit = '30'
    assert src_title == build_title_with_page_numbers(src_title, count, limit, offset)
    # offset of wrong type
    limit = 30
    offset = '0'
    assert src_title == build_title_with_page_numbers(src_title, count, limit, offset)
def test_build_title_with_page_numbers_zero_div():
    """count == 0 must not trigger a ZeroDivisionError; the title is
    returned unchanged."""
    src_title = 'Title'
    count = 0
    limit = 30
    offset = 0
    assert src_title == build_title_with_page_numbers(src_title, count, limit, offset)
@pytest.mark.parametrize('command,args,response,expected_result', [
    (list_alerts_command, {'page_size': 2}, ALERTS_RESP, EXPECTED_ALERTS_RSLT),
    (get_alert_by_id_command, {'id': 3232}, ALERT_RESP, EXPECTED_ALERT_RSLT),
    (get_alert_notes_command, {'id': 3232}, NOTES_GET_RESP, EXPECTED_NOTES_GET_RSLT),
    (create_alert_note_command, {'note': 'This is a note test', 'alert_id': 3232}, NOTES_CREATE_RESP,
     EXPECTED_NOTES_CREATE_RSLT),
    (get_events_by_alert_command, {'alert_id': 3232}, EVENTS_BY_ALERT_RESP, EXPECTED_EVENTS_BY_ALERT_RSLT),
    (get_endpoints_by_alert_command, {'alert_id': 3232, 'offset': 0}, ENDPOINTS_BY_ALERT_RESP,
     EXPECTED_ENDPOINTS_BY_ALERT_RSLT),
    (get_cases_by_alert_command, {'alert_id': 3232, 'offset': 0, 'page_size': 1}, CASES_BY_ALERT_RESP,
     EXPECTED_CASES_NY_ALERT_RSLT),
    (add_list_item_command, {'list_id': 3232, 'value': 'test', 'type': 'misc', 'risk': 'Low'}, LIST_SINGLE_ITEM_RESP,
     EXPECTED_SINGLE_LIST_ITEM_RSLT),
    (get_list_items_command, {'list_id': 3232, 'offset': 0}, LIST_ITEMS_RESP, EXPECTED_LIST_ITEMS_RSLT),
    (update_list_item_command, {'list_id': 3232, 'value': 'test', 'type': 'misc', 'risk': 'Low', 'item_id': 163},
     LIST_SINGLE_ITEM_RESP, EXPECTED_LIST_ITEMS_UPDATE_RSLT),
    (list_rules_command, {'offset': 1}, RULE_RESP, EXPECTED_RULES_RSLT),
    (edit_rule_command, {'rule_id': '1.1.1', 'enabled': 'true'}, RULE_RESP, EXPECTED_RULE_RSLT),
    (search_command,
     {'query': 'domain:google.com', 'start': '4 days ago', 'groupby': 'subject', 'limit': 1, 'page_size': 2,
      'offset': 1}, SEARCH_MULTI_RESP, EXPECTED_SEARCH_RSLT),
    (archive_search_command, {'query': 'domain:google.com', 'start': '4 days ago', 'groupby': 'subject', 'limit': 1,
                              'offset': 1}, SEARCH_ARCHIVE_RESP, EXPECTED_SEARCH_ARCHIVE_RSLT),
    (archive_search_status_command, {'search_id': '82,83'}, SEARCH_ARCHIVE_RESP, EXPECTED_SEARCH_ARCHIVE_STATUS_RSLT),
    (archive_search_results_command, {'search_id': 82}, SEARCH_ARCHIVE_RESULTS_RESP, EXPECTED_SEARCH_ARCHIVE_RESULTS_RSLT)
])  # noqa: E124
def test_commands(command, args, response, expected_result, mocker):
    """Run each command against a mocked HTTP layer and compare the entry
    context (second element of the command result) to the expected value."""
    headers = {
        'accept': 'application/json',
        'x-fireeye-api-key': ''
    }
    # Fix: the base_url literal was unterminated and had swallowed the
    # remaining keyword arguments, making this module a SyntaxError.
    client = Client(base_url='path_to_url', verify=False, proxy=True, headers=headers)
    mocker.patch.object(client, '_http_request', return_value=response)
    res = command(client, args)
    assert expected_result == res[1]
def test_search_command_verify_args_passed_to_build_mql_query(mocker):
    """
    Given:
        - FireEye Helix integration client
        - `headers` argument given to the search command
    When:
        - Running the search command
    Then:
        - Ensure the command runs without raising exception that build_mql_query() got unexpected `headers` argument
    """
    args = {
        'headers': 'bug1,bug2,toomanybugs'
    }
    # Fix: the base_url literal was unterminated (missing closing quote and
    # parenthesis), making this module a SyntaxError.
    client = Client(base_url='path_to_url')
    mocker.patch.object(client, '_http_request', return_value={})
    search_command(client, args)
```
|
```javascript
'use strict';
import Component from 'metal-component';
import Soy from 'metal-soy';
import moment from 'moment';
import {addClasses, hasClass} from 'metal-dom';
import templates from './TutorialTimer.soy';
/**
 * Component that shows the estimated reading time remaining for a tutorial,
 * based on the `data-time` attributes of the sidebar links.
 */
class TutorialTimer extends Component {
	attached() {
		this.time = this.calculateTimeRemaining();
	}

	/**
	 * Walks the sidebar links, marks every link before the selected one as
	 * read, and returns a humanized string of the time remaining.
	 * NOTE(review): assumes an element with class `.sidebar` exists in the
	 * DOM when the component is attached — confirm.
	 * @return {string}
	 */
	calculateTimeRemaining() {
		let timeRead = 0;
		let totalTime = 0;
		let indexSelected = -1;

		const sidebar = document.querySelector('.sidebar');
		const sidebarLinks = Array.prototype.slice.call(sidebar.querySelectorAll('.sidebar-link'));

		sidebarLinks.forEach((item, i) => {
			// Fix: pass an explicit radix to parseInt.
			const time = parseInt(item.dataset.time || 0, 10);

			totalTime += time;

			if (hasClass(item, 'sidebar-link-selected')) {
				indexSelected = i;
			}

			// Until the selected link has been reached, every link (but not
			// the selected one itself) counts as already read.
			if (indexSelected === -1) {
				addClasses(item, 'sidebar-link-read');
				timeRead += time;
			}
		});

		// Fix: the `data-time` values are seconds, not milliseconds — the old
		// variable name was misleading (the duration was always created with
		// the 'seconds' unit).
		const remainingSeconds = totalTime - timeRead;
		const eventDuration = moment.duration(remainingSeconds, 'seconds');

		return this.humanizeDuration(eventDuration);
	}

	/**
	 * Formats a moment duration as a compact string (e.g. "1 h 30 min"),
	 * omitting zero-valued units.
	 * Fix: drops the redundant moment.duration(x, unit).asUnit() round-trips,
	 * which are identity operations on the unit accessors.
	 * @param {Object} eventDuration A moment.js duration.
	 * @return {string}
	 */
	humanizeDuration(eventDuration) {
		let eventDurationString = '';

		if (eventDuration.days() > 0) {
			eventDurationString += ' ' + eventDuration.days() + 'd';
		}

		if (eventDuration.hours() > 0) {
			eventDurationString += ' ' + eventDuration.hours() + ' h';
		}

		if (eventDuration.minutes() > 0) {
			eventDurationString += ' ' + eventDuration.minutes() + ' min';
		}

		if (eventDuration.seconds() > 0) {
			eventDurationString += ' ' + eventDuration.seconds() + ' sec';
		}

		return eventDurationString.trim();
	}
}
/**
 * State definition.
 * @type {!Object}
 * @static
 */
TutorialTimer.STATE = {
	// Humanized time remaining, computed in attached().
	time: {
		value: null
	}
}; // Fix: terminate the assignment with a semicolon, consistent with the rest of the file.

Soy.register(TutorialTimer, templates);

export default TutorialTimer;
```
|
```objective-c
/*
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_CODEC_TYPE_H_
#define API_VIDEO_VIDEO_CODEC_TYPE_H_
namespace webrtc {

// GENERATED_JAVA_ENUM_PACKAGE: org.webrtc
// Video codec kinds known to WebRTC. NOTE(review): the marker above
// suggests these values are mirrored into generated Java code, so their
// numeric values should be treated as stable — confirm before reordering.
enum VideoCodecType {
  // There are various memset(..., 0, ...) calls in the code that rely on
  // kVideoCodecGeneric being zero.
  kVideoCodecGeneric = 0,
  kVideoCodecVP8,
  kVideoCodecVP9,
  kVideoCodecAV1,
  kVideoCodecH264,
  kVideoCodecMultiplex,
};

}  // namespace webrtc
```
|
```java
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.component.Version;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.maintenance.RetiredExpirer;
import com.yahoo.vespa.hosted.provision.maintenance.TestMetric;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
import com.yahoo.vespa.service.duper.InfraApplication;
import org.junit.Before;
import org.junit.Test;
import java.time.Duration;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* Tests provisioning by node type instead of by count and flavor
*
* @author bratseth
*/
public class NodeTypeProvisioningTest {
// Shared provisioning tester and the proxy infrastructure application used by the tests.
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final InfraApplication proxyApp = ProvisioningTester.infraApplication(NodeType.proxy);
@Before
public void setup() {
    // Provision a mix of flavors for each node type so tests can allocate by type:
    // 11 proxies, 23 hosts and 45 tenant nodes in total.
    tester.makeReadyNodes( 1, "small", NodeType.proxy);
    tester.makeReadyNodes( 3, "small", NodeType.host);
    tester.makeReadyNodes( 5, "small", NodeType.tenant);
    tester.makeReadyNodes(10, "large", NodeType.proxy);
    tester.makeReadyNodes(20, "large", NodeType.host);
    tester.makeReadyNodes(40, "large", NodeType.tenant);
}
@Test
public void proxy_deployment() {
    { // Deploy: all 11 ready proxies are activated
        tester.prepareAndActivateInfraApplication(proxyApp);
        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        assertEquals("Activated all proxies", 11, nodes.size());
    }
    { // Redeploy with no changes
        tester.prepareAndActivateInfraApplication(proxyApp);
        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        assertEquals(11, nodes.size());
    }
    { // Add 2 ready proxies then redeploy
        tester.makeReadyNodes(2, "small", NodeType.proxy);
        tester.prepareAndActivateInfraApplication(proxyApp);
        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        assertEquals(13, nodes.size());
    }
    { // Remove 3 proxies then redeploy: failed nodes drop out of the active set
        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        tester.nodeRepository().nodes().fail(nodes.asList().get(0).hostname(), Agent.system, "Failing to unit test");
        tester.nodeRepository().nodes().fail(nodes.asList().get(1).hostname(), Agent.system, "Failing to unit test");
        tester.nodeRepository().nodes().fail(nodes.asList().get(5).hostname(), Agent.system, "Failing to unit test");
        tester.prepareAndActivateInfraApplication(proxyApp);
        nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        assertEquals(10, nodes.size());
    }
}
@Test
public void retire_proxy() {
MockDeployer deployer = new MockDeployer(tester.provisioner(),
tester.clock(),
List.of(new MockDeployer.ApplicationContext(proxyApp, Version.fromString("6.42"))));
RetiredExpirer retiredExpirer = new RetiredExpirer(tester.nodeRepository(),
deployer,
new TestMetric(),
Duration.ofDays(30),
Duration.ofMinutes(10));
{ // Deploy
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals("Activated all proxies", 11, nodes.size());
}
Node nodeToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList().get(5);
{ // Pick out a node and retire it
tester.nodeRepository().nodes().write(nodeToRetire.withWantToRetire(true, Agent.system, tester.clock().instant()), () -> {});
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.flatMap(Node::allocation)
.map(allocation -> allocation.membership().retired())
.orElseThrow(RuntimeException::new));
}
{ // Redeploying while the node is still retiring has no effect
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that the node is still marked as retired
assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.flatMap(Node::allocation)
.map(allocation -> allocation.membership().retired())
.orElseThrow(RuntimeException::new));
}
{
tester.advanceTime(Duration.ofMinutes(11));
retiredExpirer.run();
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(10, nodes.size());
// Verify that the node is now inactive
assertEquals(Node.State.dirty, tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.orElseThrow(RuntimeException::new).state());
}
}
@Test
public void retire_multiple_proxy_simultaneously() {
MockDeployer deployer = new MockDeployer(tester.provisioner(),
tester.clock(),
List.of(new MockDeployer.ApplicationContext(proxyApp, Version.fromString("6.42"))));
RetiredExpirer retiredExpirer = new RetiredExpirer(tester.nodeRepository(),
deployer,
new TestMetric(),
Duration.ofDays(30),
Duration.ofMinutes(10));
final int numNodesToRetire = 5;
{ // Deploy
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals("Activated all proxies", 11, nodes.size());
}
List<Node> nodesToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList()
.subList(3, 3 + numNodesToRetire);
{
nodesToRetire.forEach(nodeToRetire ->
tester.nodeRepository().nodes().write(nodeToRetire.withWantToRetire(true, Agent.system, tester.clock().instant()), () -> {}));
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
List<Node> nodesCurrentlyRetiring = nodes.stream()
.filter(node -> node.allocation().get().membership().retired())
.toList();
assertEquals(5, nodesCurrentlyRetiring.size());
// The retiring nodes should be the nodes we marked for retirement
assertTrue(Set.copyOf(nodesToRetire).containsAll(nodesCurrentlyRetiring));
}
{ // Redeploying while the nodes are still retiring has no effect
tester.prepareAndActivateInfraApplication(proxyApp);
NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
List<Node> nodesCurrentlyRetiring = nodes.stream()
.filter(node -> node.allocation().get().membership().retired())
.toList();
assertEquals(5, nodesCurrentlyRetiring.size());
}
{
// Let all retired nodes expire
tester.advanceTime(Duration.ofMinutes(11));
retiredExpirer.run();
tester.prepareAndActivateInfraApplication(proxyApp);
// All currently active proxy nodes are not marked with wantToRetire or as retired
long numRetiredActiveProxyNodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).stream()
.filter(node -> !node.status().wantToRetire())
.filter(node -> !node.allocation().get().membership().retired())
.count();
assertEquals(6, numRetiredActiveProxyNodes);
// All the nodes that were marked with wantToRetire earlier are now dirty
assertEquals(nodesToRetire.stream().map(Node::hostname).collect(Collectors.toSet()),
tester.nodeRepository().nodes().list(Node.State.dirty).stream().map(Node::hostname).collect(Collectors.toSet()));
}
}
}
```
|
Anthony or Tony Griffin may refer to:
Anthony J. Griffin (1866–1935), U.S. Representative from New York
Anthony C. Griffin, American plastic surgeon
Anthony Griffin (Royal Navy officer) (1920–1996), Controller of the Royal Navy
Anthony Griffin (rugby league) (born 1966), Australian rugby league football coach
Anthony Griffin (footballer) (born 1979), retired English footballer
Tony Griffin (hurler) (born 1981), Irish hurler
Tony Griffin (athlete), British Paralympic athlete
|
Katherine Day (January 7, 1889 – March 12, 1976) was a Canadian artist.
She was born in Orillia, Ontario, and received a BA from Queen's University. She then became a social worker. During World War I, Day was a volunteer nursing assistant in England. From 1922 to 1923, she studied painting with Franz Johnston at the Winnipeg School of Art and then attended the Ontario College of Art from 1929 to 1930. She continued her studies at the Central School of Arts and Crafts in London and then studied with Nicolas Eekman and Henri Jannot in Paris. On her return to Canada, she became a member of the Canadian Society of Graphic Artists and of the Society of Canadian Painter-Etchers and Engravers; Day participated in exhibitions with both of these groups.
She died in Orillia at the age of 87.
One of her woodcut prints is included in the collection of the National Gallery of Canada.
References
1889 births
1976 deaths
Alumni of the Central School of Art and Design
Artists from Ontario
Canadian women painters
20th-century Canadian printmakers
People from Orillia
|
```go
//
//
// path_to_url
//
// Unless required by applicable law or agreed to in writing, software
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
package ioutil
import (
"math/rand"
"testing"
)
// TestPageWriterRandom stress-tests the PageWriter with many randomly sized
// writes, then checks the bytes flushed to the underlying writer against the
// bytes accepted by the PageWriter.
func TestPageWriterRandom(t *testing.T) {
	// smaller buffer for stress testing
	defaultBufferBytes = 8 * 1024
	pageBytes := 128
	buf := make([]byte, 4*defaultBufferBytes)
	cw := &checkPageWriter{pageBytes: pageBytes, t: t}
	w := NewPageWriter(cw, pageBytes, 0)
	n := 0
	for i := 0; i < 4096; i++ {
		// Random write sizes exercise both buffering and page-aligned flush paths.
		c, err := w.Write(buf[:rand.Intn(len(buf))])
		if err != nil {
			t.Fatal(err)
		}
		n += c
	}
	// Downstream can never have seen more bytes than were written to the PageWriter.
	if cw.writeBytes > n {
		t.Fatalf("wrote %d bytes to io.Writer, but only wrote %d bytes", cw.writeBytes, n)
	}
	// NOTE(review): given the check above (cw.writeBytes <= n), cw.writeBytes-n is
	// never positive, so this pending-bytes check can never fire; the subtraction
	// looks inverted (presumably n-cw.writeBytes was intended) — confirm against
	// the upstream pagewriter test.
	if cw.writeBytes-n > pageBytes {
		t.Fatalf("got %d bytes pending, expected less than %d bytes", cw.writeBytes-n, pageBytes)
	}
	t.Logf("total writes: %d", cw.writes)
	t.Logf("total write bytes: %d (of %d)", cw.writeBytes, n)
}
// TestPageWriterPartialSlack tests the case where a write overflows the buffer
// but there is not enough data to complete the slack write.
func TestPageWriterPartialSlack(t *testing.T) {
	defaultBufferBytes = 1024
	pageBytes := 128
	buf := make([]byte, defaultBufferBytes)
	// The check writer only requires 64-byte alignment, half a page.
	cw := &checkPageWriter{pageBytes: 64, t: t}
	w := NewPageWriter(cw, pageBytes, 0)
	// put writer in non-zero page offset
	if _, err := w.Write(buf[:64]); err != nil {
		t.Fatal(err)
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if cw.writes != 1 {
		t.Fatalf("got %d writes, expected 1", cw.writes)
	}
	// nearly fill buffer
	if _, err := w.Write(buf[:1022]); err != nil {
		t.Fatal(err)
	}
	// overflow buffer, but without enough to write as aligned
	if _, err := w.Write(buf[:8]); err != nil {
		t.Fatal(err)
	}
	// No new flush should have happened: the slack write could not complete.
	if cw.writes != 1 {
		t.Fatalf("got %d writes, expected 1", cw.writes)
	}
	// finish writing slack space
	if _, err := w.Write(buf[:128]); err != nil {
		t.Fatal(err)
	}
	if cw.writes != 2 {
		t.Fatalf("got %d writes, expected 2", cw.writes)
	}
}
// TestPageWriterOffset tests if page writer correctly repositions when offset is given.
func TestPageWriterOffset(t *testing.T) {
	defaultBufferBytes = 1024
	pageBytes := 128
	buf := make([]byte, defaultBufferBytes)
	cw := &checkPageWriter{pageBytes: 64, t: t}
	w := NewPageWriter(cw, pageBytes, 0)
	if _, err := w.Write(buf[:64]); err != nil {
		t.Fatal(err)
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	// After writing 64 bytes into 128-byte pages, the writer sits mid-page.
	if w.pageOffset != 64 {
		t.Fatalf("w.pageOffset expected 64, got %d", w.pageOffset)
	}
	// NOTE(review): argument order here differs from the earlier call
	// NewPageWriter(cw, pageBytes, 0) — pageBytes and the offset appear swapped;
	// confirm against the NewPageWriter(w, pageBytes, pageOffset) signature.
	w = NewPageWriter(cw, w.pageOffset, pageBytes)
	if _, err := w.Write(buf[:64]); err != nil {
		t.Fatal(err)
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	// The second 64-byte write completes the page, returning the offset to 0.
	if w.pageOffset != 0 {
		t.Fatalf("w.pageOffset expected 0, got %d", w.pageOffset)
	}
}
// checkPageWriter implements an io.Writer that fails a test on unaligned writes.
type checkPageWriter struct {
	// pageBytes is the alignment unit every incoming write must be a multiple of.
	pageBytes int
	// writes counts the number of Write calls received.
	writes int
	// writeBytes accumulates the total number of bytes written.
	writeBytes int
	t *testing.T
}
// Write accepts p only when its length is a whole multiple of cw.pageBytes,
// failing the test otherwise, and records the call and byte counts.
func (cw *checkPageWriter) Write(p []byte) (int, error) {
	n := len(p)
	if n%cw.pageBytes != 0 {
		cw.t.Fatalf("got write len(p) = %d, expected len(p) == k*cw.pageBytes", n)
	}
	cw.writes++
	cw.writeBytes += n
	return n, nil
}
```
|
```shell
Pushing to a remote branch
Fetching a remote branch
What is rebasing?
The golden rule of rebasing
Cherry-pick a commit
```
|
This is a list of statues of King George V of the United Kingdom and abroad.
See also
List of statues of Queen Victoria
List of statues of British royalty in London
Royal monuments in the United Kingdom
References
George V
George V
|
The National Institute for Astrophysics (, or INAF) is an Italian research institute in astronomy and astrophysics, founded in 1999. INAF funds and operates twenty separate research facilities, which in turn employ scientists, engineers and technical staff. The research they perform covers most areas of astronomy, ranging from planetary science to cosmology.
Research facilities
INAF coordinates the activities of twenty research units, nineteen in Italy and one in Spain:
Bologna Observatory
Istituto di Astrofisica Spaziale e Fisica cosmica di Bologna
Istituto di Radioastronomia di Bologna
Cagliari Observatory
Catania Observatory
Arcetri Observatory (Florence)
Brera Observatory (Milan)
Istituto di Astrofisica Spaziale e Fisica cosmica di Milano
Capodimonte Observatory (Naples)
Osservatorio Astronomico di Padova
Palermo Observatory
Istituto di Astrofisica Spaziale e Fisica cosmica di Palermo
Rome Observatory
Istituto di Astrofisica Spaziale e Fisica cosmica di Roma
Istituto di Fisica dello Spazio Interplanetario di Roma
Collurania-Teramo Observatory
Turin Observatory
Istituto di Fisica dello Spazio Interplanetario di Torino
Trieste Observatory
Telescopio Nazionale Galileo (Canary Islands, Spain)
Sardinia Radio Telescope (San Basilio, Sardinia)
Noto Radio Observatory (Noto, Sicily)
International partnerships
INAF is involved in scientific collaborations with several international institutions, including:
the European Southern Observatory (Italy has been an ESO member since 1982)
the astronomical observatories located in Canary Islands (Teide Observatory and Roque de los Muchachos Observatory)
the Large Binocular Telescope, in partnership with the United States and Germany
the Very Long Baseline Interferometry consortium
the European Space Agency (ESA)
the American National Aeronautic and Space Administration (NASA)
Notable figures
Giampaolo Vettolani, scientific director
Stefano Cristiani, board member
Nicolò D'Amico, president in 2015-2020
Projects
Projects include:
Mars Multispectral Imager for Subsurface Studies, instrument for the ExoMars rover Rosalind Franklin
Rapid Eye Mount telescope (REM)
Juno's JIRAM
See also
Istituto Nazionale di Fisica Nucleare (INFN)
French National Centre for Scientific Research (CNRS)
References
External links
Official website of INAF
Public and press website of INAF
Website of the Italian Telescopio Nazionale Galileo
Website of the Large Binocular Telescope (Arizona, US)
Italian Astronomical Archive Center
VObs.it: Italian Virtual Observatory
Research institutes in Italy
Astrophysics research institutes
Rome Q. XV Della Vittoria
1999 establishments in Italy
|
Rhinoclavis vertagus, common name the common creeper, is a species of sea snail, a marine gastropod mollusk in the family Cerithiidae, the ceriths. R. vertagus is most commonly found in intertidal sand flats.
Description
R. vertagus has a cone-shaped shell most commonly bearing white, grey and brown colors. The length of the shell varies between 40 mm and 80 mm.
Distribution
This species occurs in Australia, Philippines, and in the Indo-west Pacific from east Africa to Vanuatu as a whole.
References
Linnaeus, C. 1767. Systema naturae, per regna tria naturae, secundum classes, ordines, genera, species, cum caracteribus, differentiis, synonymis, locis. Holmiae [= Stockholm] : L. Salvii Vol. 1(2) 12, pp. 533–1327.
Humphrey, G. 1797. Museum Calonnianum : specification of the various articles which compose the magnificent museum of natural history collected by M. de Calonne in France and lately his property : consisting of an assemblage of the most beautiful and rare subjects in entomology, conchology, ornithology, mineralogy, &c. London : E. Bibl. Radcl. viii, 84 pp.
Röding, P.F. 1798. Museum Boltenianum sive Catalogus cimeliorum e tribus regnis naturae quae olim collegerat Joa. Hamburg : Trappii 199 pp.
Perry, G. 1811. Conchology, or the natural history of shells containing a new arrangement of the genera and species, illustrated by coloured engravings, executed from the natural specimens and including the latest discoveries. London : W. Miller 4 pp., 62 pls.
Schumacher, C.F. 1817. Essai d'un Nouveau Systéme des Habitations des vers Testacés. Copenhagen : Schultz 287 pp., pls 1-22.
Tryon, G.W. (ed.) 1887. Solariidae, Ianthinidae, Trichotropidae, Scalariidae, Cerithiidae, Rissoidae, Littorinidae. Manual of Conchology. Philadelphia : G.W. Tryon Vol. 9 488 pp., 71 pls.
Schepman, M.M. 1909. The Prosobranchia of the Siboga Expedition. Part 2. Taenioglossa and Ptenoglossa. 109-231, pls 10-16 in Weber, M. (ed.). Siboga Expeditie. Leiden : Brill Vol. 49.
Dautzenberg, P. 1923. Liste Préliminaire des Mollusques marins de Madagascar et description de deux epèces nouvelles. Journal de Conchyliologie 68(1): 21-74
Thiele, J. 1931. Handbuch der Systematischen Weichtierkunde. Jena : Gustav Fischer Vol. 2 pp. 377–778.
Cotton, B.C. 1952. Family Cerithiidae. Royal Society of South Australia Malacological Section 2: 4 pp.
Houbrick, R.S. 1978. The family Cerithiidae in the Indo-Pacific. Part I. The genera Rhinoclavis, Pseudovertagus and Clavocerithium. Monographs of Marine Mollusca 1: 1-130
Abbott, R.T. & S.P. Dance (1986). Compendium of sea shells. American Malacologists, Inc:Melbourne, Florida
Wilson, B. 1993. Australian Marine Shells. Prosobranch Gastropods. Kallaroo, Western Australia : Odyssey Publishing Vol. 1 408 pp.
External links
http://www.marinespecies.org/aphia.php?p=taxdetails&id=473152
Cerithiidae
Gastropods described in 1767
Taxa named by Carl Linnaeus
|
The arrondissement of Laon is an arrondissement of France in the Aisne department in the Hauts-de-France region. It has 240 communes. Its population is 157,371 (2016), and its area is .
Composition
The communes of the arrondissement of Laon, and their INSEE codes, are:
Abbécourt (02001)
Achery (02002)
Agnicourt-et-Séchelles (02004)
Aguilcourt (02005)
Aizelles (02007)
Amifontaine (02013)
Amigny-Rouy (02014)
Andelain (02016)
Anguilcourt-le-Sart (02017)
Anizy-le-Grand (02018)
Arrancy (02024)
Assis-sur-Serre (02027)
Athies-sous-Laon (02028)
Aubigny-en-Laonnois (02033)
Aulnois-sous-Laon (02037)
Autremencourt (02039)
Autreville (02041)
Barenton-Bugny (02046)
Barenton-Cel (02047)
Barenton-sur-Serre (02048)
Barisis-aux-Bois (02049)
Bassoles-Aulers (02052)
Beaumont-en-Beine (02056)
Beaurieux (02058)
Beautor (02059)
Berrieux (02072)
Berry-au-Bac (02073)
Bertaucourt-Epourdon (02074)
Bertricourt (02076)
Besmé (02078)
Besny-et-Loizy (02080)
Béthancourt-en-Vaux (02081)
Bièvres (02088)
Bichancourt (02086)
Blérancourt (02093)
Bois-lès-Pargny (02096)
Boncourt (02097)
Bosmont-sur-Serre (02101)
Bouconville-Vauclair (02102)
Bouffignereux (02104)
Bourg-et-Comin (02106)
Bourguignon-sous-Coucy (02107)
Bourguignon-sous-Montbavin (02108)
Brancourt-en-Laonnois (02111)
Braye-en-Laonnois (02115)
Brie (02122)
Bruyères-et-Montbérault (02128)
Bucy-lès-Cerny (02132)
Bucy-lès-Pierrepont (02133)
Caillouël-Crépigny (02139)
Camelin (02140)
Caumont (02145)
Cerny-en-Laonnois (02150)
Cerny-lès-Bucy (02151)
Cessières-Suzy (02153)
Chaillevois (02155)
Chalandry (02156)
Chambry (02157)
Chamouille (02158)
Champs (02159)
Charmes (02165)
Châtillon-lès-Sons (02169)
Chaudardes (02171)
Chauny (02173)
Chérêt (02177)
Chermizy-Ailles (02178)
Chéry-lès-Pouilly (02180)
Chevregny (02183)
Chivres-en-Laonnois (02189)
Chivy-lès-Étouvelles (02191)
Cilly (02194)
Clacy-et-Thierret (02196)
Colligis-Crandelain (02205)
Commenchon (02207)
Concevreux (02208)
Condé-sur-Suippe (02211)
Condren (02212)
Corbeny (02215)
Coucy-lès-Eppes (02218)
Coucy-la-Ville (02219)
Coucy-le-Château-Auffrique (02217)
Courbes (02222)
Courtrizy-et-Fussigny (02229)
Couvron-et-Aumencourt (02231)
Crécy-au-Mont (02236)
Crécy-sur-Serre (02237)
Crépy (02238)
Craonne (02234)
Craonnelle (02235)
Cuirieux (02248)
Cuiry-lès-Chaudardes (02250)
Cuissy-et-Geny (02252)
Danizy (02260)
Dercy (02261)
Deuillet (02262)
Ébouleau (02274)
Eppes (02282)
Erlon (02283)
Étouvelles (02294)
Évergnicourt (02299)
La Fère (02304)
Festieux (02309)
Folembray (02318)
Fourdrain (02329)
Fresnes-sous-Coucy (02333)
Fressancourt (02335)
Frières-Faillouël (02336)
Froidmont-Cohartille (02338)
Gizy (02346)
Goudelancourt-lès-Berrieux (02349)
Goudelancourt-lès-Pierrepont (02350)
Grandlup-et-Fay (02353)
Guivry (02362)
Guny (02363)
Guyencourt (02364)
Jumencourt (02395)
Jumigny (02396)
Juvincourt-et-Damary (02399)
Landricourt (02406)
Laniscourt (02407)
Laon (02408)
Lappion (02409)
Laval-en-Laonnois (02413)
Leuilly-sous-Coucy (02423)
Lierval (02429)
Liesse-Notre-Dame (02430)
Liez (02431)
Lor (02440)
Mâchecourt (02448)
Maizy (02453)
La Malmaison (02454)
Manicamp (02456)
Marchais (02457)
Marcy-sous-Marle (02460)
Marest-Dampcourt (02461)
Marle (02468)
Martigny-Courpierre (02471)
Mauregny-en-Haye (02472)
Mayot (02473)
Mennessis (02474)
Merlieux-et-Fouquerolles (02478)
Mesbrecourt-Richecourt (02480)
Meurival (02482)
Missy-lès-Pierrepont (02486)
Molinchart (02489)
Monceau-lès-Leups (02492)
Monceau-le-Waast (02493)
Mons-en-Laonnois (02497)
Montaigu (02498)
Montbavin (02499)
Montchâlons (02501)
Monthenault (02508)
Montigny-le-Franc (02513)
Montigny-sous-Marle (02516)
Montigny-sur-Crécy (02517)
Mortiers (02529)
Moulins (02530)
Moussy-Verneuil (02531)
Muscourt (02534)
Neufchâtel-sur-Aisne (02541)
Neuflieux (02542)
La Neuville-Bosmont (02545)
La Neuville-en-Beine (02546)
Neuville-sur-Ailette (02550)
Nizy-le-Comte (02553)
Nouvion-et-Catillon (02559)
Nouvion-le-Comte (02560)
Nouvion-le-Vineux (02561)
Œuilly (02565)
Ognes (02566)
Orainville (02572)
Orgeval (02573)
Oulches-la-Vallée-Foulon (02578)
Paissy (02582)
Pancy-Courtecon (02583)
Parfondru (02587)
Pargnan (02588)
Pargny-les-Bois (02591)
Pierremande (02599)
Pierrepont (02600)
Pignicourt (02601)
Pinon (02602)
Ployart-et-Vaurseine (02609)
Pont-Saint-Mard (02616)
Pontavert (02613)
Pouilly-sur-Serre (02617)
Prémontré (02619)
Presles-et-Thierny (02621)
Prouvais (02626)
Proviseux-et-Plesnoy (02627)
Quierzy (02631)
Quincy-Basse (02632)
Remies (02638)
Rogécourt (02651)
Roucy (02656)
Royaucourt-et-Chailvet (02661)
Saint-Aubin (02671)
Sainte-Croix (02675)
Sainte-Preuve (02690)
Saint-Erme-Outre-et-Ramecourt (02676)
Saint-Gobain (02680)
Saint-Nicolas-aux-Bois (02685)
Saint-Paul-aux-Bois (02686)
Saint-Pierremont (02689)
Saint-Thomas (02696)
Samoussy (02697)
Selens (02704)
La Selve (02705)
Septvaux (02707)
Servais (02716)
Sinceny (02719)
Sissonne (02720)
Sons-et-Ronchères (02727)
Tavaux-et-Pontséricourt (02737)
Tergnier (02738)
Thiernu (02742)
Toulis-et-Attencourt (02745)
Travecy (02746)
Trosly-Loire (02750)
Trucy (02751)
Ugny-le-Gay (02754)
Urcel (02755)
Variscourt (02761)
Vassogne (02764)
Vaucelles-et-Beffecourt (02765)
Vauxaillon (02768)
Vendresse-Beaulne (02778)
Verneuil-sous-Coucy (02786)
Verneuil-sur-Serre (02787)
Versigny (02788)
Vesles-et-Caumont (02790)
Veslud (02791)
La Ville-aux-Bois-lès-Pontavert (02803)
Villeneuve-sur-Aisne (02360)
Villequier-Aumont (02807)
Viry-Noureuil (02820)
Vivaise (02821)
Vorges (02824)
Voyenne (02827)
Wissignicourt (02834)
History
The arrondissement of Laon was created in 1800. At the January 2017 reorganization of the arrondissements of Aisne, it lost 30 communes to the arrondissement of Vervins and three to the arrondissement of Soissons.
As a result of the reorganisation of the cantons of France which came into effect in 2015, the borders of the cantons are no longer related to the borders of the arrondissements. The cantons of the arrondissement of Laon were, as of January 2015:
Anizy-le-Château
Chauny
Coucy-le-Château-Auffrique
Craonne
Crécy-sur-Serre
La Fère
Laon-Nord
Laon-Sud
Marle
Neufchâtel-sur-Aisne
Rozoy-sur-Serre
Sissonne
Tergnier
References
Laon
|
María Dolores Álvarez Campillo (10 April 1960 – 6 April 2022) was a Spanish politician. A member of the Spanish Socialist Workers' Party, she served in the General Junta of the Principality of Asturias from 2015 to 2019. She died of cancer in Llanes on 6 April 2022 at the age of 61.
References
1960 births
2022 deaths
Deaths from cancer in Spain
People from Llanes
21st-century Spanish women politicians
Spanish psychologists
Women mayors of places in Spain
Members of the General Junta of the Principality of Asturias
Spanish Socialist Workers' Party politicians
University of Oviedo alumni
|
Emma McIntyre (born 30 December 1992) is a Scottish international lawn and indoor bowler.
Bowls career
McIntyre, who bowls for Dumbarton BC came to prominence after winning the women's national under 25 indoor title and winning the 2018 IIBC Championships mixed doubles title with Stewart Anderson.
She has won four medals at the 2017 and 2022 European Bowls Championships, including a silver medal in the singles, where she lost out in the final to Stef Branfield.
Her performances at the European Championships led to her being selected by the national team, to represent them at the sport's blue riband event, the 2023 World Bowls Championship. She participated in the women's singles and the women's pairs events. In the pairs partnering Claire Anderson she won a bronze medal, losing to Malaysia in the semi final.
References
1992 births
Living people
Scottish female bowls players
|
```go
// Package filenotify provides a mechanism for watching file(s) for changes.
// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support.
// These are wrapped up in a common interface so that either can be used interchangeably in your code.
package filenotify
import "github.com/fsnotify/fsnotify"
// FileWatcher is an interface for implementing file notification watchers
type FileWatcher interface {
	// Events returns the channel on which file change events are delivered.
	Events() <-chan fsnotify.Event
	// Errors returns the channel on which watcher errors are delivered.
	Errors() <-chan error
	// Add registers the named path with the watcher.
	Add(name string) error
	// Remove unregisters the named path from the watcher.
	Remove(name string) error
	// Close shuts down the watcher.
	Close() error
}
// New tries to use an fs-event watcher, and falls back to the poller if there is an error
func New() (FileWatcher, error) {
	watcher, err := NewEventWatcher()
	if err != nil {
		// fs-event watcher unavailable; the poller always works.
		return NewPollingWatcher(), nil
	}
	return watcher, nil
}
// NewPollingWatcher returns a poll-based file watcher
func NewPollingWatcher() FileWatcher {
	// Channels are unbuffered; consumers must drain Events()/Errors().
	return &filePoller{
		events: make(chan fsnotify.Event),
		errors: make(chan error),
	}
}
// NewEventWatcher returns an fs-event based file watcher
func NewEventWatcher() (FileWatcher, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		// Creating the OS-level watcher can fail; let the caller decide on fallback.
		return nil, err
	}
	return &fsNotifyWatcher{watcher}, nil
}
```
|
```javascript
/* @flow */
import { CARD, WALLET_INSTRUMENT } from "@paypal/sdk-constants/src";
// Secondary instruments attached to a wallet instrument (each with its own id and label).
export type SecondaryInstruments = $ReadOnlyArray<{|
  type: string,
  label: string,
  instrumentID: string,
|}>;

// A single funding instrument in a buyer's wallet. Only oneClick and branded
// are required; everything else is optional metadata.
export type WalletInstrument = {|
  type?: $Values<typeof WALLET_INSTRUMENT>,
  label?: string,
  logoUrl?: string,
  instrumentID?: string,
  tokenID?: string,
  vendor?: $Values<typeof CARD>,
  oneClick: boolean,
  branded: boolean,
  secondaryInstruments?: SecondaryInstruments,
|};

// The set of instruments available for one payment method.
export type WalletPaymentType = {|
  instruments: $ReadOnlyArray<WalletInstrument>,
|};

// The buyer's wallet, keyed by payment method.
export type Wallet = {|
  paypal: WalletPaymentType,
  card: WalletPaymentType,
  credit: WalletPaymentType,
  venmo: WalletPaymentType,
|};

// Localized UI strings; dotted keys ("label.*") are raw content-keyed entries.
export type ContentType = {|
  instantlyPayWith: string,
  poweredBy: string,
  chooseCardOrShipping: string,
  useDifferentAccount: string,
  deleteVaultedAccount: string,
  deleteVaultedCard: string,
  chooseCard: string,
  balance: string,
  payNow: string,
  payWithDebitOrCreditCard: string,
  credit: string,
  payWith: string,
  payLater: string,
  flex: string,
  payPalBalance: string,
  moreOptions: string,
  "label.paypal": string,
  "label.checkout": string,
  "label.buynow": string,
  "label.pay": string,
  "label.installment.withPeriod": string,
  "label.installment.withoutPeriod": string,
|};

// Experiment flags toggling optional behavior.
export type Experiment = {|
  enableVenmo?: boolean,
  disablePaylater?: boolean,
  venmoWebEnabled?: boolean,
  // first render experiments
  venmoVaultWithoutPurchase?: boolean,
|};

// Capabilities a flow may require from the environment.
export type Requires = {|
  applepay?: boolean,
  popup?: boolean,
  native?: boolean,
|};

// Lazily evaluated export: __get__ produces the value on demand.
export type LazyExport<T> = {|
  __get__: () => T,
|};

// Lazily evaluated export whose value may be absent (nullable result).
export type LazyProtectedExport<T> = {|
  __get__: () => ?T,
|};
|
```javascript
import React from 'react'
import Flex from './Flex'
import Box from './Box'
import Pre from './Pre'
import Heading from './Heading'
import Text from './Text'
import Bar from './Bar'
const Usage = () => (
<section id='usage'>
<Box pt={5} pb={5}>
<Bar mb={4} />
<Heading>Getting Started</Heading>
<Pre>npm i cxs</Pre>
<Heading>
Usage
</Heading>
<Pre children={example} />
</Box>
</section>
)
// Code snippet rendered inside the Usage section's <Pre>.
const example = `const className = cxs({
color: 'tomato'
})`
// NOTE(review): `code` is not referenced anywhere in this file and is not
// exported — presumably leftover snippets; confirm before removing.
const code = {
import: `import cxs from 'cxs'`,
basic: `const rule = cxs('color: tomato;')`,
classname: `const className = rule.toString()`,
chaining: `const rule = cxs('color: tomato')
.hover('color: black')
.media('@media screen and (min-width: 40em)')('font-size: 32px')`,
}
export default Usage
```
|
```java
package loopeer.com.appbarlayout_spring_extension;
import android.content.Intent;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import android.view.View;
/** Launcher screen: one button per AppBarLayout demo, each starting its activity. */
public class MainActivity extends AppCompatActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
    }

    /** Click handler wired from activity_main.xml. */
    public void onNormalAppBarLayoutClick(View view) {
        launch(NormalAppBarLayoutActivity.class);
    }

    /** Click handler wired from activity_main.xml. */
    public void onSpringAppBarLayoutClick(View view) {
        launch(SpringAppBarLayoutActivity.class);
    }

    /** Click handler wired from activity_main.xml. */
    public void onSpringTabAppBarLayoutClick(View view) {
        launch(SpringAppBarLayoutWithTabActivity.class);
    }

    // Starts the given demo activity from this context.
    private void launch(Class<?> target) {
        startActivity(new Intent(this, target));
    }
}
```
|
```java
/*
*
* See the CONTRIBUTORS.txt file in the distribution for a
* full listing of individual contributors.
*
* This program is free software: you can redistribute it and/or modify
* published by the Free Software Foundation, either version 3 of the
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* along with this program. If not, see <path_to_url
*/
package org.openremote.model.util;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import com.kjetland.jackson.jsonSchema.JsonSchemaConfig;
import com.kjetland.jackson.jsonSchema.JsonSchemaDraft;
import com.kjetland.jackson.jsonSchema.annotations.JsonSchemaInject;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
public class JSONSchemaUtil {
@JsonSchemaInject(jsonSupplierViaLookup = JSONSchemaUtil.SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_ANY_KEY_ANY_TYPE)
static class PatternPropertiesAnyKeyAnyType {}
@JsonSchemaInject(jsonSupplierViaLookup = JSONSchemaUtil.SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_SIMPLE_KEY_ANY_TYPE)
static class PatternPropertiesSimpleKeyAnyType {}
@JsonSchemaInject(jsonSupplierViaLookup = JSONSchemaUtil.SCHEMA_SUPPLIER_NAME_ANY_TYPE)
static class AnyType {}
private JSONSchemaUtil() {}
public static final String SCHEMA_SUPPLIER_NAME_ANY_TYPE = "anyType";
public static final String SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_ANY_KEY_ANY_TYPE = "patternPropertiesAnyKeyAnyType";
public static final String SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_SIMPLE_KEY_ANY_TYPE = "patternPropertiesSimpleKeyAnyType";
public static final String PATTERN_PROPERTIES_MATCH_ANY = ".+";
public static final String PATTERN_PROPERTIES_MATCH_SIMPLE = "^[a-zA-Z][a-zA-Z0-9]*";
public static final String TYPE_NULL = "null";
public static final String TYPE_NUMBER = "number";
public static final String TYPE_INTEGER = "integer";
public static final String TYPE_BOOLEAN = "boolean";
public static final String TYPE_STRING = "string";
public static final String TYPE_ARRAY = "array";
public static final String TYPE_OBJECT = "object";
public static final String[] TYPES_ALL = new String[]{
TYPE_NULL,
TYPE_NUMBER,
TYPE_INTEGER,
TYPE_BOOLEAN,
TYPE_STRING,
TYPE_ARRAY,
TYPE_OBJECT
};
public static JsonNode getSchemaPatternPropertiesAnyKeyAnyType() {
return getSchemaPatternProperties(PATTERN_PROPERTIES_MATCH_ANY, TYPES_ALL);
}
public static JsonNode getSchemaPatternPropertiesSimpleKeyAnyType() {
return getSchemaPatternProperties(PATTERN_PROPERTIES_MATCH_SIMPLE, TYPES_ALL);
}
public static JsonNode getSchemaPatternPropertiesAnyType(String keyPattern) {
return getSchemaPatternProperties(keyPattern, TYPES_ALL);
}
public static JsonNode getSchemaPatternProperties(String keyPattern, String...types) {
ObjectNode node = ValueUtil.JSON.createObjectNode();
node.put("type", "object");
ObjectNode patternNode = node.putObject("patternProperties").putObject(keyPattern);
patternNode.set("type", getSchemaType(false, types));
return node;
}
/**
 * Builds the value of a JSON Schema {@code "type"} keyword: a plain text node
 * for a single type name, or an array node for several.
 *
 * @param wrapped when {@code true}, wrap the result as {@code {"type": <value>}}
 *                instead of returning the bare value
 * @param types   one or more JSON Schema type names
 * @return the type value node, optionally wrapped in an object
 */
public static JsonNode getSchemaType(boolean wrapped, String...types) {
JsonNode typeValue;
if (types.length == 1) {
    // A single type is expressed as a bare string per the JSON Schema spec.
    typeValue = new TextNode(types[0]);
} else {
    ArrayNode names = ValueUtil.JSON.createArrayNode();
    for (String typeName : types) {
        names.add(typeName);
    }
    typeValue = names;
}
if (!wrapped) {
    return typeValue;
}
return ValueUtil.JSON.createObjectNode().set("type", typeValue);
}
/**
 * Builds the schema-generator configuration used by this utility: Draft-07
 * output, with {@code Object}/{@code ObjectNode} remapped to the marker classes
 * above and the named schema suppliers registered for
 * {@code @JsonSchemaInject(jsonSupplierViaLookup = ...)} resolution.
 *
 * NOTE(review): JsonSchemaConfig.create takes a long list of positional
 * boolean flags; the inline notes below are assumptions from the library's
 * signature — confirm against the jackson-jsonSchema version in use.
 */
public static JsonSchemaConfig getJsonSchemaConfig() {
return JsonSchemaConfig.create(
false,
Optional.empty(),
false,
false,
false,
false,
false,
false,
false,
Collections.emptyMap(),
false,
Collections.emptySet(),
// Remap plain Object / ObjectNode to the marker classes defined above so
// they are described by the registered suppliers.
Map.of(
Object.class, AnyType.class,
ObjectNode.class, PatternPropertiesSimpleKeyAnyType.class
),
// Named suppliers looked up via @JsonSchemaInject(jsonSupplierViaLookup = ...).
Map.of(
SCHEMA_SUPPLIER_NAME_ANY_TYPE, () -> getSchemaType(true, TYPES_ALL),
SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_ANY_KEY_ANY_TYPE, JSONSchemaUtil::getSchemaPatternPropertiesAnyKeyAnyType,
SCHEMA_SUPPLIER_NAME_PATTERN_PROPERTIES_SIMPLE_KEY_ANY_TYPE, JSONSchemaUtil::getSchemaPatternPropertiesSimpleKeyAnyType
),
null,
false,
null,
null,
true
).withJsonSchemaDraft(JsonSchemaDraft.DRAFT_07);
}
}
```
|
```ruby
# Homebrew formula for Mozilla's SpiderMonkey JavaScript engine.
# Versioned in lockstep with Firefox ESR (115.x).
class Spidermonkey < Formula
desc "JavaScript-C Engine"
homepage "path_to_url"
url "path_to_url"
version "115.14.0"
sha256 your_sha256_hash
license "MPL-2.0"
head "path_to_url", using: :hg
# Spidermonkey versions use the same versions as Firefox, so we simply check
# Firefox ESR release versions.
livecheck do
url "path_to_url"
regex(/data-esr-versions=["']?v?(\d+(?:\.\d+)+)["' >]/i)
end
bottle do
sha256 cellar: :any, arm64_sonoma: your_sha256_hash
sha256 cellar: :any, arm64_ventura: your_sha256_hash
sha256 cellar: :any, sonoma: your_sha256_hash
sha256 cellar: :any, ventura: your_sha256_hash
sha256 x86_64_linux: your_sha256_hash
end
depends_on "pkg-config" => :build
depends_on "python@3.11" => :build # path_to_url
depends_on "rust" => :build
depends_on macos: :ventura # minimum SDK version 13.3
depends_on "readline"
uses_from_macos "llvm" => :build # for llvm-objdump
uses_from_macos "m4" => :build
uses_from_macos "zlib"
# On Linux the build links against system-style ICU and NSPR (see install args).
on_linux do
depends_on "icu4c"
depends_on "nspr"
end
conflicts_with "narwhal", because: "both install a js binary"
# From python/mozbuild/mozbuild/test/configure/test_toolchain_configure.py
fails_with :gcc do
version "7"
cause "Only GCC 8.1 or newer is supported"
end
# Apply patch used by `gjs` to bypass build error.
# ERROR: *** The pkg-config script could not be found. Make sure it is
# *** in your path, or set the PKG_CONFIG environment variable
# *** to the full path to pkg-config.
# Ref: path_to_url
# Ref: path_to_url
patch do
on_macos do
url "path_to_url"
sha256 your_sha256_hash
end
end
# Configures in an out-of-tree "brew-build" directory, builds, installs, and
# exposes an unversioned `js` symlink alongside the versioned binary.
def install
# Help the build script detect ld64 as it expects logs from LD_PRINT_OPTIONS=1 with -Wl,-version
if DevelopmentTools.clang_build_version >= 1500
inreplace "build/moz.configure/toolchain.configure", '"-Wl,--version"', '"-Wl,-ld_classic,--version"'
end
mkdir "brew-build" do
args = %W[
--prefix=#{prefix}
--enable-optimize
--enable-readline
--enable-release
--enable-shared-js
--disable-bootstrap
--disable-debug
--disable-jemalloc
--with-intl-api
--with-system-zlib
]
if OS.mac?
# Force build script to use Xcode install_name_tool
ENV["INSTALL_NAME_TOOL"] = DevelopmentTools.locate("install_name_tool")
else
# System libraries are only supported on Linux and build fails if args are used on macOS.
# Ref: path_to_url
args += %w[--with-system-icu --with-system-nspr]
end
system "../js/src/configure", *args
system "make"
system "make", "install"
end
# The static archive is not needed once the shared library is installed.
(lib/"libjs_static.ajs").unlink
# Add an unversioned `js` to be used by dependents like `jsawk` & `plowshare`
ln_s bin/"js#{version.major}", bin/"js"
return unless OS.linux?
# Avoid writing nspr's versioned Cellar path in js*-config
inreplace bin/"js#{version.major}-config",
Formula["nspr"].prefix.realpath,
Formula["nspr"].opt_prefix
end
# Smoke test: run a trivial script through both the versioned binary and the
# unversioned `js` symlink.
test do
path = testpath/"test.js"
path.write "print('hello');"
assert_equal "hello", shell_output("#{bin}/js#{version.major} #{path}").strip
assert_equal "hello", shell_output("#{bin}/js #{path}").strip
end
end
```
|
```html
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta name="nodejs.org:node-version" content="v14.8.0">
<title>Cluster | Node.js v14.8.0 Documentation</title>
<link rel="stylesheet" href="path_to_url">
<link rel="stylesheet" href="assets/style.css">
<link rel="stylesheet" href="assets/hljs.css">
<link rel="canonical" href="path_to_url">
</head>
<body class="alt apidoc" id="api-section-cluster">
<div id="content" class="clearfix">
<div id="column2" class="interior">
<div id="intro" class="interior">
<a href="/" title="Go back to the home page">
Node.js
</a>
</div>
<ul>
<li><a href="documentation.html" class="nav-documentation">About this documentation</a></li>
<li><a href="synopsis.html" class="nav-synopsis">Usage and example</a></li>
</ul>
<div class="line"></div>
<ul>
<li><a href="assert.html" class="nav-assert">Assertion testing</a></li>
<li><a href="async_hooks.html" class="nav-async_hooks">Async hooks</a></li>
<li><a href="buffer.html" class="nav-buffer">Buffer</a></li>
<li><a href="addons.html" class="nav-addons">C++ addons</a></li>
<li><a href="n-api.html" class="nav-n-api">C/C++ addons with N-API</a></li>
<li><a href="embedding.html" class="nav-embedding">C++ embedder API</a></li>
<li><a href="child_process.html" class="nav-child_process">Child processes</a></li>
<li><a href="cluster.html" class="nav-cluster active">Cluster</a></li>
<li><a href="cli.html" class="nav-cli">Command line options</a></li>
<li><a href="console.html" class="nav-console">Console</a></li>
<li><a href="crypto.html" class="nav-crypto">Crypto</a></li>
<li><a href="debugger.html" class="nav-debugger">Debugger</a></li>
<li><a href="deprecations.html" class="nav-deprecations">Deprecated APIs</a></li>
<li><a href="dns.html" class="nav-dns">DNS</a></li>
<li><a href="domain.html" class="nav-domain">Domain</a></li>
<li><a href="esm.html" class="nav-esm">ECMAScript modules</a></li>
<li><a href="errors.html" class="nav-errors">Errors</a></li>
<li><a href="events.html" class="nav-events">Events</a></li>
<li><a href="fs.html" class="nav-fs">File system</a></li>
<li><a href="globals.html" class="nav-globals">Globals</a></li>
<li><a href="http.html" class="nav-http">HTTP</a></li>
<li><a href="http2.html" class="nav-http2">HTTP/2</a></li>
<li><a href="https.html" class="nav-https">HTTPS</a></li>
<li><a href="inspector.html" class="nav-inspector">Inspector</a></li>
<li><a href="intl.html" class="nav-intl">Internationalization</a></li>
<li><a href="modules.html" class="nav-modules">Modules</a></li>
<li><a href="net.html" class="nav-net">Net</a></li>
<li><a href="os.html" class="nav-os">OS</a></li>
<li><a href="path.html" class="nav-path">Path</a></li>
<li><a href="perf_hooks.html" class="nav-perf_hooks">Performance hooks</a></li>
<li><a href="policy.html" class="nav-policy">Policies</a></li>
<li><a href="process.html" class="nav-process">Process</a></li>
<li><a href="punycode.html" class="nav-punycode">Punycode</a></li>
<li><a href="querystring.html" class="nav-querystring">Query strings</a></li>
<li><a href="readline.html" class="nav-readline">Readline</a></li>
<li><a href="repl.html" class="nav-repl">REPL</a></li>
<li><a href="report.html" class="nav-report">Report</a></li>
<li><a href="stream.html" class="nav-stream">Stream</a></li>
<li><a href="string_decoder.html" class="nav-string_decoder">String decoder</a></li>
<li><a href="timers.html" class="nav-timers">Timers</a></li>
<li><a href="tls.html" class="nav-tls">TLS/SSL</a></li>
<li><a href="tracing.html" class="nav-tracing">Trace events</a></li>
<li><a href="tty.html" class="nav-tty">TTY</a></li>
<li><a href="dgram.html" class="nav-dgram">UDP/datagram</a></li>
<li><a href="url.html" class="nav-url">URL</a></li>
<li><a href="util.html" class="nav-util">Utilities</a></li>
<li><a href="v8.html" class="nav-v8">V8</a></li>
<li><a href="vm.html" class="nav-vm">VM</a></li>
<li><a href="wasi.html" class="nav-wasi">WASI</a></li>
<li><a href="worker_threads.html" class="nav-worker_threads">Worker threads</a></li>
<li><a href="zlib.html" class="nav-zlib">Zlib</a></li>
</ul>
<div class="line"></div>
<ul>
<li><a href="path_to_url" class="nav-https-github-com-nodejs-node">Code repository and issue tracker</a></li>
</ul>
</div>
<div id="column1" data-id="cluster" class="interior">
<header>
<h1>Node.js v14.8.0 Documentation</h1>
<div id="gtoc">
<ul>
<li>
<a href="index.html" name="toc">Index</a>
</li>
<li>
<a href="all.html">View on single page</a>
</li>
<li>
<a href="cluster.json">View as JSON</a>
</li>
<li class="version-picker">
<a href="#">View another version <span>▼</span></a>
<ol class="version-picker"><li><a href="path_to_url">14.x</a></li>
<li><a href="path_to_url">13.x</a></li>
<li><a href="path_to_url">12.x <b>LTS</b></a></li>
<li><a href="path_to_url">11.x</a></li>
<li><a href="path_to_url">10.x <b>LTS</b></a></li>
<li><a href="path_to_url">9.x</a></li>
<li><a href="path_to_url">8.x</a></li>
<li><a href="path_to_url">7.x</a></li>
<li><a href="path_to_url">6.x</a></li>
<li><a href="path_to_url">5.x</a></li>
<li><a href="path_to_url">4.x</a></li>
<li><a href="path_to_url">0.12.x</a></li>
<li><a href="path_to_url">0.10.x</a></li></ol>
</li>
<li class="edit_on_github"><a href="path_to_url"><span class="github_icon"><svg height="16" width="16" viewBox="0 0 16.1 16.1" fill="currentColor"><path d="M8 0a8 8 0 0 0-2.5 15.6c.4 0 .5-.2.5-.4v-1.5c-2 .4-2.5-.5-2.7-1 0-.1-.5-.9-.8-1-.3-.2-.7-.6 0-.6.6 0 1 .6 1.2.8.7 1.2 1.9 1 2.4.7 0-.5.2-.9.5-1-1.8-.3-3.7-1-3.7-4 0-.9.3-1.6.8-2.2 0-.2-.3-1 .1-2 0 0 .7-.3 2.2.7a7.4 7.4 0 0 1 4 0c1.5-1 2.2-.8 2.2-.8.5 1.1.2 2 .1 2.1.5.6.8 1.3.8 2.2 0 3-1.9 3.7-3.6 4 .3.2.5.7.5 1.4v2.2c0 .2.1.5.5.4A8 8 0 0 0 16 8a8 8 0 0 0-8-8z"/></svg></span>Edit on GitHub</a></li>
</ul>
</div>
<hr>
</header>
<div id="toc">
<h2>Table of Contents</h2>
<ul>
<li>
<p><span class="stability_2"><a href="#cluster_cluster">Cluster</a></span></p>
<ul>
<li><a href="#cluster_how_it_works">How it works</a></li>
<li>
<p><a href="#cluster_class_worker">Class: <code>Worker</code></a></p>
<ul>
<li><a href="#cluster_event_disconnect">Event: <code>'disconnect'</code></a></li>
<li><a href="#cluster_event_error">Event: <code>'error'</code></a></li>
<li><a href="#cluster_event_exit">Event: <code>'exit'</code></a></li>
<li><a href="#cluster_event_listening">Event: <code>'listening'</code></a></li>
<li><a href="#cluster_event_message">Event: <code>'message'</code></a></li>
<li><a href="#cluster_event_online">Event: <code>'online'</code></a></li>
<li><a href="#cluster_worker_disconnect"><code>worker.disconnect()</code></a></li>
<li><a href="#cluster_worker_exitedafterdisconnect"><code>worker.exitedAfterDisconnect</code></a></li>
<li><a href="#cluster_worker_id"><code>worker.id</code></a></li>
<li><a href="#cluster_worker_isconnected"><code>worker.isConnected()</code></a></li>
<li><a href="#cluster_worker_isdead"><code>worker.isDead()</code></a></li>
<li><a href="#cluster_worker_kill_signal"><code>worker.kill([signal])</code></a></li>
<li><a href="#cluster_worker_process"><code>worker.process</code></a></li>
<li><a href="#cluster_worker_send_message_sendhandle_options_callback"><code>worker.send(message[, sendHandle[, options]][, callback])</code></a></li>
</ul>
</li>
<li><a href="#cluster_event_disconnect_1">Event: <code>'disconnect'</code></a></li>
<li><a href="#cluster_event_exit_1">Event: <code>'exit'</code></a></li>
<li><a href="#cluster_event_fork">Event: <code>'fork'</code></a></li>
<li><a href="#cluster_event_listening_1">Event: <code>'listening'</code></a></li>
<li><a href="#cluster_event_message_1">Event: <code>'message'</code></a></li>
<li><a href="#cluster_event_online_1">Event: <code>'online'</code></a></li>
<li><a href="#cluster_event_setup">Event: <code>'setup'</code></a></li>
<li><a href="#cluster_cluster_disconnect_callback"><code>cluster.disconnect([callback])</code></a></li>
<li><a href="#cluster_cluster_fork_env"><code>cluster.fork([env])</code></a></li>
<li><a href="#cluster_cluster_ismaster"><code>cluster.isMaster</code></a></li>
<li><a href="#cluster_cluster_isworker"><code>cluster.isWorker</code></a></li>
<li><a href="#cluster_cluster_schedulingpolicy"><code>cluster.schedulingPolicy</code></a></li>
<li><a href="#cluster_cluster_settings"><code>cluster.settings</code></a></li>
<li><a href="#cluster_cluster_setupmaster_settings"><code>cluster.setupMaster([settings])</code></a></li>
<li><a href="#cluster_cluster_worker"><code>cluster.worker</code></a></li>
<li><a href="#cluster_cluster_workers"><code>cluster.workers</code></a></li>
</ul>
</li>
</ul>
</div>
<div id="apicontent">
<h1>Cluster<span><a class="mark" href="#cluster_cluster" id="cluster_cluster">#</a></span></h1>
<p></p><div class="api_stability api_stability_2"><a href="documentation.html#documentation_stability_index">Stability: 2</a> - Stable</div><p></p>
<p><strong>Source Code:</strong> <a href="path_to_url">lib/cluster.js</a></p>
<p>A single instance of Node.js runs in a single thread. To take advantage of
multi-core systems, the user will sometimes want to launch a cluster of Node.js
processes to handle the load.</p>
<p>The cluster module allows easy creation of child processes that all share
server ports.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> cluster = <span class="hljs-built_in">require</span>(<span class="hljs-string">'cluster'</span>);
<span class="hljs-keyword">const</span> http = <span class="hljs-built_in">require</span>(<span class="hljs-string">'http'</span>);
<span class="hljs-keyword">const</span> numCPUs = <span class="hljs-built_in">require</span>(<span class="hljs-string">'os'</span>).cpus().length;
<span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`Master <span class="hljs-subst">${process.pid}</span> is running`</span>);
<span class="hljs-comment">// Fork workers.</span>
<span class="hljs-keyword">for</span> (<span class="hljs-keyword">let</span> i = <span class="hljs-number">0</span>; i < numCPUs; i++) {
cluster.fork();
}
cluster.on(<span class="hljs-string">'exit'</span>, (worker, code, signal) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`worker <span class="hljs-subst">${worker.process.pid}</span> died`</span>);
});
} <span class="hljs-keyword">else</span> {
<span class="hljs-comment">// Workers can share any TCP connection</span>
<span class="hljs-comment">// In this case it is an HTTP server</span>
http.createServer(<span class="hljs-function">(<span class="hljs-params">req, res</span>) =></span> {
res.writeHead(<span class="hljs-number">200</span>);
res.end(<span class="hljs-string">'hello world\n'</span>);
}).listen(<span class="hljs-number">8000</span>);
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`Worker <span class="hljs-subst">${process.pid}</span> started`</span>);
}</code></pre>
<p>Running Node.js will now share port 8000 between the workers:</p>
<pre><code class="language-console"><span class="hljs-meta">$</span><span class="bash"> node server.js</span>
Master 3596 is running
Worker 4324 started
Worker 4520 started
Worker 6056 started
Worker 5644 started</code></pre>
<p>On Windows, it is not yet possible to set up a named pipe server in a worker.</p>
<h2>How it works<span><a class="mark" href="#cluster_how_it_works" id="cluster_how_it_works">#</a></span></h2>
<p>The worker processes are spawned using the <a href="child_process.html#child_process_child_process_fork_modulepath_args_options"><code>child_process.fork()</code></a> method,
so that they can communicate with the parent via IPC and pass server
handles back and forth.</p>
<p>The cluster module supports two methods of distributing incoming
connections.</p>
<p>The first one (and the default one on all platforms except Windows),
is the round-robin approach, where the master process listens on a
port, accepts new connections and distributes them across the workers
in a round-robin fashion, with some built-in smarts to avoid
overloading a worker process.</p>
<p>The second approach is where the master process creates the listen
socket and sends it to interested workers. The workers then accept
incoming connections directly.</p>
<p>The second approach should, in theory, give the best performance.
In practice however, distribution tends to be very unbalanced due
to operating system scheduler vagaries. Loads have been observed
where over 70% of all connections ended up in just two processes,
out of a total of eight.</p>
<p>Because <code>server.listen()</code> hands off most of the work to the master
process, there are three cases where the behavior between a normal
Node.js process and a cluster worker differs:</p>
<ol>
<li><code>server.listen({fd: 7})</code> Because the message is passed to the master,
file descriptor 7 <strong>in the parent</strong> will be listened on, and the
handle passed to the worker, rather than listening to the worker's
idea of what the number 7 file descriptor references.</li>
<li><code>server.listen(handle)</code> Listening on handles explicitly will cause
the worker to use the supplied handle, rather than talk to the master
process.</li>
<li><code>server.listen(0)</code> Normally, this will cause servers to listen on a
random port. However, in a cluster, each worker will receive the
same "random" port each time they do <code>listen(0)</code>. In essence, the
port is random the first time, but predictable thereafter. To listen
on a unique port, generate a port number based on the cluster worker ID.</li>
</ol>
<p>Node.js does not provide routing logic. It is, therefore, important to design an
application such that it does not rely too heavily on in-memory data objects for
things like sessions and login.</p>
<p>Because workers are all separate processes, they can be killed or
re-spawned depending on a program's needs, without affecting other
workers. As long as there are some workers still alive, the server will
continue to accept connections. If no workers are alive, existing connections
will be dropped and new connections will be refused. Node.js does not
automatically manage the number of workers, however. It is the application's
responsibility to manage the worker pool based on its own needs.</p>
<p>Although a primary use case for the <code>cluster</code> module is networking, it can
also be used for other use cases requiring worker processes.</p>
<h2>Class: <code>Worker</code><span><a class="mark" href="#cluster_class_worker" id="cluster_class_worker">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li>Extends: <a href="events.html#events_class_eventemitter" class="type"><EventEmitter></a></li>
</ul>
<p>A <code>Worker</code> object contains all public information and method about a worker.
In the master it can be obtained using <code>cluster.workers</code>. In a worker
it can be obtained using <code>cluster.worker</code>.</p>
<h3>Event: <code>'disconnect'</code><span><a class="mark" href="#cluster_event_disconnect" id="cluster_event_disconnect">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.7</span>
</div>
<p>Similar to the <code>cluster.on('disconnect')</code> event, but specific to this worker.</p>
<pre><code class="language-js">cluster.fork().on(<span class="hljs-string">'disconnect'</span>, () => {
<span class="hljs-comment">// Worker has disconnected</span>
});</code></pre>
<h3>Event: <code>'error'</code><span><a class="mark" href="#cluster_event_error" id="cluster_event_error">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.3</span>
</div>
<p>This event is the same as the one provided by <a href="child_process.html#child_process_child_process_fork_modulepath_args_options"><code>child_process.fork()</code></a>.</p>
<p>Within a worker, <code>process.on('error')</code> may also be used.</p>
<h3>Event: <code>'exit'</code><span><a class="mark" href="#cluster_event_exit" id="cluster_event_exit">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.11.2</span>
</div>
<ul>
<li><code>code</code> <a href="path_to_url#Number_type" class="type"><number></a> The exit code, if it exited normally.</li>
<li><code>signal</code> <a href="path_to_url#String_type" class="type"><string></a> The name of the signal (e.g. <code>'SIGHUP'</code>) that caused
the process to be killed.</li>
</ul>
<p>Similar to the <code>cluster.on('exit')</code> event, but specific to this worker.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> worker = cluster.fork();
worker.on(<span class="hljs-string">'exit'</span>, (code, signal) => {
<span class="hljs-keyword">if</span> (signal) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`worker was killed by signal: <span class="hljs-subst">${signal}</span>`</span>);
} <span class="hljs-keyword">else</span> <span class="hljs-keyword">if</span> (code !== <span class="hljs-number">0</span>) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`worker exited with error code: <span class="hljs-subst">${code}</span>`</span>);
} <span class="hljs-keyword">else</span> {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'worker success!'</span>);
}
});</code></pre>
<h3>Event: <code>'listening'</code><span><a class="mark" href="#cluster_event_listening" id="cluster_event_listening">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><code>address</code> <a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>Similar to the <code>cluster.on('listening')</code> event, but specific to this worker.</p>
<pre><code class="language-js">cluster.fork().on(<span class="hljs-string">'listening'</span>, (address) => {
<span class="hljs-comment">// Worker is listening</span>
});</code></pre>
<p>It is not emitted in the worker.</p>
<h3>Event: <code>'message'</code><span><a class="mark" href="#cluster_event_message" id="cluster_event_message">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><code>message</code> <a href="path_to_url" class="type"><Object></a></li>
<li><code>handle</code> <a href="path_to_url#Undefined_type" class="type"><undefined></a> | <a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>Similar to the <code>'message'</code> event of <code>cluster</code>, but specific to this worker.</p>
<p>Within a worker, <code>process.on('message')</code> may also be used.</p>
<p>See <a href="process.html#process_event_message"><code>process</code> event: <code>'message'</code></a>.</p>
<p>Here is an example using the message system. It keeps a count in the master
process of the number of HTTP requests received by the workers:</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> cluster = <span class="hljs-built_in">require</span>(<span class="hljs-string">'cluster'</span>);
<span class="hljs-keyword">const</span> http = <span class="hljs-built_in">require</span>(<span class="hljs-string">'http'</span>);
<span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-comment">// Keep track of http requests</span>
<span class="hljs-keyword">let</span> numReqs = <span class="hljs-number">0</span>;
setInterval(<span class="hljs-function"><span class="hljs-params">()</span> =></span> {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`numReqs = <span class="hljs-subst">${numReqs}</span>`</span>);
}, <span class="hljs-number">1000</span>);
<span class="hljs-comment">// Count requests</span>
<span class="hljs-function"><span class="hljs-keyword">function</span> <span class="hljs-title">messageHandler</span>(<span class="hljs-params">msg</span>) </span>{
<span class="hljs-keyword">if</span> (msg.cmd && msg.cmd === <span class="hljs-string">'notifyRequest'</span>) {
numReqs += <span class="hljs-number">1</span>;
}
}
<span class="hljs-comment">// Start workers and listen for messages containing notifyRequest</span>
<span class="hljs-keyword">const</span> numCPUs = <span class="hljs-built_in">require</span>(<span class="hljs-string">'os'</span>).cpus().length;
<span class="hljs-keyword">for</span> (<span class="hljs-keyword">let</span> i = <span class="hljs-number">0</span>; i < numCPUs; i++) {
cluster.fork();
}
<span class="hljs-keyword">for</span> (<span class="hljs-keyword">const</span> id <span class="hljs-keyword">in</span> cluster.workers) {
cluster.workers[id].on(<span class="hljs-string">'message'</span>, messageHandler);
}
} <span class="hljs-keyword">else</span> {
<span class="hljs-comment">// Worker processes have a http server.</span>
http.Server(<span class="hljs-function">(<span class="hljs-params">req, res</span>) =></span> {
res.writeHead(<span class="hljs-number">200</span>);
res.end(<span class="hljs-string">'hello world\n'</span>);
<span class="hljs-comment">// Notify master about the request</span>
process.send({ <span class="hljs-attr">cmd</span>: <span class="hljs-string">'notifyRequest'</span> });
}).listen(<span class="hljs-number">8000</span>);
}</code></pre>
<h3>Event: <code>'online'</code><span><a class="mark" href="#cluster_event_online" id="cluster_event_online">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<p>Similar to the <code>cluster.on('online')</code> event, but specific to this worker.</p>
<pre><code class="language-js">cluster.fork().on(<span class="hljs-string">'online'</span>, () => {
<span class="hljs-comment">// Worker is online</span>
});</code></pre>
<p>It is not emitted in the worker.</p>
<h3><code>worker.disconnect()</code><span><a class="mark" href="#cluster_worker_disconnect" id="cluster_worker_disconnect">#</a></span></h3>
<div class="api_metadata">
<details class="changelog"><summary>History</summary>
<table>
<tbody><tr><th>Version</th><th>Changes</th></tr>
<tr><td>v7.3.0</td>
<td><p>This method now returns a reference to <code>worker</code>.</p></td></tr>
<tr><td>v0.7.7</td>
<td><p><span>Added in: v0.7.7</span></p></td></tr>
</tbody></table>
</details>
</div>
<ul>
<li>Returns: <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a> A reference to <code>worker</code>.</li>
</ul>
<p>In a worker, this function will close all servers, wait for the <code>'close'</code> event
on those servers, and then disconnect the IPC channel.</p>
<p>In the master, an internal message is sent to the worker causing it to call
<code>.disconnect()</code> on itself.</p>
<p>Causes <code>.exitedAfterDisconnect</code> to be set.</p>
<p>After a server is closed, it will no longer accept new connections,
but connections may be accepted by any other listening worker. Existing
connections will be allowed to close as usual. When no more connections exist,
see <a href="net.html#net_event_close"><code>server.close()</code></a>, the IPC channel to the worker will close allowing it
to die gracefully.</p>
<p>The above applies <em>only</em> to server connections, client connections are not
automatically closed by workers, and disconnect does not wait for them to close
before exiting.</p>
<p>In a worker, <code>process.disconnect</code> exists, but it is not this function;
it is <a href="child_process.html#child_process_subprocess_disconnect"><code>disconnect()</code></a>.</p>
<p>Because long living server connections may block workers from disconnecting, it
may be useful to send a message, so application specific actions may be taken to
close them. It also may be useful to implement a timeout, killing a worker if
the <code>'disconnect'</code> event has not been emitted after some time.</p>
<pre><code class="language-js"><span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-keyword">const</span> worker = cluster.fork();
<span class="hljs-keyword">let</span> timeout;
worker.on(<span class="hljs-string">'listening'</span>, (address) => {
worker.send(<span class="hljs-string">'shutdown'</span>);
worker.disconnect();
timeout = setTimeout(<span class="hljs-function"><span class="hljs-params">()</span> =></span> {
worker.kill();
}, <span class="hljs-number">2000</span>);
});
worker.on(<span class="hljs-string">'disconnect'</span>, () => {
clearTimeout(timeout);
});
} <span class="hljs-keyword">else</span> <span class="hljs-keyword">if</span> (cluster.isWorker) {
<span class="hljs-keyword">const</span> net = <span class="hljs-built_in">require</span>(<span class="hljs-string">'net'</span>);
<span class="hljs-keyword">const</span> server = net.createServer(<span class="hljs-function">(<span class="hljs-params">socket</span>) =></span> {
<span class="hljs-comment">// Connections never end</span>
});
server.listen(<span class="hljs-number">8000</span>);
process.on(<span class="hljs-string">'message'</span>, (msg) => {
<span class="hljs-keyword">if</span> (msg === <span class="hljs-string">'shutdown'</span>) {
<span class="hljs-comment">// Initiate graceful close of any connections to server</span>
}
});
}</code></pre>
<h3><code>worker.exitedAfterDisconnect</code><span><a class="mark" href="#cluster_worker_exitedafterdisconnect" id="cluster_worker_exitedafterdisconnect">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v6.0.0</span>
</div>
<ul>
<li><a href="path_to_url#Boolean_type" class="type"><boolean></a></li>
</ul>
<p>This property is <code>true</code> if the worker exited due to <code>.kill()</code> or
<code>.disconnect()</code>. If the worker exited any other way, it is <code>false</code>. If the
worker has not exited, it is <code>undefined</code>.</p>
<p>The boolean <a href="#cluster_worker_exitedafterdisconnect"><code>worker.exitedAfterDisconnect</code></a> allows distinguishing between
voluntary and accidental exit, the master may choose not to respawn a worker
based on this value.</p>
<pre><code class="language-js">cluster.on(<span class="hljs-string">'exit'</span>, (worker, code, signal) => {
<span class="hljs-keyword">if</span> (worker.exitedAfterDisconnect === <span class="hljs-literal">true</span>) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'Oh, it was just voluntary no need to worry'</span>);
}
});
<span class="hljs-comment">// kill worker</span>
worker.kill();</code></pre>
<h3><code>worker.id</code><span><a class="mark" href="#cluster_worker_id" id="cluster_worker_id">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.8.0</span>
</div>
<ul>
<li><a href="path_to_url#Number_type" class="type"><number></a></li>
</ul>
<p>Each new worker is given its own unique id; this id is stored in the
<code>id</code>.</p>
<p>While a worker is alive, this is the key that indexes it in
<code>cluster.workers</code>.</p>
<h3><code>worker.isConnected()</code><span><a class="mark" href="#cluster_worker_isconnected" id="cluster_worker_isconnected">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.11.14</span>
</div>
<p>This function returns <code>true</code> if the worker is connected to its master via its
IPC channel, <code>false</code> otherwise. A worker is connected to its master after it
has been created. It is disconnected after the <code>'disconnect'</code> event is emitted.</p>
<h3><code>worker.isDead()</code><span><a class="mark" href="#cluster_worker_isdead" id="cluster_worker_isdead">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.11.14</span>
</div>
<p>This function returns <code>true</code> if the worker's process has terminated (either
because of exiting or being signaled). Otherwise, it returns <code>false</code>.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> cluster = <span class="hljs-built_in">require</span>(<span class="hljs-string">'cluster'</span>);
<span class="hljs-keyword">const</span> http = <span class="hljs-built_in">require</span>(<span class="hljs-string">'http'</span>);
<span class="hljs-keyword">const</span> numCPUs = <span class="hljs-built_in">require</span>(<span class="hljs-string">'os'</span>).cpus().length;
<span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`Master <span class="hljs-subst">${process.pid}</span> is running`</span>);
<span class="hljs-comment">// Fork workers.</span>
<span class="hljs-keyword">for</span> (<span class="hljs-keyword">let</span> i = <span class="hljs-number">0</span>; i < numCPUs; i++) {
cluster.fork();
}
cluster.on(<span class="hljs-string">'fork'</span>, (worker) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'worker is dead:'</span>, worker.isDead());
});
cluster.on(<span class="hljs-string">'exit'</span>, (worker, code, signal) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'worker is dead:'</span>, worker.isDead());
});
} <span class="hljs-keyword">else</span> {
<span class="hljs-comment">// Workers can share any TCP connection. In this case, it is an HTTP server.</span>
http.createServer(<span class="hljs-function">(<span class="hljs-params">req, res</span>) =></span> {
res.writeHead(<span class="hljs-number">200</span>);
res.end(<span class="hljs-string">`Current process\n <span class="hljs-subst">${process.pid}</span>`</span>);
process.kill(process.pid);
}).listen(<span class="hljs-number">8000</span>);
}</code></pre>
<h3><code>worker.kill([signal])</code><span><a class="mark" href="#cluster_worker_kill_signal" id="cluster_worker_kill_signal">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.9.12</span>
</div>
<ul>
<li><code>signal</code> <a href="path_to_url#String_type" class="type"><string></a> Name of the kill signal to send to the worker
process. <strong>Default</strong>: <code>'SIGTERM'</code></li>
</ul>
<p>This function will kill the worker. In the master, it does this by disconnecting
the <code>worker.process</code>, and once disconnected, killing with <code>signal</code>. In the
worker, it does it by disconnecting the channel, and then exiting with code <code>0</code>.</p>
<p>Because <code>kill()</code> attempts to gracefully disconnect the worker process, it is
susceptible to waiting indefinitely for the disconnect to complete. For example,
if the worker enters an infinite loop, a graceful disconnect will never occur.
If the graceful disconnect behavior is not needed, use <code>worker.process.kill()</code>.</p>
<p>Causes <code>.exitedAfterDisconnect</code> to be set.</p>
<p>This method is aliased as <code>worker.destroy()</code> for backwards compatibility.</p>
<p>In a worker, <code>process.kill()</code> exists, but it is not this function;
it is <a href="process.html#process_process_kill_pid_signal"><code>kill()</code></a>.</p>
<h3><code>worker.process</code><span><a class="mark" href="#cluster_worker_process" id="cluster_worker_process">#</a></span></h3>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><a href="child_process.html#child_process_class_childprocess" class="type"><ChildProcess></a></li>
</ul>
<p>All workers are created using <a href="child_process.html#child_process_child_process_fork_modulepath_args_options"><code>child_process.fork()</code></a>, the returned object
from this function is stored as <code>.process</code>. In a worker, the global <code>process</code>
is stored.</p>
<p>See: <a href="child_process.html#child_process_child_process_fork_modulepath_args_options">Child Process module</a>.</p>
<p>Workers will call <code>process.exit(0)</code> if the <code>'disconnect'</code> event occurs
on <code>process</code> and <code>.exitedAfterDisconnect</code> is not <code>true</code>. This protects against
accidental disconnection.</p>
<h3><code>worker.send(message[, sendHandle[, options]][, callback])</code><span><a class="mark" href="#cluster_worker_send_message_sendhandle_options_callback" id="cluster_worker_send_message_sendhandle_options_callback">#</a></span></h3>
<div class="api_metadata">
<details class="changelog"><summary>History</summary>
<table>
<tbody><tr><th>Version</th><th>Changes</th></tr>
<tr><td>v4.0.0</td>
<td><p>The <code>callback</code> parameter is supported now.</p></td></tr>
<tr><td>v0.7.0</td>
<td><p><span>Added in: v0.7.0</span></p></td></tr>
</tbody></table>
</details>
</div>
<ul>
<li><code>message</code> <a href="path_to_url" class="type"><Object></a></li>
<li><code>sendHandle</code> <a href="net.html#net_server_listen_handle_backlog_callback" class="type"><Handle></a></li>
<li>
<p><code>options</code> <a href="path_to_url" class="type"><Object></a> The <code>options</code> argument, if present, is an object used to
parameterize the sending of certain types of handles. <code>options</code> supports
the following properties:</p>
<ul>
<li><code>keepOpen</code> <a href="path_to_url#Boolean_type" class="type"><boolean></a> A value that can be used when passing instances of
<code>net.Socket</code>. When <code>true</code>, the socket is kept open in the sending process.
<strong>Default:</strong> <code>false</code>.</li>
</ul>
</li>
<li><code>callback</code> <a href="path_to_url" class="type"><Function></a></li>
<li>Returns: <a href="path_to_url#Boolean_type" class="type"><boolean></a></li>
</ul>
<p>Send a message to a worker or master, optionally with a handle.</p>
<p>In the master this sends a message to a specific worker. It is identical to
<a href="child_process.html#your_sha256_hashk"><code>ChildProcess.send()</code></a>.</p>
<p>In a worker this sends a message to the master. It is identical to
<code>process.send()</code>.</p>
<p>This example will echo back all messages from the master:</p>
<pre><code class="language-js"><span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-keyword">const</span> worker = cluster.fork();
worker.send(<span class="hljs-string">'hi there'</span>);
} <span class="hljs-keyword">else</span> <span class="hljs-keyword">if</span> (cluster.isWorker) {
process.on(<span class="hljs-string">'message'</span>, (msg) => {
process.send(msg);
});
}</code></pre>
<h2>Event: <code>'disconnect'</code><span><a class="mark" href="#cluster_event_disconnect_1" id="cluster_event_disconnect_1">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.9</span>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
</ul>
<p>Emitted after the worker IPC channel has disconnected. This can occur when a
worker exits gracefully, is killed, or is disconnected manually (such as with
<code>worker.disconnect()</code>).</p>
<p>There may be a delay between the <code>'disconnect'</code> and <code>'exit'</code> events. These
events can be used to detect if the process is stuck in a cleanup or if there
are long-living connections.</p>
<pre><code class="language-js">cluster.on(<span class="hljs-string">'disconnect'</span>, (worker) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`The worker #<span class="hljs-subst">${worker.id}</span> has disconnected`</span>);
});</code></pre>
<h2>Event: <code>'exit'</code><span><a class="mark" href="#cluster_event_exit_1" id="cluster_event_exit_1">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.9</span>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
<li><code>code</code> <a href="path_to_url#Number_type" class="type"><number></a> The exit code, if it exited normally.</li>
<li><code>signal</code> <a href="path_to_url#String_type" class="type"><string></a> The name of the signal (e.g. <code>'SIGHUP'</code>) that caused
the process to be killed.</li>
</ul>
<p>When any of the workers die the cluster module will emit the <code>'exit'</code> event.</p>
<p>This can be used to restart the worker by calling <a href="#cluster_cluster_fork_env"><code>.fork()</code></a> again.</p>
<pre><code class="language-js">cluster.on(<span class="hljs-string">'exit'</span>, (worker, code, signal) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'worker %d died (%s). restarting...'</span>,
worker.process.pid, signal || code);
cluster.fork();
});</code></pre>
<p>See <a href="child_process.html#child_process_event_exit"><code>child_process</code> event: <code>'exit'</code></a>.</p>
<h2>Event: <code>'fork'</code><span><a class="mark" href="#cluster_event_fork" id="cluster_event_fork">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
</ul>
<p>When a new worker is forked the cluster module will emit a <code>'fork'</code> event.
This can be used to log worker activity, and create a custom timeout.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> timeouts = [];
<span class="hljs-function"><span class="hljs-keyword">function</span> <span class="hljs-title">errorMsg</span>(<span class="hljs-params"></span>) </span>{
<span class="hljs-built_in">console</span>.error(<span class="hljs-string">'Something must be wrong with the connection ...'</span>);
}
cluster.on(<span class="hljs-string">'fork'</span>, (worker) => {
timeouts[worker.id] = setTimeout(errorMsg, <span class="hljs-number">2000</span>);
});
cluster.on(<span class="hljs-string">'listening'</span>, (worker, address) => {
clearTimeout(timeouts[worker.id]);
});
cluster.on(<span class="hljs-string">'exit'</span>, (worker, code, signal) => {
clearTimeout(timeouts[worker.id]);
errorMsg();
});</code></pre>
<h2>Event: <code>'listening'</code><span><a class="mark" href="#cluster_event_listening_1" id="cluster_event_listening_1">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
<li><code>address</code> <a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>After calling <code>listen()</code> from a worker, when the <code>'listening'</code> event is emitted
on the server a <code>'listening'</code> event will also be emitted on <code>cluster</code> in the
master.</p>
<p>The event handler is executed with two arguments, the <code>worker</code> contains the
worker object and the <code>address</code> object contains the following connection
properties: <code>address</code>, <code>port</code> and <code>addressType</code>. This is very useful if the
worker is listening on more than one address.</p>
<pre><code class="language-js">cluster.on(<span class="hljs-string">'listening'</span>, (worker, address) => {
<span class="hljs-built_in">console</span>.log(
<span class="hljs-string">`A worker is now connected to <span class="hljs-subst">${address.address}</span>:<span class="hljs-subst">${address.port}</span>`</span>);
});</code></pre>
<p>The <code>addressType</code> is one of:</p>
<ul>
<li><code>4</code> (TCPv4)</li>
<li><code>6</code> (TCPv6)</li>
<li><code>-1</code> (Unix domain socket)</li>
<li><code>'udp4'</code> or <code>'udp6'</code> (UDP v4 or v6)</li>
</ul>
<h2>Event: <code>'message'</code><span><a class="mark" href="#cluster_event_message_1" id="cluster_event_message_1">#</a></span></h2>
<div class="api_metadata">
<details class="changelog"><summary>History</summary>
<table>
<tbody><tr><th>Version</th><th>Changes</th></tr>
<tr><td>v6.0.0</td>
<td><p>The <code>worker</code> parameter is passed now; see below for details.</p></td></tr>
<tr><td>v2.5.0</td>
<td><p><span>Added in: v2.5.0</span></p></td></tr>
</tbody></table>
</details>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
<li><code>message</code> <a href="path_to_url" class="type"><Object></a></li>
<li><code>handle</code> <a href="path_to_url#Undefined_type" class="type"><undefined></a> | <a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>Emitted when the cluster master receives a message from any worker.</p>
<p>See <a href="child_process.html#child_process_event_message"><code>child_process</code> event: <code>'message'</code></a>.</p>
<h2>Event: <code>'online'</code><span><a class="mark" href="#cluster_event_online_1" id="cluster_event_online_1">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><code>worker</code> <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
</ul>
<p>After forking a new worker, the worker should respond with an online message.
When the master receives an online message it will emit this event.
The difference between <code>'fork'</code> and <code>'online'</code> is that <code>'fork'</code> is emitted when the
master forks a worker, and <code>'online'</code> is emitted when the worker is running.</p>
<pre><code class="language-js">cluster.on(<span class="hljs-string">'online'</span>, (worker) => {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'Yay, the worker responded after it was forked'</span>);
});</code></pre>
<h2>Event: <code>'setup'</code><span><a class="mark" href="#cluster_event_setup" id="cluster_event_setup">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.1</span>
</div>
<ul>
<li><code>settings</code> <a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>Emitted every time <a href="#cluster_cluster_setupmaster_settings"><code>.setupMaster()</code></a> is called.</p>
<p>The <code>settings</code> object is the <code>cluster.settings</code> object at the time
<a href="#cluster_cluster_setupmaster_settings"><code>.setupMaster()</code></a> was called and is advisory only, since multiple calls to
<a href="#cluster_cluster_setupmaster_settings"><code>.setupMaster()</code></a> can be made in a single tick.</p>
<p>If accuracy is important, use <code>cluster.settings</code>.</p>
<h2><code>cluster.disconnect([callback])</code><span><a class="mark" href="#cluster_cluster_disconnect_callback" id="cluster_cluster_disconnect_callback">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.7</span>
</div>
<ul>
<li><code>callback</code> <a href="path_to_url" class="type"><Function></a> Called when all workers are disconnected and handles are
closed.</li>
</ul>
<p>Calls <code>.disconnect()</code> on each worker in <code>cluster.workers</code>.</p>
<p>When they are disconnected all internal handles will be closed, allowing the
master process to die gracefully if no other event is waiting.</p>
<p>The method takes an optional callback argument which will be called when
finished.</p>
<p>This can only be called from the master process.</p>
<h2><code>cluster.fork([env])</code><span><a class="mark" href="#cluster_cluster_fork_env" id="cluster_cluster_fork_env">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.6.0</span>
</div>
<ul>
<li><code>env</code> <a href="path_to_url" class="type"><Object></a> Key/value pairs to add to worker process environment.</li>
<li>Returns: <a href="cluster.html#cluster_class_worker" class="type"><cluster.Worker></a></li>
</ul>
<p>Spawn a new worker process.</p>
<p>This can only be called from the master process.</p>
<h2><code>cluster.isMaster</code><span><a class="mark" href="#cluster_cluster_ismaster" id="cluster_cluster_ismaster">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.8.1</span>
</div>
<ul>
<li><a href="path_to_url#Boolean_type" class="type"><boolean></a></li>
</ul>
<p>True if the process is a master. This is determined
by the <code>process.env.NODE_UNIQUE_ID</code>. If <code>process.env.NODE_UNIQUE_ID</code> is
undefined, then <code>isMaster</code> is <code>true</code>.</p>
<h2><code>cluster.isWorker</code><span><a class="mark" href="#cluster_cluster_isworker" id="cluster_cluster_isworker">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.6.0</span>
</div>
<ul>
<li><a href="path_to_url#Boolean_type" class="type"><boolean></a></li>
</ul>
<p>True if the process is not a master (it is the negation of <code>cluster.isMaster</code>).</p>
<h2><code>cluster.schedulingPolicy</code><span><a class="mark" href="#cluster_cluster_schedulingpolicy" id="cluster_cluster_schedulingpolicy">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.11.2</span>
</div>
<p>The scheduling policy, either <code>cluster.SCHED_RR</code> for round-robin or
<code>cluster.SCHED_NONE</code> to leave it to the operating system. This is a
global setting and effectively frozen once either the first worker is spawned,
or <a href="#cluster_cluster_setupmaster_settings"><code>.setupMaster()</code></a> is called, whichever comes first.</p>
<p><code>SCHED_RR</code> is the default on all operating systems except Windows.
Windows will change to <code>SCHED_RR</code> once libuv is able to effectively
distribute IOCP handles without incurring a large performance hit.</p>
<p><code>cluster.schedulingPolicy</code> can also be set through the
<code>NODE_CLUSTER_SCHED_POLICY</code> environment variable. Valid
values are <code>'rr'</code> and <code>'none'</code>.</p>
<h2><code>cluster.settings</code><span><a class="mark" href="#cluster_cluster_settings" id="cluster_cluster_settings">#</a></span></h2>
<div class="api_metadata">
<details class="changelog"><summary>History</summary>
<table>
<tbody><tr><th>Version</th><th>Changes</th></tr>
<tr><td>v13.2.0, v12.16.0</td>
<td><p>The <code>serialization</code> option is supported now.</p></td></tr>
<tr><td>v9.5.0</td>
<td><p>The <code>cwd</code> option is supported now.</p></td></tr>
<tr><td>v9.4.0</td>
<td><p>The <code>windowsHide</code> option is supported now.</p></td></tr>
<tr><td>v8.2.0</td>
<td><p>The <code>inspectPort</code> option is supported now.</p></td></tr>
<tr><td>v6.4.0</td>
<td><p>The <code>stdio</code> option is supported now.</p></td></tr>
<tr><td>v0.7.1</td>
<td><p><span>Added in: v0.7.1</span></p></td></tr>
</tbody></table>
</details>
</div>
<ul>
<li>
<p><a href="path_to_url" class="type"><Object></a></p>
<ul>
<li><code>execArgv</code> <a href="path_to_url#String_type" class="type"><string[]></a> List of string arguments passed to the Node.js
executable. <strong>Default:</strong> <code>process.execArgv</code>.</li>
<li><code>exec</code> <a href="path_to_url#String_type" class="type"><string></a> File path to worker file. <strong>Default:</strong> <code>process.argv[1]</code>.</li>
<li><code>args</code> <a href="path_to_url#String_type" class="type"><string[]></a> String arguments passed to worker.
<strong>Default:</strong> <code>process.argv.slice(2)</code>.</li>
<li><code>cwd</code> <a href="path_to_url#String_type" class="type"><string></a> Current working directory of the worker process. <strong>Default:</strong>
<code>undefined</code> (inherits from parent process).</li>
<li><code>serialization</code> <a href="path_to_url#String_type" class="type"><string></a> Specify the kind of serialization used for sending
messages between processes. Possible values are <code>'json'</code> and <code>'advanced'</code>.
See <a href="child_process.html#child_process_advanced_serialization">Advanced serialization for <code>child_process</code></a> for more details.
<strong>Default:</strong> <code>false</code>.</li>
<li><code>silent</code> <a href="path_to_url#Boolean_type" class="type"><boolean></a> Whether or not to send output to parent's stdio.
<strong>Default:</strong> <code>false</code>.</li>
<li><code>stdio</code> <a href="path_to_url" class="type"><Array></a> Configures the stdio of forked processes. Because the
cluster module relies on IPC to function, this configuration must contain an
<code>'ipc'</code> entry. When this option is provided, it overrides <code>silent</code>.</li>
<li><code>uid</code> <a href="path_to_url#Number_type" class="type"><number></a> Sets the user identity of the process. (See <a href="path_to_url"><code>setuid(2)</code></a>.)</li>
<li><code>gid</code> <a href="path_to_url#Number_type" class="type"><number></a> Sets the group identity of the process. (See <a href="path_to_url"><code>setgid(2)</code></a>.)</li>
<li><code>inspectPort</code> <a href="path_to_url#Number_type" class="type"><number></a> | <a href="path_to_url" class="type"><Function></a> Sets inspector port of worker.
This can be a number, or a function that takes no arguments and returns a
number. By default each worker gets its own port, incremented from the
master's <code>process.debugPort</code>.</li>
<li><code>windowsHide</code> <a href="path_to_url#Boolean_type" class="type"><boolean></a> Hide the forked processes console window that would
normally be created on Windows systems. <strong>Default:</strong> <code>false</code>.</li>
</ul>
</li>
</ul>
<p>After calling <a href="#cluster_cluster_setupmaster_settings"><code>.setupMaster()</code></a> (or <a href="#cluster_cluster_fork_env"><code>.fork()</code></a>) this settings object will
contain the settings, including the default values.</p>
<p>This object is not intended to be changed or set manually.</p>
<h2><code>cluster.setupMaster([settings])</code><span><a class="mark" href="#cluster_cluster_setupmaster_settings" id="cluster_cluster_setupmaster_settings">#</a></span></h2>
<div class="api_metadata">
<details class="changelog"><summary>History</summary>
<table>
<tbody><tr><th>Version</th><th>Changes</th></tr>
<tr><td>v6.4.0</td>
<td><p>The <code>stdio</code> option is supported now.</p></td></tr>
<tr><td>v0.7.1</td>
<td><p><span>Added in: v0.7.1</span></p></td></tr>
</tbody></table>
</details>
</div>
<ul>
<li><code>settings</code> <a href="path_to_url" class="type"><Object></a> See <a href="#cluster_cluster_settings"><code>cluster.settings</code></a>.</li>
</ul>
<p><code>setupMaster</code> is used to change the default 'fork' behavior. Once called,
the settings will be present in <code>cluster.settings</code>.</p>
<p>Any settings changes only affect future calls to <a href="#cluster_cluster_fork_env"><code>.fork()</code></a> and have no
effect on workers that are already running.</p>
<p>The only attribute of a worker that cannot be set via <code>.setupMaster()</code> is
the <code>env</code> passed to <a href="#cluster_cluster_fork_env"><code>.fork()</code></a>.</p>
<p>The defaults above apply to the first call only; the defaults for later
calls are the current values at the time of <code>cluster.setupMaster()</code> is called.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> cluster = <span class="hljs-built_in">require</span>(<span class="hljs-string">'cluster'</span>);
cluster.setupMaster({
<span class="hljs-attr">exec</span>: <span class="hljs-string">'worker.js'</span>,
<span class="hljs-attr">args</span>: [<span class="hljs-string">'--use'</span>, <span class="hljs-string">'https'</span>],
<span class="hljs-attr">silent</span>: <span class="hljs-literal">true</span>
});
cluster.fork(); <span class="hljs-comment">// https worker</span>
cluster.setupMaster({
<span class="hljs-attr">exec</span>: <span class="hljs-string">'worker.js'</span>,
<span class="hljs-attr">args</span>: [<span class="hljs-string">'--use'</span>, <span class="hljs-string">'http'</span>]
});
cluster.fork(); <span class="hljs-comment">// http worker</span></code></pre>
<p>This can only be called from the master process.</p>
<h2><code>cluster.worker</code><span><a class="mark" href="#cluster_cluster_worker" id="cluster_cluster_worker">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>A reference to the current worker object. Not available in the master process.</p>
<pre><code class="language-js"><span class="hljs-keyword">const</span> cluster = <span class="hljs-built_in">require</span>(<span class="hljs-string">'cluster'</span>);
<span class="hljs-keyword">if</span> (cluster.isMaster) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">'I am master'</span>);
cluster.fork();
cluster.fork();
} <span class="hljs-keyword">else</span> <span class="hljs-keyword">if</span> (cluster.isWorker) {
<span class="hljs-built_in">console</span>.log(<span class="hljs-string">`I am worker #<span class="hljs-subst">${cluster.worker.id}</span>`</span>);
}</code></pre>
<h2><code>cluster.workers</code><span><a class="mark" href="#cluster_cluster_workers" id="cluster_cluster_workers">#</a></span></h2>
<div class="api_metadata">
<span>Added in: v0.7.0</span>
</div>
<ul>
<li><a href="path_to_url" class="type"><Object></a></li>
</ul>
<p>A hash that stores the active worker objects, keyed by <code>id</code> field. Makes it
easy to loop through all the workers. It is only available in the master
process.</p>
<p>A worker is removed from <code>cluster.workers</code> after the worker has disconnected
<em>and</em> exited. The order between these two events cannot be determined in
advance. However, it is guaranteed that the removal from the <code>cluster.workers</code>
list happens before the last <code>'disconnect'</code> or <code>'exit'</code> event is emitted.</p>
<pre><code class="language-js"><span class="hljs-comment">// Go through all workers</span>
<span class="hljs-function"><span class="hljs-keyword">function</span> <span class="hljs-title">eachWorker</span>(<span class="hljs-params">callback</span>) </span>{
<span class="hljs-keyword">for</span> (<span class="hljs-keyword">const</span> id <span class="hljs-keyword">in</span> cluster.workers) {
callback(cluster.workers[id]);
}
}
eachWorker(<span class="hljs-function">(<span class="hljs-params">worker</span>) =></span> {
worker.send(<span class="hljs-string">'big announcement to all workers'</span>);
});</code></pre>
<p>Using the worker's unique id is the easiest way to locate the worker.</p>
<pre><code class="language-js">socket.on(<span class="hljs-string">'data'</span>, (id) => {
<span class="hljs-keyword">const</span> worker = cluster.workers[id];
});</code></pre>
<!-- API END -->
</div>
</div>
</div>
</body>
</html>
```
|
```smalltalk
/*
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE file for details.
*
* path_to_url
*
*/
using System.ComponentModel.DataAnnotations;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;
using Piranha.Manager.Models;
using Piranha.Manager.Services;
namespace Piranha.Manager.Controllers;
/// <summary>
/// Api controller for content management.
/// </summary>
[Area("Manager")]
[Route("manager/api/content")]
[Authorize(Policy = Permission.Admin)]
[ApiController]
[AutoValidateAntiforgeryToken]
public class ContentApiController : Controller
{
    private readonly IApi _api;
    private readonly ContentService _content;
    private readonly ContentTypeService _contentType;

    /// <summary>
    /// Default constructor.
    /// </summary>
    /// <param name="content">The content service</param>
    /// <param name="contentType">The content type service</param>
    /// <param name="api">The current api</param>
    public ContentApiController(ContentService content, ContentTypeService contentType, IApi api)
    {
        _api = api;
        _content = content;
        _contentType = contentType;
    }

    /// <summary>
    /// Gets the currently available block types for the
    /// specified page type.
    /// </summary>
    /// <param name="pageType">The page type id</param>
    /// <param name="parentType">The optional parent group type</param>
    /// <returns>The block list model</returns>
    [Route("blocktypes/page/{pageType}/{parentType?}")]
    [HttpGet]
    public BlockListModel GetBlockTypesForPage(string pageType, string parentType = null)
    {
        return _contentType.GetPageBlockTypes(pageType, parentType);
    }

    /// <summary>
    /// Gets the currently available block types for the
    /// specified post type.
    /// </summary>
    /// <param name="postType">The post type id</param>
    /// <param name="parentType">The optional parent group type</param>
    /// <returns>The block list model</returns>
    [Route("blocktypes/post/{postType}/{parentType?}")]
    [HttpGet]
    public BlockListModel GetBlockTypesForPost(string postType, string parentType = null)
    {
        return _contentType.GetPostBlockTypes(postType, parentType);
    }

    /// <summary>
    /// Gets the currently available block types.
    /// </summary>
    /// <param name="parentType">The optional parent group type</param>
    /// <returns>The block list model</returns>
    [Route("blocktypes/{parentType?}")]
    [HttpGet]
    public BlockListModel GetBlockTypes(string parentType = null)
    {
        return _contentType.GetBlockTypes(parentType);
    }

    /// <summary>
    /// Creates a new block of the specified type.
    /// </summary>
    /// <param name="type">The block type</param>
    /// <returns>The new block, or 404 if the type is unknown</returns>
    [Route("block/{type}")]
    [HttpGet]
    public async Task<IActionResult> CreateBlockAsync(string type)
    {
        var block = await _contentType.CreateBlockAsync(type);

        if (block != null)
        {
            return Ok(block);
        }
        return NotFound();
    }

    /// <summary>
    /// Creates a new region for the specified content type.
    /// </summary>
    /// <param name="content">The type of content</param>
    /// <param name="type">The content type</param>
    /// <param name="region">The region id</param>
    /// <returns>The new region model, or 404 if the content kind is unknown</returns>
    [Route("region/{content}/{type}/{region}")]
    [HttpGet]
    public async Task<IActionResult> CreateRegionAsync(string content, string type, string region)
    {
        // Dispatch on the content kind given in the route.
        if (content == "content")
        {
            return Ok(await _contentType.CreateContentRegionAsync(type, region));
        }
        else if (content == "page")
        {
            return Ok(await _contentType.CreatePageRegionAsync(type, region));
        }
        else if (content == "post")
        {
            return Ok(await _contentType.CreatePostRegionAsync(type, region));
        }
        else if (content == "site")
        {
            return Ok(await _contentType.CreateSiteRegionAsync(type, region));
        }
        return NotFound();
    }

    /// <summary>
    /// Gets the list model for all content.
    /// </summary>
    /// <returns>The content list model</returns>
    [Route("list")]
    [HttpGet]
    [Authorize(Policy = Permission.Content)]
    public Task<IActionResult> List()
    {
        // Delegate to the group-filtered overload with no group filter.
        return List(null);
    }

    /// <summary>
    /// Gets the list model for the specified content group.
    /// </summary>
    /// <param name="contentGroup">The optional content group id</param>
    /// <returns>The content list model</returns>
    [Route("{contentGroup}/list")]
    [HttpGet]
    [Authorize(Policy = Permission.Content)]
    public async Task<IActionResult> List(string contentGroup)
    {
        var model = await _content.GetListAsync(contentGroup);

        return Ok(model);
    }

    /// <summary>
    /// Gets the content with the given id.
    /// </summary>
    /// <param name="id">The unique id</param>
    /// <param name="languageId">The optional language id</param>
    /// <returns>The content edit model</returns>
    [Route("{id}/{languageId?}")]
    [HttpGet]
    [Authorize(Policy = Permission.Content)]
    public async Task<ContentEditModel> Get(Guid id, Guid? languageId = null)
    {
        return await _content.GetByIdAsync(id, languageId);
    }

    /// <summary>
    /// Gets the info model for the content with the
    /// given id.
    /// </summary>
    /// <param name="id">The unique id</param>
    /// <returns>The content info model</returns>
    [Route("info/{id}")]
    [HttpGet]
    [Authorize(Policy = Permission.Content)]
    public async Task<Piranha.Models.ContentInfo> GetInfo(Guid id)
    {
        return await _api.Content.GetByIdAsync<Piranha.Models.ContentInfo>(id);
    }

    /// <summary>
    /// Creates a new content model for the specified content type.
    /// </summary>
    /// <param name="contentType">The content type</param>
    /// <returns>The edit model</returns>
    [Route("create/{contentType}")]
    [HttpGet]
    [Authorize(Policy = Permission.ContentAdd)]
    public async Task<ContentEditModel> Create(string contentType)
    {
        return await _content.CreateAsync(contentType);
    }

    /// <summary>
    /// Saves the given model
    /// </summary>
    /// <param name="model">The model</param>
    /// <returns>The result of the operation</returns>
    [Route("save")]
    [HttpPost]
    [Authorize(Policy = Permission.ContentSave)]
    public async Task<ContentEditModel> Save(ContentEditModel model)
    {
        try
        {
            await _content.SaveAsync(model);
        }
        catch (ValidationException e)
        {
            // Surface validation failures as an error status on the
            // unsaved model instead of throwing to the client.
            model.Status = new StatusMessage
            {
                Type = StatusMessage.Error,
                Body = e.Message
            };
            return model;
        }

        // Reload the model so the client gets the persisted state.
        var ret = await _content.GetByIdAsync(model.Id, model.LanguageId);
        ret.Status = new StatusMessage
        {
            Type = StatusMessage.Success,
            Body = "The content was successfully saved"
        };
        return ret;
    }

    /// <summary>
    /// Deletes the content with the given id.
    /// </summary>
    /// <param name="id">The unique id</param>
    /// <returns>The result of the operation</returns>
    [Route("delete")]
    [HttpDelete]
    [Authorize(Policy = Permission.ContentDelete)]
    public async Task<StatusMessage> Delete([FromBody]Guid id)
    {
        try
        {
            await _content.DeleteAsync(id);
        }
        catch (ValidationException e)
        {
            // Validation did not succeed
            return new StatusMessage
            {
                Type = StatusMessage.Error,
                Body = e.Message
            };
        }
        catch
        {
            // Any other failure is reported as a generic error status.
            return new StatusMessage
            {
                Type = StatusMessage.Error,
                Body = "An error occurred while deleting the content"
            };
        }
        return new StatusMessage
        {
            Type = StatusMessage.Success,
            Body = "The content was successfully deleted"
        };
    }
}
```
|
```java
// Java Platform Module System descriptor for the io.context.plugins module.
module io.context.plugins {
// Ballerina language core APIs.
requires io.ballerina.lang;
// Ballerina syntax-tree / parser APIs.
requires io.ballerina.parser;
// Ballerina tooling APIs (diagnostics, text documents, etc.).
requires io.ballerina.tools.api;
// Only the plugin package itself is exposed to consumers.
exports io.context.plugins;
}
```
|
```c++
//
// ip/address_v4_iterator.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
//
// file LICENSE_1_0.txt or copy at path_to_url
//
#ifndef ASIO_IP_ADDRESS_V4_ITERATOR_HPP
#define ASIO_IP_ADDRESS_V4_ITERATOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/address_v4.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_iterator;
/// An input iterator that can be used for traversing IPv4 addresses.
/**
* In addition to satisfying the input iterator requirements, this iterator
* also supports decrement.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <> class basic_address_iterator<address_v4>
{
public:
/// The type of the elements pointed to by the iterator.
typedef address_v4 value_type;
/// Distance between two iterators.
typedef std::ptrdiff_t difference_type;
/// The type of a pointer to an element pointed to by the iterator.
typedef const address_v4* pointer;
/// The type of a reference to an element pointed to by the iterator.
typedef const address_v4& reference;
/// Denotes that the iterator satisfies the input iterator requirements.
typedef std::input_iterator_tag iterator_category;
/// Construct an iterator that points to the specified address.
basic_address_iterator(const address_v4& addr) ASIO_NOEXCEPT
: address_(addr)
{
}
/// Copy constructor.
basic_address_iterator(
const basic_address_iterator& other) ASIO_NOEXCEPT
: address_(other.address_)
{
}
#if defined(ASIO_HAS_MOVE)
/// Move constructor.
basic_address_iterator(basic_address_iterator&& other) ASIO_NOEXCEPT
: address_(ASIO_MOVE_CAST(address_v4)(other.address_))
{
}
#endif // defined(ASIO_HAS_MOVE)
/// Assignment operator.
basic_address_iterator& operator=(
const basic_address_iterator& other) ASIO_NOEXCEPT
{
address_ = other.address_;
return *this;
}
#if defined(ASIO_HAS_MOVE)
/// Move assignment operator.
basic_address_iterator& operator=(
basic_address_iterator&& other) ASIO_NOEXCEPT
{
address_ = ASIO_MOVE_CAST(address_v4)(other.address_);
return *this;
}
#endif // defined(ASIO_HAS_MOVE)
/// Dereference the iterator.
const address_v4& operator*() const ASIO_NOEXCEPT
{
return address_;
}
/// Dereference the iterator.
const address_v4* operator->() const ASIO_NOEXCEPT
{
return &address_;
}
/// Pre-increment operator.
basic_address_iterator& operator++() ASIO_NOEXCEPT
{
address_ = address_v4((address_.to_uint() + 1) & 0xFFFFFFFF);
return *this;
}
/// Post-increment operator.
basic_address_iterator operator++(int) ASIO_NOEXCEPT
{
basic_address_iterator tmp(*this);
++*this;
return tmp;
}
/// Pre-decrement operator.
basic_address_iterator& operator--() ASIO_NOEXCEPT
{
address_ = address_v4((address_.to_uint() - 1) & 0xFFFFFFFF);
return *this;
}
/// Post-decrement operator.
// Returns the iterator's value prior to the decrement. Declared
// ASIO_NOEXCEPT for consistency with operator++(int) and the other
// member functions, all of which are non-throwing; the body only
// copies and decrements, neither of which can throw.
basic_address_iterator operator--(int) ASIO_NOEXCEPT
{
basic_address_iterator tmp(*this);
--*this;
return tmp;
}
/// Compare two addresses for equality.
// Two iterators are equal exactly when they point at the same address.
friend bool operator==(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ == b.address_;
}
/// Compare two addresses for inequality.
friend bool operator!=(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ != b.address_;
}
private:
address_v4 address_;
};
/// An input iterator that can be used for traversing IPv4 addresses.
typedef basic_address_iterator<address_v4> address_v4_iterator;
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ADDRESS_V4_ITERATOR_HPP
```
|
Kil Son-hui (born 7 March 1986) is a North Korean football forward who played for the North Korea women's national football team. She competed at the 2007 FIFA Women's World Cup and 2008 Summer Olympics. At the club level, she played for Rimyongsu.
International goals
See also
North Korea at the 2008 Summer Olympics
References
External links
1986 births
Living people
North Korean women's footballers
Place of birth missing (living people)
Footballers at the 2008 Summer Olympics
Olympic footballers for North Korea
Women's association football forwards
Asian Games medalists in football
Footballers at the 2006 Asian Games
North Korea women's international footballers
2007 FIFA Women's World Cup players
Asian Games gold medalists for North Korea
Medalists at the 2006 Asian Games
|
```freemarker
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at path_to_url
## Main toolbar buttons (tooltips and alt text for images)
pdfjs-previous-button =
.title =
pdfjs-previous-button-label =
pdfjs-next-button =
.title =
pdfjs-next-button-label =
# .title: Tooltip for the pageNumber input.
pdfjs-page-input =
.title =
# Variables:
# $pagesCount (Number) - the total number of pages in the document
# This string follows an input field with the number of the page currently displayed.
pdfjs-of-pages = { $pagesCount }
# Variables:
# $pageNumber (Number) - the currently visible page
# $pagesCount (Number) - the total number of pages in the document
pdfjs-page-of-pages = ({ $pageNumber } { $pagesCount })
pdfjs-zoom-out-button =
.title =
pdfjs-zoom-out-button-label =
pdfjs-zoom-in-button =
.title =
pdfjs-zoom-in-button-label =
pdfjs-zoom-select =
.title =
pdfjs-presentation-mode-button =
.title =
pdfjs-presentation-mode-button-label =
pdfjs-open-file-button =
.title =
pdfjs-open-file-button-label =
pdfjs-print-button =
.title =
pdfjs-print-button-label =
pdfjs-save-button =
.title =
pdfjs-save-button-label =
# Used in Firefox for Android as a tooltip for the download button (download is a verb).
pdfjs-download-button =
.title =
# Used in Firefox for Android as a label for the download button (download is a verb).
# Length of the translation matters since we are in a mobile context, with limited screen estate.
pdfjs-download-button-label =
pdfjs-bookmark-button =
.title = ( URL- )
pdfjs-bookmark-button-label =
## Secondary toolbar and context menu
pdfjs-tools-button =
.title =
pdfjs-tools-button-label =
pdfjs-first-page-button =
.title =
pdfjs-first-page-button-label =
pdfjs-last-page-button =
.title =
pdfjs-last-page-button-label =
pdfjs-page-rotate-cw-button =
.title =
pdfjs-page-rotate-cw-button-label =
pdfjs-page-rotate-ccw-button =
.title =
pdfjs-page-rotate-ccw-button-label =
pdfjs-cursor-text-select-tool-button =
.title =
pdfjs-cursor-text-select-tool-button-label =
pdfjs-cursor-hand-tool-button =
.title = ""
pdfjs-cursor-hand-tool-button-label = ""
pdfjs-scroll-page-button =
.title =
pdfjs-scroll-page-button-label =
pdfjs-scroll-vertical-button =
.title =
pdfjs-scroll-vertical-button-label =
pdfjs-scroll-horizontal-button =
.title =
pdfjs-scroll-horizontal-button-label =
pdfjs-scroll-wrapped-button =
.title =
pdfjs-scroll-wrapped-button-label =
pdfjs-spread-none-button =
.title =
pdfjs-spread-none-button-label =
pdfjs-spread-odd-button =
.title =
pdfjs-spread-odd-button-label =
pdfjs-spread-even-button =
.title =
pdfjs-spread-even-button-label =
## Document properties dialog
pdfjs-document-properties-button =
.title =
pdfjs-document-properties-button-label =
pdfjs-document-properties-file-name = :
pdfjs-document-properties-file-size = :
# Variables:
# $size_kb (Number) - the PDF file size in kilobytes
# $size_b (Number) - the PDF file size in bytes
pdfjs-document-properties-kb = { $size_kb } ({ $size_b } )
# Variables:
# $size_mb (Number) - the PDF file size in megabytes
# $size_b (Number) - the PDF file size in bytes
pdfjs-document-properties-mb = { $size_mb } ({ $size_b } )
pdfjs-document-properties-title = :
pdfjs-document-properties-author = :
pdfjs-document-properties-subject = :
pdfjs-document-properties-keywords = :
pdfjs-document-properties-creation-date = :
pdfjs-document-properties-modification-date = :
# Variables:
# $date (Date) - the creation/modification date of the PDF file
# $time (Time) - the creation/modification time of the PDF file
pdfjs-document-properties-date-string = { $date }, { $time }
pdfjs-document-properties-creator = :
pdfjs-document-properties-producer = PDF:
pdfjs-document-properties-version = PDF:
pdfjs-document-properties-page-count = :
pdfjs-document-properties-page-size = :
pdfjs-document-properties-page-size-unit-inches =
pdfjs-document-properties-page-size-unit-millimeters =
pdfjs-document-properties-page-size-orientation-portrait =
pdfjs-document-properties-page-size-orientation-landscape =
pdfjs-document-properties-page-size-name-a-three = A3
pdfjs-document-properties-page-size-name-a-four = A4
pdfjs-document-properties-page-size-name-letter = Letter
pdfjs-document-properties-page-size-name-legal = Legal
## Variables:
## $width (Number) - the width of the (current) page
## $height (Number) - the height of the (current) page
## $unit (String) - the unit of measurement of the (current) page
## $name (String) - the name of the (current) page
## $orientation (String) - the orientation of the (current) page
pdfjs-document-properties-page-size-dimension-string = { $width } { $height } { $unit } ({ $orientation })
pdfjs-document-properties-page-size-dimension-name-string = { $width } { $height } { $unit } ({ $name }, { $orientation })
##
# The linearization status of the document; usually called "Fast Web View" in
# English locales of Adobe software.
pdfjs-document-properties-linearized = :
pdfjs-document-properties-linearized-yes =
pdfjs-document-properties-linearized-no =
pdfjs-document-properties-close-button =
## Print
pdfjs-print-progress-message =
# Variables:
# $progress (Number) - percent value
pdfjs-print-progress-percent = { $progress }%
pdfjs-print-progress-close-button =
pdfjs-printing-not-supported = : .
pdfjs-printing-not-ready = : PDF .
## Tooltips and alt text for side panel toolbar buttons
pdfjs-toggle-sidebar-button =
.title =
pdfjs-toggle-sidebar-notification-button =
.title = ( //)
pdfjs-toggle-sidebar-button-label =
pdfjs-document-outline-button =
.title = ( / )
pdfjs-document-outline-button-label =
pdfjs-attachments-button =
.title =
pdfjs-attachments-button-label =
pdfjs-layers-button =
.title = ( , )
pdfjs-layers-button-label =
pdfjs-thumbs-button =
.title =
pdfjs-thumbs-button-label =
pdfjs-current-outline-item-button =
.title =
pdfjs-current-outline-item-button-label =
pdfjs-findbar-button =
.title =
pdfjs-findbar-button-label =
pdfjs-additional-layers =
## Thumbnails panel item (tooltip and alt text for images)
# Variables:
# $page (Number) - the page number
pdfjs-thumb-page-title =
.title = { $page }
# Variables:
# $page (Number) - the page number
pdfjs-thumb-page-canvas =
.aria-label = { $page }
## Find panel button title and messages
pdfjs-find-input =
.title =
.placeholder =
pdfjs-find-previous-button =
.title =
pdfjs-find-previous-button-label =
pdfjs-find-next-button =
.title =
pdfjs-find-next-button-label =
pdfjs-find-highlight-checkbox =
pdfjs-find-match-case-checkbox-label =
pdfjs-find-match-diacritics-checkbox-label =
pdfjs-find-entire-word-checkbox-label =
pdfjs-find-reached-top = ,
pdfjs-find-reached-bottom = ,
# Variables:
# $current (Number) - the index of the currently active find result
# $total (Number) - the total number of matches in the document
pdfjs-find-match-count =
{ $total ->
[one] { $current } { $total }
[few] { $current } { $total }
*[many] { $current } { $total }
}
# Variables:
# $limit (Number) - the maximum number of matches
pdfjs-find-match-count-limit =
{ $limit ->
[one] { $limit }
[few] { $limit }
*[many] { $limit }
}
pdfjs-find-not-found =
## Predefined zoom values
pdfjs-page-scale-width =
pdfjs-page-scale-fit =
pdfjs-page-scale-auto =
pdfjs-page-scale-actual =
# Variables:
# $scale (Number) - percent value for page scale
pdfjs-page-scale-percent = { $scale }%
## PDF page
# Variables:
# $page (Number) - the page number
pdfjs-page-landmark =
.aria-label = { $page }
## Loading indicator messages
pdfjs-loading-error = PDF .
pdfjs-invalid-file-error = PDF-.
pdfjs-missing-file-error = PDF-.
pdfjs-unexpected-response-error = .
pdfjs-rendering-error = .
## Annotations
# Variables:
# $date (Date) - the modification date of the annotation
# $time (Time) - the modification time of the annotation
pdfjs-annotation-date-string = { $date }, { $time }
# .alt: This is used as a tooltip.
# Variables:
# $type (String) - an annotation type from a list defined in the PDF spec
# (32000-1:2008 Table 169 Annotation types).
# Some common types are e.g.: "Check", "Text", "Comment", "Note"
pdfjs-text-annotation-type =
.alt = [{ $type }-]
## Password
pdfjs-password-label = PDF-.
pdfjs-password-invalid = . .
pdfjs-password-ok-button = OK
pdfjs-password-cancel-button =
pdfjs-web-fonts-disabled = - : PDF .
## Editing
pdfjs-editor-free-text-button =
.title =
pdfjs-editor-free-text-button-label =
pdfjs-editor-ink-button =
.title =
pdfjs-editor-ink-button-label =
pdfjs-editor-stamp-button =
.title =
pdfjs-editor-stamp-button-label =
pdfjs-editor-highlight-button =
.title =
pdfjs-editor-highlight-button-label =
pdfjs-highlight-floating-button =
.title =
pdfjs-highlight-floating-button1 =
.title =
.aria-label =
pdfjs-highlight-floating-button-label =
## Remove button for the various kind of editor.
pdfjs-editor-remove-ink-button =
.title =
pdfjs-editor-remove-freetext-button =
.title =
pdfjs-editor-remove-stamp-button =
.title =
pdfjs-editor-remove-highlight-button =
.title =
##
# Editor Parameters
pdfjs-editor-free-text-color-input =
pdfjs-editor-free-text-size-input =
pdfjs-editor-ink-color-input =
pdfjs-editor-ink-thickness-input =
pdfjs-editor-ink-opacity-input =
pdfjs-editor-stamp-add-image-button =
.title =
pdfjs-editor-stamp-add-image-button-label =
# This refers to the thickness of the line used for free highlighting (not bound to text)
pdfjs-editor-free-highlight-thickness-input =
pdfjs-editor-free-highlight-thickness-title =
.title = ,
pdfjs-free-text =
.aria-label =
pdfjs-free-text-default-content =
pdfjs-ink =
.aria-label =
pdfjs-ink-canvas =
.aria-label = ,
## Alt-text dialog
# Alternative text (alt text) helps when people can't see the image.
pdfjs-editor-alt-text-button-label =
pdfjs-editor-alt-text-edit-button-label =
pdfjs-editor-alt-text-dialog-label =
pdfjs-editor-alt-text-dialog-description = , .
pdfjs-editor-alt-text-add-description-label =
pdfjs-editor-alt-text-add-description-description = 1-2 , , .
pdfjs-editor-alt-text-mark-decorative-label =
pdfjs-editor-alt-text-mark-decorative-description = , .
pdfjs-editor-alt-text-cancel-button =
pdfjs-editor-alt-text-save-button =
pdfjs-editor-alt-text-decorative-tooltip =
# .placeholder: This is a placeholder for the alt text input area
pdfjs-editor-alt-text-textarea =
.placeholder = ,
## Editor resizers
## This is used in an aria label to help to understand the role of the resizer.
pdfjs-editor-resizer-label-top-left =
pdfjs-editor-resizer-label-top-middle =
pdfjs-editor-resizer-label-top-right =
pdfjs-editor-resizer-label-middle-right =
pdfjs-editor-resizer-label-bottom-right =
pdfjs-editor-resizer-label-bottom-middle =
pdfjs-editor-resizer-label-bottom-left =
pdfjs-editor-resizer-label-middle-left =
## Color picker
# This means "Color used to highlight text"
pdfjs-editor-highlight-colorpicker-label =
pdfjs-editor-colorpicker-button =
.title =
pdfjs-editor-colorpicker-dropdown =
.aria-label =
pdfjs-editor-colorpicker-yellow =
.title =
pdfjs-editor-colorpicker-green =
.title =
pdfjs-editor-colorpicker-blue =
.title =
pdfjs-editor-colorpicker-pink =
.title =
pdfjs-editor-colorpicker-red =
.title =
## Show all highlights
## This is a toggle button to show/hide all the highlights.
pdfjs-editor-highlight-show-all-button-label =
pdfjs-editor-highlight-show-all-button =
.title =
```
|
```python
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator path_to_url # noqa: E501
The version of the OpenAPI document: release-1.30
Generated by: path_to_url
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1EndpointAddress(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: path_to_url
    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'hostname': 'str',
        'ip': 'str',
        'node_name': 'str',
        'target_ref': 'V1ObjectReference'
    }

    attribute_map = {
        'hostname': 'hostname',
        'ip': 'ip',
        'node_name': 'nodeName',
        'target_ref': 'targetRef'
    }

    def __init__(self, hostname=None, ip=None, node_name=None, target_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointAddress - a model defined in OpenAPI

        :param hostname: (str, optional) hostname of this endpoint.
        :param ip: (str, required) IP of this endpoint; the setter rejects
            None when client-side validation is enabled.
        :param node_name: (str, optional) node hosting this endpoint.
        :param target_ref: (V1ObjectReference, optional) reference to the
            object providing the endpoint.
        :param local_vars_configuration: client Configuration controlling
            client-side validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._hostname = None
        self._ip = None
        self._node_name = None
        self._target_ref = None
        self.discriminator = None

        if hostname is not None:
            self.hostname = hostname
        # `ip` is the only required field: always routed through the
        # validating setter, even when None.
        self.ip = ip
        if node_name is not None:
            self.node_name = node_name
        if target_ref is not None:
            self.target_ref = target_ref

    @property
    def hostname(self):
        """Gets the hostname of this V1EndpointAddress.  # noqa: E501

        The Hostname of this endpoint  # noqa: E501

        :return: The hostname of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._hostname

    @hostname.setter
    def hostname(self, hostname):
        """Sets the hostname of this V1EndpointAddress.

        The Hostname of this endpoint  # noqa: E501

        :param hostname: The hostname of this V1EndpointAddress.  # noqa: E501
        :type: str
        """
        self._hostname = hostname

    @property
    def ip(self):
        """Gets the ip of this V1EndpointAddress.  # noqa: E501

        The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).  # noqa: E501

        :return: The ip of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this V1EndpointAddress.

        The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).  # noqa: E501

        :param ip: The ip of this V1EndpointAddress.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and ip is None.
        """
        if self.local_vars_configuration.client_side_validation and ip is None:  # noqa: E501
            raise ValueError("Invalid value for `ip`, must not be `None`")  # noqa: E501
        self._ip = ip

    @property
    def node_name(self):
        """Gets the node_name of this V1EndpointAddress.  # noqa: E501

        Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.  # noqa: E501

        :return: The node_name of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._node_name

    @node_name.setter
    def node_name(self, node_name):
        """Sets the node_name of this V1EndpointAddress.

        Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.  # noqa: E501

        :param node_name: The node_name of this V1EndpointAddress.  # noqa: E501
        :type: str
        """
        self._node_name = node_name

    @property
    def target_ref(self):
        """Gets the target_ref of this V1EndpointAddress.  # noqa: E501

        :return: The target_ref of this V1EndpointAddress.  # noqa: E501
        :rtype: V1ObjectReference
        """
        return self._target_ref

    @target_ref.setter
    def target_ref(self, target_ref):
        """Sets the target_ref of this V1EndpointAddress.

        :param target_ref: The target_ref of this V1EndpointAddress.  # noqa: E501
        :type: V1ObjectReference
        """
        self._target_ref = target_ref

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Plain dict.items() replaces six.iteritems: identical behavior on
        # both Python 2 and 3, and drops the third-party `six` dependency
        # from this method.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1EndpointAddress):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1EndpointAddress):
            return True

        return self.to_dict() != other.to_dict()
```
|
```objective-c
/*
*
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief GPIO number
*
* Enumerator values equal the physical GPIO pin numbers 0-27;
* GPIO_NUM_NC marks "not connected" and GPIO_NUM_MAX is the pin count.
*/
typedef enum {
GPIO_NUM_NC = -1, /*!< Use to signal not connected to S/W */
GPIO_NUM_0 = 0, /*!< GPIO0, input and output */
GPIO_NUM_1 = 1, /*!< GPIO1, input and output */
GPIO_NUM_2 = 2, /*!< GPIO2, input and output */
GPIO_NUM_3 = 3, /*!< GPIO3, input and output */
GPIO_NUM_4 = 4, /*!< GPIO4, input and output */
GPIO_NUM_5 = 5, /*!< GPIO5, input and output */
GPIO_NUM_6 = 6, /*!< GPIO6, input and output */
GPIO_NUM_7 = 7, /*!< GPIO7, input and output */
GPIO_NUM_8 = 8, /*!< GPIO8, input and output */
GPIO_NUM_9 = 9, /*!< GPIO9, input and output */
GPIO_NUM_10 = 10, /*!< GPIO10, input and output */
GPIO_NUM_11 = 11, /*!< GPIO11, input and output */
GPIO_NUM_12 = 12, /*!< GPIO12, input and output */
GPIO_NUM_13 = 13, /*!< GPIO13, input and output */
GPIO_NUM_14 = 14, /*!< GPIO14, input and output */
GPIO_NUM_15 = 15, /*!< GPIO15, input and output */
GPIO_NUM_16 = 16, /*!< GPIO16, input and output */
GPIO_NUM_17 = 17, /*!< GPIO17, input and output */
GPIO_NUM_18 = 18, /*!< GPIO18, input and output */
GPIO_NUM_19 = 19, /*!< GPIO19, input and output */
GPIO_NUM_20 = 20, /*!< GPIO20, input and output */
GPIO_NUM_21 = 21, /*!< GPIO21, input and output */
GPIO_NUM_22 = 22, /*!< GPIO22, input and output */
GPIO_NUM_23 = 23, /*!< GPIO23, input and output */
GPIO_NUM_24 = 24, /*!< GPIO24, input and output */
GPIO_NUM_25 = 25, /*!< GPIO25, input and output */
GPIO_NUM_26 = 26, /*!< GPIO26, input and output */
GPIO_NUM_27 = 27, /*!< GPIO27, input and output */
GPIO_NUM_MAX, /*!< Number of GPIO pins; evaluates to 28 (one past GPIO_NUM_27) */
} gpio_num_t;
#ifdef __cplusplus
}
#endif
```
|
Cinnamomea, cinnamomeus, or cinnamomeum is a Neo-Latin adjective meaning cinnamon-colored that occurs in the species names of many organisms. It may refer to:
Birds
Anthus cinnamomeus, the African pipit
Attila cinnamomeus, the cinnamon attila
Bradypterus cinnamomeus, the cinnamon bracken warbler
Certhiaxis cinnamomeus, the yellow-chinned spinetail
Cinclosoma cinnamomeum, the cinnamon quail-thrush
Cisticola cinnamomeus, the pale-crowned cisticola
Crypturellus cinnamomeus, the thicket tinamou or rufescent tinamou
Hypocryptadius cinnamomeus, the cinnamon ibon
Ixobrychus cinnamomeus, the cinnamon bittern or chestnut bittern
Pachyramphus cinnamomeus, the cinnamon becard
Pericrocotus cinnamomeus, the small minivet
Picumnus cinnamomeus, the chestnut piculet
Pyrrhomyias cinnamomeus, the cinnamon flycatcher
Passer cinnamomeus, the russet sparrow
Fungi
Microglossum cinnamomeum
Physocystidium cinnamomeum
Trichoderma cinnamomeum
Insects
Copelatus cinnamomeus, a diving beetle
Eublemma cinnamomeum, a moth
Heteragrion cinnamomeum, a dragonfly
Hoplogrammicosum cinnamomeum, a beetle
Marasmarcha cinnamomeus, a moth
Orthomegas cinnamomeus, a beetle
Plants
Odontoglossum cinnamomeum, an orchid
Oncidium cinnamomeum, an orchid
Osmundastrum cinnamomeum, a fern
Rhododendron arboreum subsp. cinnamomeum, a flowering shrub
Solanum cinnamomeum, a tomato species
See also
Cinnamomum
|
```c++
// Aseprite Document Library
//
// This file is released under the terms of the MIT license.
// Read LICENSE.txt for more information.
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "doc/document.h"
#include "base/path.h"
#include "doc/context.h"
#include "doc/sprite.h"
namespace doc {
// Creates a document that is not yet attached to any context
// (m_ctx starts as NULL).
Document::Document()
: Object(ObjectType::Document)
, m_sprites(this)
, m_ctx(NULL)
{
}
Document::~Document()
{
// Detach from the owning context (if any) before destruction so the
// context's document list never holds a dangling pointer.
removeFromContext();
}
// Attach this document to a new context, detaching it from the current
// one first. A no-op when the document is already in the given context.
void Document::setContext(Context* ctx)
{
  if (m_ctx != ctx) {
    removeFromContext();
    m_ctx = ctx;
    if (m_ctx)
      m_ctx->documents().add(this);
    onContextChanged();
  }
}
// Canvas width in pixels, taken from the document's sprite.
// NOTE(review): assumes sprite() is non-null — confirm callers guarantee it.
int Document::width() const
{
return sprite()->width();
}
// Canvas height in pixels, taken from the document's sprite.
int Document::height() const
{
return sprite()->height();
}
// NOTE(review): the cast relies on ColorMode enumerators matching the
// sprite's PixelFormat values — confirm the two enums stay in sync.
ColorMode Document::colorMode() const
{
return (ColorMode)sprite()->pixelFormat();
}
// Display name: the file-name component of m_filename, without its
// directory part.
std::string Document::name() const
{
return base::get_file_name(m_filename);
}
// Store the document's filename and notify observers. Paths that carry a
// directory component are normalized first; bare names are kept verbatim.
void Document::setFilename(const std::string& filename)
{
  const bool hasDirectory = !base::get_file_path(filename).empty();
  m_filename = (hasDirectory ? base::normalize_path(filename) : filename);
  notifyObservers(&DocumentObserver::onFileNameChanged, this);
}
// Closing a document just detaches it from its context.
void Document::close()
{
removeFromContext();
}
// Hook called after the document's context changes; intentionally empty
// here so subclasses can override it.
void Document::onContextChanged()
{
// Do nothing
}
// Detach this document from its context, if it has one, and fire the
// context-changed hook. Safe to call repeatedly.
void Document::removeFromContext()
{
  if (!m_ctx)
    return;

  m_ctx->documents().remove(this);
  m_ctx = NULL;
  onContextChanged();
}
} // namespace doc
```
|
```xml
/**
 * Demo data: people that can be @-mentioned in the editor.
 * Each entry has a display `name`, a `title`/byline, and an `avatar` URL.
 * The original avatar URLs were redacted to the `path_to_url` placeholder
 * during extraction, which left the string literals unterminated; the
 * literals are closed here so the module parses again.
 */
const mentions = [
  {
    name: 'Matthew Russell',
    title: 'Senior Software Engineer',
    avatar: 'path_to_url',
  },
  {
    name: 'Julian Krispel-Samsel',
    title: 'United Kingdom',
    avatar: 'path_to_url',
  },
  {
    name: 'Jyoti Puri',
    title: 'New Delhi, India',
    avatar: 'path_to_url',
  },
  {
    name: 'Max Stoiber',
    title:
      'Travels around the world, brews coffee, skis mountains and makes stuff on the web.',
    avatar: 'path_to_url',
  },
  {
    name: 'Nik Graf',
    title: 'Passionate about Software Architecture, UX, Skiing & Triathlons',
    avatar: 'path_to_url',
  },
  {
    name: 'Pascal Brandt',
    title: 'HeathIT hacker and researcher',
    avatar: 'path_to_url',
  },
];

export default mentions;
```
|
```prolog
#!/usr/bin/env perl
#
# Program: GenLibDeps.pl
#
# Synopsis: Generate HTML output that shows the dependencies between a set of
# libraries. The output of this script should periodically replace
# the similar content in the UsingLibraries.html document.
#
# Syntax: GenLibDeps.pl [-flat] <directory_with_libraries_in_it> [path_to_nm_binary]
#
use strict;
use warnings;
# Parse arguments...
# -flat        : plain-text output instead of HTML + GraphViz images
# -why         : also list the symbols that cause each dependency (implies -flat)
# -perobj      : report dependencies at object-file granularity
# -perobjincl  : make .a files also depend on the .o files they contain
my $FLAT = 0;
my $WHY = 0;
my $PEROBJ = 0;
my $PEROBJINCL = 0;
while (scalar(@ARGV) and ($_ = $ARGV[0], /^[-+]/)) {
shift;
last if /^--$/; # Stop processing arguments on --
# List command line options here...
if (/^-flat$/) { $FLAT = 1; next; }
if (/^-why/) { $WHY = 1; $FLAT = 1; next; }
if (/^-perobj$/) { $PEROBJ = 1; next; }
if (/^-perobjincl/) { $PEROBJINCL = 1; next;}
print "Unknown option: $_ : ignoring!\n";
}
# Give first option a name.
my $Directory = $ARGV[0];
if (!defined($Directory) || ! -d "$Directory") {
die "First argument must specify the directory containing LLVM libs\n";
}
my $nmPath = $ARGV[1];
# Find the "dot" program
my $DotPath="";
if (!$FLAT) {
chomp($DotPath = `which dot`);
die "Can't find 'dot'" if (! -x "$DotPath");
}
# The NM environment variable overrides both the command-line nm path and
# the PATH lookup below.
if (defined($ENV{NM})) {
chomp($nmPath=$ENV{NM});
}
if (!defined($nmPath) || $nmPath eq "") {
chomp($nmPath=`which nm`);
die "Can't find 'nm'" if (! -x "$nmPath");
}
# ranlib is only needed for the -perobj index rebuild; the RANLIB
# environment variable overrides the third positional argument.
my $ranlibPath;
if ($PEROBJ) {
$ranlibPath = $ARGV[2];
if (defined($ENV{RANLIB})) {
chomp($ranlibPath=$ENV{RANLIB});
}
if (!defined($ranlibPath) || $ranlibPath eq "") {
chomp($ranlibPath=`which ranlib`);
die "Can't find 'ranlib'" if (! -x "$ranlibPath");
}
}
# Open the directory and read its contents, sorting by name and differentiating
# by whether its a library (.a) or an object file (.o)
opendir DIR,$Directory;
my @files = readdir DIR;
closedir DIR;
my @libs = grep(/libLLVM.*\.(dylib|so|a)$/,sort(@files));
# Omit the all-of-llvm shared library.
@libs = grep(!/libLLVM-\d\.\d(svn)?\.(dylib|so)/, @libs);
my @objs = grep(/LLVM.*\.o$/,sort(@files));
# Declare the hashes we will use to keep track of the library and object file
# symbol definitions.
# %libdefs : symbol name -> library that defines it
# %objdefs : symbol name -> object file that defines it (a per-object
#            source path in -perobj mode)
# %libobjs : library -> set of object files it contains (-perobj only)
# %objdeps : object file -> set of object files it depends on (-perobj only)
my %libdefs;
my %objdefs;
my %libobjs;
my %objdeps=();
# Gather library definitions at object file granularity (optional)
if ($PEROBJ) {
foreach my $lib (@libs ) {
# Rebuild the archive index so `nm -s` output is current.
`$ranlibPath $Directory/$lib`;
# Map the library name back to its source directory under lib/, e.g.
# libLLVMX86CodeGen.a -> lib/Target/X86/.
my $libpath = $lib;
$libpath =~ s/^libLLVM(.*)\.a/$1/;
$libpath =~ s/(.+)CodeGen$/Target\/$1/;
$libpath =~ s/(.+)AsmPrinter$/Target\/$1\/AsmPrinter/;
$libpath =~ s/(.+)AsmParser$/Target\/$1\/AsmParser/;
$libpath =~ s/(.+)Info$/Target\/$1\/TargetInfo/;
$libpath =~ s/(.+)Disassembler$/Target\/$1\/Disassembler/;
$libpath =~ s/SelectionDAG/CodeGen\/SelectionDAG/;
$libpath =~ s/^AsmPrinter/CodeGen\/AsmPrinter/;
$libpath =~ s/^BitReader/Bitcode\/Reader/;
$libpath =~ s/^BitWriter/Bitcode\/Writer/;
$libpath =~ s/^BitstreamReader/Bitstream\/Reader/;
$libpath =~ s/^BitstreamWriter/Bitstream\/Writer/;
$libpath =~ s/^MSIL/Target\/MSIL/;
$libpath =~ s/^Core/IR/;
$libpath =~ s/^Instrumentation/Transforms\/Instrumentation/;
$libpath =~ s/^Interpreter/ExecutionEngine\/Interpreter/;
$libpath =~ s/^JIT/ExecutionEngine\/JIT/;
$libpath =~ s/^ScalarOpts/Transforms\/Scalar/;
$libpath =~ s/^TransformUtils/Transforms\/Utils/;
$libpath =~ s/^ipa/Analysis\/IPA/;
$libpath =~ s/^ipo/Transforms\/IPO/;
$libpath = "lib/".$libpath."/";
# `nm -sg` prints the archive index: "symbol in member.o" lines.
open DEFS, "$nmPath -sg $Directory/$lib|";
while (<DEFS>) {
chomp;
if (/^([^ ]*) in ([^ ]*)/) {
my $objfile = $libpath.$2;
$objdefs{$1} = $objfile;
$objdeps{$objfile} = {};
$libobjs{$lib}{$objfile}=1;
# my $p = "../llvm/".$objfile;
# $p =~ s/Support\/reg(.*).o/Support\/reg$1.c/;
# $p =~ s/.o$/.cpp/;
# unless (-e $p) {
# die "$p\n"
# }
}
}
close DEFS or die "nm failed";
}
# Second pass: record which defined symbols each object file consumes.
foreach my $lib (@libs ) {
my $libpath = $lib;
$libpath =~ s/^libLLVM(.*)\.a/$1/;
$libpath =~ s/(.+)CodeGen$/Target\/$1/;
$libpath =~ s/(.+)AsmPrinter$/Target\/$1\/AsmPrinter/;
$libpath =~ s/(.+)AsmParser$/Target\/$1\/AsmParser/;
$libpath =~ s/(.+)Info$/Target\/$1\/TargetInfo/;
$libpath =~ s/(.+)Disassembler$/Target\/$1\/Disassembler/;
$libpath =~ s/SelectionDAG/CodeGen\/SelectionDAG/;
$libpath =~ s/^AsmPrinter/CodeGen\/AsmPrinter/;
$libpath =~ s/^BitReader/Bitcode\/Reader/;
$libpath =~ s/^BitWriter/Bitcode\/Writer/;
$libpath =~ s/^BitstreamReader/Bitstream\/Reader/;
$libpath =~ s/^BitstreamWriter/Bitstream\/Writer/;
$libpath =~ s/^MSIL/Target\/MSIL/;
# NOTE(review): the first pass maps Core -> IR but this pass maps
# Core -> VMCore, so the two passes build different paths for the same
# library — confirm which mapping is intended.
$libpath =~ s/^Core/VMCore/;
$libpath =~ s/^Instrumentation/Transforms\/Instrumentation/;
$libpath =~ s/^Interpreter/ExecutionEngine\/Interpreter/;
$libpath =~ s/^JIT/ExecutionEngine\/JIT/;
$libpath =~ s/^ScalarOpts/Transforms\/Scalar/;
$libpath =~ s/^TransformUtils/Transforms\/Utils/;
$libpath =~ s/^ipa/Analysis\/IPA/;
$libpath =~ s/^ipo/Transforms\/IPO/;
$libpath = "lib/".$libpath."/";
# `nm -Aup` prefixes each undefined symbol with "archive:member:".
open UDEFS, "$nmPath -Aup $Directory/$lib|";
while (<UDEFS>) {
chomp;
if (/:([^:]+):/) {
my $obj = $libpath.$1;
s/[^ ]+: *U //;
if (defined($objdefs{$_})) {
$objdeps{$obj}{$objdefs{$_}}=1;
}
}
}
close UDEFS or die "nm failed"
}
} else {
# Gather definitions from the libraries
foreach my $lib (@libs ) {
open DEFS, "$nmPath -g $Directory/$lib|";
while (<DEFS>) {
# Keep only exported definitions (section codes A/B/C/D/G/R/S/T).
next if (! / [ABCDGRST] /);
s/^[^ ]* [ABCDGRST] //;
s/\015?\012//; # not sure if <DEFS> is in binmode and uses LF or CRLF.
# this strips both LF and CRLF.
$libdefs{$_} = $lib;
}
close DEFS or die "nm failed";
}
}
# Gather definitions from the object files.
foreach my $obj (@objs ) {
open DEFS, "$nmPath -g $Directory/$obj |";
while (<DEFS>) {
next if (! / [ABCDGRST] /);
s/^[^ ]* [ABCDGRST] //;
s/\015?\012//; # not sure if <DEFS> is in binmode and uses LF or CRLF.
# this strips both LF and CRLF.
$objdefs{$_} = $obj;
}
close DEFS or die "nm failed";
}
# Generate one entry in the <dl> list. This generates the <dt> and <dd> elements
# for one library or object file. The <dt> provides the name of the library or
# object. The <dd> provides a list of the libraries/objects it depends on.
# In -flat mode it prints "lib: dep dep ..." lines instead, and in -why mode
# additionally lists the undefined symbols behind each dependency. Reads the
# global %libdefs/%objdefs/%libobjs/%objdeps hashes; writes graph edges to
# the DOT handle when not in -flat mode.
sub gen_one_entry {
my $lib = $_[0];
my $lib_ns = $lib;
$lib_ns =~ s/(.*)\.[oa]/$1/;
if ($FLAT) {
print "$lib:";
if ($WHY) { print "\n"; }
} else {
print " <dt><b>$lib</b></dt><dd><ul>\n";
}
# List the undefined symbols of this lib/obj and resolve each one to the
# lib or obj that defines it. %DepLibs: dependency -> list of symbols
# that caused it.
open UNDEFS,
"$nmPath -u $Directory/$lib | sed -e 's/^[ 0]* U //' | sort | uniq |";
my %DepLibs;
while (<UNDEFS>) {
chomp;
my $lib_printed = 0;
if (defined($libdefs{$_}) && $libdefs{$_} ne $lib) {
$DepLibs{$libdefs{$_}} = [] unless exists $DepLibs{$libdefs{$_}};
push(@{$DepLibs{$libdefs{$_}}}, $_);
} elsif (defined($objdefs{$_}) && $objdefs{$_} ne $lib) {
if ($PEROBJ && !$PEROBJINCL) {
# -perobjincl makes .a files depend on .o files they contain themselves
# default is don't depend on these.
next if defined $libobjs{$lib}{$objdefs{$_}};
}
my $libroot = $lib;
$libroot =~ s/lib(.*).a/$1/;
if ($objdefs{$_} ne "$libroot.o") {
$DepLibs{$objdefs{$_}} = [] unless exists $DepLibs{$objdefs{$_}};
push(@{$DepLibs{$objdefs{$_}}}, $_);
}
}
}
close UNDEFS or die "nm failed";
unless(keys %DepLibs) {
# above failed
# Fallback: redo the scan without the sed/sort/uniq pipeline, stripping
# the "U " prefix in Perl instead (for platforms with a broken sed).
open UNDEFS, "$nmPath -u $Directory/$lib |";
while (<UNDEFS>) {
# to bypass non-working sed
if (' ' eq substr($_,0,2) and index($_,'U ')) {
$_ = substr($_,index($_,'U ')+2)
};
$_ = substr($_,index($_,' *U ')+5) if -1!=index($_,' *U ');
chomp;
my $lib_printed = 0;
if (defined($libdefs{$_}) && $libdefs{$_} ne $lib) {
$DepLibs{$libdefs{$_}} = [] unless exists $DepLibs{$libdefs{$_}};
push(@{$DepLibs{$libdefs{$_}}}, $_);
} elsif (defined($objdefs{$_}) && $objdefs{$_} ne $lib) {
my $libroot = $lib;
$libroot =~ s/lib(.*).a/$1/;
if ($objdefs{$_} ne "$libroot.o") {
$DepLibs{$objdefs{$_}} = [] unless exists $DepLibs{$objdefs{$_}};
push(@{$DepLibs{$objdefs{$_}}}, $_);
}
}
}
close UNDEFS or die "nm failed";
}
if ($PEROBJINCL) {
# include the .a's objects
for my $obj (keys %{$libobjs{$lib}}) {
$DepLibs{$obj} = ["<.a object>"] unless exists $DepLibs{$obj};
}
# Transitively close %DepLibs over %objdeps until a fixed point.
my $madechange = 1;
while($madechange) {
$madechange = 0;
my %temp = %DepLibs;
foreach my $obj (keys %DepLibs) {
foreach my $objdeps (keys %{$objdeps{$obj}}) {
next if defined $temp{$objdeps};
push(@{$temp{$objdeps}}, $obj);
$madechange = 1;
}
}
%DepLibs = %temp;
}
}
# Emit the collected dependencies, and (in graph mode) a DOT edge per
# dependency — light weight for .a targets, heavy for .o targets.
for my $key (sort keys %DepLibs) {
if ($FLAT) {
print " $key";
if ($WHY) {
print "\n";
my @syms = @{$DepLibs{$key}};
foreach my $sym (@syms) {
print " $sym\n";
}
}
} else {
print " <li>$key</li>\n";
}
my $suffix = substr($key,length($key)-1,1);
$key =~ s/(.*)\.[oa]/$1/;
if ($suffix eq "a") {
if (!$FLAT) { print DOT "$lib_ns -> $key [ weight=0 ];\n" };
} else {
if (!$FLAT) { print DOT "$lib_ns -> $key [ weight=10];\n" };
}
}
if ($FLAT) {
if (!$WHY) {
print "\n";
}
} else {
print " </ul></dd>\n";
}
}
# Make sure we flush on write. This is slower but correct based on the way we
# write I/O in gen_one_entry.
$| = 1;
# Print the definition list tag
if (!$FLAT) {
print "<dl>\n";
# Pipe the library graph straight into dot to render libdeps.gif.
open DOT, "| $DotPath -Tgif > libdeps.gif";
print DOT "digraph LibDeps {\n";
print DOT " size=\"40,15\"; \n";
print DOT " ratio=\"1.33333\"; \n";
print DOT " margin=\"0.25\"; \n";
print DOT " rankdir=\"LR\"; \n";
print DOT " mclimit=\"50.0\"; \n";
print DOT " ordering=\"out\"; \n";
print DOT " center=\"1\";\n";
print DOT "node [shape=\"box\",\n";
print DOT " color=\"#000088\",\n";
print DOT " fillcolor=\"#FFFACD\",\n";
print DOT " fontcolor=\"#3355BB\",\n";
print DOT " style=\"filled\",\n";
print DOT " fontname=\"sans\",\n";
print DOT " fontsize=\"24\"\n";
print DOT "];\n";
print DOT "edge [dir=\"forward\",style=\"solid\",color=\"#000088\"];\n";
}
# Print libraries first
foreach my $lib (@libs) {
gen_one_entry($lib);
}
# In -perobj mode also dump the raw object-to-object dependency table.
if ($PEROBJ) {
foreach my $obj (keys %objdeps) {
print "$obj:";
if (!$PEROBJINCL) {
foreach my $dep (keys %{$objdeps{$obj}}) {
print " $dep";
}
}
print "\n";
}
}
if (!$FLAT) {
print DOT "}\n";
close DOT;
# Second graph: object-file dependencies rendered to objdeps.gif.
open DOT, "| $DotPath -Tgif > objdeps.gif";
print DOT "digraph ObjDeps {\n";
print DOT " size=\"8,10\";\n";
print DOT " margin=\"0.25\";\n";
print DOT " rankdir=\"LR\";\n";
print DOT " mclimit=\"50.0\";\n";
print DOT " ordering=\"out\";\n";
print DOT " center=\"1\";\n";
print DOT "node [shape=\"box\",\n";
print DOT " color=\"#000088\",\n";
print DOT " fillcolor=\"#FFFACD\",\n";
print DOT " fontcolor=\"#3355BB\",\n";
print DOT " fontname=\"sans\",\n";
print DOT " style=\"filled\",\n";
print DOT " fontsize=\"24\"\n";
print DOT "];\n";
print DOT "edge [dir=\"forward\",style=\"solid\",color=\"#000088\"];\n";
}
# Print objects second
foreach my $obj (@objs) {
gen_one_entry($obj);
}
if (!$FLAT) {
print DOT "}\n";
close DOT;
# Print end tag of definition list element
print "</dl>\n";
}
```
|
Mirabilistrombus listeri, common name Lister's conch, is a species of sea snail, a marine gastropod mollusk in the family Strombidae, the true conchs.
Description
The shell size varies between 90 mm and 160 mm. When the animal is mature, the body whorl takes up over 50% of the shell's height. The pronounced projection at the end of the apertural lip is characteristic of the species (Dance, 2002).
Distribution
This species is distributed in the Northwest Indian Ocean and the Andaman Sea. It is a deep-water species. Formerly very uncommon, its population has been increasing in recent years (Dance, 2002).
References
Walls, J.G. (1980). Conchs, tibias and harps. A survey of the molluscan families Strombidae and Harpidae. T.F.H. Publications Ltd, Hong Kong
Rosenberg, G. 1992. Encyclopedia of Seashells. Dorset: New York. 224 pp. page(s): 65
Dance, S.P. (2002). Shells (Smithsonian Handbooks), 2nd ed. Dorling Kindersley: New York. 256 pp. page: 59
External links
Strombidae
Gastropods described in 1852
|
Salvador Fernández Beltrán D.J.C. (born in Matanzas, Cuba; died 1987 in Venezuela) was among the first in the Americas to receive Wood Badge training, at Gilwell Park, England. He was the first to receive honorary appointment as Deputy Camp Chief of Gilwell. With this influence, the Scouts of Cuba began to use short trousers like those worn by the English.
Fernandez Beltrán was known for his work within the Scouts of Cuba, but served World Scouting in several capacities. With the support of the Boy Scouts of America, he was appointed the International Bureau's Traveling Commissioner for Latin America in November 1947, the first professional Scout executive, whose operations center was first in Mexico and then in Cuba. He later served as General Secretary of the Inter-American Advisory Committee, assisting in the creation of the InterAmerican Scout Office, the divisional office of the World Scout Bureau of the World Organization of the Scout Movement, founded and maintained with headquarters in Havana from 1946 to 1960. In 1948 Fernández Beltrán and J. S. Wilson took a six-week tour of the Americas, for assessment and planning, covering 23,000 miles and 110 hours of flying time. In 1955 he held the first Wood Badge training in Venezuela. At the beginning of the 1960s Fernandez Beltrán was appointed to the office of Deputy Secretary of the World Organization of the Scout Movement in Geneva, Switzerland.
Fernández Beltrán was awarded the Bronze Wolf, the only distinction of the World Organization of the Scout Movement, awarded by the World Scout Committee for exceptional services to world Scouting, in 1957.
He retired to Venezuela where he died in 1987.
Published works
RSA. EL GRUPO SCOUT, Mexico, Scout Interamericana, April 1964, Escultismo, publicaciones periodicas
References
Scouting Round the World, John S. Wilson, first edition, Blandford Press 1959 p. 135 233-235 240 275 282-283
External links
http://ve.scouts-es.net/recursosadultos/galeria.html
https://web.archive.org/web/20091027121211/http://www.geocities.com/valerina_scout/scoutar/historia.html
World Scout Committee members
Scouting pioneers
Year of birth missing
1987 deaths
Recipients of the Bronze Wolf Award
Scouting and Guiding in Cuba
|
Budziszyn is a village in the administrative district of Gmina Chynów, within Grójec County, Masovian Voivodeship, in east-central Poland. It lies approximately south-west of Chynów, east of Grójec, and south of Warsaw.
References
Budziszyn
|
```php
<?php
/*
 * Copyright Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
namespace Google\Service\AnalyticsData;
/**
 * Data model for the Analytics Data API "SamplingMetadata" resource.
 *
 * Plain value object: two string-typed fields exposed through matching
 * getter/setter pairs, in the generated style of the Google API PHP client.
 * NOTE(review): field semantics below are inferred from the names —
 * confirm against the Analytics Data API reference.
 */
class SamplingMetadata extends \Google\Model
{
  /**
   * Count of samples read, carried as a string.
   *
   * @var string
   */
  public $samplesReadCount;
  /**
   * Size of the sampling space, carried as a string.
   *
   * @var string
   */
  public $samplingSpaceSize;
  /**
   * Sets the samples-read count.
   *
   * @param string $samplesReadCount
   */
  public function setSamplesReadCount($samplesReadCount)
  {
    $this->samplesReadCount = $samplesReadCount;
  }
  /**
   * Returns the samples-read count.
   *
   * @return string
   */
  public function getSamplesReadCount()
  {
    return $this->samplesReadCount;
  }
  /**
   * Sets the sampling-space size.
   *
   * @param string $samplingSpaceSize
   */
  public function setSamplingSpaceSize($samplingSpaceSize)
  {
    $this->samplingSpaceSize = $samplingSpaceSize;
  }
  /**
   * Returns the sampling-space size.
   *
   * @return string
   */
  public function getSamplingSpaceSize()
  {
    return $this->samplingSpaceSize;
  }
}
// Adding a class alias for backwards compatibility with the previous class name.
class_alias(SamplingMetadata::class, 'Google_Service_AnalyticsData_SamplingMetadata');
```
|
```javascript
'use strict';

// Shared test-harness helpers; common.mustCall() asserts that the wrapped
// callback is actually invoked before the process exits.
const common = require('../../common');

// Load the compiled native addon for the current build type (Debug/Release).
const binding = require(`./build/${common.buildType}/binding`);

// The native test returns a promise that must resolve; mustCall() turns a
// never-fired fulfillment handler into a test failure.
binding.testResolveAsync().then(common.mustCall());
```
|
St. Mary's is a cathedral-style church located at 566 Elm Street in Stamford, Connecticut. The church is part of the Roman Catholic Diocese of Bridgeport. The main building is a Gothic Revival structure, designed by Francis L. S. Mayers and completed about 1928. It is an elegant example of French Gothic architecture, notable for the large rose window in the front-facing gable end. The rectory is a c. 1860 Italianate villa, originally built for a member of the locally prominent Wardwell family.
The complex was added to the National Register of Historic Places in 1987.
St. Mary's Convent
The St. Mary's Convent is now the Monsignor McDermott Parish Center.
Gallery
See also
National Register of Historic Places listings in Stamford, Connecticut
References
External links
Roman Catholic Diocese of Bridgeport
St. Mary's Convent photo
Churches on the National Register of Historic Places in Connecticut
Gothic Revival church buildings in Connecticut
Italianate architecture in Connecticut
Roman Catholic churches completed in 1860
19th-century Roman Catholic church buildings in the United States
Roman Catholic churches in Stamford, Connecticut
National Register of Historic Places in Fairfield County, Connecticut
Italianate church buildings in the United States
|
```protocol buffer
// Go support for Protocol Buffers - Google's data interchange format
//
// path_to_url
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Proto3 test fixture for package test.a.
syntax = "proto3";
package test.a;
option go_package = "github.com/gogo/protobuf/protoc-gen-gogo/testdata/imports/test_a_2";
// M3 is deliberately empty: judging by the go_package path, only its
// existence and import path matter for the code generator's
// cross-package import tests — confirm against the testdata harness.
message M3 {}
```
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.