Add files using upload-large-folder tool
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/html/header.html +149 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/images/state_dia.dia +0 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/javaExample.md +628 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/releasenotes.md +267 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/basic.css +167 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/getBlank.js +40 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/print.css +54 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/profile.css +159 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/prototype.js +0 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperAdmin.md +0 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperCLI.md +573 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperMonitor.md +269 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperOver.md +336 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperProgrammers.md +1642 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperQuotas.md +85 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperReconfig.md +908 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperStarted.md +373 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperTools.md +698 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperTutorial.md +666 -0
- local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperUseCases.md +385 -0
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/html/header.html
ADDED
@@ -0,0 +1,149 @@
<!DOCTYPE html>
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>ZooKeeper: Because Coordinating Distributed Systems is a Zoo</title>
<link type="text/css" href="skin/basic.css" rel="stylesheet">
<link media="screen" type="text/css" href="skin/screen.css" rel="stylesheet">
<link media="print" type="text/css" href="skin/print.css" rel="stylesheet">
<link type="text/css" href="skin/profile.css" rel="stylesheet">
<script src="skin/getBlank.js" language="javascript" type="text/javascript"></script>
<script src="skin/getMenu.js" language="javascript" type="text/javascript"></script>
<script src="skin/init.js" language="javascript" type="text/javascript"></script>
<link rel="shortcut icon" href="images/favicon.ico">
</head>
<body onload="init();">
<div id="top">
<div class="breadtrail">
<a href="http://www.apache.org/">Apache</a> > <a href="http://zookeeper.apache.org/">ZooKeeper</a>
</div>
<div class="header">
<div class="projectlogo">
<a href="http://zookeeper.apache.org/"><img class="logoImage" alt="ZooKeeper" src="images/zookeeper_small.gif" title="ZooKeeper: distributed coordination"></a>
</div>
<div class="searchbox">
<form action="http://www.google.com/search" method="get">
<input value="zookeeper.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">
<input name="Search" value="Search" type="submit">
</form>
</div>
<ul id="tabs">
<li>
<a class="unselected" href="http://zookeeper.apache.org/">Project</a>
</li>
<li>
<a class="unselected" href="https://cwiki.apache.org/confluence/display/ZOOKEEPER/">Wiki</a>
</li>
<li class="current">
<a class="selected" href="index.html">ZooKeeper 3.8 Documentation</a>
</li>
</ul>
</div>
</div>
<div id="main">
<div id="publishedStrip">
<div id="level2tabs"></div>
<script type="text/javascript"><!--
document.write("Last Published: " + document.lastModified);
// --></script>
</div>
<div class="breadtrail">

</div>
<div id="menu">
<div onclick="SwitchMenu('menu_1', 'skin/')" id="menu_1Title" class="menutitle">Overview</div>
<div id="menu_1" class="menuitemgroup">
<div class="menuitem">
<a href="index.html">Welcome</a>
</div>
<div class="menuitem">
<a href="zookeeperOver.html">Overview</a>
</div>
<div class="menuitem">
<a href="zookeeperStarted.html">Getting Started</a>
</div>
<div class="menuitem">
<a href="releasenotes.html">Release Notes</a>
</div>
</div>
<div onclick="SwitchMenu('menu_2', 'skin/')" id="menu_2Title" class="menutitle">Developer</div>
<div id="menu_2" class="menuitemgroup">
<div class="menuitem">
<a href="apidocs/zookeeper-server/index.html">API Docs</a>
</div>
<div class="menuitem">
<a href="zookeeperProgrammers.html">Programmer's Guide</a>
</div>
<div class="menuitem">
<a href="zookeeperUseCases.html">Use Cases</a>
</div>
<div class="menuitem">
<a href="javaExample.html">Java Example</a>
</div>
<div class="menuitem">
<a href="zookeeperTutorial.html">Barrier and Queue Tutorial</a>
</div>
<div class="menuitem">
<a href="recipes.html">Recipes</a>
</div>
</div>
<div onclick="SwitchMenu('menu_3', 'skin/')" id="menu_3Title" class="menutitle">Admin & Ops</div>
<div id="menu_3" class="menuitemgroup">
<div class="menuitem">
<a href="zookeeperAdmin.html">Administrator's Guide</a>
</div>
<div class="menuitem">
<a href="zookeeperQuotas.html">Quota Guide</a>
</div>
<div class="menuitem">
<a href="zookeeperSnapshotAndRestore.html">Snapshot and Restore Guide</a>
</div>
<div class="menuitem">
<a href="zookeeperJMX.html">JMX</a>
</div>
<div class="menuitem">
<a href="zookeeperHierarchicalQuorums.html">Hierarchical Quorums</a>
</div>
<div class="menuitem">
<a href="zookeeperOracleQuorums.html">Oracle Quorum</a>
</div>
<div class="menuitem">
<a href="zookeeperObservers.html">Observers Guide</a>
</div>
<div class="menuitem">
<a href="zookeeperReconfig.html">Dynamic Reconfiguration</a>
</div>
<div class="menuitem">
<a href="zookeeperCLI.html">ZooKeeper CLI</a>
</div>
<div class="menuitem">
<a href="zookeeperTools.html">ZooKeeper Tools</a>
</div>
<div class="menuitem">
<a href="zookeeperMonitor.html">ZooKeeper Monitor</a>
</div>
<div class="menuitem">
<a href="zookeeperAuditLogs.html">Audit Logs</a>
</div>
</div>
<div onclick="SwitchMenu('menu_4', 'skin/')" id="menu_4Title" class="menutitle">Contributor</div>
<div id="menu_4" class="menuitemgroup">
<div class="menuitem">
<a href="zookeeperInternals.html">ZooKeeper Internals</a>
</div>
</div>
<div onclick="SwitchMenu('menu_5', 'skin/')" id="menu_5Title" class="menutitle">Miscellaneous</div>
<div id="menu_5" class="menuitemgroup">
<div class="menuitem">
<a href="https://cwiki.apache.org/confluence/display/ZOOKEEPER">Wiki</a>
</div>
<div class="menuitem">
<a href="https://cwiki.apache.org/confluence/display/ZOOKEEPER/FAQ">FAQ</a>
</div>
<div class="menuitem">
<a href="http://zookeeper.apache.org/mailing_lists.html">Mailing Lists</a>
</div>
</div>
</div>
<div id="content">
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/images/state_dia.dia
ADDED
Binary file (2.6 kB).
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/javaExample.md
ADDED
@@ -0,0 +1,628 @@
<!--
Copyright 2002-2004 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper Java Example

* [A Simple Watch Client](#ch_Introduction)
    * [Requirements](#sc_requirements)
    * [Program Design](#sc_design)
* [The Executor Class](#sc_executor)
* [The DataMonitor Class](#sc_DataMonitor)
* [Complete Source Listings](#sc_completeSourceCode)

<a name="ch_Introduction"></a>

## A Simple Watch Client

To introduce you to the ZooKeeper Java API, we develop here a very simple
watch client. This ZooKeeper client watches a znode for changes
and responds by starting or stopping a program.

<a name="sc_requirements"></a>

### Requirements

The client has four requirements:

* It takes as parameters:
    * the address of the ZooKeeper service
    * the name of a znode - the one to be watched
    * the name of a file to write the output to
    * an executable with arguments.
* It fetches the data associated with the znode and starts the executable.
* If the znode changes, the client re-fetches the contents and restarts the executable.
* If the znode disappears, the client kills the executable.
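For example, once the classes are compiled and on the classpath, an invocation might look like the following (the server address, znode name, output file, and program are all illustrative, not part of the sample itself):

    java Executor localhost:2181 /watchedNode /tmp/watched.out cat /tmp/watched.out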

<a name="sc_design"></a>

### Program Design

Conventionally, ZooKeeper applications are broken into two units, one which maintains the connection,
and the other which monitors data. In this application, the class called the **Executor**
maintains the ZooKeeper connection, and the class called the **DataMonitor** monitors the data
in the ZooKeeper tree. The Executor also contains the main thread and the execution logic.
It is responsible for what little user interaction there is, as well as interaction with the executable program you
pass in as an argument and which the sample (per the requirements) shuts down and restarts, according to the
state of the znode.

<a name="sc_executor"></a>

## The Executor Class

The Executor object is the primary container of the sample application. It contains
both the **ZooKeeper** object and the **DataMonitor**, as described above in
[Program Design](#sc_design).

    // from the Executor class...

    public static void main(String[] args) {
        if (args.length < 4) {
            System.err
                    .println("USAGE: Executor hostPort znode filename program [args ...]");
            System.exit(2);
        }
        String hostPort = args[0];
        String znode = args[1];
        String filename = args[2];
        String exec[] = new String[args.length - 3];
        System.arraycopy(args, 3, exec, 0, exec.length);
        try {
            new Executor(hostPort, znode, filename, exec).run();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public Executor(String hostPort, String znode, String filename,
            String exec[]) throws KeeperException, IOException {
        this.filename = filename;
        this.exec = exec;
        zk = new ZooKeeper(hostPort, 3000, this);
        dm = new DataMonitor(zk, znode, null, this);
    }

    public void run() {
        try {
            synchronized (this) {
                while (!dm.dead) {
                    wait();
                }
            }
        } catch (InterruptedException e) {
        }
    }

Recall that the Executor's job is to start and stop the executable whose name you pass in on the command line.
It does this in response to events fired by the ZooKeeper object. As you can see in the code above, the Executor passes
a reference to itself as the Watcher argument in the ZooKeeper constructor. It also passes a reference to itself
as the DataMonitorListener argument to the DataMonitor constructor. Per the Executor's definition, it implements both these
interfaces:

    public class Executor implements Watcher, Runnable, DataMonitor.DataMonitorListener {
    ...

The **Watcher** interface is defined by the ZooKeeper Java API.
ZooKeeper uses it to communicate back to its container. It supports only one method, `process()`,
and ZooKeeper uses it to communicate generic events that the main thread would be interested in,
such as the state of the ZooKeeper connection or the ZooKeeper session. The Executor in this example simply
forwards those events down to the DataMonitor to decide what to do with them. It does this simply to illustrate
the point that, by convention, the Executor or some Executor-like object "owns" the ZooKeeper connection, but it is
free to delegate the events to other objects. It also uses this as the default channel on which
to fire watch events. (More on this later.)

    public void process(WatchedEvent event) {
        dm.process(event);
    }

The **DataMonitorListener**
interface, on the other hand, is not part of the ZooKeeper API. It is a completely custom interface,
designed for this sample application. The DataMonitor object uses it to communicate back to its container, which
is also the Executor object. The DataMonitorListener interface looks like this:

    public interface DataMonitorListener {
        /**
         * The existence status of the node has changed.
         */
        void exists(byte data[]);

        /**
         * The ZooKeeper session is no longer valid.
         *
         * @param rc
         *            the ZooKeeper reason code
         */
        void closing(int rc);
    }

This interface is defined in the DataMonitor class and implemented in the Executor class.
When `Executor.exists()` is invoked, the Executor decides whether to start up or shut down per the requirements.
Recall that the requirements say to kill the executable when the znode ceases to _exist_.

When `Executor.closing()` is invoked, the Executor decides whether or not to shut itself down
in response to the ZooKeeper connection permanently disappearing.

As you might have guessed, DataMonitor is the object that invokes
these methods, in response to changes in ZooKeeper's state.

Here are the Executor's implementations of
`DataMonitorListener.exists()` and `DataMonitorListener.closing()`:

    public void exists(byte[] data) {
        if (data == null) {
            if (child != null) {
                System.out.println("Killing process");
                child.destroy();
                try {
                    child.waitFor();
                } catch (InterruptedException e) {
                }
            }
            child = null;
        } else {
            if (child != null) {
                System.out.println("Stopping child");
                child.destroy();
                try {
                    child.waitFor();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            try {
                FileOutputStream fos = new FileOutputStream(filename);
                fos.write(data);
                fos.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
            try {
                System.out.println("Starting child");
                child = Runtime.getRuntime().exec(exec);
                new StreamWriter(child.getInputStream(), System.out);
                new StreamWriter(child.getErrorStream(), System.err);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public void closing(int rc) {
        synchronized (this) {
            notifyAll();
        }
    }

<a name="sc_DataMonitor"></a>

## The DataMonitor Class

The DataMonitor class has the meat of the ZooKeeper logic. It is mostly
asynchronous and event driven. DataMonitor kicks things off in the constructor with:

    public DataMonitor(ZooKeeper zk, String znode, Watcher chainedWatcher,
            DataMonitorListener listener) {
        this.zk = zk;
        this.znode = znode;
        this.chainedWatcher = chainedWatcher;
        this.listener = listener;
        // Get things started by checking if the node exists. We are going
        // to be completely event driven
        zk.exists(znode, true, this, null);
    }

The call to `ZooKeeper.exists()` checks for the existence of the znode,
sets a watch, and passes a reference to itself (`this`)
as the completion callback object. In this sense, it kicks things off, since the
real processing happens when the watch is triggered.

###### Note

>Don't confuse the completion callback with the watch callback. The `ZooKeeper.exists()`
completion callback, which happens to be the method `StatCallback.processResult()` implemented
in the DataMonitor object, is invoked when the asynchronous _setting of the watch_ operation
(by `ZooKeeper.exists()`) completes on the server.

>The triggering of the watch, on the other hand, sends an event to the _Executor_ object, since
the Executor registered as the Watcher of the ZooKeeper object.

>As an aside, you might note that the DataMonitor could also register itself as the Watcher
for this particular watch event. This is new to ZooKeeper 3.0.0 (the support of multiple Watchers). In this
example, however, DataMonitor does not register as the Watcher.

When the `ZooKeeper.exists()` operation completes on the server, the ZooKeeper API invokes this completion callback on
the client:

    public void processResult(int rc, String path, Object ctx, Stat stat) {
        boolean exists;
        switch (rc) {
        case Code.Ok:
            exists = true;
            break;
        case Code.NoNode:
            exists = false;
            break;
        case Code.SessionExpired:
        case Code.NoAuth:
            dead = true;
            listener.closing(rc);
            return;
        default:
            // Retry errors
            zk.exists(znode, true, this, null);
            return;
        }

        byte b[] = null;
        if (exists) {
            try {
                b = zk.getData(znode, false, null);
            } catch (KeeperException e) {
                // We don't need to worry about recovering now. The watch
                // callbacks will kick off any exception handling
                e.printStackTrace();
            } catch (InterruptedException e) {
                return;
            }
        }
        if ((b == null && b != prevData)
                || (b != null && !Arrays.equals(prevData, b))) {
            listener.exists(b);
            prevData = b;
        }
    }

The code first checks the error codes for znode existence, fatal errors, and
recoverable errors. If the file (or znode) exists, it gets the data from the znode, and
then invokes the exists() callback of Executor if the state has changed. Note,
it doesn't have to do any exception processing for the getData call because it
has watches pending for anything that could cause an error: if the node is deleted
before it calls `ZooKeeper.getData()`, the watch event set by
the `ZooKeeper.exists()` triggers a callback;
if there is a communication error, a connection watch event fires when
the connection comes back up.

Finally, notice how DataMonitor processes watch events:

    public void process(WatchedEvent event) {
        String path = event.getPath();
        if (event.getType() == Event.EventType.None) {
            // We are being told that the state of the
            // connection has changed
            switch (event.getState()) {
            case SyncConnected:
                // In this particular example we don't need to do anything
                // here - watches are automatically re-registered with
                // server and any watches triggered while the client was
                // disconnected will be delivered (in order of course)
                break;
            case Expired:
                // It's all over
                dead = true;
                listener.closing(KeeperException.Code.SessionExpired);
                break;
            }
        } else {
            if (path != null && path.equals(znode)) {
                // Something has changed on the node, let's find out
                zk.exists(znode, true, this, null);
            }
        }
        if (chainedWatcher != null) {
            chainedWatcher.process(event);
        }
    }

If the client-side ZooKeeper libraries can re-establish the
communication channel (SyncConnected event) to ZooKeeper before
session expiration (Expired event) all of the session's watches will
automatically be re-established with the server (auto-reset of watches
is new in ZooKeeper 3.0.0). See [ZooKeeper Watches](zookeeperProgrammers.html#ch_zkWatches)
in the programmer guide for more on this. A bit lower down in this
function, when DataMonitor gets an event for a znode, it calls `ZooKeeper.exists()` to find out what has changed.
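One consequence worth noting: an Expired session cannot be revived on the same handle. This sample simply shuts down, but a long-lived application would have to open a brand-new session; a hedged sketch, assuming the original constructor arguments were kept around:

    // hedged sketch (not in the sample): replace the dead handle with a new
    // session, then restart monitoring from scratch
    zk = new ZooKeeper(hostPort, 3000, this);
    dm = new DataMonitor(zk, znode, null, this);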

<a name="sc_completeSourceCode"></a>

## Complete Source Listings

### Executor.java

    /**
     * A simple example program to use DataMonitor to start and
     * stop executables based on a znode. The program watches the
     * specified znode and saves the data that corresponds to the
     * znode in the filesystem. It also starts the specified program
     * with the specified arguments when the znode exists and kills
     * the program if the znode goes away.
     */
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class Executor
        implements Watcher, Runnable, DataMonitor.DataMonitorListener
    {
        String znode;
        DataMonitor dm;
        ZooKeeper zk;
        String filename;
        String exec[];
        Process child;

        public Executor(String hostPort, String znode, String filename,
                String exec[]) throws KeeperException, IOException {
            this.filename = filename;
            this.exec = exec;
            zk = new ZooKeeper(hostPort, 3000, this);
            dm = new DataMonitor(zk, znode, null, this);
        }

        /**
         * @param args
         */
        public static void main(String[] args) {
            if (args.length < 4) {
                System.err
                        .println("USAGE: Executor hostPort znode filename program [args ...]");
                System.exit(2);
            }
            String hostPort = args[0];
            String znode = args[1];
            String filename = args[2];
            String exec[] = new String[args.length - 3];
            System.arraycopy(args, 3, exec, 0, exec.length);
            try {
                new Executor(hostPort, znode, filename, exec).run();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        /***************************************************************************
         * We don't process any events ourselves, we just need to forward them on.
         *
         * @see org.apache.zookeeper.Watcher#process(org.apache.zookeeper.proto.WatcherEvent)
         */
        public void process(WatchedEvent event) {
            dm.process(event);
        }

        public void run() {
            try {
                synchronized (this) {
                    while (!dm.dead) {
                        wait();
                    }
                }
            } catch (InterruptedException e) {
            }
        }

        public void closing(int rc) {
            synchronized (this) {
                notifyAll();
            }
        }

        static class StreamWriter extends Thread {
            OutputStream os;

            InputStream is;

            StreamWriter(InputStream is, OutputStream os) {
                this.is = is;
                this.os = os;
                start();
            }

            public void run() {
                byte b[] = new byte[80];
                int rc;
                try {
                    while ((rc = is.read(b)) > 0) {
                        os.write(b, 0, rc);
                    }
                } catch (IOException e) {
                }
            }
        }

        public void exists(byte[] data) {
            if (data == null) {
                if (child != null) {
                    System.out.println("Killing process");
                    child.destroy();
                    try {
                        child.waitFor();
                    } catch (InterruptedException e) {
                    }
                }
                child = null;
            } else {
                if (child != null) {
                    System.out.println("Stopping child");
                    child.destroy();
                    try {
                        child.waitFor();
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
                try {
                    FileOutputStream fos = new FileOutputStream(filename);
                    fos.write(data);
                    fos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                try {
                    System.out.println("Starting child");
                    child = Runtime.getRuntime().exec(exec);
                    new StreamWriter(child.getInputStream(), System.out);
                    new StreamWriter(child.getErrorStream(), System.err);
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }

### DataMonitor.java

    /**
     * A simple class that monitors the data and existence of a ZooKeeper
     * node. It uses asynchronous ZooKeeper APIs.
     */
    import java.util.Arrays;

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.AsyncCallback.StatCallback;
    import org.apache.zookeeper.KeeperException.Code;
    import org.apache.zookeeper.data.Stat;

    public class DataMonitor implements Watcher, StatCallback {

        ZooKeeper zk;
        String znode;
        Watcher chainedWatcher;
        boolean dead;
        DataMonitorListener listener;
        byte prevData[];

        public DataMonitor(ZooKeeper zk, String znode, Watcher chainedWatcher,
                DataMonitorListener listener) {
            this.zk = zk;
            this.znode = znode;
            this.chainedWatcher = chainedWatcher;
            this.listener = listener;
            // Get things started by checking if the node exists. We are going
            // to be completely event driven
            zk.exists(znode, true, this, null);
        }

        /**
         * Other classes use the DataMonitor by implementing this method
         */
        public interface DataMonitorListener {
            /**
             * The existence status of the node has changed.
             */
            void exists(byte data[]);

            /**
             * The ZooKeeper session is no longer valid.
             *
             * @param rc
             *            the ZooKeeper reason code
             */
            void closing(int rc);
        }

        public void process(WatchedEvent event) {
            String path = event.getPath();
            if (event.getType() == Event.EventType.None) {
                // We are being told that the state of the
                // connection has changed
                switch (event.getState()) {
                case SyncConnected:
                    // In this particular example we don't need to do anything
                    // here - watches are automatically re-registered with
                    // server and any watches triggered while the client was
                    // disconnected will be delivered (in order of course)
                    break;
                case Expired:
                    // It's all over
                    dead = true;
                    listener.closing(KeeperException.Code.SessionExpired);
                    break;
                }
            } else {
                if (path != null && path.equals(znode)) {
                    // Something has changed on the node, let's find out
                    zk.exists(znode, true, this, null);
                }
            }
            if (chainedWatcher != null) {
                chainedWatcher.process(event);
            }
        }

        public void processResult(int rc, String path, Object ctx, Stat stat) {
            boolean exists;
            switch (rc) {
            case Code.Ok:
                exists = true;
                break;
            case Code.NoNode:
                exists = false;
                break;
            case Code.SessionExpired:
            case Code.NoAuth:
                dead = true;
                listener.closing(rc);
                return;
            default:
                // Retry errors
                zk.exists(znode, true, this, null);
                return;
            }

            byte b[] = null;
            if (exists) {
                try {
                    b = zk.getData(znode, false, null);
                } catch (KeeperException e) {
                    // We don't need to worry about recovering now. The watch
                    // callbacks will kick off any exception handling
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    return;
                }
            }
            if ((b == null && b != prevData)
                    || (b != null && !Arrays.equals(prevData, b))) {
                listener.exists(b);
                prevData = b;
            }
        }
    }
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/releasenotes.md
ADDED
@@ -0,0 +1,267 @@
<!--
Copyright 2002-2004 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper 3.0.0 Release Notes

* [Migration Instructions when Upgrading to 3.0.0](#migration)
    * [Migrating Client Code](#migration_code)
        * [Watch Management](#Watch+Management)
        * [Java API](#Java+API)
        * [C API](#C+API)
    * [Migrating Server Data](#migration_data)
    * [Migrating Server Configuration](#migration_config)
* [Changes Since ZooKeeper 2.2.1](#changes)

These release notes include new developer and user facing incompatibilities, features, and major improvements.

* [Migration Instructions](#migration)
* [Changes](#changes)

<a name="migration"></a>
## Migration Instructions when Upgrading to 3.0.0

*You should only have to read this section if you are upgrading from a previous version of ZooKeeper to version 3.0.0; otherwise skip down to [changes](#changes).*

A small number of changes in this release have resulted in non-backward-compatible ZooKeeper client user code and server instance data. The following instructions provide details on how to migrate code and data from version 2.2.1 to version 3.0.0.

Note: ZooKeeper increments the major version number (major.minor.fix) when backward incompatible changes are made to the source base. As part of the migration from SourceForge we changed the package structure (`com.yahoo.zookeeper.*` to `org.apache.zookeeper.*`) and felt it was a good time to incorporate some changes that we had been withholding. As a result, the following will be required when migrating from the 2.2.1 to the 3.0.0 version of ZooKeeper.

* [Migrating Client Code](#migration_code)
* [Migrating Server Data](#migration_data)
* [Migrating Server Configuration](#migration_config)

<a name="migration_code"></a>
### Migrating Client Code

The underlying client-server protocol has changed in version 3.0.0
of ZooKeeper. As a result clients must be upgraded along with
serving clusters to ensure proper operation of the system (old
pre-3.0.0 clients are not guaranteed to operate against upgraded
3.0.0 servers and vice-versa).

<a name="Watch+Management"></a>
#### Watch Management

In previous releases of ZooKeeper any watches registered by clients were lost if the client lost a connection to a ZooKeeper server.
This meant that developers had to track the watches they were interested in and re-register them if a session disconnect event was received.
In this release the client library tracks the watches that a client has registered and re-registers them when a connection is made to a new server.
Applications that still manually re-register interest should continue working properly as long as they are able to handle unsolicited watches.
For example, an old application may register watches for /foo and /goo, lose the connection, and re-register only /goo.
As long as the application is able to receive a notification for /foo (probably ignoring it), it does not need to be changed.
One caveat to the watch management: it is possible to miss an event for the creation and deletion of a znode if you are watching for creation and both the create and the delete happen while the client is disconnected from ZooKeeper.

This release also allows clients to specify call-specific watch functions, as sketched below.
This gives the developer the ability to modularize logic in different watch functions rather than cramming everything in the watch function attached to the ZooKeeper handle.
Call-specific watch functions receive all session events for as long as they are active, but will only receive the watch callbacks for which they are registered.
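A minimal sketch of a call-specific watch function with the Java client (the path and handler body here are illustrative only):

    // hedged sketch: this Watcher receives the watch callback for this call
    // (plus session events); the handle's default watcher is left in place
    Watcher fooWatcher = new Watcher() {
        public void process(WatchedEvent event) {
            System.out.println("watch fired for /foo: " + event.getType());
        }
    };
    Stat stat = zk.exists("/foo", fooWatcher);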

<a name="Java+API"></a>
#### Java API

1. The java package structure has changed from `com.yahoo.zookeeper.*` to `org.apache.zookeeper.*`. This will probably affect all of your java code which makes use of ZooKeeper APIs (typically import statements); see the sketch below this list.
1. A number of constants used in the client ZooKeeper API were re-specified using enums (rather than ints). See [ZOOKEEPER-7](https://issues.apache.org/jira/browse/ZOOKEEPER-7), [ZOOKEEPER-132](https://issues.apache.org/jira/browse/ZOOKEEPER-132) and [ZOOKEEPER-139](https://issues.apache.org/jira/browse/ZOOKEEPER-139) for full details.
1. [ZOOKEEPER-18](https://issues.apache.org/jira/browse/ZOOKEEPER-18) removed KeeperStateChanged; use KeeperStateDisconnected instead.
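In practice, the package rename is usually a mechanical edit to import statements, roughly:

    // before (2.2.1):
    //     import com.yahoo.zookeeper.ZooKeeper;
    // after (3.0.0):
    import org.apache.zookeeper.ZooKeeper;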

Also see [the current Java API](http://zookeeper.apache.org/docs/current/apidocs/zookeeper-server/index.html).

<a name="C+API"></a>
#### C API

1. A number of constants used in the client ZooKeeper API were renamed in order to reduce namespace collisions; see [ZOOKEEPER-6](https://issues.apache.org/jira/browse/ZOOKEEPER-6) for full details.

<a name="migration_data"></a>
### Migrating Server Data

The following issues resulted in changes to the on-disk data format (the snapshot and transaction log files contained within the ZK data directory) and require a migration utility to be run.

* [ZOOKEEPER-27 Unique DB identifiers for servers and clients](https://issues.apache.org/jira/browse/ZOOKEEPER-27)
* [ZOOKEEPER-32 CRCs for ZooKeeper data](https://issues.apache.org/jira/browse/ZOOKEEPER-32)
* [ZOOKEEPER-33 Better ACL management](https://issues.apache.org/jira/browse/ZOOKEEPER-33)
* [ZOOKEEPER-38 headers (version+) in log/snap files](https://issues.apache.org/jira/browse/ZOOKEEPER-38)

**The following must be run once, and only once, when upgrading the ZooKeeper server instances to version 3.0.0.**

###### Note

> The `<dataLogDir>` and `<dataDir>` directories referenced below are specified by the *dataLogDir*
and *dataDir* settings in your ZooKeeper config file respectively. *dataLogDir* defaults to
the value of *dataDir* if not specified explicitly in the ZooKeeper server config file (in which
case provide the same directory for both parameters to the upgrade utility).

1. Shutdown the ZooKeeper server cluster.
1. Backup your `<dataLogDir>` and `<dataDir>` directories.
1. Run upgrade using
    * `bin/zkServer.sh upgrade <dataLogDir> <dataDir>`

    or

    * `java -classpath pathtolog4j:pathtozookeeper.jar UpgradeMain <dataLogDir> <dataDir>`

    where `<dataLogDir>` is the directory where all transaction logs (log.*) are stored and `<dataDir>` is the directory where all the snapshots (snapshot.*) are stored.
1. Restart the cluster.

If you have any failure during the upgrade procedure, keep reading to learn how to sanitize your database.

This is how upgrade works in ZooKeeper; understanding it will help you troubleshoot in case you have problems while upgrading:

1. Upgrade moves files from `<dataLogDir>` and `<dataDir>` to `<dataLogDir>/version-1/` and `<dataDir>/version-1` respectively (the version-1 sub-directory is created by the upgrade utility).
1. Upgrade creates the new version sub-directories `<dataDir>/version-2` and `<dataLogDir>/version-2`.
1. Upgrade reads the old database from `<dataDir>/version-1` and `<dataLogDir>/version-1` into memory and creates a new upgraded snapshot.
1. Upgrade writes the new database in `<dataDir>/version-2`.

Troubleshooting:

1. If you start ZooKeeper 3.0 on a 2.0 database without upgrading, the servers will start up with an empty database.
This is because the servers assume that `<dataDir>/version-2` and `<dataLogDir>/version-2` will have the database to start with. Since these will be empty
if no upgrade was run, the servers will start with an empty database. In such a case, shutdown the ZooKeeper servers, remove the version-2 directory (remember,
this will lead to loss of updates made after you started 3.0)
and then start the upgrade procedure.
1. If the upgrade fails while trying to rename files into the version-1 directory, you should try to move all the files under `<dataDir>/version-1`
and `<dataLogDir>/version-1` back to `<dataDir>` and `<dataLogDir>` respectively, then try the upgrade again.
1. If you do not wish to run with ZooKeeper 3.0, prefer to run with ZooKeeper 2.0, and have already upgraded, you can run ZooKeeper 2 with
the `<dataDir>` and `<dataLogDir>` directories changed to `<dataDir>/version-1` and `<dataLogDir>/version-1`. Remember that you will lose all the updates that you made after the upgrade.

<a name="migration_config"></a>
### Migrating Server Configuration

There is a significant change to the ZooKeeper server configuration file.

The default election algorithm, specified by the *electionAlg* configuration attribute, has
changed from a default of *0* to a default of *3*. See the
[Cluster Options](zookeeperAdmin.html#sc_clusterOptions) section of the administrator's guide, specifically
the *electionAlg* and *server.X* properties.

You will either need to explicitly set *electionAlg* to its previous default value
of *0* or change your *server.X* options to include the leader election port.
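As a hedged illustration, a *server.X* entry carrying the leader election port for *electionAlg=3* looks roughly like the following (hostnames and ports are examples only, not prescriptive values):

    # illustrative zoo.cfg fragment: host:quorumPort:electionPort
    electionAlg=3
    server.1=zoo1:2888:3888
    server.2=zoo2:2888:3888
    server.3=zoo3:2888:3888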

<a name="changes"></a>
## Changes Since ZooKeeper 2.2.1

Version 2.2.1 code, documentation, binaries, etc... are still accessible on [SourceForge](http://sourceforge.net/projects/zookeeper).

| Issue | Notes |
|-------|-------|
|[ZOOKEEPER-43](https://issues.apache.org/jira/browse/ZOOKEEPER-43)|Server side of auto reset watches.|
|[ZOOKEEPER-132](https://issues.apache.org/jira/browse/ZOOKEEPER-132)|Create Enum to replace CreateFlag in ZooKeeper.create method|
|[ZOOKEEPER-139](https://issues.apache.org/jira/browse/ZOOKEEPER-139)|Create Enums for WatcherEvent's KeeperState and EventType|
|[ZOOKEEPER-18](https://issues.apache.org/jira/browse/ZOOKEEPER-18)|keeper state inconsistency|
|[ZOOKEEPER-38](https://issues.apache.org/jira/browse/ZOOKEEPER-38)|headers in log/snap files|
|[ZOOKEEPER-8](https://issues.apache.org/jira/browse/ZOOKEEPER-8)|Stat enhanced to include num of children and size|
|[ZOOKEEPER-6](https://issues.apache.org/jira/browse/ZOOKEEPER-6)|List of problem identifiers in zookeeper.h|
|[ZOOKEEPER-7](https://issues.apache.org/jira/browse/ZOOKEEPER-7)|Use enums rather than ints for types and state|
|[ZOOKEEPER-27](https://issues.apache.org/jira/browse/ZOOKEEPER-27)|Unique DB identifiers for servers and clients|
|[ZOOKEEPER-32](https://issues.apache.org/jira/browse/ZOOKEEPER-32)|CRCs for ZooKeeper data|
|[ZOOKEEPER-33](https://issues.apache.org/jira/browse/ZOOKEEPER-33)|Better ACL management|
|[ZOOKEEPER-203](https://issues.apache.org/jira/browse/ZOOKEEPER-203)|fix datadir typo in releasenotes|
|[ZOOKEEPER-145](https://issues.apache.org/jira/browse/ZOOKEEPER-145)|write detailed release notes for users migrating from 2.x to 3.0|
|[ZOOKEEPER-23](https://issues.apache.org/jira/browse/ZOOKEEPER-23)|Auto reset of watches on reconnect|
|[ZOOKEEPER-191](https://issues.apache.org/jira/browse/ZOOKEEPER-191)|forrest docs for upgrade.|
|[ZOOKEEPER-201](https://issues.apache.org/jira/browse/ZOOKEEPER-201)|validate magic number when reading snapshot and transaction logs|
|[ZOOKEEPER-200](https://issues.apache.org/jira/browse/ZOOKEEPER-200)|the magic number for snapshot and log must be different|
|[ZOOKEEPER-199](https://issues.apache.org/jira/browse/ZOOKEEPER-199)|fix log messages in persistence code|
|[ZOOKEEPER-197](https://issues.apache.org/jira/browse/ZOOKEEPER-197)|create checksums for snapshots|
|[ZOOKEEPER-198](https://issues.apache.org/jira/browse/ZOOKEEPER-198)|apache license header missing from FollowerSyncRequest.java|
|[ZOOKEEPER-5](https://issues.apache.org/jira/browse/ZOOKEEPER-5)|Upgrade Feature in Zookeeper server.|
|[ZOOKEEPER-194](https://issues.apache.org/jira/browse/ZOOKEEPER-194)|Fix terminology in zookeeperAdmin.xml|
|[ZOOKEEPER-151](https://issues.apache.org/jira/browse/ZOOKEEPER-151)|Document change to server configuration|
|[ZOOKEEPER-193](https://issues.apache.org/jira/browse/ZOOKEEPER-193)|update java example doc to compile with latest zookeeper|
|[ZOOKEEPER-187](https://issues.apache.org/jira/browse/ZOOKEEPER-187)|CreateMode api docs missing|
|[ZOOKEEPER-186](https://issues.apache.org/jira/browse/ZOOKEEPER-186)|add new "releasenotes.xml" to forrest documentation|
|[ZOOKEEPER-190](https://issues.apache.org/jira/browse/ZOOKEEPER-190)|Reorg links to docs and navs to docs into related sections|
|[ZOOKEEPER-189](https://issues.apache.org/jira/browse/ZOOKEEPER-189)|forrest build not validated xml of input documents|
|[ZOOKEEPER-188](https://issues.apache.org/jira/browse/ZOOKEEPER-188)|Check that election port is present for all servers|
|[ZOOKEEPER-185](https://issues.apache.org/jira/browse/ZOOKEEPER-185)|Improved version of FLETest|
|[ZOOKEEPER-184](https://issues.apache.org/jira/browse/ZOOKEEPER-184)|tests: An explicit include directive is needed for the usage of memcpy functions|
|[ZOOKEEPER-183](https://issues.apache.org/jira/browse/ZOOKEEPER-183)|Array subscript is above array bounds in od_completion, src/cli.c.|
|[ZOOKEEPER-182](https://issues.apache.org/jira/browse/ZOOKEEPER-182)|zookeeper_init accepts empty host-port string and returns valid pointer to zhandle_t.|
|[ZOOKEEPER-17](https://issues.apache.org/jira/browse/ZOOKEEPER-17)|zookeeper_init doc needs clarification|
|[ZOOKEEPER-181](https://issues.apache.org/jira/browse/ZOOKEEPER-181)|Some Source Forge Documents did not get moved over: javaExample, zookeeperTutorial, zookeeperInternals|
|[ZOOKEEPER-180](https://issues.apache.org/jira/browse/ZOOKEEPER-180)|Placeholder sections needed in document for new topics that the umbrella jira discusses|
|[ZOOKEEPER-179](https://issues.apache.org/jira/browse/ZOOKEEPER-179)|Programmer's Guide "Basic Operations" section is missing content|
|[ZOOKEEPER-178](https://issues.apache.org/jira/browse/ZOOKEEPER-178)|FLE test.|
|[ZOOKEEPER-159](https://issues.apache.org/jira/browse/ZOOKEEPER-159)|Cover two corner cases of leader election|
|[ZOOKEEPER-156](https://issues.apache.org/jira/browse/ZOOKEEPER-156)|update programmer guide with acl details from old wiki page|
|[ZOOKEEPER-154](https://issues.apache.org/jira/browse/ZOOKEEPER-154)|reliability graph diagram in overview doc needs context|
|[ZOOKEEPER-157](https://issues.apache.org/jira/browse/ZOOKEEPER-157)|Peer can't find existing leader|
|[ZOOKEEPER-155](https://issues.apache.org/jira/browse/ZOOKEEPER-155)|improve "the zookeeper project" section of overview doc|
|[ZOOKEEPER-140](https://issues.apache.org/jira/browse/ZOOKEEPER-140)|Deadlock in QuorumCnxManager|
|[ZOOKEEPER-147](https://issues.apache.org/jira/browse/ZOOKEEPER-147)|This is a version of the documents with most of the [tbd...] scrubbed out|
|[ZOOKEEPER-150](https://issues.apache.org/jira/browse/ZOOKEEPER-150)|zookeeper build broken|
|[ZOOKEEPER-136](https://issues.apache.org/jira/browse/ZOOKEEPER-136)|sync causes hang in all followers of quorum.|
|[ZOOKEEPER-134](https://issues.apache.org/jira/browse/ZOOKEEPER-134)|findbugs cleanup|
|[ZOOKEEPER-133](https://issues.apache.org/jira/browse/ZOOKEEPER-133)|hudson tests failing intermittently|
|[ZOOKEEPER-144](https://issues.apache.org/jira/browse/ZOOKEEPER-144)|add tostring support for watcher event, and enums for event type/state|
|[ZOOKEEPER-21](https://issues.apache.org/jira/browse/ZOOKEEPER-21)|Improve zk ctor/watcher|
|[ZOOKEEPER-142](https://issues.apache.org/jira/browse/ZOOKEEPER-142)|Provide Javadoc as to the maximum size of the data byte array that may be stored within a znode|
|[ZOOKEEPER-93](https://issues.apache.org/jira/browse/ZOOKEEPER-93)|Create Documentation for Zookeeper|
|[ZOOKEEPER-117](https://issues.apache.org/jira/browse/ZOOKEEPER-117)|threading issues in Leader election|
|[ZOOKEEPER-137](https://issues.apache.org/jira/browse/ZOOKEEPER-137)|client watcher objects can lose events|
|[ZOOKEEPER-131](https://issues.apache.org/jira/browse/ZOOKEEPER-131)|Old leader election can elect a dead leader over and over again|
|[ZOOKEEPER-130](https://issues.apache.org/jira/browse/ZOOKEEPER-130)|update build.xml to support apache release process|
|[ZOOKEEPER-118](https://issues.apache.org/jira/browse/ZOOKEEPER-118)|findbugs flagged switch statement in followerrequestprocessor.run|
|[ZOOKEEPER-115](https://issues.apache.org/jira/browse/ZOOKEEPER-115)|Potential NPE in QuorumCnxManager|
|[ZOOKEEPER-114](https://issues.apache.org/jira/browse/ZOOKEEPER-114)|cleanup ugly event messages in zookeeper client|
|[ZOOKEEPER-112](https://issues.apache.org/jira/browse/ZOOKEEPER-112)|src/java/main ZooKeeper.java has test code embedded into it.|
|[ZOOKEEPER-39](https://issues.apache.org/jira/browse/ZOOKEEPER-39)|Use Watcher objects rather than boolean on read operations.|
|[ZOOKEEPER-97](https://issues.apache.org/jira/browse/ZOOKEEPER-97)|supports optional output directory in code generator.|
|[ZOOKEEPER-101](https://issues.apache.org/jira/browse/ZOOKEEPER-101)|Integrate ZooKeeper with "violations" feature on hudson|
|[ZOOKEEPER-105](https://issues.apache.org/jira/browse/ZOOKEEPER-105)|Catch Zookeeper exceptions and print on the stderr.|
|[ZOOKEEPER-42](https://issues.apache.org/jira/browse/ZOOKEEPER-42)|Change Leader Election to fast tcp.|
|[ZOOKEEPER-48](https://issues.apache.org/jira/browse/ZOOKEEPER-48)|auth_id now handled correctly when no auth ids present|
|[ZOOKEEPER-44](https://issues.apache.org/jira/browse/ZOOKEEPER-44)|Create sequence flag children with prefixes of 0's so that they can be lexicographically sorted.|
|
| 225 |
+
|[ZOOKEEPER-108](https://issues.apache.org/jira/browse/ZOOKEEPER-108)|Fix sync operation reordering on a Quorum.|
|
| 226 |
+
|[ZOOKEEPER-25](https://issues.apache.org/jira/browse/ZOOKEEPER-25)|Fuse module for Zookeeper.|
|
| 227 |
+
|[ZOOKEEPER-58](https://issues.apache.org/jira/browse/ZOOKEEPER-58)|Race condition on ClientCnxn.java|
|
| 228 |
+
|[ZOOKEEPER-56](https://issues.apache.org/jira/browse/ZOOKEEPER-56)|Add clover support to build.xml.|
|
| 229 |
+
|[ZOOKEEPER-75](https://issues.apache.org/jira/browse/ZOOKEEPER-75)|register the ZooKeeper mailing lists with nabble.com|
|
| 230 |
+
|[ZOOKEEPER-54](https://issues.apache.org/jira/browse/ZOOKEEPER-54)|remove sleeps in the tests.|
|
| 231 |
+
|[ZOOKEEPER-55](https://issues.apache.org/jira/browse/ZOOKEEPER-55)|build.xml fails to retrieve a release number from SVN and the ant target "dist" fails|
|
| 232 |
+
|[ZOOKEEPER-89](https://issues.apache.org/jira/browse/ZOOKEEPER-89)|invoke WhenOwnerListener.whenNotOwner when the ZK connection fails|
|
| 233 |
+
|[ZOOKEEPER-90](https://issues.apache.org/jira/browse/ZOOKEEPER-90)|invoke WhenOwnerListener.whenNotOwner when the ZK session expires and the znode is the leader|
|
| 234 |
+
|[ZOOKEEPER-82](https://issues.apache.org/jira/browse/ZOOKEEPER-82)|Make the ZooKeeperServer more DI friendly.|
|
| 235 |
+
|[ZOOKEEPER-110](https://issues.apache.org/jira/browse/ZOOKEEPER-110)|Build script relies on svnant, which is not compatible with subversion 1.5 working copies|
|
| 236 |
+
|[ZOOKEEPER-111](https://issues.apache.org/jira/browse/ZOOKEEPER-111)|Significant cleanup of existing tests.|
|
| 237 |
+
|[ZOOKEEPER-122](https://issues.apache.org/jira/browse/ZOOKEEPER-122)|Fix NPE in jute's Utils.toCSVString.|
|
| 238 |
+
|[ZOOKEEPER-123](https://issues.apache.org/jira/browse/ZOOKEEPER-123)|Fix the wrong class is specified for the logger.|
|
| 239 |
+
|[ZOOKEEPER-2](https://issues.apache.org/jira/browse/ZOOKEEPER-2)|Fix synchronization issues in QuorumPeer and FastLeader election.|
|
| 240 |
+
|[ZOOKEEPER-125](https://issues.apache.org/jira/browse/ZOOKEEPER-125)|Remove unwanted class declaration in FastLeaderElection.|
|
| 241 |
+
|[ZOOKEEPER-61](https://issues.apache.org/jira/browse/ZOOKEEPER-61)|Address in client/server test cases.|
|
| 242 |
+
|[ZOOKEEPER-75](https://issues.apache.org/jira/browse/ZOOKEEPER-75)|cleanup the library directory|
|
| 243 |
+
|[ZOOKEEPER-109](https://issues.apache.org/jira/browse/ZOOKEEPER-109)|cleanup of NPE and Resource issue nits found by static analysis|
|
| 244 |
+
|[ZOOKEEPER-76](https://issues.apache.org/jira/browse/ZOOKEEPER-76)|Commit 677109 removed the cobertura library, but not the build targets.|
|
| 245 |
+
|[ZOOKEEPER-63](https://issues.apache.org/jira/browse/ZOOKEEPER-63)|Race condition in client close|
|
| 246 |
+
|[ZOOKEEPER-70](https://issues.apache.org/jira/browse/ZOOKEEPER-70)|Add skeleton forrest doc structure for ZooKeeper|
|
| 247 |
+
|[ZOOKEEPER-79](https://issues.apache.org/jira/browse/ZOOKEEPER-79)|Document jacob's leader election on the wiki recipes page|
|
| 248 |
+
|[ZOOKEEPER-73](https://issues.apache.org/jira/browse/ZOOKEEPER-73)|Move ZK wiki from SourceForge to Apache|
|
| 249 |
+
|[ZOOKEEPER-72](https://issues.apache.org/jira/browse/ZOOKEEPER-72)|Initial creation/setup of ZooKeeper ASF site.|
|
| 250 |
+
|[ZOOKEEPER-71](https://issues.apache.org/jira/browse/ZOOKEEPER-71)|Determine what to do re ZooKeeper Changelog|
|
| 251 |
+
|[ZOOKEEPER-68](https://issues.apache.org/jira/browse/ZOOKEEPER-68)|parseACLs in ZooKeeper.java fails to parse elements of ACL, should be lastIndexOf rather than IndexOf|
|
| 252 |
+
|[ZOOKEEPER-130](https://issues.apache.org/jira/browse/ZOOKEEPER-130)|update build.xml to support apache release process.|
|
| 253 |
+
|[ZOOKEEPER-131](https://issues.apache.org/jira/browse/ZOOKEEPER-131)|Fix Old leader election can elect a dead leader over and over again.|
|
| 254 |
+
|[ZOOKEEPER-137](https://issues.apache.org/jira/browse/ZOOKEEPER-137)|client watcher objects can lose events|
|
| 255 |
+
|[ZOOKEEPER-117](https://issues.apache.org/jira/browse/ZOOKEEPER-117)|threading issues in Leader election|
|
| 256 |
+
|[ZOOKEEPER-128](https://issues.apache.org/jira/browse/ZOOKEEPER-128)|test coverage on async client operations needs to be improved|
|
| 257 |
+
|[ZOOKEEPER-127](https://issues.apache.org/jira/browse/ZOOKEEPER-127)|Use of non-standard election ports in config breaks services|
|
| 258 |
+
|[ZOOKEEPER-53](https://issues.apache.org/jira/browse/ZOOKEEPER-53)|tests failing on solaris.|
|
| 259 |
+
|[ZOOKEEPER-172](https://issues.apache.org/jira/browse/ZOOKEEPER-172)|FLE Test|
|
| 260 |
+
|[ZOOKEEPER-41](https://issues.apache.org/jira/browse/ZOOKEEPER-41)|Sample startup script|
|
| 261 |
+
|[ZOOKEEPER-33](https://issues.apache.org/jira/browse/ZOOKEEPER-33)|Better ACL management|
|
| 262 |
+
|[ZOOKEEPER-49](https://issues.apache.org/jira/browse/ZOOKEEPER-49)|SetACL does not work|
|
| 263 |
+
|[ZOOKEEPER-20](https://issues.apache.org/jira/browse/ZOOKEEPER-20)|Child watches are not triggered when the node is deleted|
|
| 264 |
+
|[ZOOKEEPER-15](https://issues.apache.org/jira/browse/ZOOKEEPER-15)|handle failure better in build.xml:test|
|
| 265 |
+
|[ZOOKEEPER-11](https://issues.apache.org/jira/browse/ZOOKEEPER-11)|ArrayList is used instead of List|
|
| 266 |
+
|[ZOOKEEPER-45](https://issues.apache.org/jira/browse/ZOOKEEPER-45)|Restructure the SVN repository after initial import |
|
| 267 |
+
|[ZOOKEEPER-1](https://issues.apache.org/jira/browse/ZOOKEEPER-1)|Initial ZooKeeper code contribution from Yahoo!|
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/basic.css
ADDED
@@ -0,0 +1,167 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * General
 */

img { border: 0; }

#content table {
  border: 0;
  width: 100%;
}
/* Hack to get IE to render the table at 100% */
* html #content table { margin-left: -3px; }

#content th,
#content td {
  margin: 0;
  padding: 0;
  vertical-align: top;
}

.clearboth {
  clear: both;
}

.note, .warning, .fixme {
  clear: right;
  border: solid black 1px;
  margin: 1em 3em;
}

.note .label {
  background: #369;
  color: white;
  font-weight: bold;
  padding: 5px 10px;
}
.note .content {
  background: #F0F0FF;
  color: black;
  line-height: 120%;
  font-size: 90%;
  padding: 5px 10px;
}
.warning .label {
  background: #C00;
  color: white;
  font-weight: bold;
  padding: 5px 10px;
}
.warning .content {
  background: #FFF0F0;
  color: black;
  line-height: 120%;
  font-size: 90%;
  padding: 5px 10px;
}
.fixme .label {
  background: #C6C600;
  color: black;
  font-weight: bold;
  padding: 5px 10px;
}
.fixme .content {
  padding: 5px 10px;
}

/**
 * Typography
 */

body {
  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
  font-size: 100%;
}

#content {
  font-family: Georgia, Palatino, Times, serif;
  font-size: 95%;
}
#tabs {
  font-size: 70%;
}
#menu {
  font-size: 80%;
}
#footer {
  font-size: 70%;
}

h1, h2, h3, h4, h5, h6 {
  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
  font-weight: bold;
  margin-top: 1em;
  margin-bottom: .5em;
}

h1 {
  margin-top: 0;
  margin-bottom: 1em;
  font-size: 1.4em;
}
#content h1 {
  font-size: 160%;
  margin-bottom: .5em;
}
#menu h1 {
  margin: 0;
  padding: 10px;
  background: #336699;
  color: white;
}
h2 { font-size: 120%; }
h3 { font-size: 100%; }
h4 { font-size: 90%; }
h5 { font-size: 80%; }
h6 { font-size: 75%; }

p {
  line-height: 120%;
  text-align: left;
  margin-top: .5em;
  margin-bottom: 1em;
}

#content li,
#content th,
#content td,
#content li ul,
#content li ol {
  margin-top: .5em;
  margin-bottom: .5em;
}

#content li li,
#minitoc-area li {
  margin-top: 0em;
  margin-bottom: 0em;
}

#content .attribution {
  text-align: right;
  font-style: italic;
  font-size: 85%;
  margin-top: 1em;
}

.codefrag {
  font-family: "Courier New", Courier, monospace;
  font-size: 110%;
}

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/getBlank.js
ADDED
@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * getBlank script - when included in a html file and called from a form text field, will set the value of this field to ""
 * if the text value is still the standard value.
 * getPrompt script - when included in a html file and called from a form text field, will set the value of this field to the prompt
 * if the text value is empty.
 *
 * Typical usage:
 * <script type="text/javascript" language="JavaScript" src="getBlank.js"></script>
 * <input type="text" id="query" value="Search the site:" onFocus="getBlank (this, 'Search the site:');" onBlur="getBlank (this, 'Search the site:');"/>
 */
<!--
function getBlank (form, stdValue){
  if (form.value == stdValue){
    form.value = '';
  }
  return true;
}
function getPrompt (form, stdValue){
  if (form.value == ''){
    form.value = stdValue;
  }
  return true;
}
//-->

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/print.css
ADDED
@@ -0,0 +1,54 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
body {
  font-family: Georgia, Palatino, serif;
  font-size: 12pt;
  background: white;
}

#tabs,
#menu,
#content .toc {
  display: none;
}

#content {
  width: auto;
  padding: 0;
  float: none !important;
  color: black;
  background: inherit;
}

a:link, a:visited {
  color: #336699;
  background: inherit;
  text-decoration: underline;
}

#top .logo {
  padding: 0;
  margin: 0 0 2em 0;
}

#footer {
  margin-top: 4em;
}

acronym {
  border: 0;
}

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/profile.css
ADDED
@@ -0,0 +1,159 @@
/* ==================== aural ============================ */

@media aural {
  h1, h2, h3, h4, h5, h6 { voice-family: paul, male; stress: 20; richness: 90 }
  h1 { pitch: x-low; pitch-range: 90 }
  h2 { pitch: x-low; pitch-range: 80 }
  h3 { pitch: low; pitch-range: 70 }
  h4 { pitch: medium; pitch-range: 60 }
  h5 { pitch: medium; pitch-range: 50 }
  h6 { pitch: medium; pitch-range: 40 }
  li, dt, dd { pitch: medium; richness: 60 }
  dt { stress: 80 }
  pre, code, tt { pitch: medium; pitch-range: 0; stress: 0; richness: 80 }
  em { pitch: medium; pitch-range: 60; stress: 60; richness: 50 }
  strong { pitch: medium; pitch-range: 60; stress: 90; richness: 90 }
  dfn { pitch: high; pitch-range: 60; stress: 60 }
  s, strike { richness: 0 }
  i { pitch: medium; pitch-range: 60; stress: 60; richness: 50 }
  b { pitch: medium; pitch-range: 60; stress: 90; richness: 90 }
  u { richness: 0 }

  :link { voice-family: harry, male }
  :visited { voice-family: betty, female }
  :active { voice-family: betty, female; pitch-range: 80; pitch: x-high }
}

#top { background-color: #FFFFFF;}

#top .header .current { background-color: #4C6C8F;}
#top .header .current a:link { color: #ffffff; }
#top .header .current a:visited { color: #ffffff; }
#top .header .current a:hover { color: #ffffff; }

#tabs li { background-color: #E5E4D9 ;}
#tabs li a:link { color: #000000; }
#tabs li a:visited { color: #000000; }
#tabs li a:hover { color: #000000; }

#level2tabs a.selected { background-color: #4C6C8F ;}
#level2tabs a:link { color: #ffffff; }
#level2tabs a:visited { color: #ffffff; }
#level2tabs a:hover { color: #ffffff; }

#level2tabs { background-color: #E5E4D9;}
#level2tabs a.unselected:link { color: #000000; }
#level2tabs a.unselected:visited { color: #000000; }
#level2tabs a.unselected:hover { color: #000000; }

.heading { background-color: #E5E4D9;}

.boxed { background-color: #E5E4D9;}
.underlined_5 {border-bottom: solid 5px #E5E4D9;}
.underlined_10 {border-bottom: solid 10px #E5E4D9;}
table caption {
  background-color: #E5E4D9;
  color: #000000;
}

#feedback {
  color: #FFFFFF;
  background: #4C6C8F;
  text-align: center;
}
#feedback #feedbackto {
  color: #FFFFFF;
}

#publishedStrip {
  color: #FFFFFF;
  background: #4C6C8F;
}

#publishedStrip {
  color: #000000;
  background: #E5E4D9;
}

#menu a.selected { background-color: #CFDCED;
  border-color: #999999;
  color: #000000;}
#menu a.selected:visited { color: #000000;}

#menu { border-color: #999999;}
#menu .menupageitemgroup { border-color: #999999;}

#menu { background-color: #4C6C8F;}
#menu { color: #ffffff;}
#menu a:link { color: #ffffff;}
#menu a:visited { color: #ffffff;}
#menu a:hover {
  background-color: #4C6C8F;
  color: #ffffff;}

#menu h1 {
  color: #000000;
  background-color: #cfdced;
}

#top .searchbox {
  background-color: #E5E4D9 ;
  color: #000000;
}

#menu .menupageitemgroup {
  background-color: #E5E4D9;
}
#menu .menupageitem {
  color: #000000;
}
#menu .menupageitem a:link { color: #000000;}
#menu .menupageitem a:visited { color: #000000;}
#menu .menupageitem a:hover {
  background-color: #E5E4D9;
  color: #000000;
}

body {
  background-color: #ffffff;
  color: #000000;
}
a:link { color:#0000ff}
a:visited { color:#009999}
a:hover { color:#6587ff}

.ForrestTable { background-color: #ccc;}

.ForrestTable td { background-color: #ffffff;}

.highlight { background-color: #ffff00;}

.fixme { border-color: #c60;}

.note { border-color: #069;}

.warning { border-color: #900;}

#footer { background-color: #E5E4D9;}
/* extra-css */

p.quote {
  margin-left: 2em;
  padding: .5em;
  background-color: #f0f0f0;
  font-family: monospace;
}

pre {
  margin-left: 0em;
  padding: 0.5em;
  background-color: #f0f0f0;
  font-family: monospace;
}

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/skin/prototype.js
ADDED
The diff for this file is too large to render. See raw diff.

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperAdmin.md
ADDED
The diff for this file is too large to render. See raw diff.

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperCLI.md
ADDED
@@ -0,0 +1,573 @@
<!--
Copyright 2002-2021 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper-cli: the ZooKeeper command line interface

## Pre-requisites
Enter the ZooKeeper CLI:

```bash
# connect to localhost with the default port 2181
bin/zkCli.sh
# connect to a remote host with a 3s timeout
bin/zkCli.sh -timeout 3000 -server remoteIP:2181
# connect with the -waitforconnection option to wait for connection success before executing commands
bin/zkCli.sh -waitforconnection -timeout 3000 -server remoteIP:2181
# connect with a custom client configuration properties file
bin/zkCli.sh -client-configuration /path/to/client.properties
```
## help
Show help for the ZooKeeper commands.

```bash
[zkshell: 1] help
# a sample one
[zkshell: 2] h
ZooKeeper -server host:port cmd args
addauth scheme auth
close
config [-c] [-w] [-s]
connect host:port
create [-s] [-e] [-c] [-t ttl] path [data] [acl]
delete [-v version] path
deleteall path
delquota [-n|-b|-N|-B] path
get [-s] [-w] path
getAcl [-s] path
getAllChildrenNumber path
getEphemerals path
history
listquota path
ls [-s] [-w] [-R] path
printwatches on|off
quit
reconfig [-s] [-v version] [[-file path] | [-members serverID=host:port1:port2;port3[,...]*]] | [-add serverId=host:port1:port2;port3[,...]]* [-remove serverId[,...]*]
redo cmdno
removewatches path [-c|-d|-a] [-l]
set [-s] [-v version] path data
setAcl [-s] [-v version] [-R] path acl
setquota -n|-b|-N|-B val path
stat [-w] path
sync path
version
```

## addauth
Add an authorized user for ACLs.

```bash
[zkshell: 9] getAcl /acl_digest_test
Insufficient permission : /acl_digest_test
[zkshell: 10] addauth digest user1:12345
[zkshell: 11] getAcl /acl_digest_test
'digest,'user1:+owfoSBn/am19roBPzR1/MfCblE=
: cdrwa
# add a super user
# Notice: set zookeeper.DigestAuthenticationProvider.superDigest,
# e.g. zookeeper.DigestAuthenticationProvider.superDigest=zookeeper:qW/HnTfCSoQpB5G8LgkwT3IbiFc=
[zkshell: 12] addauth digest zookeeper:admin
```
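
To generate a `superDigest` value, ZooKeeper ships a small digest generator in `DigestAuthenticationProvider`. A minimal sketch; the classpath below is an assumption that depends on your installation layout:

```bash
# prints user:password->user:<base64-digest>; the right-hand side is the value
# to pass as -Dzookeeper.DigestAuthenticationProvider.superDigest=...
java -cp "lib/*" org.apache.zookeeper.server.auth.DigestAuthenticationProvider super:admin
```
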

## close
Close this client/session.

```bash
[zkshell: 0] close
2019-03-09 06:42:22,178 [myid:] - INFO [main-EventThread:ClientCnxn$EventThread@528] - EventThread shut down for session: 0x10007ab7c550006
2019-03-09 06:42:22,179 [myid:] - INFO [main:ZooKeeper@1346] - Session: 0x10007ab7c550006 closed
```

## config
Show the config of the quorum membership.

```bash
[zkshell: 17] config
server.1=[2001:db8:1:0:0:242:ac11:2]:2888:3888:participant
server.2=[2001:db8:1:0:0:242:ac11:2]:12888:13888:participant
server.3=[2001:db8:1:0:0:242:ac11:2]:22888:23888:participant
version=0
```
## connect
Connect to a ZooKeeper server.

```bash
[zkshell: 4] connect
2019-03-09 06:43:33,179 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@986] - Socket connection established, initiating session, client: /127.0.0.1:35144, server: localhost/127.0.0.1:2181
2019-03-09 06:43:33,189 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1421] - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x10007ab7c550007, negotiated timeout = 30000
connect "localhost:2181,localhost:2182,localhost:2183"

# connect to a remote server
[zkshell: 5] connect remoteIP:2181
```
## create
Create a znode.

```bash
# create a persistent node
[zkshell: 7] create /persistent_node
Created /persistent_node

# create an ephemeral node
[zkshell: 8] create -e /ephemeral_node mydata
Created /ephemeral_node

# create a persistent-sequential node
[zkshell: 9] create -s /persistent_sequential_node mydata
Created /persistent_sequential_node0000000176

# create an ephemeral-sequential node
[zkshell: 10] create -s -e /ephemeral_sequential_node mydata
Created /ephemeral_sequential_node0000000174

# create a node with an ACL scheme
[zkshell: 11] create /zk-node-create-schema mydata digest:user1:+owfoSBn/am19roBPzR1/MfCblE=:crwad
Created /zk-node-create-schema
[zkshell: 12] addauth digest user1:12345
[zkshell: 13] getAcl /zk-node-create-schema
'digest,'user1:+owfoSBn/am19roBPzR1/MfCblE=
: cdrwa

# create a container node. When the last child of a container is deleted, the container itself becomes eligible for deletion
[zkshell: 14] create -c /container_node mydata
Created /container_node
[zkshell: 15] create -c /container_node/child_1 mydata
Created /container_node/child_1
[zkshell: 16] create -c /container_node/child_2 mydata
Created /container_node/child_2
[zkshell: 17] delete /container_node/child_1
[zkshell: 18] delete /container_node/child_2
[zkshell: 19] get /container_node
org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /container_node

# create a TTL node.
# set zookeeper.extendedTypesEnabled=true
# Otherwise: KeeperErrorCode = Unimplemented for /ttl_node
[zkshell: 20] create -t 3000 /ttl_node mydata
Created /ttl_node
# 3s later
[zkshell: 21] get /ttl_node
org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /ttl_node
```
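
TTL nodes are rejected with `KeeperErrorCode = Unimplemented` unless every server in the ensemble runs with extended types enabled. One way to do that, a sketch assuming the stock `bin/zkServer.sh`, which picks up `SERVER_JVMFLAGS` from the environment:

```bash
# enable TTL/extended znode types on each server, then restart it
SERVER_JVMFLAGS="-Dzookeeper.extendedTypesEnabled=true" bin/zkServer.sh restart
```
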
## delete
Delete the node at a given path.

```bash
[zkshell: 2] delete /config/topics/test
[zkshell: 3] ls /config/topics/test
Node does not exist: /config/topics/test
```

## deleteall
Delete the given node and all nodes under it.

```bash
[zkshell: 1] ls /config
[changes, clients, topics]
[zkshell: 2] deleteall /config
[zkshell: 3] ls /config
Node does not exist: /config
```

## delquota
Delete the quota under a path.

```bash
[zkshell: 1] delquota /quota_test
[zkshell: 2] listquota /quota_test
absolute path is /zookeeper/quota/quota_test/zookeeper_limits
quota for /quota_test does not exist.
[zkshell: 3] delquota -n /c1
[zkshell: 4] delquota -N /c2
[zkshell: 5] delquota -b /c3
[zkshell: 6] delquota -B /c4
```
## get
Get the data of the node at a given path.

```bash
[zkshell: 10] get /latest_producer_id_block
{"version":1,"broker":0,"block_start":"0","block_end":"999"}

# -s to show the stat
[zkshell: 11] get -s /latest_producer_id_block
{"version":1,"broker":0,"block_start":"0","block_end":"999"}
cZxid = 0x90000009a
ctime = Sat Jul 28 08:14:09 UTC 2018
mZxid = 0x9000000a2
mtime = Sat Jul 28 08:14:12 UTC 2018
pZxid = 0x90000009a
cversion = 0
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 60
numChildren = 0

# -w to set a watch on data changes. Notice: turn printwatches on
[zkshell: 12] get -w /latest_producer_id_block
{"version":1,"broker":0,"block_start":"0","block_end":"999"}
[zkshell: 13] set /latest_producer_id_block mydata
WATCHER::
WatchedEvent state:SyncConnected type:NodeDataChanged path:/latest_producer_id_block
```

## getAcl
Get the ACL of a node.

```bash
[zkshell: 4] create /acl_test mydata ip:127.0.0.1:crwda
Created /acl_test
[zkshell: 5] getAcl /acl_test
'ip,'127.0.0.1
: cdrwa
[zkshell: 6] getAcl /testwatch
'world,'anyone
: cdrwa
```
## getAllChildrenNumber
Get the total number of children under a given path.

```bash
[zkshell: 1] getAllChildrenNumber /
73779
[zkshell: 2] getAllChildrenNumber /ZooKeeper
2
[zkshell: 3] getAllChildrenNumber /ZooKeeper/quota
0
```
## getEphemerals
Get all the ephemeral nodes created by this session.

```bash
[zkshell: 1] create -e /test-get-ephemerals "ephemeral node"
Created /test-get-ephemerals
[zkshell: 2] getEphemerals
[/test-get-ephemerals]
[zkshell: 3] getEphemerals /
[/test-get-ephemerals]
[zkshell: 4] create -e /test-get-ephemerals-1 "ephemeral node"
Created /test-get-ephemerals-1
[zkshell: 5] getEphemerals /test-get-ephemerals
test-get-ephemerals test-get-ephemerals-1
[zkshell: 6] getEphemerals /test-get-ephemerals
[/test-get-ephemerals-1, /test-get-ephemerals]
[zkshell: 7] getEphemerals /test-get-ephemerals-1
[/test-get-ephemerals-1]
```

## history
Show the last 11 commands that you have executed.

```bash
[zkshell: 7] history
0 - close
1 - close
2 - ls /
3 - ls /
4 - connect
5 - ls /
6 - ll
7 - history
```

## listquota
List the quota of a path.

```bash
[zkshell: 1] listquota /c1
absolute path is /zookeeper/quota/c1/zookeeper_limits
Output quota for /c1 count=-1,bytes=-1=;byteHardLimit=-1;countHardLimit=2
Output stat for /c1 count=4,bytes=0
```

## ls
List the children of a path.

```bash
[zkshell: 36] ls /quota_test
[child_1, child_2, child_3]

# -s to show the stat
[zkshell: 37] ls -s /quota_test
[child_1, child_2, child_3]
cZxid = 0x110000002d
ctime = Thu Mar 07 11:19:07 UTC 2019
mZxid = 0x110000002d
mtime = Thu Mar 07 11:19:07 UTC 2019
pZxid = 0x1100000033
cversion = 3
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 3

# -R to show the child nodes recursively
[zkshell: 38] ls -R /quota_test
/quota_test
/quota_test/child_1
/quota_test/child_2
/quota_test/child_3

# -w to set a watch on child changes. Notice: turn printwatches on
[zkshell: 39] ls -w /brokers
[ids, seqid, topics]
[zkshell: 40] delete /brokers/ids
WATCHER::
WatchedEvent state:SyncConnected type:NodeChildrenChanged path:/brokers
```

## printwatches
A switch that controls whether watch events are printed.

```bash
[zkshell: 0] printwatches
printwatches is on
[zkshell: 1] printwatches off
[zkshell: 2] printwatches
printwatches is off
[zkshell: 3] printwatches on
[zkshell: 4] printwatches
printwatches is on
```

## quit
Quit the CLI.

```bash
[zkshell: 1] quit
```

## reconfig
Change the membership of the ensemble at runtime.

Before using this command, read the [Dynamic Reconfiguration](zookeeperReconfig.html) page for details about the reconfig feature, especially the "Security" section.

Pre-requisites:

1. set reconfigEnabled=true in the zoo.cfg

2. add a super user or enable skipACL, otherwise you will get "Insufficient permission", e.g. addauth digest zookeeper:admin

```bash
# Change follower 2 to an observer and change its port from 2182 to 12182
# Add observer 5 to the ensemble
# Remove observer 4 from the ensemble
[zkshell: 1] reconfig --add 2=localhost:2781:2786:observer;12182 --add 5=localhost:2781:2786:observer;2185 -remove 4
Committed new configuration:
server.1=localhost:2780:2785:participant;0.0.0.0:2181
server.2=localhost:2781:2786:observer;0.0.0.0:12182
server.3=localhost:2782:2787:participant;0.0.0.0:2183
server.5=localhost:2784:2789:observer;0.0.0.0:2185
version=1c00000002

# -members to specify the full membership
[zkshell: 2] reconfig -members server.1=localhost:2780:2785:participant;0.0.0.0:2181,server.2=localhost:2781:2786:observer;0.0.0.0:12182,server.3=localhost:2782:2787:participant;0.0.0.0:12183
Committed new configuration:
server.1=localhost:2780:2785:participant;0.0.0.0:2181
server.2=localhost:2781:2786:observer;0.0.0.0:12182
server.3=localhost:2782:2787:participant;0.0.0.0:12183
version=f9fe0000000c

# Change the current config to the one in myNewConfig.txt,
# but only if the current config version is 2100000010
[zkshell: 3] reconfig -file /data/software/zookeeper/zookeeper-test/conf/myNewConfig.txt -v 2100000010
Committed new configuration:
server.1=localhost:2780:2785:participant;0.0.0.0:2181
server.2=localhost:2781:2786:observer;0.0.0.0:12182
server.3=localhost:2782:2787:participant;0.0.0.0:2183
server.5=localhost:2784:2789:observer;0.0.0.0:2185
version=220000000c
```

## redo
Re-execute a command by its index from history.

```bash
[zkshell: 4] history
0 - ls /
1 - get /consumers
2 - get /hbase
3 - ls /hbase
4 - history
[zkshell: 5] redo 3
[backup-masters, draining, flush-table-proc, hbaseid, master-maintenance, meta-region-server, namespace, online-snapshot, replication, rs, running, splitWAL, switch, table, table-lock]
```

## removewatches
Remove the watches on a node.

```bash
[zkshell: 1] get -w /brokers
null
[zkshell: 2] removewatches /brokers
WATCHER::
WatchedEvent state:SyncConnected type:DataWatchRemoved path:/brokers
```

## set
Set/update the data of a node.

```bash
[zkshell: 50] set /brokers myNewData

# -s to show the stat of this node
[zkshell: 51] set -s /quota_test mydata_for_quota_test
cZxid = 0x110000002d
ctime = Thu Mar 07 11:19:07 UTC 2019
mZxid = 0x1100000038
mtime = Thu Mar 07 11:42:41 UTC 2019
pZxid = 0x1100000033
cversion = 3
dataVersion = 2
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 21
numChildren = 3

# -v sets the data with compare-and-swap (CAS); the expected version is the dataVersion shown by stat
[zkshell: 52] set -v 0 /brokers myNewData
[zkshell: 53] set -v 0 /brokers myNewData
version No is not valid : /brokers
```
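
The `version No is not valid` failure is what makes optimistic, compare-and-swap style updates possible from scripts as well. A rough sketch of a retry loop; the `awk` parsing of `stat` output and the error-message match are illustrative assumptions, not a robust pattern:

```bash
#!/usr/bin/env bash
# Optimistic update of /brokers: read dataVersion, set with -v, retry on conflict.
while :; do
  version=$(bin/zkCli.sh stat /brokers 2>/dev/null | awk -F' = ' '/^dataVersion/ {print $2}')
  out=$(bin/zkCli.sh set -v "$version" /brokers myNewData 2>&1)
  echo "$out" | grep -q 'version No is not valid' || break   # no conflict: done
done
```
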

## setAcl
Set the ACL for a node.

```bash
[zkshell: 28] addauth digest user1:12345
[zkshell: 30] setAcl /acl_auth_test auth:user1:12345:crwad
[zkshell: 31] getAcl /acl_auth_test
'digest,'user1:+owfoSBn/am19roBPzR1/MfCblE=
: cdrwa

# -R to set the ACL recursively
[zkshell: 32] ls /acl_auth_test
[child_1, child_2]
[zkshell: 33] getAcl /acl_auth_test/child_2
'world,'anyone
: cdrwa
[zkshell: 34] setAcl -R /acl_auth_test auth:user1:12345:crwad
[zkshell: 35] getAcl /acl_auth_test/child_2
'digest,'user1:+owfoSBn/am19roBPzR1/MfCblE=
: cdrwa

# -v sets the ACL with an expected aclVersion, which can be found via stat
[zkshell: 36] stat /acl_auth_test
cZxid = 0xf9fc0000001c
ctime = Tue Mar 26 16:50:58 CST 2019
mZxid = 0xf9fc0000001c
mtime = Tue Mar 26 16:50:58 CST 2019
pZxid = 0xf9fc0000001f
cversion = 2
dataVersion = 0
aclVersion = 3
ephemeralOwner = 0x0
dataLength = 0
numChildren = 2
[zkshell: 37] setAcl -v 3 /acl_auth_test auth:user1:12345:crwad
```

## setquota
Set a quota on a path.

```bash
# -n to limit the number of child nodes (including the node itself)
[zkshell: 18] setquota -n 2 /quota_test
[zkshell: 19] create /quota_test/child_1
Created /quota_test/child_1
[zkshell: 20] create /quota_test/child_2
Created /quota_test/child_2
[zkshell: 21] create /quota_test/child_3
Created /quota_test/child_3
# Notice: a soft quota is not a hard constraint; the server just logs a warning
2019-03-07 11:22:36,680 [myid:1] - WARN [SyncThread:0:DataTree@374] - Quota exceeded: /quota_test count=3 limit=2
2019-03-07 11:22:41,861 [myid:1] - WARN [SyncThread:0:DataTree@374] - Quota exceeded: /quota_test count=4 limit=2

# -b to limit the bytes (data length) of one path
[zkshell: 22] setquota -b 5 /brokers
[zkshell: 23] set /brokers "I_love_zookeeper"
# Notice: a soft quota is not a hard constraint; the server just logs a warning
WARN [CommitProcWorkThread-7:DataTree@379] - Quota exceeded: /brokers bytes=4206 limit=5

# -N count hard quota
[zkshell: 3] create /c1
Created /c1
[zkshell: 4] setquota -N 2 /c1
[zkshell: 5] listquota /c1
absolute path is /zookeeper/quota/c1/zookeeper_limits
Output quota for /c1 count=-1,bytes=-1=;byteHardLimit=-1;countHardLimit=2
Output stat for /c1 count=2,bytes=0
[zkshell: 6] create /c1/ch-3
Count Quota has exceeded : /c1/ch-3

# -B byte hard quota
[zkshell: 3] create /c2
[zkshell: 4] setquota -B 4 /c2
[zkshell: 5] set /c2 "foo"
[zkshell: 6] set /c2 "foo-bar"
Bytes Quota has exceeded : /c2
[zkshell: 7] get /c2
foo
```

## stat
Show the stat/metadata of a node.

```bash
[zkshell: 1] stat /hbase
cZxid = 0x4000013d9
ctime = Wed Jun 27 20:13:07 CST 2018
mZxid = 0x4000013d9
mtime = Wed Jun 27 20:13:07 CST 2018
pZxid = 0x500000001
cversion = 17
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 15
```

## sync
Sync the connected server with the leader for a given path (asynchronous).

```bash
[zkshell: 14] sync /
[zkshell: 15] Sync is OK
```

## version
Show the version of the ZooKeeper client/CLI.

```bash
[zkshell: 1] version
ZooKeeper CLI version: 3.6.0-SNAPSHOT-29f9b2c1c0e832081f94d59a6b88709c5f1bb3ca, built on 05/30/2019 09:26 GMT
```

## whoami
Shows all authentication information added to the current session.

    [zkshell: 1] whoami
    Auth scheme: User
    ip: 127.0.0.1
    [zkshell: 2] addauth digest user1:12345
    [zkshell: 3] whoami
    Auth scheme: User
    ip: 127.0.0.1
    digest: user1
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperMonitor.md
ADDED
@@ -0,0 +1,269 @@
<!--
Copyright 2002-2021 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper Monitor Guide

* [New Metrics System](#Metrics-System)
    * [Metrics](#Metrics)
    * [Prometheus](#Prometheus)
    * [Alerting with Prometheus](#Alerting)
    * [Grafana](#Grafana)
    * [InfluxDB](#influxdb)

* [JMX](#JMX)

* [Four letter words](#four-letter-words)

<a name="Metrics-System"></a>

## New Metrics System
The `New Metrics System` has been available since 3.6.0. It provides abundant metrics
to help users monitor ZooKeeper on these topics: znodes, network, disk, quorum, leader election,
clients, security, failures, watches/sessions, request processors, and so forth.

<a name="Metrics"></a>

### Metrics
All the metrics are included in `ServerMetrics.java`.
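
For a quick look at what a server actually exports, you can fetch its scrape endpoint once the Prometheus `MetricsProvider` described below is enabled (a sketch assuming the default port 7000 on localhost):

```bash
# the provider serves Prometheus text format under /metrics
curl -s http://localhost:7000/metrics | head
```
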
| 41 |
+
|
| 42 |
+
<a name="Prometheus"></a>
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
### Pre-requisites:
|
| 46 |
+
- Enable the `Prometheus MetricsProvider` by setting the following in `zoo.cfg`:
|
| 47 |
+
```conf
|
| 48 |
+
metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
- The port for Prometheus metrics can be configured using:
|
| 52 |
+
```conf
|
| 53 |
+
metricsProvider.httpPort=7000 # Default port is 7000
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
#### Enabling HTTPS for Prometheus Metrics:
|
| 57 |
+
|
| 58 |
+
ZooKeeper also supports SSL for Prometheus metrics, which provides secure data transmission. To enable this, configure an HTTPS port and set up SSL certificates as follows:
|
| 59 |
+
|
| 60 |
+
- Define the HTTPS port:
|
| 61 |
+
```conf
|
| 62 |
+
metricsProvider.httpsPort=4443
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
- Configure the SSL key store (holds the server’s private key and certificates):
|
| 66 |
+
```conf
|
| 67 |
+
metricsProvider.ssl.keyStore.location=/path/to/keystore.jks
|
| 68 |
+
metricsProvider.ssl.keyStore.password=your_keystore_password
|
| 69 |
+
metricsProvider.ssl.keyStore.type=jks # Default is JKS
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
- Configure the SSL trust store (used to verify client certificates):
|
| 73 |
+
```conf
|
| 74 |
+
metricsProvider.ssl.trustStore.location=/path/to/truststore.jks
|
| 75 |
+
metricsProvider.ssl.trustStore.password=your_truststore_password
|
| 76 |
+
metricsProvider.ssl.trustStore.type=jks # Default is JKS
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
- **Note**: You can enable both HTTP and HTTPS simultaneously by defining both ports:
|
| 80 |
+
```conf
|
| 81 |
+
metricsProvider.httpPort=7000
|
| 82 |
+
metricsProvider.httpsPort=4443
|
| 83 |
+
```
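
Once the provider is enabled and the server restarted, you can sanity-check the endpoint before wiring up Prometheus. A minimal sketch, assuming a server on localhost with the ports configured above (the `/metrics` path is the conventional Prometheus exposition endpoint):

```bash
# Fetch the exposed metrics in the Prometheus text format
curl http://localhost:7000/metrics | head

# For the HTTPS port, verify against your CA certificate
# (or use -k to skip verification in a test environment only)
curl --cacert /path/to/ca.pem https://localhost:4443/metrics | head
```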

### Prometheus
- Running a [Prometheus](https://prometheus.io/) monitoring service is the easiest way to ingest and record ZooKeeper's metrics.

- Install Prometheus:
    Go to the official [download page](https://prometheus.io/download/) and download the latest release.

- Set Prometheus's scraper to target the ZooKeeper cluster endpoints:

```bash
cat > /tmp/test-zk.yaml <<EOF
global:
  scrape_interval: 10s

scrape_configs:
  - job_name: test-zk
    static_configs:
    - targets: ['192.168.10.32:7000','192.168.10.33:7000','192.168.10.34:7000']
EOF
cat /tmp/test-zk.yaml
```

- Set up the Prometheus handler:

```bash
nohup /tmp/prometheus \
    --config.file /tmp/test-zk.yaml \
    --web.listen-address ":9090" \
    --storage.tsdb.path "/tmp/test-zk.data" >> /tmp/test-zk.log  2>&1 &
```

- Now Prometheus will scrape zk metrics every 10 seconds.
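
With scraping in place, individual metrics can be queried through the standard Prometheus HTTP API or its web UI. A small sketch, assuming Prometheus is listening on localhost:9090 as configured above (`avg_latency` and `fsynctime_sum` are metric names used in the alerting examples below):

```bash
# Instant query: current average request latency per scraped instance
curl -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=avg_latency'

# Instant query with a function: per-second fsync time over the last minute
curl -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=rate(fsynctime_sum[1m])'
```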

<a name="Alerting"></a>

### Alerting with Prometheus
- We recommend that you read the [Prometheus official alerting page](https://prometheus.io/docs/practices/alerting/) to explore
some principles of alerting.

- We recommend that you use [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/), which can
help users receive alerting emails or instant messages (by webhook) in a more convenient way.

- We provide an alerting example highlighting metrics that deserve special attention. Note: this is for your reference only,
and you need to adjust it according to your actual situation and resource environment.

Use `./promtool check rules rules/zk.yml` to check the correctness of the rules file. The contents of `rules/zk.yml`:

```yaml
groups:
- name: zk-alert-example
  rules:
  - alert: ZooKeeper server is down
    expr: up == 0
    for: 1m
    labels:
      severity: critical
    annotations:
      summary: "Instance {{ $labels.instance }} ZooKeeper server is down"
      description: "{{ $labels.instance }} of job {{$labels.job}} ZooKeeper server is down: [{{ $value }}]."

  - alert: create too many znodes
    expr: znode_count > 1000000
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} create too many znodes"
      description: "{{ $labels.instance }} of job {{$labels.job}} create too many znodes: [{{ $value }}]."

  - alert: create too many connections
    expr: num_alive_connections > 50 # suppose we use the default maxClientCnxns: 60
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} create too many connections"
      description: "{{ $labels.instance }} of job {{$labels.job}} create too many connections: [{{ $value }}]."

  - alert: znode total occupied memory is too big
    expr: approximate_data_size /1024 /1024 > 1 * 1024 # more than 1024 MB (1 GB)
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} znode total occupied memory is too big"
      description: "{{ $labels.instance }} of job {{$labels.job}} znode total occupied memory is too big: [{{ $value }}] MB."

  - alert: set too many watch
    expr: watch_count > 10000
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} set too many watch"
      description: "{{ $labels.instance }} of job {{$labels.job}} set too many watch: [{{ $value }}]."

  - alert: a leader election happens
    expr: increase(election_time_count[5m]) > 0
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} a leader election happens"
      description: "{{ $labels.instance }} of job {{$labels.job}} a leader election happens: [{{ $value }}]."

  - alert: open too many files
    expr: open_file_descriptor_count > 300
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} open too many files"
      description: "{{ $labels.instance }} of job {{$labels.job}} open too many files: [{{ $value }}]."

  - alert: fsync time is too long
    expr: rate(fsynctime_sum[1m]) > 100
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} fsync time is too long"
      description: "{{ $labels.instance }} of job {{$labels.job}} fsync time is too long: [{{ $value }}]."

  - alert: take snapshot time is too long
    expr: rate(snapshottime_sum[5m]) > 100
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} take snapshot time is too long"
      description: "{{ $labels.instance }} of job {{$labels.job}} take snapshot time is too long: [{{ $value }}]."

  - alert: avg latency is too high
    expr: avg_latency > 100
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Instance {{ $labels.instance }} avg latency is too high"
      description: "{{ $labels.instance }} of job {{$labels.job}} avg latency is too high: [{{ $value }}]."

  - alert: JvmMemoryFillingUp
    expr: jvm_memory_bytes_used / jvm_memory_bytes_max{area="heap"} > 0.8
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "JVM memory filling up (instance {{ $labels.instance }})"
      description: "JVM memory is filling up (> 80%)\n labels: {{ $labels }}  value = {{ $value }}\n"
```

<a name="Grafana"></a>

### Grafana
- Grafana has built-in Prometheus support; just add a Prometheus data source:

```bash
Name:   test-zk
Type:   Prometheus
Url:    http://localhost:9090
Access: proxy
```

- Then download and import the default ZooKeeper dashboard [template](https://grafana.com/grafana/dashboards/10465) and customize it.
- Users who have good improvements to contribute can ask for a Grafana dashboard account by writing an email to **dev@zookeeper.apache.org**.

<a name="influxdb"></a>

### InfluxDB

InfluxDB is an open source time series database that is often used to store metrics
from ZooKeeper. You can [download](https://portal.influxdata.com/downloads/) the
open source version or create a [free](https://cloud2.influxdata.com/signup)
account on InfluxDB Cloud. In either case, configure the [Apache ZooKeeper
Telegraf plugin](https://www.influxdata.com/integration/apache-zookeeper/) to
start collecting and storing metrics from your ZooKeeper clusters into your
InfluxDB instance. There is also an [Apache ZooKeeper InfluxDB
template](https://www.influxdata.com/influxdb-templates/zookeeper-monitor/) that
includes the Telegraf configurations and a dashboard to get you set up right
away.

<a name="JMX"></a>

## JMX
More details can be found [here](http://zookeeper.apache.org/doc/current/zookeeperJMX.html).
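
For remote JMX access with a tool such as JConsole, the standard JVM management properties can be passed to the server process. A sketch, assuming you start the server via `zkServer.sh` (which passes the `JVMFLAGS` environment variable to the JVM); the port choice and the disabled authentication/SSL below are illustrative and suitable for trusted test networks only:

```bash
# Expose a remote JMX endpoint on port 9999 (insecure; test environments only)
export JVMFLAGS="-Dcom.sun.management.jmxremote \
  -Dcom.sun.management.jmxremote.port=9999 \
  -Dcom.sun.management.jmxremote.authenticate=false \
  -Dcom.sun.management.jmxremote.ssl=false"
bin/zkServer.sh start
```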

<a name="four-letter-words"></a>

## Four letter words
More details can be found [here](http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_zkCommands).
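
As a quick illustration, four letter word commands are issued over the client port with any plain TCP tool. A sketch, assuming a server on localhost:2181 and that the commands have been allowed via `4lw.commands.whitelist` in `zoo.cfg`:

```bash
# Check liveness; a healthy server answers "imok"
echo ruok | nc localhost 2181

# Dump server statistics, including latency and connection counts
echo stat | nc localhost 2181
```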

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperOver.md
<!--
Copyright 2002-2004 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper

* [ZooKeeper: A Distributed Coordination Service for Distributed Applications](#ch_DesignOverview)
    * [Design Goals](#sc_designGoals)
    * [Data model and the hierarchical namespace](#sc_dataModelNameSpace)
    * [Nodes and ephemeral nodes](#Nodes+and+ephemeral+nodes)
    * [Conditional updates and watches](#Conditional+updates+and+watches)
    * [Guarantees](#Guarantees)
    * [Simple API](#Simple+API)
    * [Implementation](#Implementation)
    * [Uses](#Uses)
    * [Performance](#Performance)
    * [Reliability](#Reliability)
    * [The ZooKeeper Project](#The+ZooKeeper+Project)

<a name="ch_DesignOverview"></a>

## ZooKeeper: A Distributed Coordination Service for Distributed Applications

ZooKeeper is a distributed, open-source coordination service for
distributed applications. It exposes a simple set of primitives that
distributed applications can build upon to implement higher level services
for synchronization, configuration maintenance, and groups and naming. It
is designed to be easy to program to, and uses a data model styled after
the familiar directory tree structure of file systems. It runs in Java and
has bindings for both Java and C.

Coordination services are notoriously hard to get right. They are
especially prone to errors such as race conditions and deadlock. The
motivation behind ZooKeeper is to relieve distributed applications of the
responsibility of implementing coordination services from scratch.

<a name="sc_designGoals"></a>

### Design Goals

**ZooKeeper is simple.** ZooKeeper
allows distributed processes to coordinate with each other through a
shared hierarchical namespace which is organized similarly to a standard
file system. The namespace consists of data registers - called znodes,
in ZooKeeper parlance - and these are similar to files and directories.
Unlike a typical file system, which is designed for storage, ZooKeeper
data is kept in-memory, which means ZooKeeper can achieve high
throughput and low latency numbers.

The ZooKeeper implementation puts a premium on high performance,
highly available, strictly ordered access. The performance aspects of
ZooKeeper mean it can be used in large, distributed systems. The
reliability aspects keep it from being a single point of failure. The
strict ordering means that sophisticated synchronization primitives can
be implemented at the client.

**ZooKeeper is replicated.** Like the
distributed processes it coordinates, ZooKeeper itself is intended to be
replicated over a set of hosts called an ensemble.

![ZooKeeper Service](images/zkservice.jpg)

The servers that make up the ZooKeeper service must all know about
each other. They maintain an in-memory image of state, along with
transaction logs and snapshots in a persistent store. As long as a
majority of the servers are available, the ZooKeeper service will be
available.

Clients connect to a single ZooKeeper server. The client maintains
a TCP connection through which it sends requests, gets responses, gets
watch events, and sends heartbeats. If the TCP connection to the server
breaks, the client will connect to a different server.

**ZooKeeper is ordered.** ZooKeeper
stamps each update with a number that reflects the order of all
ZooKeeper transactions. Subsequent operations can use the order to
implement higher-level abstractions, such as synchronization
primitives.

**ZooKeeper is fast.** It is
especially fast in "read-dominant" workloads. ZooKeeper applications run
on thousands of machines, and it performs best where reads are more
common than writes, at ratios of around 10:1.

<a name="sc_dataModelNameSpace"></a>

### Data model and the hierarchical namespace

The namespace provided by ZooKeeper is much like that of a
standard file system. A name is a sequence of path elements separated by
a slash (/). Every node in ZooKeeper's namespace is identified by a
path.

#### ZooKeeper's Hierarchical Namespace

![ZooKeeper's Hierarchical Namespace](images/zknamespace.jpg)

<a name="Nodes+and+ephemeral+nodes"></a>

### Nodes and ephemeral nodes

Unlike standard file systems, each node in a ZooKeeper
namespace can have data associated with it as well as children. It is
like having a file system that allows a file to also be a directory.
(ZooKeeper was designed to store coordination data: status information,
configuration, location information, etc., so the data stored at each
node is usually small, in the byte to kilobyte range.) We use the term
_znode_ to make it clear that we are talking about
ZooKeeper data nodes.

Znodes maintain a stat structure that includes version numbers for
data changes, ACL changes, and timestamps, to allow cache validations
and coordinated updates. Each time a znode's data changes, the version
number increases. For instance, whenever a client retrieves data it also
receives the version of the data.

The data stored at each znode in a namespace is read and written
atomically. Reads get all the data bytes associated with a znode and a
write replaces all the data. Each node has an Access Control List (ACL)
that restricts who can do what.

ZooKeeper also has the notion of ephemeral nodes. These znodes
exist as long as the session that created the znode is active. When the
session ends the znode is deleted.

<a name="Conditional+updates+and+watches"></a>

### Conditional updates and watches

ZooKeeper supports the concept of _watches_.
Clients can set a watch on a znode. A watch will be triggered and
removed when the znode changes. When a watch is triggered, the client
receives a packet saying that the znode has changed. If the
connection between the client and one of the ZooKeeper servers is
broken, the client will receive a local notification.

**New in 3.6.0:** Clients can also set
permanent, recursive watches on a znode that are not removed when triggered
and that trigger for changes on the registered znode as well as any children
znodes recursively.
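
For illustration, here is a small sketch in Java of both watch styles: a classic one-shot watch set via `exists`, and a persistent recursive watch set via the `addWatch` API added in 3.6.0. The connection string and paths are placeholder values:

```java
import org.apache.zookeeper.AddWatchMode;
import org.apache.zookeeper.ZooKeeper;

public class WatchExample {
    public static void main(String[] args) throws Exception {
        // The watcher passed to the constructor receives connection events
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 3000,
                event -> System.out.println("session event: " + event));

        // One-shot watch: fires once on creation/deletion/data change of /app
        zk.exists("/app", event ->
                System.out.println("one-shot watch fired: " + event));

        // Persistent recursive watch (3.6.0+): fires repeatedly for /app and
        // everything beneath it, and is not removed when triggered
        zk.addWatch("/app",
                event -> System.out.println("recursive watch fired: " + event),
                AddWatchMode.PERSISTENT_RECURSIVE);

        Thread.sleep(Long.MAX_VALUE); // keep the session alive for the demo
    }
}
```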

<a name="Guarantees"></a>

### Guarantees

ZooKeeper is very fast and very simple. Since its goal, though, is
to be a basis for the construction of more complicated services, such as
synchronization, it provides a set of guarantees. These are:

* Sequential Consistency - Updates from a client will be applied
in the order that they were sent.
* Atomicity - Updates either succeed or fail. No partial
results.
* Single System Image - A client will see the same view of the
service regardless of the server that it connects to. i.e., a
client will never see an older view of the system even if the
client fails over to a different server with the same session.
* Reliability - Once an update has been applied, it will persist
from that time forward until a client overwrites the update.
* Timeliness - The client's view of the system is guaranteed to
be up-to-date within a certain time bound.

<a name="Simple+API"></a>

### Simple API

One of the design goals of ZooKeeper is providing a very simple
programming interface. As a result, it supports only these
operations (a short Java sketch of the basic calls follows this list):

* *create* :
creates a node at a location in the tree

* *delete* :
deletes a node

* *exists* :
tests if a node exists at a location

* *get data* :
reads the data from a node

* *set data* :
writes data to a node

* *get children* :
retrieves a list of children of a node

* *sync* :
waits for data to be propagated
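
A minimal sketch of these operations through the Java binding; the connection string, path, and payload are placeholder values:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class BasicOps {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 3000, event -> {});

        // create: a persistent node with a small payload and an open ACL
        zk.create("/demo", "hello".getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

        // exists / get data: the returned Stat carries the version numbers
        Stat stat = zk.exists("/demo", false);
        byte[] data = zk.getData("/demo", false, stat);
        System.out.println(new String(data) + " @ version " + stat.getVersion());

        // set data: conditional on the version read above
        zk.setData("/demo", "world".getBytes(), stat.getVersion());

        // get children, then delete (version -1 skips the version check)
        System.out.println(zk.getChildren("/", false));
        zk.delete("/demo", -1);
        zk.close();
    }
}
```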

<a name="Implementation"></a>

### Implementation

[ZooKeeper Components](#zkComponents) shows the high-level components
of the ZooKeeper service. With the exception of the request processor,
each of
the servers that make up the ZooKeeper service replicates its own copy
of each of the components.

<a name="zkComponents"></a>

![ZooKeeper Components](images/zkcomponents.jpg)

The replicated database is an in-memory database containing the
entire data tree. Updates are logged to disk for recoverability, and
writes are serialized to disk before they are applied to the in-memory
database.

Every ZooKeeper server services clients. Clients connect to
exactly one server to submit requests. Read requests are serviced from
the local replica of each server database. Requests that change the
state of the service, write requests, are processed by an agreement
protocol.

As part of the agreement protocol all write requests from clients
are forwarded to a single server, called the
_leader_. The rest of the ZooKeeper servers, called
_followers_, receive message proposals from the
leader and agree upon message delivery. The messaging layer takes care
of replacing leaders on failures and syncing followers with
leaders.

ZooKeeper uses a custom atomic messaging protocol. Since the
messaging layer is atomic, ZooKeeper can guarantee that the local
replicas never diverge. When the leader receives a write request, it
calculates what the state of the system will be when the write is
applied and transforms this into a transaction that captures this new
state.

<a name="Uses"></a>

### Uses

The programming interface to ZooKeeper is deliberately simple.
With it, however, you can implement higher order operations, such as
synchronization primitives, group membership, ownership, etc.

<a name="Performance"></a>

### Performance

ZooKeeper is designed to be highly performant. But is it? The
results from ZooKeeper's development team at Yahoo! Research indicate
that it is. (See [ZooKeeper Throughput as the Read-Write Ratio Varies](#zkPerfRW).) It is especially high
performance in applications where reads outnumber writes, since writes
involve synchronizing the state of all servers. (Reads outnumbering
writes is typically the case for a coordination service.)

<a name="zkPerfRW"></a>

![ZooKeeper Throughput as the Read-Write Ratio Varies](images/zkperfRW-3.2.jpg)

The figure [ZooKeeper Throughput as the Read-Write Ratio Varies](#zkPerfRW) is a throughput
graph of ZooKeeper release 3.2 running on servers with dual 2GHz
Xeon and two SATA 15K RPM drives. One drive was used as a
dedicated ZooKeeper log device. The snapshots were written to
the OS drive. Write requests were 1K writes and the reads were
1K reads. "Servers" indicate the size of the ZooKeeper
ensemble, the number of servers that make up the
service. Approximately 30 other servers were used to simulate
the clients. The ZooKeeper ensemble was configured such that
leaders do not allow connections from clients.

######Note
>In version 3.2 r/w performance improved by ~2x compared to
the [previous 3.1 release](http://zookeeper.apache.org/docs/r3.1.1/zookeeperOver.html#Performance).

Benchmarks also indicate that it is reliable.
[Reliability in the Presence of Errors](#zkPerfReliability) shows how a deployment responds to
various failures. The events marked in the figure are the following:

1. Failure and recovery of a follower
1. Failure and recovery of a different follower
1. Failure of the leader
1. Failure and recovery of two followers
1. Failure of another leader

<a name="Reliability"></a>

### Reliability

To show the behavior of the system over time as
failures are injected we ran a ZooKeeper service made up of
7 machines. We ran the same saturation benchmark as before,
but this time we kept the write percentage at a constant
30%, which is a conservative ratio of our expected
workloads.

<a name="zkPerfReliability"></a>

![Reliability in the Presence of Errors](images/zkperfreliability.jpg)

There are a few important observations from this graph. First, if
followers fail and recover quickly, then ZooKeeper is able to sustain a
high throughput despite the failure. Second, and maybe more importantly, the
leader election algorithm allows for the system to recover fast enough
to prevent throughput from dropping substantially. In our observations,
ZooKeeper takes less than 200ms to elect a new leader. Third, as
followers recover, ZooKeeper is able to raise throughput again once they
start processing requests.

<a name="The+ZooKeeper+Project"></a>

### The ZooKeeper Project

ZooKeeper has been
[successfully used](https://cwiki.apache.org/confluence/display/ZOOKEEPER/PoweredBy)
in many industrial applications. It is used at Yahoo! as the
coordination and failure recovery service for Yahoo! Message
Broker, which is a highly scalable publish-subscribe system
managing thousands of topics for replication and data
delivery. It is used by the Fetching Service for the Yahoo!
crawler, where it also manages failure recovery. A number of
Yahoo! advertising systems also use ZooKeeper to implement
reliable services.

All users and developers are encouraged to join the
community and contribute their expertise. See the
[ZooKeeper Project on Apache](http://zookeeper.apache.org/)
for more information.

local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperProgrammers.md
<!--
Copyright 2002-2004 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper Programmer's Guide

### Developing Distributed Applications that use ZooKeeper

* [Introduction](#_introduction)
* [The ZooKeeper Data Model](#ch_zkDataModel)
    * [ZNodes](#sc_zkDataModel_znodes)
        * [Watches](#sc_zkDataMode_watches)
        * [Data Access](#Data+Access)
        * [Ephemeral Nodes](#Ephemeral+Nodes)
        * [Sequence Nodes -- Unique Naming](#Sequence+Nodes+--+Unique+Naming)
        * [Container Nodes](#Container+Nodes)
        * [TTL Nodes](#TTL+Nodes)
    * [Time in ZooKeeper](#sc_timeInZk)
    * [ZooKeeper Stat Structure](#sc_zkStatStructure)
* [ZooKeeper Sessions](#ch_zkSessions)
* [ZooKeeper Watches](#ch_zkWatches)
    * [Semantics of Watches](#sc_WatchSemantics)
    * [Persistent, Recursive Watches](#sc_WatchPersistentRecursive)
    * [Remove Watches](#sc_WatchRemoval)
    * [What ZooKeeper Guarantees about Watches](#sc_WatchGuarantees)
    * [Things to Remember about Watches](#sc_WatchRememberThese)
* [ZooKeeper access control using ACLs](#sc_ZooKeeperAccessControl)
    * [ACL Permissions](#sc_ACLPermissions)
        * [Builtin ACL Schemes](#sc_BuiltinACLSchemes)
        * [ZooKeeper C client API](#ZooKeeper+C+client+API)
* [Pluggable ZooKeeper authentication](#sc_ZooKeeperPluggableAuthentication)
* [Consistency Guarantees](#ch_zkGuarantees)
* [Bindings](#ch_bindings)
    * [Java Binding](#Java+Binding)
        * [Client Configuration Parameters](#sc_java_client_configuration)
    * [C Binding](#C+Binding)
        * [Installation](#Installation)
        * [Building Your Own C Client](#Building+Your+Own+C+Client)
* [Building Blocks: A Guide to ZooKeeper Operations](#ch_guideToZkOperations)
    * [Handling Errors](#sc_errorsZk)
    * [Connecting to ZooKeeper](#sc_connectingToZk)
* [Gotchas: Common Problems and Troubleshooting](#ch_gotchas)

<a name="_introduction"></a>

## Introduction

This document is a guide for developers wishing to create
distributed applications that take advantage of ZooKeeper's coordination
services. It contains conceptual and practical information.

The first four sections of this guide present higher level
discussions of various ZooKeeper concepts. These are necessary both for an
understanding of how ZooKeeper works as well as how to work with it. It does
not contain source code, but it does assume a familiarity with the
problems associated with distributed computing. The sections in this first
group are:

* [The ZooKeeper Data Model](#ch_zkDataModel)
* [ZooKeeper Sessions](#ch_zkSessions)
* [ZooKeeper Watches](#ch_zkWatches)
* [Consistency Guarantees](#ch_zkGuarantees)

The next four sections provide practical programming
information. These are:

* [Building Blocks: A Guide to ZooKeeper Operations](#ch_guideToZkOperations)
* [Bindings](#ch_bindings)
* [Gotchas: Common Problems and Troubleshooting](#ch_gotchas)

The guide concludes with an [appendix](#apx_linksToOtherInfo) containing links to other
useful, ZooKeeper-related information.

Most of the information in this document is written to be accessible as
stand-alone reference material. However, before starting your first
ZooKeeper application, you should probably at least read the chapters on
the [ZooKeeper Data Model](#ch_zkDataModel) and [ZooKeeper Basic Operations](#ch_guideToZkOperations).

<a name="ch_zkDataModel"></a>

## The ZooKeeper Data Model

ZooKeeper has a hierarchical namespace, much like a distributed file
system. The only difference is that each node in the namespace can have
data associated with it as well as children. It is like having a file
system that allows a file to also be a directory. Paths to nodes are
always expressed as canonical, absolute, slash-separated paths; there are
no relative references. Any Unicode character can be used in a path subject
to the following constraints:

* The null character (\\u0000) cannot be part of a path name. (This
causes problems with the C binding.)
* The following characters can't be used because they don't
display well, or render in confusing ways: \\u0001 - \\u001F and \\u007F
- \\u009F.
* The following characters are not allowed: \\uD800 - \\uF8FF,
\\uFFF0 - \\uFFFF.
* The "." character can be used as part of another name, but "."
and ".." cannot alone be used to indicate a node along a path,
because ZooKeeper doesn't use relative paths. The following would be
invalid: "/a/b/./c" or "/a/b/../c".
* The token "zookeeper" is reserved.

<a name="sc_zkDataModel_znodes"></a>

### ZNodes

Every node in a ZooKeeper tree is referred to as a
_znode_. Znodes maintain a stat structure that
includes version numbers for data changes and ACL changes. The stat
structure also has timestamps. The version number, together with the
timestamp, allows ZooKeeper to validate the cache and to coordinate
updates. Each time a znode's data changes, the version number increases.
For instance, whenever a client retrieves data, it also receives the
version of the data. And when a client performs an update or a delete,
it must supply the version of the data of the znode it is changing. If
the version it supplies doesn't match the actual version of the data,
the update will fail. (This behavior can be overridden.)

######Note

>In distributed application engineering, the word
_node_ can refer to a generic host machine, a
server, a member of an ensemble, a client process, etc. In the ZooKeeper
documentation, _znodes_ refer to the data nodes.
_Servers_ refers to machines that make up the
ZooKeeper service; _quorum peers_ refer to the
servers that make up an ensemble; _client_ refers to any host or process
which uses a ZooKeeper service.

Znodes are the main entity that a programmer accesses. They have
several characteristics that are worth mentioning here.

<a name="sc_zkDataMode_watches"></a>

#### Watches

Clients can set watches on znodes. Changes to that znode trigger
the watch and then clear the watch. When a watch triggers, ZooKeeper
sends the client a notification. More information about watches can be
found in the section
[ZooKeeper Watches](#ch_zkWatches).

<a name="Data+Access"></a>

#### Data Access

The data stored at each znode in a namespace is read and written
atomically. Reads get all the data bytes associated with a znode and a
write replaces all the data. Each node has an Access Control List
(ACL) that restricts who can do what.

ZooKeeper was not designed to be a general database or large
object store. Instead, it manages coordination data. This data can
come in the form of configuration, status information, rendezvous, etc.
A common property of the various forms of coordination data is that
they are relatively small: measured in kilobytes.
The ZooKeeper client and the server implementations have sanity checks
to ensure that znodes have less than 1M of data, but the data should
be much less than that on average. Operating on relatively large data
sizes will cause some operations to take much more time than others and
will affect the latencies of some operations because of the extra time
needed to move more data over the network and onto storage media. If
large data storage is needed, the usual pattern of dealing with such
data is to store it on a bulk storage system, such as NFS or HDFS, and
store pointers to the storage locations in ZooKeeper.

<a name="Ephemeral+Nodes"></a>

#### Ephemeral Nodes

ZooKeeper also has the notion of ephemeral nodes. These znodes
exist as long as the session that created the znode is active. When
the session ends the znode is deleted. Because of this behavior
ephemeral znodes are not allowed to have children. The list of ephemerals
for the session can be retrieved using the **getEphemerals()** API.

##### getEphemerals()
Retrieves the list of ephemeral nodes created by the session for the
given path. If the path is empty, it will list all the ephemeral nodes
for the session.
**Use Case** - A sample use case: the ephemeral nodes created by the session
need to be collected for a duplicate-data-entry check, but the nodes were
created sequentially, so their names are not known in advance. In that case,
the getEphemerals() API can be used to get the list of nodes for the session.
This is a typical use case for service discovery. (A short Java sketch follows.)
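
A minimal sketch in Java, assuming a 3.6.0+ client; the connection string, parent path, and payload are placeholders, and the parent node is assumed to already exist:

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralExample {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 3000, event -> {});

        // Register this process under a sequential ephemeral name; the
        // server assigns the final name, so we don't know it up front
        zk.create("/services/worker-", "host:port".getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);

        // Recover every ephemeral this session created under /services
        List<String> mine = zk.getEphemerals("/services");
        System.out.println("ephemerals owned by this session: " + mine);
    }
}
```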

<a name="Sequence+Nodes+--+Unique+Naming"></a>

#### Sequence Nodes -- Unique Naming

When creating a znode you can also request that
ZooKeeper append a monotonically increasing counter to the end
of the path. This counter is unique to the parent znode. The
counter has a format of %010d -- that is, 10 digits with 0
(zero) padding (the counter is formatted in this way to
simplify sorting), i.e. "<path>0000000001". See
[Queue Recipe](recipes.html#sc_recipes_Queues) for an example use of this feature. Note: the
counter used to store the next sequence number is a signed int
(4 bytes) maintained by the parent node; the counter will
overflow when incremented beyond 2147483647 (resulting in a
name "<path>-2147483648"). A short sketch of sequential creation follows.

<a name="Container+Nodes"></a>

#### Container Nodes

**Added in 3.5.3**

ZooKeeper has the notion of container znodes. Container znodes are
special purpose znodes useful for recipes such as leader election, locks, etc.
When the last child of a container is deleted, the container becomes
a candidate to be deleted by the server at some point in the future.

Given this property, you should be prepared to get
KeeperException.NoNodeException when creating children inside of
container znodes, i.e. when creating child znodes inside of container znodes,
always check for KeeperException.NoNodeException and recreate the container
znode when it occurs. The retry pattern is sketched below.
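
A minimal sketch of that retry pattern in Java, with placeholder paths:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ContainerExample {
    // Create a child under a container, recreating the container if the
    // server reaped it in the meantime
    static void createChild(ZooKeeper zk, String container, String child)
            throws KeeperException, InterruptedException {
        while (true) {
            try {
                zk.create(container + "/" + child, new byte[0],
                        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                return;
            } catch (KeeperException.NoNodeException e) {
                // Container was deleted; recreate it and retry
                try {
                    zk.create(container, new byte[0],
                            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.CONTAINER);
                } catch (KeeperException.NodeExistsException ignore) {
                    // Another client recreated it first; that's fine
                }
            }
        }
    }
}
```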

<a name="TTL+Nodes"></a>

#### TTL Nodes

**Added in 3.5.3**

When creating PERSISTENT or PERSISTENT_SEQUENTIAL znodes,
you can optionally set a TTL in milliseconds for the znode. If the znode
is not modified within the TTL and has no children, it will become a candidate
to be deleted by the server at some point in the future.

Note: TTL Nodes must be enabled via System property as they
are disabled by default. See the [Administrator's Guide](zookeeperAdmin.html#sc_configuration) for
details. If you attempt to create TTL Nodes without the
proper System property set, the server will throw
KeeperException.UnimplementedException.
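
For illustration, the Java binding exposes TTL creation through a `create` overload that takes the TTL as a trailing argument. A sketch, assuming the server was started with `-Dzookeeper.extendedTypesEnabled=true` (the enabling property described in the Administrator's Guide) and placeholder paths:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class TtlExample {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 3000, event -> {});

        // Persistent node that becomes a deletion candidate if untouched
        // (and childless) for one hour
        Stat stat = new Stat();
        zk.create("/cache/entry", "v1".getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_WITH_TTL,
                stat, 60 * 60 * 1000L);
    }
}
```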

<a name="sc_timeInZk"></a>

### Time in ZooKeeper

ZooKeeper tracks time in multiple ways:

* **Zxid**
Every change to the ZooKeeper state receives a stamp in the
form of a _zxid_ (ZooKeeper Transaction Id).
This exposes the total ordering of all changes to ZooKeeper. Each
change will have a unique zxid and if zxid1 is smaller than zxid2
then zxid1 happened before zxid2.
* **Version numbers**
Every change to a node will cause an increase to one of the
version numbers of that node. The three version numbers are version
(number of changes to the data of a znode), cversion (number of
changes to the children of a znode), and aversion (number of changes
to the ACL of a znode).
* **Ticks**
When using multi-server ZooKeeper, servers use ticks to define
timing of events such as status uploads, session timeouts,
connection timeouts between peers, etc. The tick time is only
indirectly exposed through the minimum session timeout (2 times the
tick time); if a client requests a session timeout less than the
minimum session timeout, the server will tell the client that the
session timeout is actually the minimum session timeout.
* **Real time**
ZooKeeper doesn't use real time, or clock time, at all except
to put timestamps into the stat structure on znode creation and
znode modification.

<a name="sc_zkStatStructure"></a>

### ZooKeeper Stat Structure

The Stat structure for each znode in ZooKeeper is made up of the
following fields (a sketch of reading these fields through the Java binding follows this list):

* **czxid**
The zxid of the change that caused this znode to be
created.
* **mzxid**
The zxid of the change that last modified this znode.
* **pzxid**
The zxid of the change that last modified children of this znode.
* **ctime**
The time in milliseconds from epoch when this znode was
created.
* **mtime**
The time in milliseconds from epoch when this znode was last
modified.
* **version**
The number of changes to the data of this znode.
* **cversion**
The number of changes to the children of this znode.
* **aversion**
The number of changes to the ACL of this znode.
* **ephemeralOwner**
The session id of the owner of this znode if the znode is an
ephemeral node. If it is not an ephemeral node, it will be
zero.
* **dataLength**
The length of the data field of this znode.
* **numChildren**
The number of children of this znode.
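
These fields are exposed as getters on `org.apache.zookeeper.data.Stat`. A small sketch with a placeholder path:

```java
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class StatExample {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 3000, event -> {});

        // exists() returns the Stat without fetching the node's data
        Stat stat = zk.exists("/demo", false);
        if (stat != null) {
            System.out.println("czxid=" + stat.getCzxid()
                    + " mzxid=" + stat.getMzxid()
                    + " version=" + stat.getVersion()
                    + " cversion=" + stat.getCversion()
                    + " ephemeralOwner=" + stat.getEphemeralOwner()
                    + " dataLength=" + stat.getDataLength()
                    + " numChildren=" + stat.getNumChildren());
        }
    }
}
```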

<a name="ch_zkSessions"></a>

## ZooKeeper Sessions

A ZooKeeper client establishes a session with the ZooKeeper
service by creating a handle to the service using a language
binding. Once created, the handle starts off in the CONNECTING state
and the client library tries to connect to one of the servers that
make up the ZooKeeper service, at which point it switches to the
CONNECTED state. During normal operation the client handle will be in one of these
two states. If an unrecoverable error occurs, such as session
expiration or authentication failure, or if the application explicitly
closes the handle, the handle will move to the CLOSED state.
The following figure shows the possible state transitions of a
ZooKeeper client:

![State transitions](images/state_dia.jpg)

To create a client session the application code must provide
a connection string containing a comma separated list of host:port pairs,
each corresponding to a ZooKeeper server (e.g. "127.0.0.1:4545" or
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"). The ZooKeeper
client library will pick an arbitrary server and try to connect to
it. If this connection fails, or if the client becomes
disconnected from the server for any reason, the client will
automatically try the next server in the list, until a connection
is (re-)established.
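
A sketch of session creation in Java; the watcher here just reports connection-state events, and the connection string is a placeholder:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;

public class SessionExample {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        // The handle starts CONNECTING; the watcher is notified when it
        // reaches CONNECTED (SyncConnected) or loses the connection
        ZooKeeper zk = new ZooKeeper(
                "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002", 15000,
                event -> {
                    if (event.getState() == KeeperState.SyncConnected) {
                        connected.countDown();
                    }
                });

        connected.await();
        System.out.println("session id: 0x"
                + Long.toHexString(zk.getSessionId()));
    }
}
```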
|
| 345 |
+
|
| 346 |
+
**Added in 3.2.0**: An
|
| 347 |
+
optional "chroot" suffix may also be appended to the connection
|
| 348 |
+
string. This will run the client commands while interpreting all
|
| 349 |
+
paths relative to this root (similar to the unix chroot
|
| 350 |
+
command). If used the example would look like:
|
| 351 |
+
"127.0.0.1:4545/app/a" or
|
| 352 |
+
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the
|
| 353 |
+
client would be rooted at "/app/a" and all paths would be relative
|
| 354 |
+
to this root - i.e. getting/setting/etc. "/foo/bar" would result
|
| 355 |
+
in operations being run on "/app/a/foo/bar" (from the server
|
| 356 |
+
perspective). This feature is particularly useful in multi-tenant
|
| 357 |
+
environments where each user of a particular ZooKeeper service
|
| 358 |
+
could be rooted differently. This makes re-use much simpler as
|
| 359 |
+
each user can code his/her application as if it were rooted at
|
| 360 |
+
"/", while actual location (say /app/a) could be determined at
|
| 361 |
+
deployment time.
|
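As a concrete sketch in Java, assuming the illustrative addresses and paths above:

```
import org.apache.zookeeper.ZooKeeper;

public class ChrootExample {
    public static void main(String[] args) throws Exception {
        // The "/app/a" suffix roots this handle at /app/a on the server side.
        ZooKeeper zk = new ZooKeeper(
                "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a",
                30000,                          // requested session timeout (ms)
                event -> { /* default watcher */ });
        // From the client's perspective this reads /foo/bar; the server
        // actually serves /app/a/foo/bar (the node is assumed to exist).
        byte[] data = zk.getData("/foo/bar", false, null);
        System.out.println(new String(data));
        zk.close();
    }
}
```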
| 362 |
+
|
| 363 |
+
When a client gets a handle to the ZooKeeper service,
|
| 364 |
+
ZooKeeper creates a ZooKeeper session, represented as a 64-bit
|
| 365 |
+
number, that it assigns to the client. If the client connects to a
|
| 366 |
+
different ZooKeeper server, it will send the session id as a part
|
| 367 |
+
of the connection handshake. As a security measure, the server
|
| 368 |
+
creates a password for the session id that any ZooKeeper server
|
| 369 |
+
can validate. The password is sent to the client with the session
|
| 370 |
+
id when the client establishes the session. The client sends this
|
| 371 |
+
password with the session id whenever it reestablishes the session
|
| 372 |
+
with a new server.
|
| 373 |
+
|
| 374 |
+
One of the parameters to the ZooKeeper client library call
|
| 375 |
+
to create a ZooKeeper session is the session timeout in
|
| 376 |
+
milliseconds. The client sends a requested timeout, the server
|
| 377 |
+
responds with the timeout that it can give the client. The current
|
| 378 |
+
implementation requires that the timeout be a minimum of 2 times
|
| 379 |
+
the tickTime (as set in the server configuration) and a maximum of
|
| 380 |
+
20 times the tickTime. The ZooKeeper client API allows access to
|
| 381 |
+
the negotiated timeout.
|
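A minimal Java sketch of the negotiation (the server address and tickTime are assumptions, and the negotiated value is only meaningful once the handle is connected):

```
import org.apache.zookeeper.ZooKeeper;

public class TimeoutExample {
    public static void main(String[] args) throws Exception {
        // Ask for 120 s; with a server tickTime of 2000 ms the grant is
        // clamped to [2 * tickTime, 20 * tickTime] = [4 s, 40 s].
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 120000, event -> {});
        Thread.sleep(1000);   // crude wait for the CONNECTED state
        System.out.println("negotiated: " + zk.getSessionTimeout() + " ms");
        zk.close();
    }
}
```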
| 382 |
+
|
| 383 |
+
When a client (session) becomes partitioned from the ZK
|
| 384 |
+
serving cluster it will begin searching the list of servers that
|
| 385 |
+
were specified during session creation. Eventually, when
|
| 386 |
+
connectivity between the client and at least one of the servers is
|
| 387 |
+
re-established, the session will either again transition to the
|
| 388 |
+
"connected" state (if reconnected within the session timeout
|
| 389 |
+
value) or it will transition to the "expired" state (if
|
| 390 |
+
reconnected after the session timeout). It is not advisable to
|
| 391 |
+
create a new session object (a new ZooKeeper.class or zookeeper
|
| 392 |
+
handle in the C binding) for disconnection. The ZK client library
|
| 393 |
+
will handle reconnect for you. In particular we have heuristics
|
| 394 |
+
built into the client library to handle things like "herd effect",
|
| 395 |
+
etc... Only create a new session when you are notified of session
|
| 396 |
+
expiration (mandatory).
|
| 397 |
+
|
| 398 |
+
Session expiration is managed by the ZooKeeper cluster
|
| 399 |
+
itself, not by the client. When the ZK client establishes a
|
| 400 |
+
session with the cluster it provides a "timeout" value detailed
|
| 401 |
+
above. This value is used by the cluster to determine when the
|
| 402 |
+
client's session expires. Expirations happens when the cluster
|
| 403 |
+
does not hear from the client within the specified session timeout
|
| 404 |
+
period (i.e. no heartbeat). At session expiration the cluster will
|
| 405 |
+
delete any/all ephemeral nodes owned by that session and
|
| 406 |
+
immediately notify any/all connected clients of the change (anyone
|
| 407 |
+
watching those znodes). At this point the client of the expired
|
| 408 |
+
session is still disconnected from the cluster, it will not be
|
| 409 |
+
notified of the session expiration until/unless it is able to
|
| 410 |
+
re-establish a connection to the cluster. The client will stay in
|
| 411 |
+
disconnected state until the TCP connection is re-established with
|
| 412 |
+
the cluster, at which point the watcher of the expired session
|
| 413 |
+
will receive the "session expired" notification.
|
| 414 |
+
|
| 415 |
+
Example state transitions for an expired session as seen by
|
| 416 |
+
the expired session's watcher:
|
| 417 |
+
|
| 418 |
+
1. 'connected' : session is established and client
|
| 419 |
+
is communicating with cluster (client/server communication is
|
| 420 |
+
operating properly)
|
| 421 |
+
1. .... client is partitioned from the
|
| 422 |
+
cluster
|
| 423 |
+
1. 'disconnected' : client has lost connectivity
|
| 424 |
+
with the cluster
|
| 425 |
+
1. .... time elapses, after 'timeout' period the
|
| 426 |
+
cluster expires the session, nothing is seen by client as it is
|
| 427 |
+
disconnected from cluster
|
| 428 |
+
1. .... time elapses, the client regains network
|
| 429 |
+
level connectivity with the cluster
|
| 430 |
+
1. 'expired' : eventually the client reconnects to
|
| 431 |
+
the cluster, it is then notified of the
|
| 432 |
+
expiration
|
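A sketch of a default watcher that follows this advice, reacting only to the 'expired' notification (the handle-recreation hook is a hypothetical application callback):

```
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.KeeperState;

public class SessionWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
        if (event.getState() == KeeperState.Expired) {
            // Only here should a brand-new ZooKeeper handle be created.
            recreateHandle();
        }
        // Disconnected / SyncConnected: do nothing; the client library
        // handles the reconnect on its own.
    }

    // Hypothetical hook: dispose of the old handle, build a new one, and
    // recreate any ephemeral nodes and watches the application needs.
    private void recreateHandle() { /* application specific */ }
}
```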
| 433 |
+
|
| 434 |
+
Another parameter to the ZooKeeper session establishment
|
| 435 |
+
call is the default watcher. Watchers are notified when any state
|
| 436 |
+
change occurs in the client. For example if the client loses
|
| 437 |
+
connectivity to the server the client will be notified, or if the
|
| 438 |
+
client's session expires, etc... This watcher should consider the
|
| 439 |
+
initial state to be disconnected (i.e. before any state changes
|
| 440 |
+
events are sent to the watcher by the client lib). In the case of
|
| 441 |
+
a new connection, the first event sent to the watcher is typically
|
| 442 |
+
the session connection event.
|
| 443 |
+
|
| 444 |
+
The session is kept alive by requests sent by the client. If
|
| 445 |
+
the session is idle for a period of time that would timeout the
|
| 446 |
+
session, the client will send a PING request to keep the session
|
| 447 |
+
alive. This PING request not only allows the ZooKeeper server to
|
| 448 |
+
know that the client is still active, but it also allows the
|
| 449 |
+
client to verify that its connection to the ZooKeeper server is
|
| 450 |
+
still active. The timing of the PING is conservative enough to
|
| 451 |
+
ensure reasonable time to detect a dead connection and reconnect
|
| 452 |
+
to a new server.
|
| 453 |
+
|
| 454 |
+
Once a connection to the server is successfully established
|
| 455 |
+
(connected) there are basically two cases where the client lib generates
|
| 456 |
+
connectionloss (the result code in the C binding, an exception in Java -- see
|
| 457 |
+
the API documentation for binding specific details) when either a synchronous or
|
| 458 |
+
asynchronous operation is performed and one of the following holds:
|
| 459 |
+
|
| 460 |
+
1. The application calls an operation on a session that is no
|
| 461 |
+
longer alive/valid
|
| 462 |
+
1. The ZooKeeper client disconnects from a server when there
|
| 463 |
+
are pending operations to that server, i.e., there is a pending asynchronous call.
|
| 464 |
+
|
| 465 |
+
**Added in 3.2.0 -- SessionMovedException**. There is an internal
|
| 466 |
+
exception that is generally not seen by clients called the SessionMovedException.
|
| 467 |
+
This exception occurs because a request was received on a connection for a session
|
| 468 |
+
which has been reestablished on a different server. The normal cause of this error is
|
| 469 |
+
a client that sends a request to a server, but the network packet gets delayed, so
|
| 470 |
+
the client times out and connects to a new server. When the delayed packet arrives at
|
| 471 |
+
the first server, the old server detects that the session has moved, and closes the
|
| 472 |
+
client connection. Clients normally do not see this error since they do not read
|
| 473 |
+
from those old connections. (Old connections are usually closed.) One situation in which this
|
| 474 |
+
condition can be seen is when two clients try to reestablish the same connection using
|
| 475 |
+
a saved session id and password. One of the clients will reestablish the connection
|
| 476 |
+
and the second client will be disconnected (causing the pair to attempt to re-establish
|
| 477 |
+
its connection/session indefinitely).
|
| 478 |
+
|
| 479 |
+
**Updating the list of servers**. We allow a client to
|
| 480 |
+
update the connection string by providing a new comma separated list of host:port pairs,
|
| 481 |
+
each corresponding to a ZooKeeper server. The function invokes a probabilistic load-balancing
|
| 482 |
+
algorithm which may cause the client to disconnect from its current host with the goal
|
| 483 |
+
to achieve expected uniform number of connections per server in the new list.
|
| 484 |
+
In case the current host to which the client is connected is not in the new list
|
| 485 |
+
this call will always cause the connection to be dropped. Otherwise, the decision
|
| 486 |
+
is based on whether the number of servers has increased or decreased and by how much.
|
| 487 |
+
|
| 488 |
+
For example, if the previous connection string contained 3 hosts and now the list contains
|
| 489 |
+
these 3 hosts and 2 more hosts, 40% of clients connected to each of the 3 hosts will
|
| 490 |
+
move to one of the new hosts in order to balance the load. The algorithm will cause the client
|
| 491 |
+
to drop its connection to the current host to which it is connected with probability 0.4 and in this
|
| 492 |
+
case cause the client to connect to one of the 2 new hosts, chosen at random.
|
| 493 |
+
|
| 494 |
+
Another example -- suppose we have 5 hosts and now update the list to remove 2 of the hosts,
|
| 495 |
+
the clients connected to the 3 remaining hosts will stay connected, whereas all clients connected
|
| 496 |
+
to the 2 removed hosts will need to move to one of the 3 hosts, chosen at random. If the connection
|
| 497 |
+
is dropped, the client moves to a special mode where it chooses a new server to connect to using the
|
| 498 |
+
probabilistic algorithm, and not just round robin.
|
| 499 |
+
|
| 500 |
+
In the first example, each client decides to disconnect with probability 0.4 but once the decision is
|
| 501 |
+
made, it will try to connect to a random new server and only if it cannot connect to any of the new
|
| 502 |
+
servers will it try to connect to the old ones. After finding a server, or trying all servers in the
|
| 503 |
+
new list and failing to connect, the client moves back to the normal mode of operation where it picks
|
| 504 |
+
an arbitrary server from the connectString and attempts to connect to it. If that fails, it will continue
|
| 505 |
+
trying different random servers in round robin. (see above the algorithm used to initially choose a server)
|
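In the Java client this update is exposed as *updateServerList()* (a sketch assuming a 3.5+ client; the host list is illustrative):

```
import java.io.IOException;
import org.apache.zookeeper.ZooKeeper;

public class RebalanceExample {
    // Expands a three-server connect string to five servers. As in the
    // first example above, a client connected to one of the original three
    // hosts drops its connection with probability 0.4 and reconnects to
    // one of the two new hosts.
    static void expandEnsemble(ZooKeeper zk) throws IOException {
        zk.updateServerList("127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002,"
                + "127.0.0.1:3003,127.0.0.1:3004");
    }
}
```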
| 506 |
+
|
| 507 |
+
**Local session**. Added in 3.5.0, mainly implemented by [ZOOKEEPER-1147](https://issues.apache.org/jira/browse/ZOOKEEPER-1147).
|
| 508 |
+
|
| 509 |
+
- Background: The creation and closing of sessions are costly in ZooKeeper because they need quorum confirmations,
|
| 510 |
+
so they become the bottleneck of a ZooKeeper ensemble when it needs to handle thousands of client connections.
|
| 511 |
+
So in 3.5.0 we introduced a new type of session, the local session, which doesn't have the full functionality of a normal (global) session; this feature
|
| 512 |
+
will be available by turning on *localSessionsEnabled*.
|
| 513 |
+
|
| 514 |
+
When *localSessionsUpgradingEnabled* is disabled:
|
| 515 |
+
|
| 516 |
+
- Local sessions cannot create ephemeral nodes
|
| 517 |
+
|
| 518 |
+
- Once a local session is lost, users cannot re-establish it using the session-id/password; the session and its watches are gone for good.
|
| 519 |
+
Note: losing the TCP connection does not necessarily imply that the session is lost. If the connection can be reestablished with the same ZK server
|
| 520 |
+
before the session timeout then the client can continue (it simply cannot move to another server).
|
| 521 |
+
|
| 522 |
+
- When a local session connects, the session info is only maintained on the ZooKeeper server that it is connected to. The leader is not aware of the creation of such a session and
|
| 523 |
+
there is no state written to disk.
|
| 524 |
+
|
| 525 |
+
- Pings, expiration, and other session state maintenance are handled by the server to which the session is currently connected.
|
| 526 |
+
|
| 527 |
+
When *localSessionsUpgradingEnabled* is enabled:
|
| 528 |
+
|
| 529 |
+
- A local session can be upgraded to a global session automatically.
|
| 530 |
+
|
| 531 |
+
- When a new session is created it is saved locally in a wrapped *LocalSessionTracker*. It can subsequently be upgraded
|
| 532 |
+
to a global session as required (e.g. create ephemeral nodes). If an upgrade is requested the session is removed from local
|
| 533 |
+
collections while keeping the same session ID.
|
| 534 |
+
|
| 535 |
+
- Currently, only the *create ephemeral node* operation needs a session upgrade from local to global.
|
| 536 |
+
The reason is that the creation of an ephemeral node depends heavily on a global session. If a local session could create an ephemeral
|
| 537 |
+
node without upgrading to a global session, it would cause data inconsistency between different nodes.
|
| 538 |
+
The leader also needs to know about the lifespan of a session in order to clean up ephemeral nodes on close/expiry.
|
| 539 |
+
This requires a global session as the local session is tied to its particular server.
|
| 540 |
+
|
| 541 |
+
- A session can be both a local and a global session during upgrade, but the upgrade operation cannot be performed concurrently by two threads.
|
| 542 |
+
|
| 543 |
+
- *ZooKeeperServer* (standalone) uses *SessionTrackerImpl*; *LeaderZooKeeperServer* uses *LeaderSessionTracker*, which holds
|
| 544 |
+
*SessionTrackerImpl* (global) and *LocalSessionTracker* (if enabled); *FollowerZooKeeperServer* and *ObserverZooKeeperServer*
|
| 545 |
+
use *LearnerSessionTracker* which holds *LocalSessionTracker*.
|
| 546 |
+
The UML Graph of Classes about session:
|
| 547 |
+
|
| 548 |
+
```
|
| 549 |
+
+----------------+ +--------------------+ +---------------------+
|
| 550 |
+
| | --> | | ----> | LocalSessionTracker |
|
| 551 |
+
| SessionTracker | | SessionTrackerImpl | +---------------------+
|
| 552 |
+
| | | | +-----------------------+
|
| 553 |
+
| | | | +-------------------------> | LeaderSessionTracker |
|
| 554 |
+
+----------------+ +--------------------+ | +-----------------------+
|
| 555 |
+
| |
|
| 556 |
+
| |
|
| 557 |
+
| |
|
| 558 |
+
| +---------------------------+
|
| 559 |
+
+---------> | |
|
| 560 |
+
| UpgradeableSessionTracker |
|
| 561 |
+
| |
|
| 562 |
+
| | ------------------------+
|
| 563 |
+
+---------------------------+ |
|
| 564 |
+
|
|
| 565 |
+
|
|
| 566 |
+
v
|
| 567 |
+
+-----------------------+
|
| 568 |
+
| LearnerSessionTracker |
|
| 569 |
+
+-----------------------+
|
| 570 |
+
```
|
| 571 |
+
|
| 572 |
+
- Q&A
|
| 573 |
+
- *What's the reason for having the config option to disable local session upgrade?*
|
| 574 |
+
- In a large deployment which needs to handle a very large number of clients, clients connecting via observers
|
| 575 |
+
are supposed to use local sessions only. So this is more of a safeguard against someone accidentally creating lots of ephemeral nodes and global sessions.
|
| 576 |
+
|
| 577 |
+
- *When is the session created?*
|
| 578 |
+
- In the current implementation, it will try to create a local session when processing *ConnectRequest* and when
|
| 579 |
+
*createSession* request reaches *FinalRequestProcessor*.
|
| 580 |
+
|
| 581 |
+
- *What happens if the create for a session is sent at server A, and the client disconnects and connects to some other server B,
|
| 582 |
+
which ends up sending it again, and then disconnects and connects back to server A?*
|
| 583 |
+
- When a client reconnects to B, its sessionId won’t exist in B’s local session tracker. So B will send a validation packet.
|
| 584 |
+
If the CreateSession issued by A is committed before the validation packet arrives, the client will be able to connect.
|
| 585 |
+
Otherwise, the client will get session expired because the quorum doesn’t know about this session yet.
|
| 586 |
+
If the client also tries to connect back to A again, the session has already been removed from A’s local session tracker.
|
| 587 |
+
So A will also need to send a validation packet to the leader. The outcome should be the same as with B, depending on the timing of the request.
|
| 588 |
+
|
| 589 |
+
<a name="ch_zkWatches"></a>
|
| 590 |
+
|
| 591 |
+
## ZooKeeper Watches
|
| 592 |
+
|
| 593 |
+
All of the read operations in ZooKeeper - **getData()**, **getChildren()**, and **exists()** - have the option of setting a watch as a
|
| 594 |
+
side effect. Here is ZooKeeper's definition of a watch: a watch event is
|
| 595 |
+
a one-time trigger, sent to the client that set the watch, which occurs when
|
| 596 |
+
the data for which the watch was set changes. There are three key points
|
| 597 |
+
to consider in this definition of a watch:
|
| 598 |
+
|
| 599 |
+
* **One-time trigger**
|
| 600 |
+
One watch event will be sent to the client when the data has changed.
|
| 601 |
+
For example, if a client does a getData("/znode1", true) and later the
|
| 602 |
+
data for /znode1 is changed or deleted, the client will get a watch
|
| 603 |
+
event for /znode1. If /znode1 changes again, no watch event will be
|
| 604 |
+
sent unless the client has done another read that sets a new
|
| 605 |
+
watch.
|
| 606 |
+
* **Sent to the client**
|
| 607 |
+
This implies that an event is on the way to the client, but may
|
| 608 |
+
not reach the client before the successful return code to the change
|
| 609 |
+
operation reaches the client that initiated the change. Watches are
|
| 610 |
+
sent asynchronously to watchers. ZooKeeper provides an ordering
|
| 611 |
+
guarantee: a client will never see a change for which it has set a
|
| 612 |
+
watch until it first sees the watch event. Network delays or other
|
| 613 |
+
factors may cause different clients to see watches and return codes
|
| 614 |
+
from updates at different times. The key point is that everything seen
|
| 615 |
+
by the different clients will have a consistent order.
|
| 616 |
+
* **The data for which the watch was
|
| 617 |
+
set**
|
| 618 |
+
This refers to the different ways a node can change. It
|
| 619 |
+
helps to think of ZooKeeper as maintaining two lists of
|
| 620 |
+
watches: data watches and child watches. getData() and
|
| 621 |
+
exists() set data watches. getChildren() sets child
|
| 622 |
+
watches. Alternatively, it may help to think of watches being
|
| 623 |
+
set according to the kind of data returned. getData() and
|
| 624 |
+
exists() return information about the data of the node,
|
| 625 |
+
whereas getChildren() returns a list of children. Thus,
|
| 626 |
+
setData() will trigger data watches for the znode being set
|
| 627 |
+
(assuming the set is successful). A successful create() will
|
| 628 |
+
trigger a data watch for the znode being created and a child
|
| 629 |
+
watch for the parent znode. A successful delete() will trigger
|
| 630 |
+
both a data watch and a child watch (since there can be no
|
| 631 |
+
more children) for a znode being deleted as well as a child
|
| 632 |
+
watch for the parent znode.
|
| 633 |
+
|
| 634 |
+
Watches are maintained locally at the ZooKeeper server to which the
|
| 635 |
+
client is connected. This allows watches to be lightweight to set,
|
| 636 |
+
maintain, and dispatch. When a client connects to a new server, the watch
|
| 637 |
+
will be triggered for any session events. Watches will not be received
|
| 638 |
+
while disconnected from a server. When a client reconnects, any previously
|
| 639 |
+
registered watches will be reregistered and triggered if needed. In
|
| 640 |
+
general this all occurs transparently. There is one case where a watch
|
| 641 |
+
may be missed: a watch for the existence of a znode not yet created will
|
| 642 |
+
be missed if the znode is created and deleted while disconnected.
|
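Because standard watches are one-time triggers, a client that wants continuous notification has to re-register after every event. A minimal Java sketch of that pattern (the znode path is illustrative; note that changes between the trigger and the re-read are coalesced):

```
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooKeeper;

public class ReRegisteringWatcher implements Watcher {
    private final ZooKeeper zk;

    ReRegisteringWatcher(ZooKeeper zk) { this.zk = zk; }

    byte[] readAndWatch() throws KeeperException, InterruptedException {
        return zk.getData("/znode1", this, null);   // sets the data watch
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == EventType.NodeDataChanged) {
            try {
                readAndWatch();   // one-time trigger: arm a new watch
            } catch (KeeperException | InterruptedException e) {
                // handle or retry in real code
            }
        }
    }
}
```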
| 643 |
+
|
| 644 |
+
**New in 3.6.0:** Clients can also set
|
| 645 |
+
permanent, recursive watches on a znode that are not removed when triggered
|
| 646 |
+
and that trigger for changes on the registered znode as well as any children
|
| 647 |
+
znodes recursively.
|
| 648 |
+
|
| 649 |
+
<a name="sc_WatchSemantics"></a>
|
| 650 |
+
|
| 651 |
+
### Semantics of Watches
|
| 652 |
+
|
| 653 |
+
We can set watches with the three calls that read the state of
|
| 654 |
+
ZooKeeper: exists, getData, and getChildren. The following list details
|
| 655 |
+
the events that a watch can trigger and the calls that enable them:
|
| 656 |
+
|
| 657 |
+
* **Created event:**
|
| 658 |
+
Enabled with a call to exists.
|
| 659 |
+
* **Deleted event:**
|
| 660 |
+
Enabled with a call to exists, getData, and getChildren.
|
| 661 |
+
* **Changed event:**
|
| 662 |
+
Enabled with a call to exists and getData.
|
| 663 |
+
* **Child event:**
|
| 664 |
+
Enabled with a call to getChildren.
|
| 665 |
+
|
| 666 |
+
<a name="sc_WatchPersistentRecursive"></a>
|
| 667 |
+
|
| 668 |
+
### Persistent, Recursive Watches
|
| 669 |
+
|
| 670 |
+
**New in 3.6.0:** There is now a variation on the standard
|
| 671 |
+
watch described above whereby you can set a watch that does not get removed when triggered.
|
| 672 |
+
Additionally, these watches trigger the event types *NodeCreated*, *NodeDeleted*, and *NodeDataChanged*
|
| 673 |
+
and, optionally, recursively for all znodes starting at the znode that the watch is registered for. Note
|
| 674 |
+
that *NodeChildrenChanged* events are not triggered for persistent recursive watches as it would be redundant.
|
| 675 |
+
|
| 676 |
+
Persistent watches are set using the method *addWatch()*. The triggering semantics and guarantees
|
| 677 |
+
(other than one-time triggering) are the same as standard watches. The only exception regarding events is that
|
| 678 |
+
recursive persistent watchers never trigger child changed events as they are redundant.
|
| 679 |
+
Persistent watches are removed using *removeWatches()* with watcher type *WatcherType.Any*.
|
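A short Java sketch of setting and removing such a watch (the path is illustrative; *AddWatchMode.PERSISTENT_RECURSIVE* is assumed to be the 3.6.0 mode constant for the recursive variant, with *AddWatchMode.PERSISTENT* for the non-recursive one):

```
import org.apache.zookeeper.AddWatchMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.WatcherType;
import org.apache.zookeeper.ZooKeeper;

public class PersistentWatchExample {
    static void watchSubtree(ZooKeeper zk)
            throws KeeperException, InterruptedException {
        Watcher watcher = event -> System.out.println("event: " + event);
        // Fires repeatedly for NodeCreated / NodeDeleted / NodeDataChanged
        // on /app and all of its descendants; no re-registration needed.
        zk.addWatch("/app", watcher, AddWatchMode.PERSISTENT_RECURSIVE);
        // ... later, remove it with watcher type Any:
        zk.removeWatches("/app", watcher, WatcherType.Any, false);
    }
}
```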
| 680 |
+
|
| 681 |
+
<a name="sc_WatchRemoval"></a>
|
| 682 |
+
|
| 683 |
+
### Remove Watches
|
| 684 |
+
|
| 685 |
+
We can remove the watches registered on a znode with a call to
|
| 686 |
+
removeWatches. Also, a ZooKeeper client can remove watches locally even
|
| 687 |
+
if there is no server connection by setting the local flag to true. The
|
| 688 |
+
following list details the events which will be triggered after the
|
| 689 |
+
successful watch removal.
|
| 690 |
+
|
| 691 |
+
* **Child Remove event:**
|
| 692 |
+
Watcher which was added with a call to getChildren.
|
| 693 |
+
* **Data Remove event:**
|
| 694 |
+
Watcher which was added with a call to exists or getData.
|
| 695 |
+
* **Persistent Remove event:**
|
| 696 |
+
Watcher which was added with a call to add a persistent watch.
|
| 697 |
+
|
| 698 |
+
<a name="sc_WatchGuarantees"></a>
|
| 699 |
+
|
| 700 |
+
### What ZooKeeper Guarantees about Watches
|
| 701 |
+
|
| 702 |
+
With regard to watches, ZooKeeper maintains these
|
| 703 |
+
guarantees:
|
| 704 |
+
|
| 705 |
+
* Watches are ordered with respect to other events, other
|
| 706 |
+
watches, and asynchronous replies. The ZooKeeper client libraries
|
| 707 |
+
ensure that everything is dispatched in order.
|
| 708 |
+
|
| 709 |
+
* A client will see a watch event for a znode it is watching
|
| 710 |
+
before seeing the new data that corresponds to that znode.
|
| 711 |
+
|
| 712 |
+
* The order of watch events from ZooKeeper corresponds to the
|
| 713 |
+
order of the updates as seen by the ZooKeeper service.
|
| 714 |
+
|
| 715 |
+
<a name="sc_WatchRememberThese"></a>
|
| 716 |
+
|
| 717 |
+
### Things to Remember about Watches
|
| 718 |
+
|
| 719 |
+
* Standard watches are one time triggers; if you get a watch event and
|
| 720 |
+
you want to get notified of future changes, you must set another
|
| 721 |
+
watch.
|
| 722 |
+
|
| 723 |
+
* Because standard watches are one time triggers and there is latency
|
| 724 |
+
between getting the event and sending a new request to get a watch
|
| 725 |
+
you cannot reliably see every change that happens to a node in
|
| 726 |
+
ZooKeeper. Be prepared to handle the case where the znode changes
|
| 727 |
+
multiple times between getting the event and setting the watch
|
| 728 |
+
again. (You may not care, but at least realize it may
|
| 729 |
+
happen.)
|
| 730 |
+
|
| 731 |
+
* A watch object, or function/context pair, will only be
|
| 732 |
+
triggered once for a given notification. For example, if the same
|
| 733 |
+
watch object is registered for an exists and a getData call for the
|
| 734 |
+
same file and that file is then deleted, the watch object would
|
| 735 |
+
only be invoked once with the deletion notification for the file.
|
| 736 |
+
|
| 737 |
+
* When you disconnect from a server (for example, when the
|
| 738 |
+
server fails), you will not get any watches until the connection
|
| 739 |
+
is reestablished. For this reason session events are sent to all
|
| 740 |
+
outstanding watch handlers. Use session events to go into a safe
|
| 741 |
+
mode: you will not be receiving events while disconnected, so your
|
| 742 |
+
process should act conservatively in that mode.
|
| 743 |
+
|
| 744 |
+
<a name="sc_ZooKeeperAccessControl"></a>
|
| 745 |
+
|
| 746 |
+
## ZooKeeper access control using ACLs
|
| 747 |
+
|
| 748 |
+
ZooKeeper uses ACLs to control access to its znodes (the
|
| 749 |
+
data nodes of a ZooKeeper data tree). The ACL implementation is
|
| 750 |
+
quite similar to UNIX file access permissions: it employs
|
| 751 |
+
permission bits to allow/disallow various operations against a
|
| 752 |
+
node and the scope to which the bits apply. Unlike standard UNIX
|
| 753 |
+
permissions, a ZooKeeper node is not limited by the three standard
|
| 754 |
+
scopes for user (owner of the file), group, and world
|
| 755 |
+
(other). ZooKeeper does not have a notion of an owner of a
|
| 756 |
+
znode. Instead, an ACL specifies sets of ids and permissions that
|
| 757 |
+
are associated with those ids.
|
| 758 |
+
|
| 759 |
+
Note also that an ACL pertains only to a specific znode. In
|
| 760 |
+
particular it does not apply to children. For example, if
|
| 761 |
+
_/app_ is only readable by ip:172.16.16.1 and
|
| 762 |
+
_/app/status_ is world readable, anyone will
|
| 763 |
+
be able to read _/app/status_; ACLs are not
|
| 764 |
+
recursive.
|
| 765 |
+
|
| 766 |
+
ZooKeeper supports pluggable authentication schemes. Ids are
|
| 767 |
+
specified using the form _scheme:expression_,
|
| 768 |
+
where _scheme_ is the authentication scheme
|
| 769 |
+
that the id corresponds to. The set of valid expressions are defined
|
| 770 |
+
by the scheme. For example, _ip:172.16.16.1_ is
|
| 771 |
+
an id for a host with the address _172.16.16.1_
|
| 772 |
+
using the _ip_ scheme, whereas _digest:bob:password_
|
| 773 |
+
is an id for the user with the name of _bob_ using
|
| 774 |
+
the _digest_ scheme.
|
| 775 |
+
|
| 776 |
+
When a client connects to ZooKeeper and authenticates
|
| 777 |
+
itself, ZooKeeper associates all the ids that correspond to a
|
| 778 |
+
client with the clients connection. These ids are checked against
|
| 779 |
+
the ACLs of znodes when a client tries to access a node. ACLs are
|
| 780 |
+
made up of pairs of _(scheme:expression,
|
| 781 |
+
perms)_. The format of
|
| 782 |
+
the _expression_ is specific to the scheme. For
|
| 783 |
+
example, the pair _(ip:19.22.0.0/16, READ)_
|
| 784 |
+
gives the _READ_ permission to any clients with
|
| 785 |
+
an IP address that starts with 19.22.
|
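Expressed in the Java client, the same pair looks like this (path and data are illustrative):

```
import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;

public class IpAclExample {
    // Creates a znode whose only ACL entry is (ip:19.22.0.0/16, READ).
    static void createSubnetReadable(ZooKeeper zk)
            throws KeeperException, InterruptedException {
        List<ACL> acl = Collections.singletonList(
                new ACL(Perms.READ, new Id("ip", "19.22.0.0/16")));
        zk.create("/app/status", "ok".getBytes(), acl, CreateMode.PERSISTENT);
    }
}
```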
| 786 |
+
|
| 787 |
+
<a name="sc_ACLPermissions"></a>
|
| 788 |
+
|
| 789 |
+
### ACL Permissions
|
| 790 |
+
|
| 791 |
+
ZooKeeper supports the following permissions:
|
| 792 |
+
|
| 793 |
+
* **CREATE**: you can create a child node
|
| 794 |
+
* **READ**: you can get data from a node and list its children.
|
| 795 |
+
* **WRITE**: you can set data for a node
|
| 796 |
+
* **DELETE**: you can delete a child node
|
| 797 |
+
* **ADMIN**: you can set permissions
|
| 798 |
+
|
| 799 |
+
The _CREATE_
|
| 800 |
+
and _DELETE_ permissions have been broken out
|
| 801 |
+
of the _WRITE_ permission for finer grained
|
| 802 |
+
access controls. The cases for _CREATE_
|
| 803 |
+
and _DELETE_ are the following:
|
| 804 |
+
|
| 805 |
+
You want A to be able to do a set on a ZooKeeper node, but
|
| 806 |
+
not be able to _CREATE_
|
| 807 |
+
or _DELETE_ children.
|
| 808 |
+
|
| 809 |
+
_CREATE_
|
| 810 |
+
without _DELETE_: clients create requests by
|
| 811 |
+
creating ZooKeeper nodes in a parent directory. You want all
|
| 812 |
+
clients to be able to add, but only the request processor can
|
| 813 |
+
delete. (This is kind of like the APPEND permission for
|
| 814 |
+
files.)
|
| 815 |
+
|
| 816 |
+
Also, the _ADMIN_ permission is there
|
| 817 |
+
since ZooKeeper doesn’t have a notion of file owner. In some
|
| 818 |
+
sense the _ADMIN_ permission designates the
|
| 819 |
+
entity as the owner. ZooKeeper doesn’t support the LOOKUP
|
| 820 |
+
permission (execute permission bit on directories to allow you
|
| 821 |
+
to LOOKUP even though you can't list the directory). Everyone
|
| 822 |
+
implicitly has LOOKUP permission. This allows you to stat a
|
| 823 |
+
node, but nothing more. (The problem is, if you want to call
|
| 824 |
+
zoo_exists() on a node that doesn't exist, there is no
|
| 825 |
+
permission to check.)
|
| 826 |
+
|
| 827 |
+
_ADMIN_ permission also has a special role in terms of ACLs:
|
| 828 |
+
in order to retrieve the ACLs of a znode, a user has to have _READ_ or _ADMIN_
|
| 829 |
+
permission, but without _ADMIN_ permission, digest hash values will be
|
| 830 |
+
masked out.
|
| 831 |
+
|
| 832 |
+
As of versions **3.9.2 / 3.8.4 / 3.7.3** the exists() call will now verify ACLs
|
| 833 |
+
on nodes that exist, and the client must have READ permission; otherwise
|
| 834 |
+
an 'Insufficient permission' error will be raised.
|
| 835 |
+
|
| 836 |
+
<a name="sc_BuiltinACLSchemes"></a>
|
| 837 |
+
|
| 838 |
+
#### Builtin ACL Schemes
|
| 839 |
+
|
| 840 |
+
ZooKeeper has the following built-in schemes:
|
| 841 |
+
|
| 842 |
+
* **world** has a
|
| 843 |
+
single id, _anyone_, that represents
|
| 844 |
+
anyone.
|
| 845 |
+
* **auth** is a special
|
| 846 |
+
scheme which ignores any provided expression and instead uses the current user,
|
| 847 |
+
credentials, and scheme. Any expression (whether _user_ like with SASL
|
| 848 |
+
authentication or _user:password_ like with DIGEST authentication) provided is ignored
|
| 849 |
+
by the ZooKeeper server when persisting the ACL. However, the expression must still be
|
| 850 |
+
provided in the ACL because the ACL must match the form _scheme:expression:perms_.
|
| 851 |
+
This scheme is provided as a convenience as it is a common use-case for
|
| 852 |
+
a user to create a znode and then restrict access to that znode to only that user.
|
| 853 |
+
If there is no authenticated user, setting an ACL with the auth scheme will fail.
|
| 854 |
+
* **digest** uses
|
| 855 |
+
a _username:password_ string to generate
|
| 856 |
+
an MD5 hash which is then used as an ACL ID
|
| 857 |
+
identity. Authentication is done by sending
|
| 858 |
+
the _username:password_ in clear text. When
|
| 859 |
+
used in the ACL the expression will be
|
| 860 |
+
the _username:base64_
|
| 861 |
+
encoded _SHA1_
|
| 862 |
+
password _digest_.
|
| 863 |
+
* **ip** uses the
|
| 864 |
+
client host IP as an ACL ID identity. The ACL expression is of
|
| 865 |
+
the form _addr/bits_ where the most
|
| 866 |
+
significant _bits_
|
| 867 |
+
of _addr_ are matched against the most
|
| 868 |
+
significant _bits_ of the client host
|
| 869 |
+
IP.
|
| 870 |
+
* **x509** uses the client
|
| 871 |
+
X500 Principal as an ACL ID identity. The ACL expression is the exact
|
| 872 |
+
X500 Principal name of a client. When using the secure port, clients
|
| 873 |
+
are automatically authenticated and their auth info for the x509 scheme
|
| 874 |
+
is set.
|
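For example, a Java client authenticates under the digest scheme by attaching its credentials to the session (the user name and password are illustrative; remember the plaintext travels on the wire, so prefer a TLS connection):

```
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.ZooKeeper;

public class DigestAuthExample {
    static void authenticate(ZooKeeper zk) {
        // The session now carries a digest id for bob; ACL entries on
        // znodes store the username:base64(SHA1(password)) digest form
        // described above, not this plaintext.
        zk.addAuthInfo("digest",
                "bob:bobspassword".getBytes(StandardCharsets.UTF_8));
    }
}
```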
| 875 |
+
|
| 876 |
+
<a name="ZooKeeper+C+client+API"></a>
|
| 877 |
+
|
| 878 |
+
#### ZooKeeper C client API
|
| 879 |
+
|
| 880 |
+
The following constants are provided by the ZooKeeper C
|
| 881 |
+
library:
|
| 882 |
+
|
| 883 |
+
* _const_ _int_ ZOO_PERM_READ; //can read node’s value and list its children
|
| 884 |
+
* _const_ _int_ ZOO_PERM_WRITE;// can set the node’s value
|
| 885 |
+
* _const_ _int_ ZOO_PERM_CREATE; //can create children
|
| 886 |
+
* _const_ _int_ ZOO_PERM_DELETE;// can delete children
|
| 887 |
+
* _const_ _int_ ZOO_PERM_ADMIN; //can execute set_acl()
|
| 888 |
+
* _const_ _int_ ZOO_PERM_ALL;// all of the above flags OR’d together
|
| 889 |
+
|
| 890 |
+
The following are the standard ACL IDs:
|
| 891 |
+
|
| 892 |
+
* _struct_ Id ZOO_ANYONE_ID_UNSAFE; //(‘world’,’anyone’)
|
| 893 |
+
* _struct_ Id ZOO_AUTH_IDS;// (‘auth’,’’)
|
| 894 |
+
|
| 895 |
+
ZOO_AUTH_IDS empty identity string should be interpreted as “the identity of the creator”.
|
| 896 |
+
|
| 897 |
+
ZooKeeper client comes with three standard ACLs:
|
| 898 |
+
|
| 899 |
+
* _struct_ ACL_vector ZOO_OPEN_ACL_UNSAFE; //(ZOO_PERM_ALL,ZOO_ANYONE_ID_UNSAFE)
|
| 900 |
+
* _struct_ ACL_vector ZOO_READ_ACL_UNSAFE;// (ZOO_PERM_READ, ZOO_ANYONE_ID_UNSAFE)
|
| 901 |
+
* _struct_ ACL_vector ZOO_CREATOR_ALL_ACL; //(ZOO_PERM_ALL,ZOO_AUTH_IDS)
|
| 902 |
+
|
| 903 |
+
ZOO_OPEN_ACL_UNSAFE is a completely open, free-for-all
|
| 904 |
+
ACL: any application can execute any operation on the node and
|
| 905 |
+
can create, list and delete its children. The
|
| 906 |
+
ZOO_READ_ACL_UNSAFE is read-only access for any
|
| 907 |
+
application. ZOO_CREATOR_ALL_ACL grants all permissions to the
|
| 908 |
+
creator of the node. The creator must have been authenticated by
|
| 909 |
+
the server (for example, using “_digest_”
|
| 910 |
+
scheme) before it can create nodes with this ACL.
|
| 911 |
+
|
| 912 |
+
The following ZooKeeper operations deal with ACLs:
|
| 913 |
+
|
| 914 |
+
* _int_ _zoo_add_auth_
|
| 915 |
+
(zhandle_t \*zh,_const_ _char_*
|
| 916 |
+
scheme,_const_ _char_*
|
| 917 |
+
cert, _int_ certLen, void_completion_t
|
| 918 |
+
completion, _const_ _void_
|
| 919 |
+
\*data);
|
| 920 |
+
|
| 921 |
+
The application uses the zoo_add_auth function to
|
| 922 |
+
authenticate itself to the server. The function can be called
|
| 923 |
+
multiple times if the application wants to authenticate using
|
| 924 |
+
different schemes and/or identities.
|
| 925 |
+
|
| 926 |
+
* _int_ _zoo_create_
|
| 927 |
+
(zhandle_t \*zh, _const_ _char_
|
| 928 |
+
\*path, _const_ _char_
|
| 929 |
+
\*value,_int_
|
| 930 |
+
valuelen, _const_ _struct_
|
| 931 |
+
ACL_vector \*acl, _int_
|
| 932 |
+
flags,_char_
|
| 933 |
+
\*realpath, _int_
|
| 934 |
+
max_realpath_len);
|
| 935 |
+
|
| 936 |
+
zoo_create(...) operation creates a new node. The acl
|
| 937 |
+
parameter is a list of ACLs associated with the node. The parent
|
| 938 |
+
node must have the CREATE permission bit set.
|
| 939 |
+
|
| 940 |
+
* _int_ _zoo_get_acl_
|
| 941 |
+
(zhandle_t \*zh, _const_ _char_
|
| 942 |
+
\*path,_struct_ ACL_vector
|
| 943 |
+
\*acl, _struct_ Stat \*stat);
|
| 944 |
+
|
| 945 |
+
This operation returns a node’s ACL info. The node must have READ or ADMIN
|
| 946 |
+
permission set. Without ADMIN permission, the digest hash values will be masked out.
|
| 947 |
+
|
| 948 |
+
* _int_ _zoo_set_acl_
|
| 949 |
+
(zhandle_t \*zh, _const_ _char_
|
| 950 |
+
\*path, _int_
|
| 951 |
+
version,_const_ _struct_
|
| 952 |
+
ACL_vector \*acl);
|
| 953 |
+
|
| 954 |
+
This function replaces node’s ACL list with a new one. The
|
| 955 |
+
node must have the ADMIN permission set.
|
| 956 |
+
|
| 957 |
+
Here is a sample code that makes use of the above APIs to
|
| 958 |
+
authenticate itself using the “_foo_” scheme
|
| 959 |
+
and create an ephemeral node “/xyz” with create-only
|
| 960 |
+
permissions.
|
| 961 |
+
|
| 962 |
+
######Note
|
| 963 |
+
>This is a very simple example which is intended to show
|
| 964 |
+
how to interact with ZooKeeper ACLs
|
| 965 |
+
specifically. See *.../trunk/zookeeper-client/zookeeper-client-c/src/cli.c*
|
| 966 |
+
for an example of a C client implementation
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
#include <string.h>
|
| 971 |
+
#include <errno.h>
|
| 972 |
+
|
| 973 |
+
#include "zookeeper.h"
|
| 974 |
+
|
| 975 |
+
static zhandle_t *zh;
|
| 976 |
+
|
| 977 |
+
/**
|
| 978 |
+
* In this example this method gets the cert for your
|
| 979 |
+
* environment -- you must provide it yourself
|
| 980 |
+
*/
|
| 981 |
+
char *foo_get_cert_once(char* id) { return 0; }
|
| 982 |
+
|
| 983 |
+
/** Watcher function -- empty for this example, not something you should
|
| 984 |
+
* do in real code */
|
| 985 |
+
void watcher(zhandle_t *zzh, int type, int state, const char *path,
|
| 986 |
+
void *watcherCtx) {}
|
| 987 |
+
|
| 988 |
+
int main(int argc, char **argv) {
|
| 989 |
+
char buffer[512];
|
| 990 |
+
char p[2048];
|
| 991 |
+
char *cert=0;
|
| 992 |
+
char appId[64];
|
| 993 |
+
|
| 994 |
+
strcpy(appId, "example.foo_test");
|
| 995 |
+
cert = foo_get_cert_once(appId);
|
| 996 |
+
if(cert!=0) {
|
| 997 |
+
fprintf(stderr,
|
| 998 |
+
"Certificate for appid [%s] is [%s]\n",appId,cert);
|
| 999 |
+
strncpy(p,cert, sizeof(p)-1);
|
| 1000 |
+
free(cert);
|
| 1001 |
+
} else {
|
| 1002 |
+
fprintf(stderr, "Certificate for appid [%s] not found\n",appId);
|
| 1003 |
+
strcpy(p, "dummy");
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
zoo_set_debug_level(ZOO_LOG_LEVEL_DEBUG);
|
| 1007 |
+
|
| 1008 |
+
zh = zookeeper_init("localhost:3181", watcher, 10000, 0, 0, 0);
|
| 1009 |
+
if (!zh) {
|
| 1010 |
+
return errno;
|
| 1011 |
+
}
|
| 1012 |
+
if(zoo_add_auth(zh,"foo",p,strlen(p),0,0)!=ZOK)
|
| 1013 |
+
return 2;
|
| 1014 |
+
|
| 1015 |
+
struct ACL CREATE_ONLY_ACL[] = {{ZOO_PERM_CREATE, ZOO_AUTH_IDS}};
|
| 1016 |
+
struct ACL_vector CREATE_ONLY = {1, CREATE_ONLY_ACL};
|
| 1017 |
+
int rc = zoo_create(zh,"/xyz","value", 5, &CREATE_ONLY, ZOO_EPHEMERAL,
|
| 1018 |
+
buffer, sizeof(buffer)-1);
|
| 1019 |
+
|
| 1020 |
+
/** this operation will fail with a ZNOAUTH error */
|
| 1021 |
+
int buflen= sizeof(buffer);
|
| 1022 |
+
struct Stat stat;
|
| 1023 |
+
rc = zoo_get(zh, "/xyz", 0, buffer, &buflen, &stat);
|
| 1024 |
+
if (rc) {
|
| 1025 |
+
fprintf(stderr, "Error %d for %s\n", rc, __LINE__);
|
| 1026 |
+
}
|
| 1027 |
+
|
| 1028 |
+
zookeeper_close(zh);
|
| 1029 |
+
return 0;
|
| 1030 |
+
}
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
<a name="sc_ZooKeeperPluggableAuthentication"></a>
|
| 1034 |
+
|
| 1035 |
+
## Pluggable ZooKeeper authentication
|
| 1036 |
+
|
| 1037 |
+
ZooKeeper runs in a variety of different environments with
|
| 1038 |
+
various different authentication schemes, so it has a completely
|
| 1039 |
+
pluggable authentication framework. Even the builtin authentication
|
| 1040 |
+
schemes use the pluggable authentication framework.
|
| 1041 |
+
|
| 1042 |
+
To understand how the authentication framework works, first you must
|
| 1043 |
+
understand the two main authentication operations. The framework
|
| 1044 |
+
first must authenticate the client. This is usually done as soon as
|
| 1045 |
+
the client connects to a server and consists of validating information
|
| 1046 |
+
sent from or gathered about a client and associating it with the connection.
|
| 1047 |
+
The second operation handled by the framework is finding the entries in an
|
| 1048 |
+
ACL that correspond to the client. ACL entries are <_idspec,
|
| 1049 |
+
permissions_> pairs. The _idspec_ may be
|
| 1050 |
+
a simple string match against the authentication information associated
|
| 1051 |
+
with the connection, or it may be an expression that is evaluated against that
|
| 1052 |
+
information. It is up to the implementation of the authentication plugin
|
| 1053 |
+
to do the match. Here is the interface that an authentication plugin must
|
| 1054 |
+
implement:
|
| 1055 |
+
|
| 1056 |
+
|
| 1057 |
+
public interface AuthenticationProvider {
|
| 1058 |
+
String getScheme();
|
| 1059 |
+
KeeperException.Code handleAuthentication(ServerCnxn cnxn, byte authData[]);
|
| 1060 |
+
boolean isValid(String id);
|
| 1061 |
+
boolean matches(String id, String aclExpr);
|
| 1062 |
+
boolean isAuthenticated();
|
| 1063 |
+
}
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
The first method _getScheme_ returns the string
|
| 1067 |
+
that identifies the plugin. Because we support multiple methods of authentication,
|
| 1068 |
+
an authentication credential or an _idspec_ will always be
|
| 1069 |
+
prefixed with _scheme:_. The ZooKeeper server uses the scheme
|
| 1070 |
+
returned by the authentication plugin to determine which ids the scheme
|
| 1071 |
+
applies to.
|
| 1072 |
+
|
| 1073 |
+
_handleAuthentication_ is called when a client
|
| 1074 |
+
sends authentication information to be associated with a connection. The
|
| 1075 |
+
client specifies the scheme to which the information corresponds. The
|
| 1076 |
+
ZooKeeper server passes the information to the authentication plugin whose
|
| 1077 |
+
_getScheme_ matches the scheme passed by the client. The
|
| 1078 |
+
implementor of _handleAuthentication_ will usually return
|
| 1079 |
+
an error if it determines that the information is bad, or it will associate information
|
| 1080 |
+
with the connection using _cnxn.getAuthInfo().add(new Id(getScheme(), data))_.
|
| 1081 |
+
|
| 1082 |
+
The authentication plugin is involved in both setting and using ACLs. When an
|
| 1083 |
+
ACL is set for a znode, the ZooKeeper server will pass the id part of the entry to
|
| 1084 |
+
the _isValid(String id)_ method. It is up to the plugin to verify
|
| 1085 |
+
that the id has a correct form. For example, _ip:172.16.0.0/16_
|
| 1086 |
+
is a valid id, but _ip:host.com_ is not. If the new ACL includes
|
| 1087 |
+
an "auth" entry, _isAuthenticated_ is used to see if the
|
| 1088 |
+
authentication information for this scheme that is associated with the connection
|
| 1089 |
+
should be added to the ACL. Some schemes
|
| 1090 |
+
should not be included in auth. For example, the IP address of the client is not
|
| 1091 |
+
considered as an id that should be added to the ACL if auth is specified.
|
| 1092 |
+
|
| 1093 |
+
ZooKeeper invokes _matches(String id, String aclExpr)_ when checking an ACL. It
|
| 1094 |
+
needs to match authentication information of the client against the relevant ACL
|
| 1095 |
+
entries. To find the entries which apply to the client, the ZooKeeper server will
|
| 1096 |
+
find the scheme of each entry and if there is authentication information
|
| 1097 |
+
from that client for that scheme, _matches(String id, String aclExpr)_
|
| 1098 |
+
will be called with _id_ set to the authentication information
|
| 1099 |
+
that was previously added to the connection by _handleAuthentication_ and
|
| 1100 |
+
_aclExpr_ set to the id of the ACL entry. The authentication plugin
|
| 1101 |
+
uses its own logic and matching scheme to determine if _id_ is included
|
| 1102 |
+
in _aclExpr_.
|
| 1103 |
+
|
| 1104 |
+
There are two built-in authentication plugins: _ip_ and
|
| 1105 |
+
_digest_. Additional plugins can be added using system properties. At
|
| 1106 |
+
startup the ZooKeeper server will look for system properties that start with
|
| 1107 |
+
"zookeeper.authProvider." and interpret the value of those properties as the class name
|
| 1108 |
+
of an authentication plugin. These properties can be set using the
|
| 1109 |
+
_-Dzookeeper.authProvider.X=com.f.MyAuth_ option or by adding entries such as
|
| 1110 |
+
the following in the server configuration file:
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
authProvider.1=com.f.MyAuth
|
| 1114 |
+
authProvider.2=com.f.MyAuth2
|
| 1115 |
+
|
| 1116 |
+
|
| 1117 |
+
Care should be taken to ensure that the suffix on the property is unique. If there are
|
| 1118 |
+
duplicates such as _-Dzookeeper.authProvider.X=com.f.MyAuth -Dzookeeper.authProvider.X=com.f.MyAuth2_,
|
| 1119 |
+
only one will be used. Also, all servers must have the same plugins defined; otherwise clients using
|
| 1120 |
+
the authentication schemes provided by the plugins will have problems connecting to some servers.
|
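For illustration, here is a sketch of a provider for a hypothetical "foo" scheme, following the interface and the _cnxn.getAuthInfo().add(...)_ association call described above (the class name is an assumption, and the exact ServerCnxn API can differ between releases):

```
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.server.ServerCnxn;
import org.apache.zookeeper.server.auth.AuthenticationProvider;

// The client sends an opaque token; an ACL entry matches only if its
// expression equals that token exactly.
public class FooAuthProvider implements AuthenticationProvider {
    public String getScheme() { return "foo"; }

    public KeeperException.Code handleAuthentication(ServerCnxn cnxn,
                                                     byte[] authData) {
        String token = new String(authData);
        if (token.isEmpty()) {
            return KeeperException.Code.AUTHFAILED;
        }
        // Associate the id with this connection, as described above.
        cnxn.getAuthInfo().add(new Id(getScheme(), token));
        return KeeperException.Code.OK;
    }

    public boolean isValid(String id) { return !id.isEmpty(); }

    public boolean matches(String id, String aclExpr) {
        return id.equals(aclExpr);
    }

    public boolean isAuthenticated() { return true; }
}
```

Such a class would then be registered on every server, e.g. via an _authProvider.1_ entry giving its fully qualified name.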
| 1121 |
+
|
| 1122 |
+
**Added in 3.6.0**: An alternate abstraction is available for pluggable
|
| 1123 |
+
authentication. It provides additional arguments.
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
public abstract class ServerAuthenticationProvider implements AuthenticationProvider {
|
| 1127 |
+
public abstract KeeperException.Code handleAuthentication(ServerObjs serverObjs, byte authData[]);
|
| 1128 |
+
public abstract boolean matches(ServerObjs serverObjs, MatchValues matchValues);
|
| 1129 |
+
}
|
| 1130 |
+
|
| 1131 |
+
|
| 1132 |
+
Instead of implementing AuthenticationProvider you extend ServerAuthenticationProvider. Your handleAuthentication()
|
| 1133 |
+
and matches() methods will then receive the additional parameters (via ServerObjs and MatchValues).
|
| 1134 |
+
|
| 1135 |
+
* **ZooKeeperServer**
|
| 1136 |
+
The ZooKeeperServer instance
|
| 1137 |
+
* **ServerCnxn**
|
| 1138 |
+
The current connection
|
| 1139 |
+
* **path**
|
| 1140 |
+
The ZNode path being operated on (or null if not used)
|
| 1141 |
+
* **perm**
|
| 1142 |
+
The operation value or 0
|
| 1143 |
+
* **setAcls**
|
| 1144 |
+
When the setAcl() method is being operated on, the list of ACLs that are being set
|
| 1145 |
+
|
| 1146 |
+
<a name="ch_zkGuarantees"></a>
|
| 1147 |
+
|
| 1148 |
+
## Consistency Guarantees
|
| 1149 |
+
|
| 1150 |
+
ZooKeeper is a high performance, scalable service. Both reads and
|
| 1151 |
+
write operations are designed to be fast, though reads are faster than
|
| 1152 |
+
writes. The reason for this is that in the case of reads, ZooKeeper can
|
| 1153 |
+
serve older data, which in turn is due to ZooKeeper's consistency
|
| 1154 |
+
guarantees:
|
| 1155 |
+
|
| 1156 |
+
* *Sequential Consistency* :
|
| 1157 |
+
Updates from a client will be applied in the order that they
|
| 1158 |
+
were sent.
|
| 1159 |
+
|
| 1160 |
+
* *Atomicity* :
|
| 1161 |
+
Updates either succeed or fail -- there are no partial
|
| 1162 |
+
results.
|
| 1163 |
+
|
| 1164 |
+
* *Single System Image* :
|
| 1165 |
+
A client will see the same view of the service regardless of
|
| 1166 |
+
the server that it connects to. i.e., a client will never see an
|
| 1167 |
+
older view of the system even if the client fails over to a
|
| 1168 |
+
different server with the same session.
|
| 1169 |
+
|
| 1170 |
+
* *Reliability* :
|
| 1171 |
+
Once an update has been applied, it will persist from that
|
| 1172 |
+
time forward until a client overwrites the update. This guarantee
|
| 1173 |
+
has two corollaries:
|
| 1174 |
+
1. If a client gets a successful return code, the update will
|
| 1175 |
+
have been applied. On some failures (communication errors,
|
| 1176 |
+
timeouts, etc) the client will not know if the update has
|
| 1177 |
+
applied or not. We take steps to minimize the failures, but the
|
| 1178 |
+
guarantee is only present with successful return codes.
|
| 1179 |
+
(This is called the _monotonicity condition_ in Paxos.)
|
| 1180 |
+
1. Any updates that are seen by the client, through a read
|
| 1181 |
+
request or successful update, will never be rolled back when
|
| 1182 |
+
recovering from server failures.
|
| 1183 |
+
|
| 1184 |
+
* *Timeliness* :
|
| 1185 |
+
The client's view of the system is guaranteed to be up-to-date
|
| 1186 |
+
within a certain time bound (on the order of tens of seconds).
|
| 1187 |
+
Either system changes will be seen by a client within this bound, or
|
| 1188 |
+
the client will detect a service outage.
|
| 1189 |
+
|
| 1190 |
+
Using these consistency guarantees it is easy to build higher level
|
| 1191 |
+
functions such as leader election, barriers, queues, and read/write
|
| 1192 |
+
revocable locks solely at the ZooKeeper client (no additions needed to
|
| 1193 |
+
ZooKeeper). See [Recipes and Solutions](recipes.html)
|
| 1194 |
+
for more details.
|
| 1195 |
+
|
| 1196 |
+
######Note
|
| 1197 |
+
|
| 1198 |
+
>Sometimes developers mistakenly assume one other guarantee that
|
| 1199 |
+
ZooKeeper does _not_ in fact make. This is:
|
| 1200 |
+
> *Simultaneously Consistent Cross-Client Views* :
|
| 1201 |
+
ZooKeeper does not guarantee that at every instance in
|
| 1202 |
+
time, two different clients will have identical views of
|
| 1203 |
+
ZooKeeper data. Due to factors like network delays, one client
|
| 1204 |
+
may perform an update before another client gets notified of the
|
| 1205 |
+
change. Consider the scenario of two clients, A and B. If client
|
| 1206 |
+
A sets the value of a znode /a from 0 to 1, then tells client B
|
| 1207 |
+
to read /a, client B may read the old value of 0, depending on
|
| 1208 |
+
which server it is connected to. If it
|
| 1209 |
+
is important that Client A and Client B read the same value,
|
| 1210 |
+
Client B should call the **sync()** method from the ZooKeeper API
|
| 1211 |
+
before it performs its read.
|
| 1212 |
+
So, ZooKeeper by itself doesn't guarantee that changes occur
|
| 1213 |
+
synchronously across all servers, but ZooKeeper
|
| 1214 |
+
primitives can be used to construct higher level functions that
|
| 1215 |
+
provide useful client synchronization. (For more information,
|
| 1216 |
+
see the [ZooKeeper Recipes](recipes.html).)
|
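A minimal Java sketch of client B's side of this sync-before-read pattern (the znode path is illustrative; sync() is asynchronous, hence the latch):

```
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class SyncThenRead {
    // Flush the view of the server this client is connected to, then read,
    // so that client A's earlier update to /a is visible.
    static byte[] readLatest(ZooKeeper zk)
            throws KeeperException, InterruptedException {
        CountDownLatch synced = new CountDownLatch(1);
        zk.sync("/a", (rc, path, ctx) -> synced.countDown(), null);
        synced.await();
        return zk.getData("/a", false, null);
    }
}
```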
| 1217 |
+
|
| 1218 |
+
<a name="ch_bindings"></a>
|
| 1219 |
+
|
| 1220 |
+
## Bindings
|
| 1221 |
+
|
| 1222 |
+
The ZooKeeper client libraries come in two languages: Java and C.
|
| 1223 |
+
The following sections describe these.
|
| 1224 |
+
|
| 1225 |
+
<a name="Java+Binding"></a>
|
| 1226 |
+
|
| 1227 |
+
### Java Binding
|
| 1228 |
+
|
| 1229 |
+
There are two packages that make up the ZooKeeper Java binding:
|
| 1230 |
+
**org.apache.zookeeper** and **org.apache.zookeeper.data**. The rest of the
|
| 1231 |
+
packages that make up ZooKeeper are used internally or are part of the
|
| 1232 |
+
server implementation. The **org.apache.zookeeper.data** package is made up of
|
| 1233 |
+
generated classes that are used simply as containers.
|
| 1234 |
+
|
| 1235 |
+
The main class used by a ZooKeeper Java client is the **ZooKeeper** class. Its two constructors differ only
|
| 1236 |
+
by an optional session id and password. ZooKeeper supports session
|
| 1237 |
+
recovery across instances of a process. A Java program may save its
|
| 1238 |
+
session id and password to stable storage, restart, and recover the
|
| 1239 |
+
session that was used by the earlier instance of the program.
|
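A sketch of that recovery flow (the address and timeout are illustrative, and the storage of the two saved values is left to the application):

```
import org.apache.zookeeper.ZooKeeper;

public class SessionRecoveryExample {
    // In a real program these would be written to stable storage.
    static long savedId;
    static byte[] savedPasswd;

    static void save(ZooKeeper zk) {
        savedId = zk.getSessionId();
        savedPasswd = zk.getSessionPasswd();
    }

    // After a restart, the session-id/password constructor reattaches to
    // the old session, provided it has not expired in the meantime.
    static ZooKeeper recover() throws Exception {
        return new ZooKeeper("127.0.0.1:2181", 30000, event -> {},
                savedId, savedPasswd);
    }
}
```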
| 1240 |
+
|
| 1241 |
+
When a ZooKeeper object is created, two threads are created as
|
| 1242 |
+
well: an IO thread and an event thread. All IO happens on the IO thread
|
| 1243 |
+
(using Java NIO). All event callbacks happen on the event thread.
|
| 1244 |
+
Session maintenance such as reconnecting to ZooKeeper servers and
|
| 1245 |
+
maintaining heartbeat is done on the IO thread. Responses for
|
| 1246 |
+
synchronous methods are also processed in the IO thread. All responses
|
| 1247 |
+
to asynchronous methods and watch events are processed on the event
|
| 1248 |
+
thread. There are a few things to notice that result from this
|
| 1249 |
+
design:
|
| 1250 |
+
|
| 1251 |
+
* All completions for asynchronous calls and watcher callbacks
|
| 1252 |
+
will be made in order, one at a time. The caller can do any
|
| 1253 |
+
processing they wish, but no other callbacks will be processed
|
| 1254 |
+
during that time.
|
| 1255 |
+
* Callbacks do not block the processing of the IO thread or the
|
| 1256 |
+
processing of the synchronous calls.
|
| 1257 |
+
* Synchronous calls may not return in the correct order. For
|
| 1258 |
+
example, assume a client does the following processing: issues an
|
| 1259 |
+
asynchronous read of node **/a** with
|
| 1260 |
+
_watch_ set to true, and then in the completion
|
| 1261 |
+
callback of the read it does a synchronous read of **/a**. (Maybe not good practice, but not illegal
|
| 1262 |
+
either, and it makes for a simple example.)
|
| 1263 |
+
Note that if there is a change to **/a** between the asynchronous read and the
|
| 1264 |
+
synchronous read, the client library will receive the watch event
|
| 1265 |
+
saying **/a** changed before the
|
| 1266 |
+
response for the synchronous read, but because of the completion
|
| 1267 |
+
callback blocking the event queue, the synchronous read will
|
| 1268 |
+
return with the new value of **/a**
|
| 1269 |
+
before the watch event is processed.
|
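For reference, a sketch of such an asynchronous read (the znode path is illustrative):

```
import org.apache.zookeeper.AsyncCallback.DataCallback;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class AsyncReadExample {
    // The callback runs on the event thread, so keep it short and never
    // block in it waiting on the result of another ZooKeeper call.
    static void readAsync(ZooKeeper zk) {
        zk.getData("/a", true, new DataCallback() {
            @Override
            public void processResult(int rc, String path, Object ctx,
                                      byte[] data, Stat stat) {
                // rc maps to KeeperException.Code; 0 (OK) means data is valid.
            }
        }, null);
    }
}
```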
| 1270 |
+
|
| 1271 |
+
Finally, the rules associated with shutdown are straightforward:
|
| 1272 |
+
once a ZooKeeper object is closed or receives a fatal event
|
| 1273 |
+
(SESSION_EXPIRED and AUTH_FAILED), the ZooKeeper object becomes invalid.
|
| 1274 |
+
On a close, the two threads shut down and any further access on zookeeper
|
| 1275 |
+
handle is undefined behavior and should be avoided.
|
| 1276 |
+
|
| 1277 |
+
<a name="sc_java_client_configuration"></a>
|
| 1278 |
+
|
| 1279 |
+
#### Client Configuration Parameters
|
| 1280 |
+
|
| 1281 |
+
The following list contains configuration properties for the Java client. You can set any
|
| 1282 |
+
of these properties using Java system properties. For server properties, please check the
|
| 1283 |
+
[Server configuration section of the Admin Guide](zookeeperAdmin.html#sc_configuration).
|
| 1284 |
+
The ZooKeeper Wiki also has useful pages about
|
| 1285 |
+
[ZooKeeper SSL support](https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeper+SSL+User+Guide),
|
| 1286 |
+
and [SASL authentication for ZooKeeper](https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeper+and+SASL).
|
| 1287 |
+
|
| 1288 |
+
|
| 1289 |
+
* *zookeeper.sasl.client* :
|
| 1290 |
+
Set the value to **false** to disable
|
| 1291 |
+
SASL authentication. Default is **true**.
|
| 1292 |
+
|
| 1293 |
+
* *zookeeper.sasl.clientconfig* :
|
| 1294 |
+
Specifies the context key in the JAAS login file. Default is "Client".
|
| 1295 |
+
|
| 1296 |
+
* *zookeeper.server.principal* :
|
| 1297 |
+
Specifies the server principal to be used by the client for authentication, while connecting to the zookeeper
|
| 1298 |
+
server, when Kerberos authentication is enabled. If this configuration is provided, then
|
| 1299 |
+
the ZooKeeper client will NOT USE any of the following parameters to determine the server principal:
|
| 1300 |
+
zookeeper.sasl.client.username, zookeeper.sasl.client.canonicalize.hostname, zookeeper.server.realm.
|
| 1301 |
+
Note: this config parameter only works for ZooKeeper 3.5.7+ and 3.6.0+.
|
| 1302 |
+
|
| 1303 |
+
* *zookeeper.sasl.client.username* :
|
| 1304 |
+
Traditionally, a principal is divided into three parts: the primary, the instance, and the realm.
|
| 1305 |
+
The format of a typical Kerberos V5 principal is primary/instance@REALM.
|
| 1306 |
+
zookeeper.sasl.client.username specifies the primary part of the server principal. Default
|
| 1307 |
+
is "zookeeper". Instance part is derived from the server IP. Finally server's principal is
|
| 1308 |
+
username/IP@realm, where username is the value of zookeeper.sasl.client.username, IP is
|
| 1309 |
+
the server IP, and realm is the value of zookeeper.server.realm.
|
| 1310 |
+
|
| 1311 |
+
* *zookeeper.sasl.client.canonicalize.hostname* :
|
| 1312 |
+
When the zookeeper.server.principal parameter is not provided, the ZooKeeper client will try to
|
| 1313 |
+
determine the 'instance' (host) part of the ZooKeeper server principal. First it takes the hostname provided
|
| 1314 |
+
as the ZooKeeper server connection string. Then it tries to 'canonicalize' the address by getting
|
| 1315 |
+
the fully qualified domain name belonging to the address. You can disable this 'canonicalization'
|
| 1316 |
+
by setting: zookeeper.sasl.client.canonicalize.hostname=false
|
| 1317 |
+
|
| 1318 |
+
* *zookeeper.server.realm* :
|
| 1319 |
+
Realm part of the server principal. By default it is the client principal realm.
|
| 1320 |
+
|
| 1321 |
+
* *zookeeper.disableAutoWatchReset* :
|
| 1322 |
+
This switch controls whether automatic watch resetting is enabled. Clients automatically
|
| 1323 |
+
reset watches during session reconnect by default; this option allows the client to turn off
|
| 1324 |
+
this behavior by setting zookeeper.disableAutoWatchReset to **true**.
|
| 1325 |
+
|
| 1326 |
+
* *zookeeper.client.secure* :
|
| 1327 |
+
**New in 3.5.5:**
|
| 1328 |
+
If you want to connect to the server secure client port, you need to set this property to
|
| 1329 |
+
**true**
|
| 1330 |
+
on the client. This will connect to the server using SSL with the specified credentials. Note that
|
| 1331 |
+
it requires the Netty client.
|
| 1332 |
+
|
| 1333 |
+
* *zookeeper.clientCnxnSocket* :
|
| 1334 |
+
Specifies which ClientCnxnSocket to be used. Possible values are
|
| 1335 |
+
**org.apache.zookeeper.ClientCnxnSocketNIO**
|
| 1336 |
+
and
|
| 1337 |
+
**org.apache.zookeeper.ClientCnxnSocketNetty**
|
| 1338 |
+
. Default is
|
| 1339 |
+
**org.apache.zookeeper.ClientCnxnSocketNIO**
|
| 1340 |
+
. If you want to connect to the server's secure client port, you need to set this property to
|
| 1341 |
+
**org.apache.zookeeper.ClientCnxnSocketNetty**
|
| 1342 |
+
on the client.
|
| 1343 |
+
|
| 1344 |
+
* *zookeeper.ssl.keyStore.location and zookeeper.ssl.keyStore.password* :
|
| 1345 |
+
**New in 3.5.5:**
|
| 1346 |
+
Specifies the file path to a JKS containing the local credentials to be used for SSL connections,
|
| 1347 |
+
and the password to unlock the file.
|
| 1348 |
+
|
| 1349 |
+
* *zookeeper.ssl.keyStore.passwordPath* :
|
| 1350 |
+
**New in 3.8.0:**
|
| 1351 |
+
Specifies the file path which contains the keystore password.
|
| 1352 |
+
|
| 1353 |
+
* *zookeeper.ssl.trustStore.location and zookeeper.ssl.trustStore.password* :
|
| 1354 |
+
**New in 3.5.5:**
|
| 1355 |
+
Specifies the file path to a JKS containing the remote credentials to be used for SSL connections,
|
| 1356 |
+
and the password to unlock the file.
|
| 1357 |
+
|
| 1358 |
+
* *zookeeper.ssl.trustStore.passwordPath* :
|
| 1359 |
+
**New in 3.8.0:**
|
| 1360 |
+
Specifies the file path which contains the truststore password.
|
| 1361 |
+
|
| 1362 |
+
* *zookeeper.ssl.keyStore.type* and *zookeeper.ssl.trustStore.type*:
|
| 1363 |
+
**New in 3.5.5:**
|
| 1364 |
+
Specifies the file format of keys/trust store files used to establish TLS connection to the ZooKeeper server.
|
| 1365 |
+
Values: JKS, PEM, PKCS12 or null (detect by filename). Default: null.
|
| 1366 |
+
**New in 3.6.3, 3.7.0:**
|
| 1367 |
+
The format BCFKS was added.
|
| 1368 |
+
|
| 1369 |
+
* *jute.maxbuffer* :
|
| 1370 |
+
On the client side, it specifies the maximum size of the incoming data from the server. The default is 0xfffff (1,048,575) bytes,
|
| 1371 |
+
or just under 1M. This is really a sanity check. The ZooKeeper server is designed to store and send
|
| 1372 |
+
data on the order of kilobytes. If incoming data length is more than this value, an IOException
|
| 1373 |
+
is raised. This value on the client side should be kept the same as on the server side (setting **System.setProperty("jute.maxbuffer", "xxxx")** on the client side will work);
|
| 1374 |
+
otherwise problems will arise.
|
| 1375 |
+
|
| 1376 |
+
* *zookeeper.kinit* :
|
| 1377 |
+
Specifies the path to the kinit binary. Default is "/usr/bin/kinit".
|
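For illustration, here is a minimal sketch that sets a few of the properties above programmatically before creating the client handle. The connection string, keystore/truststore paths and passwords are placeholders; the same values can equally be supplied as `-D` JVM arguments.

    import java.io.IOException;
    import org.apache.zookeeper.ZooKeeper;

    public class SecureClientConfigSketch {
        public static void main(String[] args) throws IOException, InterruptedException {
            // System properties must be set before the ZooKeeper handle is created.
            System.setProperty("zookeeper.client.secure", "true");
            System.setProperty("zookeeper.clientCnxnSocket",
                    "org.apache.zookeeper.ClientCnxnSocketNetty"); // TLS requires the Netty client
            // Placeholder keystore/truststore locations and passwords.
            System.setProperty("zookeeper.ssl.keyStore.location", "/path/to/keystore.jks");
            System.setProperty("zookeeper.ssl.keyStore.password", "changeit");
            System.setProperty("zookeeper.ssl.trustStore.location", "/path/to/truststore.jks");
            System.setProperty("zookeeper.ssl.trustStore.password", "changeit");

            // Connect to the server's secure client port (address illustrative).
            ZooKeeper zk = new ZooKeeper("localhost:2281", 30000, event -> { });
            // ... use the handle ...
            zk.close();
        }
    }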
| 1378 |
+
|
| 1379 |
+
<a name="C+Binding"></a>
|
| 1380 |
+
|
| 1381 |
+
### C Binding
|
| 1382 |
+
|
| 1383 |
+
The C binding has a single-threaded and multi-threaded library.
|
| 1384 |
+
The multi-threaded library is easiest to use and is most similar to the
|
| 1385 |
+
Java API. This library will create an IO thread and an event dispatch
|
| 1386 |
+
thread for handling connection maintenance and callbacks. The
|
| 1387 |
+
single-threaded library allows ZooKeeper to be used in event driven
|
| 1388 |
+
applications by exposing the event loop used in the multi-threaded
|
| 1389 |
+
library.
|
| 1390 |
+
|
| 1391 |
+
The package includes two shared libraries: zookeeper_st and
|
| 1392 |
+
zookeeper_mt. The former only provides the asynchronous APIs and
|
| 1393 |
+
callbacks for integrating into the application's event loop. The only
|
| 1394 |
+
reason this library exists is to support the platforms where a
|
| 1395 |
+
_pthread_ library is not available or is unstable
|
| 1396 |
+
(e.g. FreeBSD 4.x). In all other cases, application developers should
|
| 1397 |
+
link with zookeeper_mt, as it includes support for both Sync and Async
|
| 1398 |
+
API.
|
| 1399 |
+
|
| 1400 |
+
<a name="Installation"></a>
|
| 1401 |
+
|
| 1402 |
+
#### Installation
|
| 1403 |
+
|
| 1404 |
+
If you're building the client from a check-out from the Apache
|
| 1405 |
+
repository, follow the steps outlined below. If you're building from a
|
| 1406 |
+
project source package downloaded from apache, skip to step **3**.
|
| 1407 |
+
|
| 1408 |
+
1. Run `mvn compile` in the zookeeper-jute directory (*.../trunk/zookeeper-jute*).
|
| 1409 |
+
This will create a directory named "generated" under
|
| 1410 |
+
*.../trunk/zookeeper-client/zookeeper-client-c*.
|
| 1411 |
+
1. Change directory to *.../trunk/zookeeper-client/zookeeper-client-c*
|
| 1412 |
+
and run `autoreconf -if` to bootstrap **autoconf**, **automake** and **libtool**. Make sure you have **autoconf version 2.59** or greater installed.
|
| 1413 |
+
Skip to step **4**.
|
| 1414 |
+
1. If you are building from a project source package,
|
| 1415 |
+
unzip/untar the source tarball and cd to the
|
| 1416 |
+
*zookeeper-x.x.x/zookeeper-client/zookeeper-client-c* directory.
|
| 1417 |
+
1. Run `./configure <your-options>` to
|
| 1418 |
+
generate the makefile. Here are some of the options the **configure** utility supports that can be
|
| 1419 |
+
useful in this step:
|
| 1420 |
+
* `--enable-debug`
|
| 1421 |
+
Enables optimization and enables debug info compiler
|
| 1422 |
+
options. (Disabled by default.)
|
| 1423 |
+
* `--without-syncapi`
|
| 1424 |
+
Disables Sync API support; zookeeper_mt library won't be
|
| 1425 |
+
built. (Enabled by default.)
|
| 1426 |
+
* `--disable-static`
|
| 1427 |
+
Do not build static libraries. (Enabled by
|
| 1428 |
+
default.)
|
| 1429 |
+
* `--disable-shared`
|
| 1430 |
+
Do not build shared libraries. (Enabled by
|
| 1431 |
+
default.)
|
| 1432 |
+
###### Note
|
| 1433 |
+
>See INSTALL for general information about running **configure**.
|
| 1434 |
+
1. Run `make` or `make install`
|
| 1435 |
+
to build the libraries and install them.
|
| 1436 |
+
1. To generate doxygen documentation for the ZooKeeper API, run
|
| 1437 |
+
`make doxygen-doc`. All documentation will be
|
| 1438 |
+
placed in a new subfolder named docs. By default, this command
|
| 1439 |
+
only generates HTML. For information on other document formats,
|
| 1440 |
+
run `./configure --help`
|
| 1441 |
+
|
| 1442 |
+
<a name="Building+Your+Own+C+Client"></a>
|
| 1443 |
+
|
| 1444 |
+
#### Building Your Own C Client
|
| 1445 |
+
|
| 1446 |
+
In order to be able to use the ZooKeeper C API in your application
|
| 1447 |
+
you have to remember to
|
| 1448 |
+
|
| 1449 |
+
1. Include ZooKeeper header: `#include <zookeeper/zookeeper.h>`
|
| 1450 |
+
1. If you are building a multithreaded client, compile with
|
| 1451 |
+
`-DTHREADED` compiler flag to enable the multi-threaded version of
|
| 1452 |
+
the library, and then link against the
|
| 1453 |
+
_zookeeper_mt_ library. If you are building a
|
| 1454 |
+
single-threaded client, do not compile with `-DTHREADED`, and be
|
| 1455 |
+
sure to link against the _zookeeper_st_ library.
|
| 1456 |
+
|
| 1457 |
+
###### Note
|
| 1458 |
+
>See *.../trunk/zookeeper-client/zookeeper-client-c/src/cli.c*
|
| 1459 |
+
for an example of a C client implementation.
|
| 1460 |
+
|
| 1461 |
+
<a name="ch_guideToZkOperations"></a>
|
| 1462 |
+
|
| 1463 |
+
## Building Blocks: A Guide to ZooKeeper Operations
|
| 1464 |
+
|
| 1465 |
+
This section surveys all the operations a developer can perform
|
| 1466 |
+
against a ZooKeeper server. It is lower level information than the earlier
|
| 1467 |
+
concepts chapters in this manual, but higher level than the ZooKeeper API
|
| 1468 |
+
Reference. It covers these topics:
|
| 1469 |
+
|
| 1470 |
+
* [Connecting to ZooKeeper](#sc_connectingToZk)
|
| 1471 |
+
|
| 1472 |
+
<a name="sc_errorsZk"></a>
|
| 1473 |
+
|
| 1474 |
+
### Handling Errors
|
| 1475 |
+
|
| 1476 |
+
Both the Java and C client bindings may report errors. The Java client binding does so by throwing KeeperException; calling code() on the exception will return the specific error code. The C client binding returns an error code as defined in the enum ZOO_ERRORS. API callbacks indicate the result code for both language bindings. See the API documentation (javadoc for Java, doxygen for C) for full details on the possible errors and their meaning.
|
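For example, a minimal Java sketch of `code()`-based error handling could look like this; the znode path is illustrative, and the handle is assumed to be already connected:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class ErrorHandlingSketch {
        // Return the data of a znode, or null if it does not exist.
        static byte[] readOrNull(ZooKeeper zk, String path) throws InterruptedException {
            try {
                return zk.getData(path, false, null);
            } catch (KeeperException e) {
                if (e.code() == KeeperException.Code.NONODE) {
                    return null; // znode does not exist
                }
                throw new IllegalStateException("unexpected error: " + e.code(), e);
            }
        }
    }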
| 1477 |
+
|
| 1478 |
+
<a name="sc_connectingToZk"></a>
|
| 1479 |
+
|
| 1480 |
+
### Connecting to ZooKeeper
|
| 1481 |
+
|
| 1482 |
+
Before we begin, you will have to set up a running ZooKeeper server so that we can start developing the client. For the C client bindings, we will be using the multithreaded library (zookeeper_mt) with a simple example written in C. To establish a connection with the ZooKeeper server, we use the C API _zookeeper_init_, which has the following signature:
|
| 1483 |
+
|
| 1484 |
+
int zookeeper_init(const char *host, watcher_fn fn, int recv_timeout, const clientid_t *clientid, void *context, int flags);
|
| 1485 |
+
|
| 1486 |
+
* *host* :
|
| 1487 |
+
Connection string to the ZooKeeper server in host:port format. If there are multiple servers, separate the host:port pairs with commas, e.g. "127.0.0.1:2181,127.0.0.1:3001,127.0.0.1:3002"
|
| 1488 |
+
|
| 1489 |
+
* *fn* :
|
| 1490 |
+
Watcher function to process events when a notification is triggered.
|
| 1491 |
+
|
| 1492 |
+
* *recv_timeout* :
|
| 1493 |
+
Session timeout in milliseconds.
|
| 1494 |
+
|
| 1495 |
+
* *clientid* :
|
| 1496 |
+
We can specify 0 for a new session. If a session has already been established, we can provide that client ID to reconnect to the previous session.
|
| 1497 |
+
|
| 1498 |
+
* *context* :
|
| 1499 |
+
Context object that can be associated with the zhandle_t handle. If it is not used, we can set it to 0.
|
| 1500 |
+
|
| 1501 |
+
* *flags* :
|
| 1502 |
+
For a basic initialization, we can leave it at 0.
|
| 1503 |
+
|
| 1504 |
+
We will demonstrate a client that outputs "Connection established with Zookeeper." after a successful connection, or an error message otherwise. Let's call the following code _zkClient.cc_:
|
| 1505 |
+
|
| 1506 |
+
|
| 1507 |
+
#include <stdio.h>
|
| 1508 |
+
#include <zookeeper/zookeeper.h>
|
| 1509 |
+
#include <errno.h>
|
| 1510 |
+
using namespace std;
|
| 1511 |
+
|
| 1512 |
+
// Keeping track of the connection state
|
| 1513 |
+
static int connected = 0;
|
| 1514 |
+
static int expired = 0;
|
| 1515 |
+
|
| 1516 |
+
// *zkHandler handles the connection with Zookeeper
|
| 1517 |
+
static zhandle_t *zkHandler;
|
| 1518 |
+
|
| 1519 |
+
// watcher function would process events
|
| 1520 |
+
void watcher(zhandle_t *zkH, int type, int state, const char *path, void *watcherCtx)
|
| 1521 |
+
{
|
| 1522 |
+
if (type == ZOO_SESSION_EVENT) {
|
| 1523 |
+
|
| 1524 |
+
// state refers to states of zookeeper connection.
|
| 1525 |
+
// To keep it simple, we would demonstrate these 3: ZOO_EXPIRED_SESSION_STATE, ZOO_CONNECTED_STATE, ZOO_NOTCONNECTED_STATE
|
| 1526 |
+
// If you are using ACL, you should be aware of an authentication failure state - ZOO_AUTH_FAILED_STATE
|
| 1527 |
+
if (state == ZOO_CONNECTED_STATE) {
|
| 1528 |
+
connected = 1;
|
| 1529 |
+
} else if (state == ZOO_NOTCONNECTED_STATE ) {
|
| 1530 |
+
connected = 0;
|
| 1531 |
+
} else if (state == ZOO_EXPIRED_SESSION_STATE) {
|
| 1532 |
+
expired = 1;
|
| 1533 |
+
connected = 0;
|
| 1534 |
+
zookeeper_close(zkH);
|
| 1535 |
+
}
|
| 1536 |
+
}
|
| 1537 |
+
}
|
| 1538 |
+
|
| 1539 |
+
int main(){
|
| 1540 |
+
zoo_set_debug_level(ZOO_LOG_LEVEL_DEBUG);
|
| 1541 |
+
|
| 1542 |
+
// zookeeper_init returns the handler upon a successful connection, null otherwise
|
| 1543 |
+
zkHandler = zookeeper_init("localhost:2181", watcher, 10000, 0, 0, 0);
|
| 1544 |
+
|
| 1545 |
+
if (!zkHandler) {
|
| 1546 |
+
return errno;
|
| 1547 |
+
}else{
|
| 1548 |
+
printf("Connection established with Zookeeper. \n");
|
| 1549 |
+
}
|
| 1550 |
+
|
| 1551 |
+
// Close Zookeeper connection
|
| 1552 |
+
zookeeper_close(zkHandler);
|
| 1553 |
+
|
| 1554 |
+
return 0;
|
| 1555 |
+
}
|
| 1556 |
+
|
| 1557 |
+
|
| 1558 |
+
Compile the code with the multithreaded library mentioned before.
|
| 1559 |
+
|
| 1560 |
+
`> g++ -Iinclude/ zkClient.cc -lzookeeper_mt -o Client`
|
| 1561 |
+
|
| 1562 |
+
Run the client.
|
| 1563 |
+
|
| 1564 |
+
`> ./Client`
|
| 1565 |
+
|
| 1566 |
+
From the output, you should see "Connection established with Zookeeper." along with ZooKeeper's DEBUG messages if the connection is successful.
|
| 1567 |
+
|
| 1568 |
+
<a name="ch_gotchas"></a>
|
| 1569 |
+
|
| 1570 |
+
## Gotchas: Common Problems and Troubleshooting
|
| 1571 |
+
|
| 1572 |
+
So now you know ZooKeeper. It's fast, simple, your application
|
| 1573 |
+
works, but wait ... something's wrong. Here are some pitfalls that
|
| 1574 |
+
ZooKeeper users fall into:
|
| 1575 |
+
|
| 1576 |
+
1. If you are using watches, you must look for the connected watch
|
| 1577 |
+
event. When a ZooKeeper client disconnects from a server, you will
|
| 1578 |
+
not receive notification of changes until reconnected. If you are
|
| 1579 |
+
watching for a znode to come into existence, you will miss the event
|
| 1580 |
+
if the znode is created and deleted while you are disconnected (see the watcher sketch after this list).
|
| 1581 |
+
1. You must test ZooKeeper server failures. The ZooKeeper service
|
| 1582 |
+
can survive failures as long as a majority of servers are active. The
|
| 1583 |
+
question to ask is: can your application handle it? In the real world
|
| 1584 |
+
a client's connection to ZooKeeper can break. (ZooKeeper server
|
| 1585 |
+
failures and network partitions are common reasons for connection
|
| 1586 |
+
loss.) The ZooKeeper client library takes care of recovering your
|
| 1587 |
+
connection and letting you know what happened, but you must make sure
|
| 1588 |
+
that you recover your state and any outstanding requests that failed.
|
| 1589 |
+
Find out if you got it right in the test lab, not in production - test
|
| 1590 |
+
with a ZooKeeper service made up of several servers and subject
|
| 1591 |
+
them to reboots.
|
| 1592 |
+
1. The list of ZooKeeper servers used by the client must match the
|
| 1593 |
+
list of ZooKeeper servers that each ZooKeeper server has. Things can
|
| 1594 |
+
work, although not optimally, if the client list is a subset of the
|
| 1595 |
+
real list of ZooKeeper servers, but not if the client lists ZooKeeper
|
| 1596 |
+
servers not in the ZooKeeper cluster.
|
| 1597 |
+
1. Be careful where you put that transaction log. The most
|
| 1598 |
+
performance-critical part of ZooKeeper is the transaction log.
|
| 1599 |
+
ZooKeeper must sync transactions to media before it returns a
|
| 1600 |
+
response. A dedicated transaction log device is key to consistent good
|
| 1601 |
+
performance. Putting the log on a busy device will adversely affect
|
| 1602 |
+
performance. If you only have one storage device, put trace files on
|
| 1603 |
+
NFS and increase the snapshotCount; it doesn't eliminate the problem,
|
| 1604 |
+
but it can mitigate it.
|
| 1605 |
+
1. Set your Java max heap size correctly. It is very important to
|
| 1606 |
+
_avoid swapping._ Going to disk unnecessarily will
|
| 1607 |
+
almost certainly degrade your performance unacceptably. Remember, in
|
| 1608 |
+
ZooKeeper, everything is ordered, so if one request hits the disk, all
|
| 1609 |
+
other queued requests hit the disk.
|
| 1610 |
+
To avoid swapping, try to set the heapsize to the amount of
|
| 1611 |
+
physical memory you have, minus the amount needed by the OS and cache.
|
| 1612 |
+
The best way to determine an optimal heap size for your configurations
|
| 1613 |
+
is to _run load tests_. If for some reason you
|
| 1614 |
+
can't, be conservative in your estimates and choose a number well
|
| 1615 |
+
below the limit that would cause your machine to swap. For example, on
|
| 1616 |
+
a 4G machine, a 3G heap is a conservative estimate to start
|
| 1617 |
+
with.
|
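As a minimal illustration of the first gotcha, the sketch below tracks session state from watch events before doing any work; it is a skeleton only, and the connection string is illustrative:

    import java.io.IOException;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.Watcher.Event.KeeperState;
    import org.apache.zookeeper.ZooKeeper;

    public class ConnectionStateWatcher implements Watcher {
        private final CountDownLatch connected = new CountDownLatch(1);

        @Override
        public void process(WatchedEvent event) {
            if (event.getState() == KeeperState.SyncConnected) {
                connected.countDown(); // safe to (re)issue reads and set watches now
            } else if (event.getState() == KeeperState.Disconnected) {
                // No watch notifications will arrive until the client reconnects.
            } else if (event.getState() == KeeperState.Expired) {
                // The session is gone: a new ZooKeeper handle must be created.
            }
        }

        public static void main(String[] args) throws IOException, InterruptedException {
            ConnectionStateWatcher watcher = new ConnectionStateWatcher();
            ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, watcher);
            watcher.connected.await(); // wait for the connected watch event
            zk.close();
        }
    }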
| 1618 |
+
|
| 1619 |
+
## Links to Other Information
|
| 1620 |
+
|
| 1621 |
+
Outside the formal documentation, there are several other sources of
|
| 1622 |
+
information for ZooKeeper developers.
|
| 1623 |
+
|
| 1624 |
+
* *[API Reference](https://zookeeper.apache.org/doc/current/apidocs/zookeeper-server/index.html)* :
|
| 1625 |
+
The complete reference to the ZooKeeper API
|
| 1626 |
+
|
| 1627 |
+
* *[ZooKeeper Talk at the Hadoop Summit 2008](https://www.youtube.com/watch?v=rXI9xiesUV8)* :
|
| 1628 |
+
A video introduction to ZooKeeper, by Benjamin Reed of Yahoo!
|
| 1629 |
+
Research
|
| 1630 |
+
|
| 1631 |
+
* *[Barrier and Queue Tutorial](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Tutorial)* :
|
| 1632 |
+
The excellent Java tutorial by Flavio Junqueira, implementing
|
| 1633 |
+
simple barriers and producer-consumer queues using ZooKeeper.
|
| 1634 |
+
|
| 1635 |
+
* *[ZooKeeper - A Reliable, Scalable Distributed Coordination System](https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeperArticles)* :
|
| 1636 |
+
An article by Todd Hoff (07/15/2008)
|
| 1637 |
+
|
| 1638 |
+
* *[ZooKeeper Recipes](recipes.html)* :
|
| 1639 |
+
Pseudo-level discussion of the implementation of various
|
| 1640 |
+
synchronization solutions with ZooKeeper: Event Handles, Queues,
|
| 1641 |
+
Locks, and Two-phase Commits.
|
| 1642 |
+
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperQuotas.md
ADDED
|
@@ -0,0 +1,85 @@
|
| 1 |
+
<!--
|
| 2 |
+
Copyright 2002-2004 The Apache Software Foundation
|
| 3 |
+
|
| 4 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
you may not use this file except in compliance with the License.
|
| 6 |
+
You may obtain a copy of the License at
|
| 7 |
+
|
| 8 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
|
| 10 |
+
Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
See the License for the specific language governing permissions and
|
| 14 |
+
limitations under the License.
|
| 15 |
+
//-->
|
| 16 |
+
|
| 17 |
+
# ZooKeeper Quota's Guide
|
| 18 |
+
|
| 19 |
+
### A Guide to Deployment and Administration
|
| 20 |
+
|
| 21 |
+
* [Quotas](#zookeeper_quotas)
|
| 22 |
+
* [Setting Quotas](#Setting+Quotas)
|
| 23 |
+
* [Listing Quotas](#Listing+Quotas)
|
| 24 |
+
* [Deleting Quotas](#Deleting+Quotas)
|
| 25 |
+
|
| 26 |
+
<a name="zookeeper_quotas"></a>
|
| 27 |
+
|
| 28 |
+
## Quotas
|
| 29 |
+
|
| 30 |
+
ZooKeeper has both namespace and bytes quotas. You can use the ZooKeeperMain class to set up quotas.
|
| 31 |
+
ZooKeeper prints _WARN_ messages if users exceed the quota assigned to them. The messages
|
| 32 |
+
are printed in the ZooKeeper log.
|
| 33 |
+
|
| 34 |
+
Notice: the `namespace` quota is a count quota, which limits the number of znodes
|
| 35 |
+
under the path (including the path itself).
|
| 36 |
+
|
| 37 |
+
$ bin/zkCli.sh -server host:port
|
| 38 |
+
|
| 39 |
+
The above command gives you a command line option of using quotas.
|
| 40 |
+
|
| 41 |
+
<a name="Setting+Quotas"></a>
|
| 42 |
+
|
| 43 |
+
### Setting Quotas
|
| 44 |
+
|
| 45 |
+
- You can use `setquota` to set a quota on a ZooKeeper node. It has options for setting the quota with
|
| 46 |
+
`-n` (for namespace/count) and `-b` (for bytes/data length).
|
| 47 |
+
|
| 48 |
+
- The ZooKeeper quota is stored in ZooKeeper itself in **/zookeeper/quota**. To prevent others from
|
| 49 |
+
changing the quotas, users can set the ACL for **/zookeeper/quota** so that only admins are able to read and write to it.
|
| 50 |
+
|
| 51 |
+
- If no quota exists at the specified path, `setquota` creates one; otherwise it updates the existing quota.
|
| 52 |
+
|
| 53 |
+
- The scope of a quota is all the nodes under the specified path (including the path itself).
|
| 54 |
+
|
| 55 |
+
- To simplify quota calculation in the directory hierarchy, only one quota can be set along a complete tree path (from the root to a leaf node).
|
| 56 |
+
When setting a quota on a path whose parent or child node already has a quota, `setquota` will
|
| 57 |
+
reject the request and report the conflicting parent or child path; users can then adjust the quota allocation (delete the quota, or move it up or down the tree)
|
| 58 |
+
according to their specific circumstances.
|
| 59 |
+
|
| 60 |
+
- Combined with a chroot, quotas provide better isolation between different applications. For example:
|
| 61 |
+
|
| 62 |
+
```bash
|
| 63 |
+
# Chroot is:
|
| 64 |
+
192.168.0.1:2181,192.168.0.2:2181,192.168.0.3:2181/apps/app1
|
| 65 |
+
setquota -n 100000 /apps/app1
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
- Users cannot set a quota on paths under **/zookeeper/quota**.
|
| 69 |
+
|
| 70 |
+
- Quotas can be soft or hard. A soft quota only logs a warning when it is exceeded, while a hard quota
|
| 71 |
+
also throws a `QuotaExceededException`. When both a soft and a hard quota are set on the same path, the hard quota takes priority.
|
| 72 |
+
|
| 73 |
+
<a name="Listing+Quotas"></a>
|
| 74 |
+
|
| 75 |
+
### Listing Quotas
|
| 76 |
+
|
| 77 |
+
You can use _listquota_ to list a quota on a ZooKeeper node.
|
| 78 |
+
|
| 79 |
+
<a name="Deleting+Quotas"></a>
|
| 80 |
+
|
| 81 |
+
### Deleting Quotas
|
| 82 |
+
|
| 83 |
+
You can use _delquota_ to delete the quota on a ZooKeeper node.
|
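For example, a complete set/list/delete cycle from the CLI might look like the following sketch (the path and limits are illustrative):

```bash
# inside bin/zkCli.sh, connected to the ensemble
setquota -n 10 /apps/app1      # at most 10 znodes under /apps/app1 (itself included)
setquota -b 51200 /apps/app1   # at most 51200 bytes of data under /apps/app1
listquota /apps/app1           # show the limits and current usage
delquota -n /apps/app1         # remove only the count quota
delquota /apps/app1            # remove the remaining quota
```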
| 84 |
+
|
| 85 |
+
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperReconfig.md
ADDED
|
@@ -0,0 +1,908 @@
|
| 1 |
+
<!--
|
| 2 |
+
Copyright 2002-2004 The Apache Software Foundation
|
| 3 |
+
|
| 4 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
you may not use this file except in compliance with the License.
|
| 6 |
+
You may obtain a copy of the License at
|
| 7 |
+
|
| 8 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
|
| 10 |
+
Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
See the License for the specific language governing permissions and
|
| 14 |
+
limitations under the License.
|
| 15 |
+
//-->
|
| 16 |
+
|
| 17 |
+
# ZooKeeper Dynamic Reconfiguration
|
| 18 |
+
|
| 19 |
+
* [Overview](#ch_reconfig_intro)
|
| 20 |
+
* [Changes to Configuration Format](#ch_reconfig_format)
|
| 21 |
+
* [Specifying the client port](#sc_reconfig_clientport)
|
| 22 |
+
* [Specifying multiple server addresses](#sc_multiaddress)
|
| 23 |
+
* [The standaloneEnabled flag](#sc_reconfig_standaloneEnabled)
|
| 24 |
+
* [The reconfigEnabled flag](#sc_reconfig_reconfigEnabled)
|
| 25 |
+
* [Dynamic configuration file](#sc_reconfig_file)
|
| 26 |
+
* [Backward compatibility](#sc_reconfig_backward)
|
| 27 |
+
* [Upgrading to 3.5.0](#ch_reconfig_upgrade)
|
| 28 |
+
* [Dynamic Reconfiguration of the ZooKeeper Ensemble](#ch_reconfig_dyn)
|
| 29 |
+
* [API](#ch_reconfig_api)
|
| 30 |
+
* [Security](#sc_reconfig_access_control)
|
| 31 |
+
* [Retrieving the current dynamic configuration](#sc_reconfig_retrieving)
|
| 32 |
+
* [Modifying the current dynamic configuration](#sc_reconfig_modifying)
|
| 33 |
+
* [General](#sc_reconfig_general)
|
| 34 |
+
* [Incremental mode](#sc_reconfig_incremental)
|
| 35 |
+
* [Non-incremental mode](#sc_reconfig_nonincremental)
|
| 36 |
+
* [Conditional reconfig](#sc_reconfig_conditional)
|
| 37 |
+
* [Error conditions](#sc_reconfig_errors)
|
| 38 |
+
* [Additional comments](#sc_reconfig_additional)
|
| 39 |
+
* [Rebalancing Client Connections](#ch_reconfig_rebalancing)
|
| 40 |
+
|
| 41 |
+
<a name="ch_reconfig_intro"></a>
|
| 42 |
+
|
| 43 |
+
## Overview
|
| 44 |
+
|
| 45 |
+
Prior to the 3.5.0 release, the membership and all other configuration
|
| 46 |
+
parameters of Zookeeper were static - loaded during boot and immutable at
|
| 47 |
+
runtime. Operators resorted to “rolling restarts” - a manually intensive
|
| 48 |
+
and error-prone method of changing the configuration that has caused data
|
| 49 |
+
loss and inconsistency in production.
|
| 50 |
+
|
| 51 |
+
Starting with 3.5.0, “rolling restarts” are no longer needed!
|
| 52 |
+
ZooKeeper comes with full support for automated configuration changes: the
|
| 53 |
+
set of Zookeeper servers, their roles (participant / observer), all ports,
|
| 54 |
+
and even the quorum system can be changed dynamically, without service
|
| 55 |
+
interruption and while maintaining data consistency. Reconfigurations are
|
| 56 |
+
performed immediately, just like other operations in ZooKeeper. Multiple
|
| 57 |
+
changes can be done using a single reconfiguration command. The dynamic
|
| 58 |
+
reconfiguration functionality does not limit operation concurrency, does
|
| 59 |
+
not require client operations to be stopped during reconfigurations, has a
|
| 60 |
+
very simple interface for administrators and no added complexity to other
|
| 61 |
+
client operations.
|
| 62 |
+
|
| 63 |
+
New client-side features allow clients to find out about configuration
|
| 64 |
+
changes and to update the connection string (list of servers and their
|
| 65 |
+
client ports) stored in their ZooKeeper handle. A probabilistic algorithm
|
| 66 |
+
is used to rebalance clients across the new configuration servers while
|
| 67 |
+
keeping the extent of client migrations proportional to the change in
|
| 68 |
+
ensemble membership.
|
| 69 |
+
|
| 70 |
+
This document provides the administrator manual for reconfiguration.
|
| 71 |
+
For a detailed description of the reconfiguration algorithms, performance
|
| 72 |
+
measurements, and more, please see our paper:
|
| 73 |
+
|
| 74 |
+
* *Shraer, A., Reed, B., Malkhi, D., Junqueira, F. Dynamic
|
| 75 |
+
Reconfiguration of Primary/Backup Clusters. In _USENIX Annual
|
| 76 |
+
Technical Conference (ATC)_(2012), 425-437* :
|
| 77 |
+
Links: [paper (pdf)](https://www.usenix.org/system/files/conference/atc12/atc12-final74.pdf), [slides (pdf)](https://www.usenix.org/sites/default/files/conference/protected-files/shraer\_atc12\_slides.pdf), [video](https://www.usenix.org/conference/atc12/technical-sessions/presentation/shraer), [hadoop summit slides](http://www.slideshare.net/Hadoop\_Summit/dynamic-reconfiguration-of-zookeeper)
|
| 78 |
+
|
| 79 |
+
**Note:** Starting with 3.5.3, the dynamic reconfiguration
|
| 80 |
+
feature is disabled by default, and has to be explicitly turned on via
|
| 81 |
+
[reconfigEnabled](zookeeperAdmin.html#sc_advancedConfiguration) configuration option.
|
| 82 |
+
|
| 83 |
+
<a name="ch_reconfig_format"></a>
|
| 84 |
+
|
| 85 |
+
## Changes to Configuration Format
|
| 86 |
+
|
| 87 |
+
<a name="sc_reconfig_clientport"></a>
|
| 88 |
+
|
| 89 |
+
### Specifying the client port
|
| 90 |
+
|
| 91 |
+
A client port of a server is the port on which the server accepts plaintext (non-TLS) client connection requests
|
| 92 |
+
and the secure client port is the port on which the server accepts TLS client connection requests.
|
| 93 |
+
|
| 94 |
+
Starting with 3.5.0 the
|
| 95 |
+
_clientPort_ and _clientPortAddress_ configuration parameters should no longer be used in zoo.cfg.
|
| 96 |
+
|
| 97 |
+
Starting with 3.10.0 the
|
| 98 |
+
_secureClientPort_ and _secureClientPortAddress_ configuration parameters should no longer be used in zoo.cfg.
|
| 99 |
+
|
| 100 |
+
Instead, this information is now part of the server keyword specification, which
|
| 101 |
+
becomes as follows:
|
| 102 |
+
|
| 103 |
+
server.<positive id> = <address1>:<quorum port>:<leader election port>[:role];[[<client port address>:]<client port>][;[<secure client port address>:]<secure client port>]
|
| 104 |
+
|
| 105 |
+
- [New in ZK 3.10.0] The client port specification is optional and is to the right of the
|
| 106 |
+
first semicolon. The secure client port specification is also optional and is to the right
|
| 107 |
+
of the second semicolon. However, the client port and secure client port specifications
|
| 108 |
+
cannot both be omitted; at least one of them must be present. If the user intends to omit the client
|
| 109 |
+
port specification and provide only secure client port specification (TLS-only server), a second
|
| 110 |
+
semicolon should still be specified to indicate an empty client port specification (see last
|
| 111 |
+
example below). In either spec, the port address is optional, and if not specified it defaults
|
| 112 |
+
to "0.0.0.0".
|
| 113 |
+
- As usual, the role is optional; it can be _participant_ or _observer_ (_participant_ by default).
|
| 114 |
+
|
| 115 |
+
Examples of legal server statements:
|
| 116 |
+
|
| 117 |
+
server.5 = 125.23.63.23:1234:1235;1236 (non-TLS server)
|
| 118 |
+
server.5 = 125.23.63.23:1234:1235;1236;1237 (non-TLS + TLS server)
|
| 119 |
+
server.5 = 125.23.63.23:1234:1235;;1237 (TLS-only server)
|
| 120 |
+
server.5 = 125.23.63.23:1234:1235:participant;1236 (non-TLS server)
|
| 121 |
+
server.5 = 125.23.63.23:1234:1235:observer;1236 (non-TLS server)
|
| 122 |
+
server.5 = 125.23.63.23:1234:1235;125.23.63.24:1236 (non-TLS server)
|
| 123 |
+
server.5 = 125.23.63.23:1234:1235:participant;125.23.63.23:1236 (non-TLS server)
|
| 124 |
+
server.5 = 125.23.63.23:1234:1235:participant;125.23.63.23:1236;125.23.63.23:1237 (non-TLS + TLS server)
|
| 125 |
+
server.5 = 125.23.63.23:1234:1235:participant;;125.23.63.23:1237 (TLS-only server)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
<a name="sc_multiaddress"></a>
|
| 129 |
+
|
| 130 |
+
### Specifying multiple server addresses
|
| 131 |
+
|
| 132 |
+
Since ZooKeeper 3.6.0 it is possible to specify multiple addresses for each
|
| 133 |
+
ZooKeeper server (see [ZOOKEEPER-3188](https://issues.apache.org/jira/projects/ZOOKEEPER/issues/ZOOKEEPER-3188)).
|
| 134 |
+
This helps to increase availability and adds network level
|
| 135 |
+
resiliency to ZooKeeper. When multiple physical network interfaces are used
|
| 136 |
+
for the servers, ZooKeeper is able to bind on all interfaces and runtime switching
|
| 137 |
+
to a working interface in case a network error. The different addresses can be
|
| 138 |
+
specified in the config using a pipe ('|') character.
|
| 139 |
+
|
| 140 |
+
Examples of valid configurations using multiple addresses:
|
| 141 |
+
|
| 142 |
+
server.2=zoo2-net1:2888:3888|zoo2-net2:2889:3889;2188
|
| 143 |
+
server.2=zoo2-net1:2888:3888|zoo2-net2:2889:3889|zoo2-net3:2890:3890;2188
|
| 144 |
+
server.2=zoo2-net1:2888:3888|zoo2-net2:2889:3889;zoo2-net1:2188
|
| 145 |
+
server.2=zoo2-net1:2888:3888:observer|zoo2-net2:2889:3889:observer;2188
|
| 146 |
+
|
| 147 |
+
<a name="sc_reconfig_standaloneEnabled"></a>
|
| 148 |
+
|
| 149 |
+
### The _standaloneEnabled_ flag
|
| 150 |
+
|
| 151 |
+
Prior to 3.5.0, one could run ZooKeeper in Standalone mode or in a
|
| 152 |
+
Distributed mode. These are separate implementation stacks, and
|
| 153 |
+
switching between them during run time is not possible. By default (for
|
| 154 |
+
backward compatibility) _standaloneEnabled_ is set to
|
| 155 |
+
_true_. The consequence of using this default is that
|
| 156 |
+
if started with a single server the ensemble will not be allowed to
|
| 157 |
+
grow, and if started with more than one server it will not be allowed to
|
| 158 |
+
shrink to contain fewer than two participants.
|
| 159 |
+
|
| 160 |
+
Setting the flag to _false_ instructs the system
|
| 161 |
+
to run the Distributed software stack even if there is only a single
|
| 162 |
+
participant in the ensemble. To achieve this the (static) configuration
|
| 163 |
+
file should contain:
|
| 164 |
+
|
| 165 |
+
standaloneEnabled=false
|
| 166 |
+
|
| 167 |
+
With this setting it is possible to start a ZooKeeper ensemble
|
| 168 |
+
containing a single participant and to dynamically grow it by adding
|
| 169 |
+
more servers. Similarly, it is possible to shrink an ensemble so that
|
| 170 |
+
just a single participant remains, by removing servers.
|
| 171 |
+
|
| 172 |
+
Since running the Distributed mode allows more flexibility, we
|
| 173 |
+
recommend setting the flag to _false_. We expect that
|
| 174 |
+
the legacy Standalone mode will be deprecated in the future.
|
| 175 |
+
|
| 176 |
+
<a name="sc_reconfig_reconfigEnabled"></a>
|
| 177 |
+
|
| 178 |
+
### The _reconfigEnabled_ flag
|
| 179 |
+
|
| 180 |
+
Starting with 3.5.0 and prior to 3.5.3, there is no way to disable
|
| 181 |
+
the dynamic reconfiguration feature. We would like to offer the option of
|
| 182 |
+
disabling the reconfiguration feature because, with reconfiguration enabled,
|
| 183 |
+
we have a security concern that a malicious actor can make arbitrary changes
|
| 184 |
+
to the configuration of a ZooKeeper ensemble, including adding a compromised
|
| 185 |
+
server to the ensemble. We prefer to leave to the discretion of the user to
|
| 186 |
+
decide whether to enable it or not and make sure that the appropriate security
|
| 187 |
+
measures are in place. So in 3.5.3 the [reconfigEnabled](zookeeperAdmin.html#sc_advancedConfiguration) configuration option was introduced
|
| 188 |
+
such that the reconfiguration feature can be completely disabled and any attempts
|
| 189 |
+
to reconfigure a cluster through reconfig API with or without authentication
|
| 190 |
+
will fail by default, unless **reconfigEnabled** is set to
|
| 191 |
+
**true**.
|
| 192 |
+
|
| 193 |
+
To set the option to true, the configuration file (zoo.cfg) should contain:
|
| 194 |
+
|
| 195 |
+
reconfigEnabled=true
|
| 196 |
+
|
| 197 |
+
<a name="sc_reconfig_file"></a>
|
| 198 |
+
|
| 199 |
+
### Dynamic configuration file
|
| 200 |
+
|
| 201 |
+
Starting with 3.5.0 we're distinguishing between dynamic
|
| 202 |
+
configuration parameters, which can be changed during runtime, and
|
| 203 |
+
static configuration parameters, which are read from a configuration
|
| 204 |
+
file when a server boots and don't change during its execution. For now,
|
| 205 |
+
the following configuration keywords are considered part of the dynamic
|
| 206 |
+
configuration: _server_, _group_
|
| 207 |
+
and _weight_.
|
| 208 |
+
|
| 209 |
+
Dynamic configuration parameters are stored in a separate file on
|
| 210 |
+
the server (which we call the dynamic configuration file). This file is
|
| 211 |
+
linked from the static config file using the new
|
| 212 |
+
_dynamicConfigFile_ keyword.
|
| 213 |
+
|
| 214 |
+
**Example**
|
| 215 |
+
|
| 216 |
+
#### zoo_replicated1.cfg
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
tickTime=2000
|
| 220 |
+
dataDir=/zookeeper/data/zookeeper1
|
| 221 |
+
initLimit=5
|
| 222 |
+
syncLimit=2
|
| 223 |
+
dynamicConfigFile=/zookeeper/conf/zoo_replicated1.cfg.dynamic
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
#### zoo_replicated1.cfg.dynamic
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
server.1=125.23.63.23:2780:2783:participant;2791
|
| 230 |
+
server.2=125.23.63.24:2781:2784:participant;2792
|
| 231 |
+
server.3=125.23.63.25:2782:2785:participant;2793
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
When the ensemble configuration changes, the static configuration
|
| 235 |
+
parameters remain the same. The dynamic parameters are pushed by
|
| 236 |
+
ZooKeeper and overwrite the dynamic configuration files on all servers.
|
| 237 |
+
Thus, the dynamic configuration files on the different servers are
|
| 238 |
+
usually identical (they can only differ momentarily when a
|
| 239 |
+
reconfiguration is in progress, or if a new configuration hasn't
|
| 240 |
+
propagated yet to some of the servers). Once created, the dynamic
|
| 241 |
+
configuration file should not be manually altered. Changes are only made
|
| 242 |
+
through the new reconfiguration commands outlined below. Note that
|
| 243 |
+
changing the config of an offline cluster could result in an
|
| 244 |
+
inconsistency with respect to configuration information stored in the
|
| 245 |
+
ZooKeeper log (and the special configuration znode, populated from the
|
| 246 |
+
log) and is therefore highly discouraged.
|
| 247 |
+
|
| 248 |
+
**Example 2**
|
| 249 |
+
|
| 250 |
+
Users may prefer to initially specify a single configuration file.
|
| 251 |
+
The following is thus also legal:
|
| 252 |
+
|
| 253 |
+
#### zoo_replicated1.cfg
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
tickTime=2000
|
| 257 |
+
dataDir=/zookeeper/data/zookeeper1
|
| 258 |
+
initLimit=5
|
| 259 |
+
syncLimit=2
|
| 260 |
+
clientPort=2791
server.1=125.23.63.23:2780:2783:participant;2791
server.2=125.23.63.24:2781:2784:participant;2792
server.3=125.23.63.25:2782:2785:participant;2793
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
The configuration files on each server will be automatically split
|
| 264 |
+
into dynamic and static files, if they are not already in this format.
|
| 265 |
+
So the configuration file above will be automatically transformed into
|
| 266 |
+
the two files in Example 1. Note that the clientPort and
|
| 267 |
+
clientPortAddress lines (if specified) will be automatically removed
|
| 268 |
+
during this process, if they are redundant (as in the example above).
|
| 269 |
+
The original static configuration file is backed up (in a .bak
|
| 270 |
+
file).
|
| 271 |
+
|
| 272 |
+
<a name="sc_reconfig_backward"></a>
|
| 273 |
+
|
| 274 |
+
### Backward compatibility
|
| 275 |
+
|
| 276 |
+
We still support the old configuration format. For example, the
|
| 277 |
+
following configuration file is acceptable (but not recommended):
|
| 278 |
+
|
| 279 |
+
#### zoo_replicated1.cfg
|
| 280 |
+
|
| 281 |
+
tickTime=2000
|
| 282 |
+
dataDir=/zookeeper/data/zookeeper1
|
| 283 |
+
initLimit=5
|
| 284 |
+
syncLimit=2
|
| 285 |
+
clientPort=2791
|
| 286 |
+
server.1=125.23.63.23:2780:2783:participant
|
| 287 |
+
server.2=125.23.63.24:2781:2784:participant
|
| 288 |
+
server.3=125.23.63.25:2782:2785:participant
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
During boot, a dynamic configuration file is created and contains
|
| 292 |
+
the dynamic part of the configuration as explained earlier. In this
|
| 293 |
+
case, however, the line "clientPort=2791" will remain in the static
|
| 294 |
+
configuration file of server 1 since it is not redundant -- it was not
|
| 295 |
+
specified as part of the "server.1=..." using the format explained in
|
| 296 |
+
the section [Changes to Configuration Format](#ch_reconfig_format). If a reconfiguration
|
| 297 |
+
is invoked that sets the client port of server 1, we remove
|
| 298 |
+
"clientPort=2791" from the static configuration file (the dynamic file
|
| 299 |
+
now contains this information as part of the specification of server
|
| 300 |
+
1).
|
| 301 |
+
|
| 302 |
+
<a name="ch_reconfig_upgrade"></a>
|
| 303 |
+
|
| 304 |
+
## Upgrading to 3.5.0
|
| 305 |
+
|
| 306 |
+
Upgrading a running ZooKeeper ensemble to 3.5.0 should be done only
|
| 307 |
+
after upgrading your ensemble to the 3.4.6 release. Note that this is only
|
| 308 |
+
necessary for rolling upgrades (if you're fine with shutting down the
|
| 309 |
+
system completely, you don't have to go through 3.4.6). If you attempt a
|
| 310 |
+
rolling upgrade without going through 3.4.6 (for example from 3.4.5), you
|
| 311 |
+
may get the following error:
|
| 312 |
+
|
| 313 |
+
2013-01-30 11:32:10,663 [myid:2] - INFO [localhost/127.0.0.1:2784:QuorumCnxManager$Listener@498] - Received connection request /127.0.0.1:60876
|
| 314 |
+
2013-01-30 11:32:10,663 [myid:2] - WARN [localhost/127.0.0.1:2784:QuorumCnxManager@349] - Invalid server id: -65536
|
| 315 |
+
|
| 316 |
+
During a rolling upgrade, each server is taken down in turn and
|
| 317 |
+
rebooted with the new 3.5.0 binaries. Before starting the server with
|
| 318 |
+
3.5.0 binaries, we highly recommend updating the configuration file so
|
| 319 |
+
that all server statements "server.x=..." contain client ports (see the
|
| 320 |
+
section [Specifying the client port](#sc_reconfig_clientport)). As explained earlier
|
| 321 |
+
you may leave the configuration in a single file, as well as leave the
|
| 322 |
+
clientPort/clientPortAddress statements (although if you specify client
|
| 323 |
+
ports in the new format, these statements are now redundant).
|
| 324 |
+
|
| 325 |
+
<a name="ch_reconfig_dyn"></a>
|
| 326 |
+
|
| 327 |
+
## Dynamic Reconfiguration of the ZooKeeper Ensemble
|
| 328 |
+
|
| 329 |
+
The ZooKeeper Java and C API were extended with getConfig and reconfig
|
| 330 |
+
commands that facilitate reconfiguration. Both commands have a synchronous
|
| 331 |
+
(blocking) variant and an asynchronous one. We demonstrate these commands
|
| 332 |
+
here using the Java CLI, but note that you can similarly use the C CLI or
|
| 333 |
+
invoke the commands directly from a program just like any other ZooKeeper
|
| 334 |
+
command.
|
| 335 |
+
|
| 336 |
+
<a name="ch_reconfig_api"></a>
|
| 337 |
+
|
| 338 |
+
### API
|
| 339 |
+
|
| 340 |
+
There are two sets of APIs for both the Java and C clients.
|
| 341 |
+
|
| 342 |
+
* ***Reconfiguration API*** :
|
| 343 |
+
Reconfiguration API is used to reconfigure the ZooKeeper cluster.
|
| 344 |
+
Starting with 3.5.3, reconfiguration Java APIs are moved into ZooKeeperAdmin class
|
| 345 |
+
from ZooKeeper class, and use of this API requires ACL setup and user
|
| 346 |
+
authentication (see [Security](#sc_reconfig_access_control) for more information); a minimal usage sketch follows this list.
|
| 347 |
+
|
| 348 |
+
* ***Get Configuration API*** :
|
| 349 |
+
Get configuration APIs are used to retrieve ZooKeeper cluster configuration information
|
| 350 |
+
stored in the /zookeeper/config znode. Use of this API does not require specific setup or authentication,
|
| 351 |
+
because /zookeeper/config is readable by any user.
|
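For orientation, a minimal Java sketch of the reconfiguration API follows; the connection string, credentials and server specification are illustrative, and the session must be authorized as described under [Security](#sc_reconfig_access_control):

    import org.apache.zookeeper.admin.ZooKeeperAdmin;
    import org.apache.zookeeper.data.Stat;

    public class ReconfigSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeperAdmin admin =
                    new ZooKeeperAdmin("localhost:2791", 30000, event -> { });
            admin.addAuthInfo("digest", "super:secret".getBytes()); // illustrative credential
            Stat stat = new Stat();
            // Incremental mode: add server 4; passing null members selects incremental
            // mode, and fromConfig = -1 applies the change unconditionally.
            byte[] newConfig = admin.reconfigure(
                    "server.4=localhost:2786:2789:participant;localhost:2794", // joining
                    null, // leaving
                    null, // newMembers
                    -1,   // fromConfig
                    stat);
            System.out.println(new String(newConfig));
            admin.close();
        }
    }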
| 352 |
+
|
| 353 |
+
<a name="sc_reconfig_access_control"></a>
|
| 354 |
+
|
| 355 |
+
### Security
|
| 356 |
+
|
| 357 |
+
Prior to **3.5.3**, there is no enforced security mechanism
|
| 358 |
+
over reconfig, so any ZooKeeper client that can connect to the ZooKeeper server ensemble
|
| 359 |
+
will have the ability to change the state of a ZooKeeper cluster via reconfig.
|
| 360 |
+
It is thus possible for a malicious client to change the membership of an ensemble,
|
| 361 |
+
e.g., add a compromised server, or remove legitimate servers.
|
| 362 |
+
Cases like these could be security vulnerabilities on a case-by-case basis.
|
| 363 |
+
|
| 364 |
+
To address this security concern, we introduced access control over reconfig
|
| 365 |
+
starting from **3.5.3** such that only a specific set of users
|
| 366 |
+
can use reconfig commands or APIs, and these users need to be configured explicitly. In addition,
|
| 367 |
+
the setup of ZooKeeper cluster must enable authentication so ZooKeeper clients can be authenticated.
|
| 368 |
+
|
| 369 |
+
We also provide an escape hatch for users who operate and interact with a ZooKeeper ensemble in a secured
|
| 370 |
+
environment (i.e. behind company firewall). For those users who want to use reconfiguration feature but
|
| 371 |
+
don't want the overhead of configuring an explicit list of authorized users for reconfig access checks,
|
| 372 |
+
they can set ["skipACL"](zookeeperAdmin.html#sc_authOptions) to "yes" which will
|
| 373 |
+
skip ACL check and allow any user to reconfigure cluster.
|
| 374 |
+
|
| 375 |
+
Overall, ZooKeeper provides flexible configuration options for the reconfigure feature
|
| 376 |
+
that allow users to choose based on their security requirements.
|
| 377 |
+
We leave it to the discretion of the user to ensure that appropriate security measures are in place.
|
| 378 |
+
|
| 379 |
+
* ***Access Control*** :
|
| 380 |
+
The dynamic configuration is stored in a special znode
|
| 381 |
+
ZooDefs.CONFIG_NODE = /zookeeper/config. This node by default is read only
|
| 382 |
+
for all users, except the super user and users that are explicitly configured for write
|
| 383 |
+
access.
|
| 384 |
+
Clients that need to use reconfig commands or reconfig API should be configured as users
|
| 385 |
+
that have write access to CONFIG_NODE. By default, only the super user has full control including
|
| 386 |
+
write access to CONFIG_NODE. Additional users can be granted write access by the super user
|
| 387 |
+
by setting an ACL that grants write permission to the specified user (see the sketch after this list).
|
| 388 |
+
A few examples of how to setup ACLs and use reconfiguration API with authentication can be found in
|
| 389 |
+
ReconfigExceptionTest.java and TestReconfigServer.cc.
|
| 390 |
+
|
| 391 |
+
* ***Authentication*** :
|
| 392 |
+
Authentication of users is orthogonal to the access control and is delegated to
|
| 393 |
+
existing authentication mechanisms supported by ZooKeeper's pluggable authentication schemes.
|
| 394 |
+
See [ZooKeeper and SASL](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Zookeeper+and+SASL) for more details on this topic.
|
| 395 |
+
|
| 396 |
+
* ***Disable ACL check*** :
|
| 397 |
+
ZooKeeper supports the ["skipACL"](zookeeperAdmin.html#sc_authOptions) option such that the ACL
|
| 398 |
+
check will be completely skipped if skipACL is set to "yes". In such cases, any unauthenticated
|
| 399 |
+
users can use reconfig API.
|
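As a rough sketch of such an ACL setup (the user name and password are placeholders, and the handle performing setACL is assumed to be authenticated as the super user):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Id;
    import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;

    public class ConfigAclSketch {
        // Grant a digest user write access to /zookeeper/config while keeping it world-readable.
        static void grantReconfigAccess(ZooKeeper zk) throws Exception {
            List<ACL> acls = new ArrayList<>(ZooDefs.Ids.READ_ACL_UNSAFE);
            Id user = new Id("digest",
                    DigestAuthenticationProvider.generateDigest("reconfigUser:secret"));
            acls.add(new ACL(ZooDefs.Perms.WRITE, user));
            zk.setACL(ZooDefs.CONFIG_NODE, acls, -1);
        }
    }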
| 400 |
+
|
| 401 |
+
<a name="sc_reconfig_retrieving"></a>
|
| 402 |
+
|
| 403 |
+
### Retrieving the current dynamic configuration
|
| 404 |
+
|
| 405 |
+
The dynamic configuration is stored in a special znode
|
| 406 |
+
ZooDefs.CONFIG_NODE = /zookeeper/config. The new
|
| 407 |
+
`config` CLI command reads this znode (currently it is
|
| 408 |
+
simply a wrapper to `get /zookeeper/config`). As with
|
| 409 |
+
normal reads, to retrieve the latest committed value you should do a
|
| 410 |
+
`sync` first.
|
| 411 |
+
|
| 412 |
+
[zk: 127.0.0.1:2791(CONNECTED) 3] config
|
| 413 |
+
server.1=localhost:2780:2783:participant;localhost:2791
|
| 414 |
+
server.2=localhost:2781:2784:participant;localhost:2792
|
| 415 |
+
server.3=localhost:2782:2785:participant;localhost:2793
version=400000003
|
| 416 |
+
|
| 417 |
+
Notice the last line of the output. This is the configuration
|
| 418 |
+
version. The version equals the zxid of the reconfiguration command
|
| 419 |
+
which created this configuration. The version of the first established
|
| 420 |
+
configuration equals the zxid of the NEWLEADER message sent by the
|
| 421 |
+
first successfully established leader. When a configuration is written
|
| 422 |
+
to a dynamic configuration file, the version automatically becomes part
|
| 423 |
+
of the filename and the static configuration file is updated with the
|
| 424 |
+
path to the new dynamic configuration file. Configuration files
|
| 425 |
+
corresponding to earlier versions are retained for backup
|
| 426 |
+
purposes.
|
| 427 |
+
|
| 428 |
+
During boot time the version (if it exists) is extracted from the
|
| 429 |
+
filename. The version should never be altered manually by users or the
|
| 430 |
+
system administrator. It is used by the system to know which
|
| 431 |
+
configuration is most up-to-date. Manipulating it manually can result in
|
| 432 |
+
data loss and inconsistency.
|
| 433 |
+
|
| 434 |
+
Just like a `get` command, the
|
| 435 |
+
`config` CLI command accepts the _-w_
|
| 436 |
+
flag for setting a watch on the znode, and _-s_ flag for
|
| 437 |
+
displaying the Stats of the znode. It additionally accepts a new flag
|
| 438 |
+
_-c_ which outputs only the version and the client
|
| 439 |
+
connection string corresponding to the current configuration. For
|
| 440 |
+
example, for the configuration above we would get:
|
| 441 |
+
|
| 442 |
+
[zk: 127.0.0.1:2791(CONNECTED) 17] config -c
|
| 443 |
+
400000003 localhost:2791,localhost:2793,localhost:2792
|
| 444 |
+
|
| 445 |
+
Note that when using the API directly, this command is called
|
| 446 |
+
`getConfig`.
|
| 447 |
+
|
| 448 |
+
As with any read command, it returns the configuration known to the
|
| 449 |
+
follower to which your client is connected, which may be slightly
|
| 450 |
+
out-of-date. One can use the `sync` command for
|
| 451 |
+
stronger guarantees. For example using the Java API:
|
| 452 |
+
|
| 453 |
+
zk.sync(ZooDefs.CONFIG_NODE, void_callback, context);
|
| 454 |
+
zk.getConfig(watcher, callback, context);
|
| 455 |
+
|
| 456 |
+
Note: in 3.5.0 it doesn't really matter which path is passed to the
|
| 457 |
+
`sync()` command as all the server's state is brought
|
| 458 |
+
up to date with the leader (so one could use a different path instead of
|
| 459 |
+
ZooDefs.CONFIG_NODE). However, this may change in the future.
|
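As a slightly fuller sketch of this pattern using the synchronous variant (the handle is assumed to be already connected):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class GetConfigSketch {
        // Sync first so the read reflects the latest committed configuration,
        // then fetch /zookeeper/config synchronously (no watch set here).
        static String currentConfig(ZooKeeper zk) throws KeeperException, InterruptedException {
            zk.sync(ZooDefs.CONFIG_NODE, (rc, path, ctx) -> { }, null);
            Stat stat = new Stat();
            byte[] data = zk.getConfig(false, stat);
            return new String(data);
        }
    }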
| 460 |
+
|
| 461 |
+
<a name="sc_reconfig_modifying"></a>
|
| 462 |
+
|
| 463 |
+
### Modifying the current dynamic configuration
|
| 464 |
+
|
| 465 |
+
Modifying the configuration is done through the
|
| 466 |
+
`reconfig` command. There are two modes of
|
| 467 |
+
reconfiguration: incremental and non-incremental (bulk). The
|
| 468 |
+
non-incremental mode simply specifies the new dynamic configuration of the
|
| 469 |
+
system. The incremental mode specifies changes to the current configuration.
|
| 470 |
+
The `reconfig` command returns the new
|
| 471 |
+
configuration.
|
| 472 |
+
|
| 473 |
+
A few examples are in: *ReconfigTest.java*,
|
| 474 |
+
*ReconfigRecoveryTest.java* and
|
| 475 |
+
*TestReconfigServer.cc*.
|
| 476 |
+
|
| 477 |
+
<a name="sc_reconfig_general"></a>
|
| 478 |
+
|
| 479 |
+
#### General
|
| 480 |
+
|
| 481 |
+
**Removing servers:** Any server can
|
| 482 |
+
be removed, including the leader (although removing the leader will
|
| 483 |
+
result in a short unavailability, see Figures 6 and 8 in the [paper](https://www.usenix.org/conference/usenixfederatedconferencesweek/dynamic-recon%EF%AC%81guration-primarybackup-clusters)). The server will not be shut down automatically.
|
| 484 |
+
Instead, it becomes a "non-voting follower". This is somewhat similar
|
| 485 |
+
to an observer in that its votes don't count towards the Quorum of
|
| 486 |
+
votes necessary to commit operations. However, unlike a non-voting
|
| 487 |
+
follower, an observer doesn't actually see any operation proposals and
|
| 488 |
+
does not ACK them. Thus a non-voting follower has a more significant
|
| 489 |
+
negative effect on system throughput compared to an observer.
|
| 490 |
+
Non-voting follower mode should only be used as a temporary mode,
|
| 491 |
+
before shutting the server down, or adding it as a follower or as an
|
| 492 |
+
observer to the ensemble. We do not shut the server down automatically
|
| 493 |
+
for two main reasons. The first reason is that we do not want all the
|
| 494 |
+
clients connected to this server to be immediately disconnected,
|
| 495 |
+
causing a flood of connection requests to other servers. Instead, it
|
| 496 |
+
is better if each client decides when to migrate independently. The
|
| 497 |
+
second reason is that removing a server may sometimes (rarely) be
|
| 498 |
+
necessary in order to change it from "observer" to "participant" (this
|
| 499 |
+
is explained in the section [Additional comments](#sc_reconfig_additional)).
|
| 500 |
+
|
| 501 |
+
Note that the new configuration should have some minimal number of
|
| 502 |
+
participants in order to be considered legal. If the proposed change
|
| 503 |
+
would leave the cluster with less than 2 participants and standalone
|
| 504 |
+
mode is enabled (standaloneEnabled=true, see the section [The _standaloneEnabled_ flag](#sc_reconfig_standaloneEnabled)), the reconfig will not be
|
| 505 |
+
processed (BadArgumentsException). If standalone mode is disabled
|
| 506 |
+
(standaloneEnabled=false) then it's legal to remain with 1 or more
|
| 507 |
+
participants.
|
| 508 |
+
|
| 509 |
+
**Adding servers:** Before a
|
| 510 |
+
reconfiguration is invoked, the administrator must make sure that a
|
| 511 |
+
quorum (majority) of participants from the new configuration are
|
| 512 |
+
already connected and synced with the current leader. To achieve this
|
| 513 |
+
we need to connect a new joining server to the leader before it is
|
| 514 |
+
officially part of the ensemble. This is done by starting the joining
|
| 515 |
+
server using an initial list of servers which is technically not a
|
| 516 |
+
legal configuration of the system but (a) contains the joiner, and (b)
|
| 517 |
+
gives sufficient information to the joiner in order for it to find and
|
| 518 |
+
connect to the current leader. We list a few different options of
|
| 519 |
+
doing this safely.
|
| 520 |
+
|
| 521 |
+
1. Initial configuration of joiners is comprised of servers in
|
| 522 |
+
the last committed configuration and one or more joiners, where
|
| 523 |
+
**joiners are listed as observers.**
|
| 524 |
+
For example, if servers D and E are added at the same time to (A,
|
| 525 |
+
B, C) and server C is being removed, the initial configuration of
|
| 526 |
+
D could be (A, B, C, D) or (A, B, C, D, E), where D and E are
|
| 527 |
+
listed as observers. Similarly, the configuration of E could be
|
| 528 |
+
(A, B, C, E) or (A, B, C, D, E), where D and E are listed as
|
| 529 |
+
observers. **Note that listing the joiners as
|
| 530 |
+
observers will not actually make them observers - it will only
|
| 531 |
+
prevent them from accidentally forming a quorum with other
|
| 532 |
+
joiners.** Instead, they will contact the servers in the
|
| 533 |
+
current configuration and adopt the last committed configuration
|
| 534 |
+
(A, B, C), where the joiners are absent. Configuration files of
|
| 535 |
+
joiners are backed up and replaced automatically as this happens.
|
| 536 |
+
After connecting to the current leader, joiners become non-voting
|
| 537 |
+
followers until the system is reconfigured and they are added to
|
| 538 |
+
the ensemble (as participant or observer, as appropriate).
|
| 539 |
+
1. Initial configuration of each joiner is comprised of servers
|
| 540 |
+
in the last committed configuration + **the
|
| 541 |
+
joiner itself, listed as a participant.** For example, to
|
| 542 |
+
add a new server D to a configuration consisting of servers (A, B,
|
| 543 |
+
C), the administrator can start D using an initial configuration
|
| 544 |
+
file consisting of servers (A, B, C, D). If both D and E are added
|
| 545 |
+
at the same time to (A, B, C), the initial configuration of D
|
| 546 |
+
could be (A, B, C, D) and the configuration of E could be (A, B,
|
| 547 |
+
C, E). Similarly, if D is added and C is removed at the same time,
|
| 548 |
+
the initial configuration of D could be (A, B, C, D). Never list
|
| 549 |
+
more than one joiner as participant in the initial configuration
|
| 550 |
+
(see warning below).
|
| 551 |
+
1. Whether listing the joiner as an observer or as participant,
|
| 552 |
+
it is also fine not to list all the current configuration servers,
|
| 553 |
+
as long as the current leader is in the list. For example, when
|
| 554 |
+
adding D we could start D with a configuration file consisting of
|
| 555 |
+
just (A, D) if A is the current leader. However, this is more
|
| 556 |
+
fragile since if A fails before D officially joins the ensemble, D
|
| 557 |
+
doesn’t know anyone else and therefore the administrator will have
|
| 558 |
+
to intervene and restart D with another server list.
|
| 559 |
+
|
| 560 |
+
######Note
|
| 561 |
+
>##### Warning
|
| 562 |
+
|
| 563 |
+
>Never specify more than one joining server in the same initial
|
| 564 |
+
configuration as participants. Currently, the joining servers don’t
|
| 565 |
+
know that they are joining an existing ensemble; if multiple joiners
|
| 566 |
+
are listed as participants they may form an independent quorum
|
| 567 |
+
creating a split-brain situation such as processing operations
|
| 568 |
+
independently from your main ensemble. It is OK to list multiple
|
| 569 |
+
joiners as observers in an initial config.
|
| 570 |
+
|
| 571 |
+
If the configuration of existing servers changes or they become unavailable
|
| 572 |
+
before the joiner succeeds to connect and learn about configuration changes, the
|
| 573 |
+
joiner may need to be restarted with an updated configuration file in order to be
|
| 574 |
+
able to connect.
|
| 575 |
+
|
| 576 |
+
Finally, note that once connected to the leader, a joiner adopts
|
| 577 |
+
the last committed configuration, in which it is absent (the initial
|
| 578 |
+
config of the joiner is backed up before being rewritten). If the
|
| 579 |
+
joiner restarts in this state, it will not be able to boot since it is
|
| 580 |
+
absent from its configuration file. In order to start it you’ll once
|
| 581 |
+
again have to specify an initial configuration.
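For illustration only, an initial configuration file for joiner D following option 1 above (the joiner listed as an observer; host names and ports are made up) could look like:

    server.1=hostA:2888:3888:participant;2181
    server.2=hostB:2888:3888:participant;2181
    server.3=hostC:2888:3888:participant;2181
    server.4=hostD:2888:3888:observer;2181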
|
| 582 |
+
|
| 583 |
+
**Modifying server parameters:** One
|
| 584 |
+
can modify any of the ports of a server, or its role
|
| 585 |
+
(participant/observer) by adding it to the ensemble with different
|
| 586 |
+
parameters. This works in both the incremental and the bulk
|
| 587 |
+
reconfiguration modes. It is not necessary to remove the server and
|
| 588 |
+
then add it back; just specify the new parameters as if the server is
|
| 589 |
+
not yet in the system. The server will detect the configuration change
|
| 590 |
+
and perform the necessary adjustments. See an example in the section
|
| 591 |
+
[Incremental mode](#sc_reconfig_incremental) and an exception to this
|
| 592 |
+
rule in the section [Additional comments](#sc_reconfig_additional).
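For example, a sketch of an incremental reconfig (ports are made up) that turns the existing server 3 into an observer and moves it to new ports, without removing it first:

    > reconfig -add 3=localhost:2889:3889:observer;2182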
|
| 593 |
+
|
| 594 |
+
It is also possible to change the Quorum System used by the
|
| 595 |
+
ensemble (for example, change the Majority Quorum System to a
|
| 596 |
+
Hierarchical Quorum System on the fly). This, however, is only allowed
|
| 597 |
+
using the bulk (non-incremental) reconfiguration mode. In general,
|
| 598 |
+
incremental reconfiguration only works with the Majority Quorum
|
| 599 |
+
System. Bulk reconfiguration works with both Hierarchical and Majority
|
| 600 |
+
Quorum Systems.
|
| 601 |
+
|
| 602 |
+
**Performance Impact:** There is
|
| 603 |
+
practically no performance impact when removing a follower, since it
|
| 604 |
+
is not being automatically shut down (the effect of removal is that
|
| 605 |
+
the server's votes are no longer being counted). When adding a server,
|
| 606 |
+
there is no leader change and no noticeable performance disruption.
|
| 607 |
+
For details and graphs please see Figures 6, 7 and 8 in the [paper](https://www.usenix.org/conference/usenixfederatedconferencesweek/dynamic-recon%EF%AC%81guration-primarybackup-clusters).
|
| 608 |
+
|
| 609 |
+
The most significant disruption will happen when a leader change
|
| 610 |
+
is caused, in one of the following cases:
|
| 611 |
+
|
| 612 |
+
1. Leader is removed from the ensemble.
|
| 613 |
+
1. Leader's role is changed from participant to observer.
|
| 614 |
+
1. The port used by the leader to send transactions to others
|
| 615 |
+
(quorum port) is modified.
|
| 616 |
+
|
| 617 |
+
In these cases we perform a leader hand-off where the old leader
|
| 618 |
+
nominates a new leader. The resulting unavailability is usually
|
| 619 |
+
shorter than when a leader crashes since detecting leader failure is
|
| 620 |
+
unnecessary and electing a new leader can usually be avoided during a
|
| 621 |
+
hand-off (see Figures 6 and 8 in the [paper](https://www.usenix.org/conference/usenixfederatedconferencesweek/dynamic-recon%EF%AC%81guration-primarybackup-clusters)).
|
| 622 |
+
|
| 623 |
+
When the client port of a server is modified, it does not drop
|
| 624 |
+
existing client connections. New connections to the server will have
|
| 625 |
+
to use the new client port.
|
| 626 |
+
|
| 627 |
+
**Progress guarantees:** Up to the
|
| 628 |
+
invocation of the reconfig operation, a quorum of the old
|
| 629 |
+
configuration is required to be available and connected for ZooKeeper
|
| 630 |
+
to be able to make progress. Once reconfig is invoked, a quorum of
|
| 631 |
+
both the old and of the new configurations must be available. The
|
| 632 |
+
final transition happens once (a) the new configuration is activated,
|
| 633 |
+
and (b) all operations scheduled before the new configuration is
|
| 634 |
+
activated by the leader are committed. Once (a) and (b) happen, only a
|
| 635 |
+
quorum of the new configuration is required. Note, however, that
|
| 636 |
+
neither (a) nor (b) are visible to a client. Specifically, when a
|
| 637 |
+
reconfiguration operation commits, it only means that an activation
|
| 638 |
+
message was sent out by the leader. It does not necessarily mean that
|
| 639 |
+
a quorum of the new configuration got this message (which is required
|
| 640 |
+
in order to activate it) or that (b) has happened. If one wants to
|
| 641 |
+
make sure that both (a) and (b) has already occurred (for example, in
|
| 642 |
+
order to know that it is safe to shut down old servers that were
|
| 643 |
+
removed), one can simply invoke an update
|
| 644 |
+
(`set-data`, or some other quorum operation, but not
|
| 645 |
+
a `sync`) and wait for it to commit. An alternative
|
| 646 |
+
way to achieve this would have been to introduce another round to the
|
| 647 |
+
reconfiguration protocol (which, for simplicity and compatibility with
|
| 648 |
+
Zab, we decided to avoid).
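A minimal sketch of this pattern with the Java API (the znode path and surrounding variables are made up for illustration; any quorum write works):

    // Trigger the reconfiguration (incremental mode; joiningServers/leavingServers
    // are built as in the examples in the Incremental mode section below).
    zk.reconfig(joiningServers, leavingServers, null, -1, new Stat());

    // Issue any quorum (write) operation and wait for it to commit. Once it returns
    // successfully, (a) and (b) above have happened, so the removed servers can be
    // shut down safely. "/reconfig-barrier" is a made-up znode assumed to exist.
    zk.setData("/reconfig-barrier", new byte[0], -1);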
|
| 649 |
+
|
| 650 |
+
<a name="sc_reconfig_incremental"></a>
|
| 651 |
+
|
| 652 |
+
#### Incremental mode
|
| 653 |
+
|
| 654 |
+
The incremental mode allows adding and removing servers to the
|
| 655 |
+
current configuration. Multiple changes are allowed. For
|
| 656 |
+
example:
|
| 657 |
+
|
| 658 |
+
> reconfig -remove 3 -add
|
| 659 |
+
server.5=125.23.63.23:1234:1235;1236
|
| 660 |
+
|
| 661 |
+
Both the add and the remove options get a list of comma separated
|
| 662 |
+
arguments (no spaces):
|
| 663 |
+
|
| 664 |
+
> reconfig -remove 3,4 -add
|
| 665 |
+
server.5=localhost:2111:2112;2113,6=localhost:2114:2115:observer;2116
|
| 666 |
+
|
| 667 |
+
The format of the server statement is exactly the same as
|
| 668 |
+
described in the section [Specifying the client port](#sc_reconfig_clientport) and
|
| 669 |
+
includes the client port. Notice that here instead of "server.5=" you
|
| 670 |
+
can just say "5=". In the example above, if server 5 is already in the
|
| 671 |
+
system, but has different ports or is not an observer, it is updated
|
| 672 |
+
and once the configuration commits becomes an observer and starts
|
| 673 |
+
using these new ports. This is an easy way to turn participants into
|
| 674 |
+
observers and vice versa or change any of their ports, without
|
| 675 |
+
rebooting the server.
|
| 676 |
+
|
| 677 |
+
ZooKeeper supports two types of Quorum Systems – the simple
|
| 678 |
+
Majority system (where the leader commits operations after receiving
|
| 679 |
+
ACKs from a majority of voters) and a more complex Hierarchical
|
| 680 |
+
system, where votes of different servers have different weights and
|
| 681 |
+
servers are divided into voting groups. Currently, incremental
|
| 682 |
+
reconfiguration is allowed only if the last proposed configuration
|
| 683 |
+
known to the leader uses a Majority Quorum System
|
| 684 |
+
(BadArgumentsException is thrown otherwise).
|
| 685 |
+
|
| 686 |
+
Incremental mode - examples using the Java API:
|
| 687 |
+
|
| 688 |
+
List<String> leavingServers = new ArrayList<String>();
|
| 689 |
+
leavingServers.add("1");
|
| 690 |
+
leavingServers.add("2");
|
| 691 |
+
byte[] config = zk.reconfig(null, leavingServers, null, -1, new Stat());
|
| 692 |
+
|
| 693 |
+
List<String> leavingServers = new ArrayList<String>();
|
| 694 |
+
List<String> joiningServers = new ArrayList<String>();
|
| 695 |
+
leavingServers.add("1");
|
| 696 |
+
joiningServers.add("server.4=localhost:1234:1235;1236");
|
| 697 |
+
byte[] config = zk.reconfig(joiningServers, leavingServers, null, -1, new Stat());
|
| 698 |
+
|
| 699 |
+
String configStr = new String(config);
|
| 700 |
+
System.out.println(configStr);
|
| 701 |
+
|
| 702 |
+
There is also an asynchronous API, and an API accepting comma
|
| 703 |
+
separated Strings instead of List<String>. See
|
| 704 |
+
src/java/main/org/apache/zookeeper/ZooKeeper.java.
|
| 705 |
+
|
| 706 |
+
<a name="sc_reconfig_nonincremental"></a>
|
| 707 |
+
|
| 708 |
+
#### Non-incremental mode
|
| 709 |
+
|
| 710 |
+
The second mode of reconfiguration is non-incremental, whereby a
|
| 711 |
+
client gives a complete specification of the new dynamic system
|
| 712 |
+
configuration. The new configuration can either be given in place or
|
| 713 |
+
read from a file:
|
| 714 |
+
|
| 715 |
+
> reconfig -file newconfig.cfg
|
| 716 |
+
|
| 717 |
+
//newconfig.cfg is a dynamic config file, see [Dynamic configuration file](#sc_reconfig_file)
|
| 718 |
+
|
| 719 |
+
> reconfig -members
|
| 720 |
+
server.1=125.23.63.23:2780:2783:participant;2791,server.2=125.23.63.24:2781:2784:participant;2792,server.3=125.23.63.25:2782:2785:participant;2793
|
| 721 |
+
|
| 722 |
+
The new configuration may use a different Quorum System. For
|
| 723 |
+
example, you may specify a Hierarchical Quorum System even if the
|
| 724 |
+
current ensemble uses a Majority Quorum System.
|
| 725 |
+
|
| 726 |
+
Bulk mode - example using the Java API:
|
| 727 |
+
|
| 728 |
+
List<String> newMembers = new ArrayList<String>();
|
| 729 |
+
newMembers.add("server.1=1111:1234:1235;1236");
|
| 730 |
+
newMembers.add("server.2=1112:1237:1238;1239");
|
| 731 |
+
newMembers.add("server.3=1114:1240:1241:observer;1242");
|
| 732 |
+
|
| 733 |
+
byte[] config = zk.reconfig(null, null, newMembers, -1, new Stat());
|
| 734 |
+
|
| 735 |
+
String configStr = new String(config);
|
| 736 |
+
System.out.println(configStr);
|
| 737 |
+
|
| 738 |
+
There is also an asynchronous API, and an API accepting comma
|
| 739 |
+
separated String containing the new members instead of
|
| 740 |
+
List<String>. See
|
| 741 |
+
src/java/main/org/apache/zookeeper/ZooKeeper.java.
|
| 742 |
+
|
| 743 |
+
<a name="sc_reconfig_conditional"></a>
|
| 744 |
+
|
| 745 |
+
#### Conditional reconfig
|
| 746 |
+
|
| 747 |
+
Sometimes (especially in non-incremental mode) a new proposed
|
| 748 |
+
configuration depends on what the client "believes" to be the current
|
| 749 |
+
configuration, and should be applied only to that configuration.
|
| 750 |
+
Specifically, the `reconfig` succeeds only if the
|
| 751 |
+
last configuration at the leader has the specified version.
|
| 752 |
+
|
| 753 |
+
> reconfig -file <filename> -v <version>
|
| 754 |
+
|
| 755 |
+
In the previously listed Java examples, instead of -1 one could
|
| 756 |
+
specify a configuration version to condition the
|
| 757 |
+
reconfiguration.
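For example, a sketch with the Java API (the version value and server string are made up; in practice the version would come from a prior `config`/`getConfig` call):

    long knownVersion = 0x400000003L;  // hypothetical version of the current configuration
    List<String> joiningServers = new ArrayList<String>();
    joiningServers.add("server.4=localhost:1234:1235;1236");
    // Succeeds only if the leader's latest configuration still has knownVersion;
    // otherwise the call fails with a BadVersionException.
    byte[] config = zk.reconfig(joiningServers, null, null, knownVersion, new Stat());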
|
| 758 |
+
|
| 759 |
+
<a name="sc_reconfig_errors"></a>
|
| 760 |
+
|
| 761 |
+
#### Error conditions
|
| 762 |
+
|
| 763 |
+
In addition to normal ZooKeeper error conditions, a
|
| 764 |
+
reconfiguration may fail for the following reasons:
|
| 765 |
+
|
| 766 |
+
1. another reconfig is currently in progress
|
| 767 |
+
(ReconfigInProgress)
|
| 768 |
+
1. the proposed change would leave the cluster with fewer than 2
|
| 769 |
+
participants while standalone mode is enabled; if
|
| 770 |
+
standalone mode is disabled, it is legal to remain with 1 or
|
| 771 |
+
more participants (BadArgumentsException)
|
| 772 |
+
1. no quorum of the new configuration was connected and
|
| 773 |
+
up-to-date with the leader when the reconfiguration processing
|
| 774 |
+
began (NewConfigNoQuorum)
|
| 775 |
+
1. `-v x` was specified, but the version
|
| 776 |
+
`y` of the latest configuration is not
|
| 777 |
+
`x` (BadVersionException)
|
| 778 |
+
1. an incremental reconfiguration was requested but the last
|
| 779 |
+
configuration at the leader uses a Quorum System which is
|
| 780 |
+
different from the Majority system (BadArgumentsException)
|
| 781 |
+
1. syntax error (BadArgumentsException)
|
| 782 |
+
1. I/O exception when reading the configuration from a file
|
| 783 |
+
(BadArgumentsException)
|
| 784 |
+
|
| 785 |
+
Most of these are illustrated by test-cases in
|
| 786 |
+
*ReconfigFailureCases.java*.
|
| 787 |
+
|
| 788 |
+
<a name="sc_reconfig_additional"></a>
|
| 789 |
+
|
| 790 |
+
#### Additional comments
|
| 791 |
+
|
| 792 |
+
**Liveness:** To better understand
|
| 793 |
+
the difference between incremental and non-incremental
|
| 794 |
+
reconfiguration, suppose that client C1 adds server D to the system
|
| 795 |
+
while a different client C2 adds server E. With the non-incremental
|
| 796 |
+
mode, each client would first invoke `config` to find
|
| 797 |
+
out the current configuration, and then locally create a new list of
|
| 798 |
+
servers by adding its own suggested server. The new configuration can
|
| 799 |
+
then be submitted using the non-incremental
|
| 800 |
+
`reconfig` command. After both reconfigurations
|
| 801 |
+
complete, only one of E or D will be added (not both), depending on
|
| 802 |
+
which client's request arrives second to the leader, overwriting the
|
| 803 |
+
previous configuration. The other client can repeat the process until
|
| 804 |
+
its change takes effect. This method guarantees system-wide progress
|
| 805 |
+
(i.e., for one of the clients), but does not ensure that every client
|
| 806 |
+
succeeds. To have more control C2 may request to only execute the
|
| 807 |
+
reconfiguration in case the version of the current configuration
|
| 808 |
+
hasn't changed, as explained in the section [Conditional reconfig](#sc_reconfig_conditional). In this way it may avoid blindly
|
| 809 |
+
overwriting the configuration of C1 if C1's configuration reached the
|
| 810 |
+
leader first.
|
| 811 |
+
|
| 812 |
+
With incremental reconfiguration, both changes will take effect as
|
| 813 |
+
they are simply applied by the leader one after the other to the
|
| 814 |
+
current configuration, whatever that is (assuming that the second
|
| 815 |
+
reconfig request reaches the leader after it sends a commit message
|
| 816 |
+
for the first reconfig request -- currently the leader will refuse to
|
| 817 |
+
propose a reconfiguration if another one is already pending). Since
|
| 818 |
+
both clients are guaranteed to make progress, this method guarantees
|
| 819 |
+
stronger liveness. In practice, multiple concurrent reconfigurations
|
| 820 |
+
are probably rare. Non-incremental reconfiguration is currently the
|
| 821 |
+
only way to dynamically change the Quorum System. Incremental
|
| 822 |
+
reconfiguration is currently only allowed with the Majority Quorum
|
| 823 |
+
System.
|
| 824 |
+
|
| 825 |
+
**Changing an observer into a
|
| 826 |
+
follower:** Clearly, changing a server that participates in
|
| 827 |
+
voting into an observer may fail if error (2) occurs, i.e., if fewer
|
| 828 |
+
than the minimal allowed number of participants would remain. However,
|
| 829 |
+
converting an observer into a participant may sometimes fail for a
|
| 830 |
+
more subtle reason: Suppose, for example, that the current
|
| 831 |
+
configuration is (A, B, C, D), where A is the leader, B and C are
|
| 832 |
+
followers and D is an observer. In addition, suppose that B has
|
| 833 |
+
crashed. If a reconfiguration is submitted where D is said to become a
|
| 834 |
+
follower, it will fail with error (3) since in this configuration, a
|
| 835 |
+
majority of voters in the new configuration (any 3 voters) must be
|
| 836 |
+
connected and up-to-date with the leader. An observer cannot
|
| 837 |
+
acknowledge the history prefix sent during reconfiguration, and
|
| 838 |
+
therefore it does not count towards these 3 required servers and the
|
| 839 |
+
reconfiguration will be aborted. In case this happens, a client can
|
| 840 |
+
achieve the same task by two reconfig commands: first invoke a
|
| 841 |
+
reconfig to remove D from the configuration and then invoke a second
|
| 842 |
+
command to add it back as a participant (follower). During the
|
| 843 |
+
intermediate state D is a non-voting follower and can ACK the state
|
| 844 |
+
transfer performed during the second reconfig command.
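For example, a sketch of this two-step approach with the Java API (server id "4" stands in for D; the host and ports are made up):

    // Step 1: remove D (server 4). After this commits, D keeps running as a
    // non-voting follower.
    List<String> leavingServers = new ArrayList<String>();
    leavingServers.add("4");
    zk.reconfig(null, leavingServers, null, -1, new Stat());

    // Step 2: add D back as a participant. As a non-voting follower it can ACK
    // the state transfer performed during this second reconfig.
    List<String> joiningServers = new ArrayList<String>();
    joiningServers.add("server.4=localhost:2111:2112:participant;2113");
    zk.reconfig(joiningServers, null, null, -1, new Stat());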
|
| 845 |
+
|
| 846 |
+
<a name="ch_reconfig_rebalancing"></a>
|
| 847 |
+
|
| 848 |
+
## Rebalancing Client Connections
|
| 849 |
+
|
| 850 |
+
When a ZooKeeper cluster is started, if each client is given the same
|
| 851 |
+
connection string (list of servers), the client will randomly choose a
|
| 852 |
+
server in the list to connect to, which makes the expected number of
|
| 853 |
+
client connections per server the same for each of the servers. We
|
| 854 |
+
implemented a method that preserves this property when the set of servers
|
| 855 |
+
changes through reconfiguration. See Sections 4 and 5.1 in the [paper](https://www.usenix.org/conference/usenixfederatedconferencesweek/dynamic-recon%EF%AC%81guration-primarybackup-clusters).
|
| 856 |
+
|
| 857 |
+
In order for the method to work, all clients must subscribe to
|
| 858 |
+
configuration changes (by setting a watch on /zookeeper/config either
|
| 859 |
+
directly or through the `getConfig` API command). When
|
| 860 |
+
the watch is triggered, the client should read the new configuration by
|
| 861 |
+
invoking `sync` and `getConfig` and if
|
| 862 |
+
the configuration is indeed new invoke the
|
| 863 |
+
`updateServerList` API command. To avoid mass client
|
| 864 |
+
migration at the same time, it is better to have each client sleep a
|
| 865 |
+
random short period of time before invoking
|
| 866 |
+
`updateServerList`.
|
| 867 |
+
|
| 868 |
+
A few examples can be found in:
|
| 869 |
+
*StaticHostProviderTest.java* and
|
| 870 |
+
*TestReconfig.cc*
|
| 871 |
+
|
| 872 |
+
Example (this is not a recipe, but a simplified example just to
|
| 873 |
+
explain the general idea):
|
| 874 |
+
|
| 875 |
+
public void process(WatchedEvent event) {
|
| 876 |
+
synchronized (this) {
|
| 877 |
+
if (event.getType() == EventType.None) {
|
| 878 |
+
connected = (event.getState() == KeeperState.SyncConnected);
|
| 879 |
+
notifyAll();
|
| 880 |
+
} else if (event.getPath()!=null && event.getPath().equals(ZooDefs.CONFIG_NODE)) {
|
| 881 |
+
// in prod code never block the event thread!
|
| 882 |
+
zk.sync(ZooDefs.CONFIG_NODE, this, null);
|
| 883 |
+
zk.getConfig(this, this, null);
|
| 884 |
+
}
|
| 885 |
+
}
|
| 886 |
+
}
|
| 887 |
+
|
| 888 |
+
public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
|
| 889 |
+
if (path!=null && path.equals(ZooDefs.CONFIG_NODE)) {
|
| 890 |
+
String config[] = ConfigUtils.getClientConfigStr(new String(data)).split(" "); // similar to config -c
|
| 891 |
+
long version = Long.parseLong(config[0], 16);
|
| 892 |
+
if (this.configVersion == null){
|
| 893 |
+
this.configVersion = version;
|
| 894 |
+
} else if (version > this.configVersion) {
|
| 895 |
+
hostList = config[1];
|
| 896 |
+
try {
|
| 897 |
+
// the following command is not blocking but may cause the client to close the socket and
|
| 898 |
+
// migrate to a different server. In practice it's better to wait a short period of time, chosen
|
| 899 |
+
// randomly, so that different clients migrate at different times
|
| 900 |
+
zk.updateServerList(hostList);
|
| 901 |
+
} catch (IOException e) {
|
| 902 |
+
System.err.println("Error updating server list");
|
| 903 |
+
e.printStackTrace();
|
| 904 |
+
}
|
| 905 |
+
this.configVersion = version;
|
| 906 |
+
}
|
| 907 |
+
}
|
| 908 |
+
}
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperStarted.md
ADDED
|
@@ -0,0 +1,373 @@
|
| 1 |
+
<!--
|
| 2 |
+
Copyright 2002-2022 The Apache Software Foundation
|
| 3 |
+
|
| 4 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
you may not use this file except in compliance with the License.
|
| 6 |
+
You may obtain a copy of the License at
|
| 7 |
+
|
| 8 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
|
| 10 |
+
Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
See the License for the specific language governing permissions and
|
| 14 |
+
limitations under the License.
|
| 15 |
+
//-->
|
| 16 |
+
|
| 17 |
+
# ZooKeeper Getting Started Guide
|
| 18 |
+
|
| 19 |
+
* [Getting Started: Coordinating Distributed Applications with ZooKeeper](#getting-started-coordinating-distributed-applications-with-zooKeeper)
|
| 20 |
+
* [Pre-requisites](#sc_Prerequisites)
|
| 21 |
+
* [Download](#sc_Download)
|
| 22 |
+
* [Standalone Operation](#sc_InstallingSingleMode)
|
| 23 |
+
* [Managing ZooKeeper Storage](#sc_FileManagement)
|
| 24 |
+
* [Connecting to ZooKeeper](#sc_ConnectingToZooKeeper)
|
| 25 |
+
* [Programming to ZooKeeper](#sc_ProgrammingToZooKeeper)
|
| 26 |
+
* [Running Replicated ZooKeeper](#sc_RunningReplicatedZooKeeper)
|
| 27 |
+
* [Other Optimizations](#other-optimizations)
|
| 28 |
+
|
| 29 |
+
<a name="getting-started-coordinating-distributed-applications-with-zooKeeper"></a>
|
| 30 |
+
|
| 31 |
+
## Getting Started: Coordinating Distributed Applications with ZooKeeper
|
| 32 |
+
|
| 33 |
+
This document contains information to get you started quickly with
|
| 34 |
+
ZooKeeper. It is aimed primarily at developers hoping to try it out, and
|
| 35 |
+
contains simple installation instructions for a single ZooKeeper server, a
|
| 36 |
+
few commands to verify that it is running, and a simple programming
|
| 37 |
+
example. Finally, as a convenience, there are a few sections regarding
|
| 38 |
+
more complicated installations, for example running replicated
|
| 39 |
+
deployments, and optimizing the transaction log. However, for the complete
|
| 40 |
+
instructions for commercial deployments, please refer to the [ZooKeeper
|
| 41 |
+
Administrator's Guide](zookeeperAdmin.html).
|
| 42 |
+
|
| 43 |
+
<a name="sc_Prerequisites"></a>
|
| 44 |
+
|
| 45 |
+
### Pre-requisites
|
| 46 |
+
|
| 47 |
+
See [System Requirements](zookeeperAdmin.html#sc_systemReq) in the Admin guide.
|
| 48 |
+
|
| 49 |
+
<a name="sc_Download"></a>
|
| 50 |
+
|
| 51 |
+
### Download
|
| 52 |
+
|
| 53 |
+
To get a ZooKeeper distribution, download a recent
|
| 54 |
+
[stable](http://zookeeper.apache.org/releases.html) release from one of the Apache Download
|
| 55 |
+
Mirrors.
|
| 56 |
+
|
| 57 |
+
<a name="sc_InstallingSingleMode"></a>
|
| 58 |
+
|
| 59 |
+
### Standalone Operation
|
| 60 |
+
|
| 61 |
+
Setting up a ZooKeeper server in standalone mode is
|
| 62 |
+
straightforward. The server is contained in a single JAR file,
|
| 63 |
+
so installation consists of creating a configuration.
|
| 64 |
+
|
| 65 |
+
Once you've downloaded a stable ZooKeeper release, unpack
|
| 66 |
+
it and cd to the root.
|
| 67 |
+
|
| 68 |
+
To start ZooKeeper you need a configuration file. Here is a sample;
|
| 69 |
+
create it in **conf/zoo.cfg**:
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
tickTime=2000
|
| 73 |
+
dataDir=/var/lib/zookeeper
|
| 74 |
+
clientPort=2181
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
This file can be called anything, but for the sake of this
|
| 78 |
+
discussion call
|
| 79 |
+
it **conf/zoo.cfg**. Change the
|
| 80 |
+
value of **dataDir** to specify an
|
| 81 |
+
existing (empty to start with) directory. Here are the meanings
|
| 82 |
+
for each of the fields:
|
| 83 |
+
|
| 84 |
+
* ***tickTime*** :
|
| 85 |
+
the basic time unit in milliseconds used by ZooKeeper. It is
|
| 86 |
+
used to do heartbeats and the minimum session timeout will be
|
| 87 |
+
twice the tickTime.
|
| 88 |
+
|
| 89 |
+
* ***dataDir*** :
|
| 90 |
+
the location to store the in-memory database snapshots and,
|
| 91 |
+
unless specified otherwise, the transaction log of updates to the
|
| 92 |
+
database.
|
| 93 |
+
|
| 94 |
+
* ***clientPort*** :
|
| 95 |
+
the port to listen for client connections
|
| 96 |
+
|
| 97 |
+
Now that you have created the configuration file, you can start
|
| 98 |
+
ZooKeeper:
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
bin/zkServer.sh start
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
ZooKeeper logs messages using _logback_ -- more detail
|
| 105 |
+
available in the
|
| 106 |
+
[Logging](zookeeperProgrammers.html#Logging)
|
| 107 |
+
section of the Programmer's Guide. You will see log messages
|
| 108 |
+
coming to the console (default) and/or a log file depending on
|
| 109 |
+
the logback configuration.
|
| 110 |
+
|
| 111 |
+
The steps outlined here run ZooKeeper in standalone mode. There is
|
| 112 |
+
no replication, so if the ZooKeeper process fails, the service will go down.
|
| 113 |
+
This is fine for most development situations, but to run ZooKeeper in
|
| 114 |
+
replicated mode, please see [Running Replicated
|
| 115 |
+
ZooKeeper](#sc_RunningReplicatedZooKeeper).
|
| 116 |
+
|
| 117 |
+
<a name="sc_FileManagement"></a>
|
| 118 |
+
|
| 119 |
+
### Managing ZooKeeper Storage
|
| 120 |
+
|
| 121 |
+
For long running production systems ZooKeeper storage must
|
| 122 |
+
be managed externally (dataDir and logs). See the section on
|
| 123 |
+
[maintenance](zookeeperAdmin.html#sc_maintenance) for
|
| 124 |
+
more details.
|
| 125 |
+
|
| 126 |
+
<a name="sc_ConnectingToZooKeeper"></a>
|
| 127 |
+
|
| 128 |
+
### Connecting to ZooKeeper
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
$ bin/zkCli.sh -server 127.0.0.1:2181
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
This lets you perform simple, file-like operations.
|
| 135 |
+
|
| 136 |
+
Once you have connected, you should see something like:
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
Connecting to localhost:2181
|
| 140 |
+
...
|
| 141 |
+
Welcome to ZooKeeper!
|
| 142 |
+
JLine support is enabled
|
| 143 |
+
[zkshell: 0]
|
| 144 |
+
|
| 145 |
+
From the shell, type `help` to get a listing of commands that can be executed from the client, as in:
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
[zkshell: 0] help
|
| 149 |
+
ZooKeeper -server host:port cmd args
|
| 150 |
+
addauth scheme auth
|
| 151 |
+
close
|
| 152 |
+
config [-c] [-w] [-s]
|
| 153 |
+
connect host:port
|
| 154 |
+
create [-s] [-e] [-c] [-t ttl] path [data] [acl]
|
| 155 |
+
delete [-v version] path
|
| 156 |
+
deleteall path
|
| 157 |
+
delquota [-n|-b] path
|
| 158 |
+
get [-s] [-w] path
|
| 159 |
+
getAcl [-s] path
|
| 160 |
+
getAllChildrenNumber path
|
| 161 |
+
getEphemerals path
|
| 162 |
+
history
|
| 163 |
+
listquota path
|
| 164 |
+
ls [-s] [-w] [-R] path
|
| 165 |
+
printwatches on|off
|
| 166 |
+
quit
|
| 167 |
+
reconfig [-s] [-v version] [[-file path] | [-members serverID=host:port1:port2;port3[,...]*]] | [-add serverId=host:port1:port2;port3[,...]]* [-remove serverId[,...]*]
|
| 168 |
+
redo cmdno
|
| 169 |
+
removewatches path [-c|-d|-a] [-l]
|
| 170 |
+
set [-s] [-v version] path data
|
| 171 |
+
setAcl [-s] [-v version] [-R] path acl
|
| 172 |
+
setquota -n|-b val path
|
| 173 |
+
stat [-w] path
|
| 174 |
+
sync path
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
From here, you can try a few simple commands to get a feel for this simple command line interface. First, start by issuing the list command, as
|
| 178 |
+
in `ls`, yielding:
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
[zkshell: 8] ls /
|
| 182 |
+
[zookeeper]
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
Next, create a new znode by running `create /zk_test my_data`. This creates a new znode and associates the string "my_data" with the node.
|
| 186 |
+
You should see:
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
[zkshell: 9] create /zk_test my_data
|
| 190 |
+
Created /zk_test
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
Issue another `ls /` command to see what the directory looks like:
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
[zkshell: 11] ls /
|
| 197 |
+
[zookeeper, zk_test]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
Notice that the zk_test directory has now been created.
|
| 201 |
+
|
| 202 |
+
Next, verify that the data was associated with the znode by running the `get` command, as in:
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
[zkshell: 12] get /zk_test
|
| 206 |
+
my_data
|
| 207 |
+
cZxid = 5
|
| 208 |
+
ctime = Fri Jun 05 13:57:06 PDT 2009
|
| 209 |
+
mZxid = 5
|
| 210 |
+
mtime = Fri Jun 05 13:57:06 PDT 2009
|
| 211 |
+
pZxid = 5
|
| 212 |
+
cversion = 0
|
| 213 |
+
dataVersion = 0
|
| 214 |
+
aclVersion = 0
|
| 215 |
+
ephemeralOwner = 0
|
| 216 |
+
dataLength = 7
|
| 217 |
+
numChildren = 0
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
We can change the data associated with zk_test by issuing the `set` command, as in:
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
[zkshell: 14] set /zk_test junk
|
| 224 |
+
cZxid = 5
|
| 225 |
+
ctime = Fri Jun 05 13:57:06 PDT 2009
|
| 226 |
+
mZxid = 6
|
| 227 |
+
mtime = Fri Jun 05 14:01:52 PDT 2009
|
| 228 |
+
pZxid = 5
|
| 229 |
+
cversion = 0
|
| 230 |
+
dataVersion = 1
|
| 231 |
+
aclVersion = 0
|
| 232 |
+
ephemeralOwner = 0
|
| 233 |
+
dataLength = 4
|
| 234 |
+
numChildren = 0
|
| 235 |
+
[zkshell: 15] get /zk_test
|
| 236 |
+
junk
|
| 237 |
+
cZxid = 5
|
| 238 |
+
ctime = Fri Jun 05 13:57:06 PDT 2009
|
| 239 |
+
mZxid = 6
|
| 240 |
+
mtime = Fri Jun 05 14:01:52 PDT 2009
|
| 241 |
+
pZxid = 5
|
| 242 |
+
cversion = 0
|
| 243 |
+
dataVersion = 1
|
| 244 |
+
aclVersion = 0
|
| 245 |
+
ephemeralOwner = 0
|
| 246 |
+
dataLength = 4
|
| 247 |
+
numChildren = 0
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
(Notice that we did a `get` after setting the data and it did, indeed, change.)
|
| 251 |
+
|
| 252 |
+
Finally, let's `delete` the node by issuing:
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
[zkshell: 16] delete /zk_test
|
| 256 |
+
[zkshell: 17] ls /
|
| 257 |
+
[zookeeper]
|
| 258 |
+
[zkshell: 18]
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
That's it for now. To explore more, see the [Zookeeper CLI](zookeeperCLI.html).
|
| 262 |
+
|
| 263 |
+
<a name="sc_ProgrammingToZooKeeper"></a>
|
| 264 |
+
|
| 265 |
+
### Programming to ZooKeeper
|
| 266 |
+
|
| 267 |
+
ZooKeeper has Java bindings and C bindings. They are
|
| 268 |
+
functionally equivalent. The C bindings exist in two variants: single
|
| 269 |
+
threaded and multi-threaded. These differ only in how the messaging loop
|
| 270 |
+
is done. For more information, see the [Programming
|
| 271 |
+
Examples in the ZooKeeper Programmer's Guide](zookeeperProgrammers.html#ch_programStructureWithExample) for
|
| 272 |
+
sample code using the different APIs.
|
| 273 |
+
|
| 274 |
+
<a name="sc_RunningReplicatedZooKeeper"></a>
|
| 275 |
+
|
| 276 |
+
### Running Replicated ZooKeeper
|
| 277 |
+
|
| 278 |
+
Running ZooKeeper in standalone mode is convenient for evaluation,
|
| 279 |
+
some development, and testing. But in production, you should run
|
| 280 |
+
ZooKeeper in replicated mode. A replicated group of servers in the same
|
| 281 |
+
application is called a _quorum_, and in replicated
|
| 282 |
+
mode, all servers in the quorum have copies of the same configuration
|
| 283 |
+
file.
|
| 284 |
+
|
| 285 |
+
######Note
|
| 286 |
+
>For replicated mode, a minimum of three servers is required,
|
| 287 |
+
and it is strongly recommended that you have an odd number of
|
| 288 |
+
servers. If you only have two servers, then you are in a
|
| 289 |
+
situation where if one of them fails, there are not enough
|
| 290 |
+
machines to form a majority quorum. Two servers are inherently
|
| 291 |
+
**less** stable than a single server, because there are two single
|
| 292 |
+
points of failure.
|
| 293 |
+
|
| 294 |
+
The required
|
| 295 |
+
**conf/zoo.cfg**
|
| 296 |
+
file for replicated mode is similar to the one used in standalone
|
| 297 |
+
mode, but with a few differences. Here is an example:
|
| 298 |
+
|
| 299 |
+
tickTime=2000
|
| 300 |
+
dataDir=/var/lib/zookeeper
|
| 301 |
+
clientPort=2181
|
| 302 |
+
initLimit=5
|
| 303 |
+
syncLimit=2
|
| 304 |
+
server.1=zoo1:2888:3888
|
| 305 |
+
server.2=zoo2:2888:3888
|
| 306 |
+
server.3=zoo3:2888:3888
|
| 307 |
+
|
| 308 |
+
The new entry, **initLimit**, is
|
| 309 |
+
the timeout ZooKeeper uses to limit how long the ZooKeeper
|
| 310 |
+
servers in the quorum have to connect to a leader. The entry **syncLimit** limits how far out of date a server can
|
| 311 |
+
be from a leader.
|
| 312 |
+
|
| 313 |
+
With both of these timeouts, you specify the unit of time using
|
| 314 |
+
**tickTime**. In this example, the timeout
|
| 315 |
+
for initLimit is 5 ticks at 2000 milliseconds a tick, or 10
|
| 316 |
+
seconds.
|
| 317 |
+
|
| 318 |
+
The entries of the form _server.X_ list the
|
| 319 |
+
servers that make up the ZooKeeper service. When the server starts up,
|
| 320 |
+
it knows which server it is by looking for the file
|
| 321 |
+
_myid_ in the data directory. That file
|
| 322 |
+
contains the server number, in ASCII.
|
| 323 |
+
|
| 324 |
+
Finally, note the two port numbers after each server
|
| 325 |
+
name: "2888" and "3888". Peers use the former port to connect
|
| 326 |
+
to other peers. Such a connection is necessary so that peers
|
| 327 |
+
can communicate, for example, to agree upon the order of
|
| 328 |
+
updates. More specifically, a ZooKeeper server uses this port
|
| 329 |
+
to connect followers to the leader. When a new leader arises, a
|
| 330 |
+
follower opens a TCP connection to the leader using this
|
| 331 |
+
port. Because the default leader election also uses TCP, we
|
| 332 |
+
currently require another port for leader election. This is the
|
| 333 |
+
second port in the server entry.
|
| 334 |
+
|
| 335 |
+
######Note
|
| 336 |
+
>If you want to test multiple servers on a single
|
| 337 |
+
machine, specify the server name
|
| 338 |
+
as _localhost_ with unique quorum &
|
| 339 |
+
leader election ports (e.g. 2888:3888, 2889:3889, 2890:3890 in
|
| 340 |
+
the example above) for each server.X in that server's config
|
| 341 |
+
file. Of course separate _dataDir_s and
|
| 342 |
+
distinct _clientPort_s are also necessary
|
| 343 |
+
(in the above replicated example, running on a
|
| 344 |
+
single _localhost_, you would still have
|
| 345 |
+
three config files). A sketch of such a setup appears after this note.
|
| 346 |
+
|
| 347 |
+
>Please be aware that setting up multiple servers on a single
|
| 348 |
+
machine will not create any redundancy. If something were to
|
| 349 |
+
happen which caused the machine to die, all of the zookeeper
|
| 350 |
+
servers would be offline. Full redundancy requires that each
|
| 351 |
+
server have its own machine. It must be a completely separate
|
| 352 |
+
physical server. Multiple virtual machines on the same physical
|
| 353 |
+
host are still vulnerable to the complete failure of that host.
|
| 354 |
+
|
| 355 |
+
>If you have multiple network interfaces in your ZooKeeper machines,
|
| 356 |
+
you can also instruct ZooKeeper to bind on all of your interfaces and
|
| 357 |
+
automatically switch to a healthy interface in case of a network failure.
|
| 358 |
+
For details, see the [Configuration Parameters](zookeeperAdmin.html#id_multi_address).
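As a sketch only (paths and ports are illustrative), the first of the three config files for such a single-machine test could look like the following, with **zoo2.cfg** and **zoo3.cfg** differing only in _dataDir_ and _clientPort_:

    # conf/zoo1.cfg
    tickTime=2000
    initLimit=5
    syncLimit=2
    dataDir=/tmp/zookeeper/1
    clientPort=2181
    server.1=localhost:2888:3888
    server.2=localhost:2889:3889
    server.3=localhost:2890:3890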
|
| 359 |
+
|
| 360 |
+
<a name="other-optimizations"></a>
|
| 361 |
+
|
| 362 |
+
### Other Optimizations
|
| 363 |
+
|
| 364 |
+
There are a couple of other configuration parameters that can
|
| 365 |
+
greatly increase performance:
|
| 366 |
+
|
| 367 |
+
* To get low latencies on updates it is important to
|
| 368 |
+
have a dedicated transaction log directory. By default
|
| 369 |
+
transaction logs are put in the same directory as the data
|
| 370 |
+
snapshots and _myid_ file. The dataLogDir
|
| 371 |
+
parameter specifies a different directory to use for the
|
| 372 |
+
transaction logs.
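For example, adding a line like the following to **conf/zoo.cfg** (the path is only an illustration) moves the transaction logs to their own directory, ideally on a dedicated device:

    dataLogDir=/var/lib/zookeeper_txnlog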
|
| 373 |
+
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperTools.md
ADDED
|
@@ -0,0 +1,698 @@
|
| 1 |
+
<!--
|
| 2 |
+
Copyright 2002-2022 The Apache Software Foundation
|
| 3 |
+
|
| 4 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
you may not use this file except in compliance with the License.
|
| 6 |
+
You may obtain a copy of the License at
|
| 7 |
+
|
| 8 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
|
| 10 |
+
Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
See the License for the specific language governing permissions and
|
| 14 |
+
limitations under the License.
|
| 15 |
+
//-->
|
| 16 |
+
|
| 17 |
+
# A series of tools for ZooKeeper
|
| 18 |
+
|
| 19 |
+
* [Scripts](#Scripts)
|
| 20 |
+
* [zkServer.sh](#zkServer)
|
| 21 |
+
* [zkCli.sh](#zkCli)
|
| 22 |
+
* [zkEnv.sh](#zkEnv)
|
| 23 |
+
* [zkCleanup.sh](#zkCleanup)
|
| 24 |
+
* [zkTxnLogToolkit.sh](#zkTxnLogToolkit)
|
| 25 |
+
* [zkSnapShotToolkit.sh](#zkSnapShotToolkit)
|
| 26 |
+
* [zkSnapshotRecursiveSummaryToolkit.sh](#zkSnapshotRecursiveSummaryToolkit)
|
| 27 |
+
* [zkSnapshotComparer.sh](#zkSnapshotComparer)
|
| 28 |
+
|
| 29 |
+
* [Benchmark](#Benchmark)
|
| 30 |
+
* [YCSB](#YCSB)
|
| 31 |
+
* [zk-smoketest](#zk-smoketest)
|
| 32 |
+
|
| 33 |
+
* [Testing](#Testing)
|
| 34 |
+
* [Fault Injection Framework](#fault-injection)
|
| 35 |
+
* [Byteman](#Byteman)
|
| 36 |
+
* [Jepsen Test](#jepsen-test)
|
| 37 |
+
|
| 38 |
+
<a name="Scripts"></a>
|
| 39 |
+
|
| 40 |
+
## Scripts
|
| 41 |
+
|
| 42 |
+
<a name="zkServer"></a>
|
| 43 |
+
|
| 44 |
+
### zkServer.sh
|
| 45 |
+
A command for operating the ZooKeeper server.
|
| 46 |
+
|
| 47 |
+
```bash
|
| 48 |
+
Usage: ./zkServer.sh {start|start-foreground|stop|version|restart|status|upgrade|print-cmd}
|
| 49 |
+
# start the server
|
| 50 |
+
./zkServer.sh start
|
| 51 |
+
|
| 52 |
+
# start the server in the foreground for debugging
|
| 53 |
+
./zkServer.sh start-foreground
|
| 54 |
+
|
| 55 |
+
# stop the server
|
| 56 |
+
./zkServer.sh stop
|
| 57 |
+
|
| 58 |
+
# restart the server
|
| 59 |
+
./zkServer.sh restart
|
| 60 |
+
|
| 61 |
+
# show the status, mode, and role of the server
|
| 62 |
+
./zkServer.sh status
|
| 63 |
+
JMX enabled by default
|
| 64 |
+
Using config: /data/software/zookeeper/conf/zoo.cfg
|
| 65 |
+
Mode: standalone
|
| 66 |
+
|
| 67 |
+
# Deprecated
|
| 68 |
+
./zkServer.sh upgrade
|
| 69 |
+
|
| 70 |
+
# print the parameters of the start-up
|
| 71 |
+
./zkServer.sh print-cmd
|
| 72 |
+
|
| 73 |
+
# show the version of the ZooKeeper server
|
| 74 |
+
./zkServer.sh version
|
| 75 |
+
Apache ZooKeeper, version 3.6.0-SNAPSHOT 06/11/2019 05:39 GMT
|
| 76 |
+
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
The `status` command establishes a client connection to the server to execute diagnostic commands.
|
| 80 |
+
When the ZooKeeper cluster is started in client SSL only mode (by omitting the clientPort
|
| 81 |
+
from the zoo.cfg), then additional SSL related configuration has to be provided before using
|
| 82 |
+
the `./zkServer.sh status` command to find out if the ZooKeeper server is running. An example:
|
| 83 |
+
|
| 84 |
+
CLIENT_JVMFLAGS="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.ssl.trustStore.location=/tmp/clienttrust.jks -Dzookeeper.ssl.trustStore.password=password -Dzookeeper.ssl.keyStore.location=/tmp/client.jks -Dzookeeper.ssl.keyStore.password=password -Dzookeeper.client.secure=true" ./zkServer.sh status
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
<a name="zkCli"></a>
|
| 88 |
+
|
| 89 |
+
### zkCli.sh
|
| 90 |
+
See the [ZooKeeper CLI](zookeeperCLI.html).
|
| 91 |
+
|
| 92 |
+
<a name="zkEnv"></a>
|
| 93 |
+
|
| 94 |
+
### zkEnv.sh
|
| 95 |
+
The environment settings for the ZooKeeper server.
|
| 96 |
+
|
| 97 |
+
```bash
|
| 98 |
+
# the setting of log property
|
| 99 |
+
ZOO_LOG_DIR: the directory to store the logs
|
| 100 |
+
```
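For example (the directory is only an illustration), the variable can be exported before starting the server:

```bash
# store the server logs under a custom directory (illustrative path)
export ZOO_LOG_DIR=/var/log/zookeeper
./zkServer.sh start
```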
|
| 101 |
+
|
| 102 |
+
<a name="zkCleanup"></a>
|
| 103 |
+
|
| 104 |
+
### zkCleanup.sh
|
| 105 |
+
Clean up the old snapshots and transaction logs.
|
| 106 |
+
|
| 107 |
+
```bash
|
| 108 |
+
Usage:
|
| 109 |
+
* args dataLogDir [snapDir] -n count
|
| 110 |
+
* dataLogDir -- path to the txn log directory
|
| 111 |
+
* snapDir -- path to the snapshot directory
|
| 112 |
+
* count -- the number of old snaps/logs you want to keep, value should be greater than or equal to 3
|
| 113 |
+
# Keep the latest 5 logs and snapshots
|
| 114 |
+
./zkCleanup.sh -n 5
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
<a name="zkTxnLogToolkit"></a>
|
| 118 |
+
|
| 119 |
+
### zkTxnLogToolkit.sh
|
| 120 |
+
TxnLogToolkit is a command line tool shipped with ZooKeeper which
|
| 121 |
+
is capable of recovering transaction log entries with broken CRC.
|
| 122 |
+
|
| 123 |
+
Running it without any command line parameters or with the `-h,--help` argument, it outputs the following help page:
|
| 124 |
+
|
| 125 |
+
$ bin/zkTxnLogToolkit.sh
|
| 126 |
+
usage: TxnLogToolkit [-dhrv] txn_log_file_name
|
| 127 |
+
-d,--dump Dump mode. Dump all entries of the log file. (this is the default)
|
| 128 |
+
-h,--help Print help message
|
| 129 |
+
-r,--recover Recovery mode. Re-calculate CRC for broken entries.
|
| 130 |
+
-v,--verbose Be verbose in recovery mode: print all entries, not just fixed ones.
|
| 131 |
+
-y,--yes Non-interactive mode: repair all CRC errors without asking
|
| 132 |
+
|
| 133 |
+
The default behaviour is safe: it dumps the entries of the given
|
| 134 |
+
transaction log file to the screen (same as using the `-d,--dump` parameter):
|
| 135 |
+
|
| 136 |
+
$ bin/zkTxnLogToolkit.sh log.100000001
|
| 137 |
+
ZooKeeper Transactional Log File with dbid 0 txnlog format version 2
|
| 138 |
+
4/5/18 2:15:58 PM CEST session 0x16295bafcc40000 cxid 0x0 zxid 0x100000001 createSession 30000
|
| 139 |
+
CRC ERROR - 4/5/18 2:16:05 PM CEST session 0x16295bafcc40000 cxid 0x1 zxid 0x100000002 closeSession null
|
| 140 |
+
4/5/18 2:16:05 PM CEST session 0x16295bafcc40000 cxid 0x1 zxid 0x100000002 closeSession null
|
| 141 |
+
4/5/18 2:16:12 PM CEST session 0x26295bafcc90000 cxid 0x0 zxid 0x100000003 createSession 30000
|
| 142 |
+
4/5/18 2:17:34 PM CEST session 0x26295bafcc90000 cxid 0x0 zxid 0x200000001 closeSession null
|
| 143 |
+
4/5/18 2:17:34 PM CEST session 0x16295bd23720000 cxid 0x0 zxid 0x200000002 createSession 30000
|
| 144 |
+
4/5/18 2:18:02 PM CEST session 0x16295bd23720000 cxid 0x2 zxid 0x200000003 create '/andor,#626262,v{s{31,s{'world,'anyone}}},F,1
|
| 145 |
+
EOF reached after 6 txns.
|
| 146 |
+
|
| 147 |
+
There's a CRC error in the 2nd entry of the above transaction log file. In **dump**
|
| 148 |
+
mode, the toolkit only prints this information to the screen without touching the original file. In
|
| 149 |
+
**recovery** mode (`-r,--recover` flag) the original file still remains
|
| 150 |
+
untouched and all transactions will be copied over to a new txn log file with ".fixed" suffix. It recalculates
|
| 151 |
+
CRC values and copies over the calculated value if it doesn't match the original txn entry.
|
| 152 |
+
By default, the tool works interactively: it asks for confirmation whenever a CRC error is encountered.
|
| 153 |
+
|
| 154 |
+
$ bin/zkTxnLogToolkit.sh -r log.100000001
|
| 155 |
+
ZooKeeper Transactional Log File with dbid 0 txnlog format version 2
|
| 156 |
+
CRC ERROR - 4/5/18 2:16:05 PM CEST session 0x16295bafcc40000 cxid 0x1 zxid 0x100000002 closeSession null
|
| 157 |
+
Would you like to fix it (Yes/No/Abort) ?
|
| 158 |
+
|
| 159 |
+
Answering **Yes** means the newly calculated CRC value will be written
|
| 160 |
+
to the new file. **No** means that the original CRC value will be copied over.
|
| 161 |
+
**Abort** will abort the entire operation and exit.
|
| 162 |
+
(In this case the ".fixed" file will not be deleted and is left in a half-complete state: it contains only the entries which
|
| 163 |
+
have already been processed or only the header if the operation was aborted at the first entry.)
|
| 164 |
+
|
| 165 |
+
$ bin/zkTxnLogToolkit.sh -r log.100000001
|
| 166 |
+
ZooKeeper Transactional Log File with dbid 0 txnlog format version 2
|
| 167 |
+
CRC ERROR - 4/5/18 2:16:05 PM CEST session 0x16295bafcc40000 cxid 0x1 zxid 0x100000002 closeSession null
|
| 168 |
+
Would you like to fix it (Yes/No/Abort) ? y
|
| 169 |
+
EOF reached after 6 txns.
|
| 170 |
+
Recovery file log.100000001.fixed has been written with 1 fixed CRC error(s)
|
| 171 |
+
|
| 172 |
+
The default behaviour of recovery is to be silent: only entries with CRC errors get printed to the screen.
|
| 173 |
+
One can turn on verbose mode with the `-v,--verbose` parameter to see all records.
|
| 174 |
+
Interactive mode can be turned off with the `-y,--yes` parameter. In this case all CRC errors will be fixed
|
| 175 |
+
in the new transaction file.
|
| 176 |
+
|
| 177 |
+
<a name="zkSnapShotToolkit"></a>
|
| 178 |
+
|
| 179 |
+
### zkSnapShotToolkit.sh
|
| 180 |
+
Dump a snapshot file to stdout, showing the detailed information of each zk-node.
|
| 181 |
+
|
| 182 |
+
```bash
|
| 183 |
+
# help
|
| 184 |
+
./zkSnapShotToolkit.sh
|
| 185 |
+
/usr/bin/java
|
| 186 |
+
USAGE: SnapshotFormatter [-d|-json] snapshot_file
|
| 187 |
+
-d dump the data for each znode
|
| 188 |
+
-json dump znode info in json format
|
| 189 |
+
|
| 190 |
+
# show each zk-node's info without data content
|
| 191 |
+
./zkSnapShotToolkit.sh /data/zkdata/version-2/snapshot.fa01000186d
|
| 192 |
+
/zk-latencies_4/session_946
|
| 193 |
+
cZxid = 0x00000f0003110b
|
| 194 |
+
ctime = Wed Sep 19 21:58:22 CST 2018
|
| 195 |
+
mZxid = 0x00000f0003110b
|
| 196 |
+
mtime = Wed Sep 19 21:58:22 CST 2018
|
| 197 |
+
pZxid = 0x00000f0003110b
|
| 198 |
+
cversion = 0
|
| 199 |
+
dataVersion = 0
|
| 200 |
+
aclVersion = 0
|
| 201 |
+
ephemeralOwner = 0x00000000000000
|
| 202 |
+
dataLength = 100
|
| 203 |
+
|
| 204 |
+
# [-d] show each zk-node's info with data content
|
| 205 |
+
./zkSnapShotToolkit.sh -d /data/zkdata/version-2/snapshot.fa01000186d
|
| 206 |
+
/zk-latencies2/session_26229
|
| 207 |
+
cZxid = 0x00000900007ba0
|
| 208 |
+
ctime = Wed Aug 15 20:13:52 CST 2018
|
| 209 |
+
mZxid = 0x00000900007ba0
|
| 210 |
+
mtime = Wed Aug 15 20:13:52 CST 2018
|
| 211 |
+
pZxid = 0x00000900007ba0
|
| 212 |
+
cversion = 0
|
| 213 |
+
dataVersion = 0
|
| 214 |
+
aclVersion = 0
|
| 215 |
+
ephemeralOwner = 0x00000000000000
|
| 216 |
+
data = eHh4eHh4eHh4eHh4eA==
|
| 217 |
+
|
| 218 |
+
# [-json] show each zk-node's info in json format
|
| 219 |
+
./zkSnapShotToolkit.sh -json /data/zkdata/version-2/snapshot.fa01000186d
|
| 220 |
+
[[1,0,{"progname":"SnapshotFormatter.java","progver":"0.01","timestamp":1559788148637},[{"name":"\/","asize":0,"dsize":0,"dev":0,"ino":1001},[{"name":"zookeeper","asize":0,"dsize":0,"dev":0,"ino":1002},{"name":"config","asize":0,"dsize":0,"dev":0,"ino":1003},[{"name":"quota","asize":0,"dsize":0,"dev":0,"ino":1004},[{"name":"test","asize":0,"dsize":0,"dev":0,"ino":1005},{"name":"zookeeper_limits","asize":52,"dsize":52,"dev":0,"ino":1006},{"name":"zookeeper_stats","asize":15,"dsize":15,"dev":0,"ino":1007}]]],{"name":"test","asize":0,"dsize":0,"dev":0,"ino":1008}]]
|
| 221 |
+
```
|
| 222 |
+
<a name="zkSnapshotRecursiveSummaryToolkit"></a>
|
| 223 |
+
|
| 224 |
+
### zkSnapshotRecursiveSummaryToolkit.sh
|
| 225 |
+
Recursively collect and display child count and data size for a selected node.
|
| 226 |
+
|
| 227 |
+
$ ./zkSnapshotRecursiveSummaryToolkit.sh
|
| 228 |
+
USAGE:
|
| 229 |
+
|
| 230 |
+
SnapshotRecursiveSummary <snapshot_file> <starting_node> <max_depth>
|
| 231 |
+
|
| 232 |
+
snapshot_file: path to the zookeeper snapshot
|
| 233 |
+
starting_node: the path in the zookeeper tree where the traversal should begin
|
| 234 |
+
max_depth: defines the depth down to which the tool writes output. 0 means there is no depth limit and every non-leaf node's stats will be displayed; 1 means the output will only contain the starting node's and its children's stats; 2 adds another level, and so on. This ONLY affects the level of detail displayed, NOT the calculation.
|
| 235 |
+
|
| 236 |
+
```bash
|
| 237 |
+
# recursively collect and display child count and data for the root node and 2 levels below it
|
| 238 |
+
./zkSnapshotRecursiveSummaryToolkit.sh /data/zkdata/version-2/snapshot.fa01000186d / 2
|
| 239 |
+
|
| 240 |
+
/
|
| 241 |
+
children: 1250511
|
| 242 |
+
data: 1952186580
|
| 243 |
+
-- /zookeeper
|
| 244 |
+
-- children: 1
|
| 245 |
+
-- data: 0
|
| 246 |
+
-- /solr
|
| 247 |
+
-- children: 1773
|
| 248 |
+
-- data: 8419162
|
| 249 |
+
---- /solr/configs
|
| 250 |
+
---- children: 1640
|
| 251 |
+
---- data: 8407643
|
| 252 |
+
---- /solr/overseer
|
| 253 |
+
---- children: 6
|
| 254 |
+
---- data: 0
|
| 255 |
+
---- /solr/live_nodes
|
| 256 |
+
---- children: 3
|
| 257 |
+
---- data: 0
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
<a name="zkSnapshotComparer"></a>
|
| 261 |
+
|
| 262 |
+
### zkSnapshotComparer.sh
|
| 263 |
+
SnapshotComparer is a tool that loads and compares two snapshots with configurable threshold and various filters, and outputs information about the delta.
|
| 264 |
+
|
| 265 |
+
The delta includes the specific znode paths added, updated, or deleted between one snapshot and the other.
|
| 266 |
+
|
| 267 |
+
It's useful in use cases that involve snapshot analysis, such as offline data consistency checking and data trending analysis (e.g. what's growing under which zNode path, and when).
|
| 268 |
+
|
| 269 |
+
This tool only outputs information about permanent nodes, ignoring both sessions and ephemeral nodes.
|
| 270 |
+
|
| 271 |
+
It provides two tuning parameters to help filter out noise:
|
| 272 |
+
1. `--nodes` Threshold number of children added/removed;
|
| 273 |
+
2. `--bytes` Threshold number of bytes added/removed.
|
| 274 |
+
|
| 275 |
+
#### Locate Snapshots
|
| 276 |
+
Snapshots can be found in the [Zookeeper Data Directory](zookeeperAdmin.html#The+Data+Directory), which is configured in [conf/zoo.cfg](zookeeperStarted.html#sc_InstallingSingleMode) when setting up the Zookeeper server.
|
| 277 |
+
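As a quick sketch, the snapshot files can be listed directly from the data directory; the path below assumes `dataDir=/data/zkdata`, as used in the examples elsewhere on this page:

```
ls /data/zkdata/version-2/snapshot.*
```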
|
| 278 |
+
#### Supported Snapshot Formats
|
| 279 |
+
This tool supports the uncompressed snapshot format as well as the compressed snapshot file formats `snappy` and `gz`. Snapshots in different formats can be compared with this tool directly, without decompression.
|
| 280 |
+
|
| 281 |
+
#### Running the Tool
|
| 282 |
+
When run with no command line arguments or with an unrecognized argument, the tool outputs the following help page:
|
| 283 |
+
|
| 284 |
+
```
|
| 285 |
+
usage: java -cp <classPath> org.apache.zookeeper.server.SnapshotComparer
|
| 286 |
+
-b,--bytes <BYTETHRESHOLD> (Required) The node data delta size threshold, in bytes, for printing the node.
|
| 287 |
+
-d,--debug Use debug output.
|
| 288 |
+
-i,--interactive Enter interactive mode.
|
| 289 |
+
-l,--left <LEFT> (Required) The left snapshot file.
|
| 290 |
+
-n,--nodes <NODETHRESHOLD> (Required) The descendant node delta size threshold, in nodes, for printing the node.
|
| 291 |
+
-r,--right <RIGHT> (Required) The right snapshot file.
|
| 292 |
+
```
|
| 293 |
+
Example Command:
|
| 294 |
+
|
| 295 |
+
```
|
| 296 |
+
./bin/zkSnapshotComparer.sh -l /zookeeper-data/backup/snapshot.d.snappy -r /zookeeper-data/backup/snapshot.44 -b 2 -n 1
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
Example Output:
|
| 300 |
+
```
|
| 301 |
+
...
|
| 302 |
+
Deserialized snapshot in snapshot.44 in 0.002741 seconds
|
| 303 |
+
Processed data tree in 0.000361 seconds
|
| 304 |
+
Node count: 10
|
| 305 |
+
Total size: 0
|
| 306 |
+
Max depth: 4
|
| 307 |
+
Count of nodes at depth 0: 1
|
| 308 |
+
Count of nodes at depth 1: 2
|
| 309 |
+
Count of nodes at depth 2: 4
|
| 310 |
+
Count of nodes at depth 3: 3
|
| 311 |
+
|
| 312 |
+
Node count: 22
|
| 313 |
+
Total size: 2903
|
| 314 |
+
Max depth: 5
|
| 315 |
+
Count of nodes at depth 0: 1
|
| 316 |
+
Count of nodes at depth 1: 2
|
| 317 |
+
Count of nodes at depth 2: 4
|
| 318 |
+
Count of nodes at depth 3: 7
|
| 319 |
+
Count of nodes at depth 4: 8
|
| 320 |
+
|
| 321 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 322 |
+
Analysis for depth 0
|
| 323 |
+
Node found in both trees. Delta: 2903 bytes, 12 descendants
|
| 324 |
+
Analysis for depth 1
|
| 325 |
+
Node /zk_test found in both trees. Delta: 2903 bytes, 12 descendants
|
| 326 |
+
Analysis for depth 2
|
| 327 |
+
Node /zk_test/gz found in both trees. Delta: 730 bytes, 3 descendants
|
| 328 |
+
Node /zk_test/snappy found in both trees. Delta: 2173 bytes, 9 descendants
|
| 329 |
+
Analysis for depth 3
|
| 330 |
+
Node /zk_test/gz/12345 found in both trees. Delta: 9 bytes, 1 descendants
|
| 331 |
+
Node /zk_test/gz/a found only in right tree. Descendant size: 721. Descendant count: 0
|
| 332 |
+
Node /zk_test/snappy/anotherTest found in both trees. Delta: 1738 bytes, 2 descendants
|
| 333 |
+
Node /zk_test/snappy/test_1 found only in right tree. Descendant size: 344. Descendant count: 3
|
| 334 |
+
Node /zk_test/snappy/test_2 found only in right tree. Descendant size: 91. Descendant count: 2
|
| 335 |
+
Analysis for depth 4
|
| 336 |
+
Node /zk_test/gz/12345/abcdef found only in right tree. Descendant size: 9. Descendant count: 0
|
| 337 |
+
Node /zk_test/snappy/anotherTest/abc found only in right tree. Descendant size: 1738. Descendant count: 0
|
| 338 |
+
Node /zk_test/snappy/test_1/a found only in right tree. Descendant size: 93. Descendant count: 0
|
| 339 |
+
Node /zk_test/snappy/test_1/b found only in right tree. Descendant size: 251. Descendant count: 0
|
| 340 |
+
Node /zk_test/snappy/test_2/xyz found only in right tree. Descendant size: 33. Descendant count: 0
|
| 341 |
+
Node /zk_test/snappy/test_2/y found only in right tree. Descendant size: 58. Descendant count: 0
|
| 342 |
+
All layers compared.
|
| 343 |
+
```
|
| 344 |
+
|
| 345 |
+
#### Interactive Mode
|
| 346 |
+
Use "-i" or "--interactive" to enter interactive mode:
|
| 347 |
+
```
|
| 348 |
+
./bin/zkSnapshotComparer.sh -l /zookeeper-data/backup/snapshot.d.snappy -r /zookeeper-data/backup/snapshot.44 -b 2 -n 1 -i
|
| 349 |
+
```
|
| 350 |
+
|
| 351 |
+
There are three options to proceed:
|
| 352 |
+
```
|
| 353 |
+
- Press enter to move to print current depth layer;
|
| 354 |
+
- Type a number to jump to and print all nodes at a given depth;
|
| 355 |
+
- Enter an ABSOLUTE path to print the immediate subtree of a node. Path must start with '/'.
|
| 356 |
+
```
|
| 357 |
+
|
| 358 |
+
Note: As indicated by the interactive messages, the tool only shows analysis for results that pass the tuning-parameter filters (the bytes threshold and the nodes threshold).
|
| 359 |
+
|
| 360 |
+
Press enter to print current depth layer:
|
| 361 |
+
|
| 362 |
+
```
|
| 363 |
+
Current depth is 0
|
| 364 |
+
Press enter to move to print current depth layer;
|
| 365 |
+
...
|
| 366 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 367 |
+
Analysis for depth 0
|
| 368 |
+
Node found in both trees. Delta: 2903 bytes, 12 descendants
|
| 369 |
+
```
|
| 370 |
+
|
| 371 |
+
Type a number to jump to and print all nodes at a given depth:
|
| 372 |
+
|
| 373 |
+
(Jump forward)
|
| 374 |
+
|
| 375 |
+
```
|
| 376 |
+
Current depth is 1
|
| 377 |
+
...
|
| 378 |
+
Type a number to jump to and print all nodes at a given depth;
|
| 379 |
+
...
|
| 380 |
+
3
|
| 381 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 382 |
+
Analysis for depth 3
|
| 383 |
+
Node /zk_test/gz/12345 found in both trees. Delta: 9 bytes, 1 descendants
|
| 384 |
+
Node /zk_test/gz/a found only in right tree. Descendant size: 721. Descendant count: 0
|
| 385 |
+
Filtered node /zk_test/gz/anotherOne of left size 0, right size 0
|
| 386 |
+
Filtered right node /zk_test/gz/b of size 0
|
| 387 |
+
Node /zk_test/snappy/anotherTest found in both trees. Delta: 1738 bytes, 2 descendants
|
| 388 |
+
Node /zk_test/snappy/test_1 found only in right tree. Descendant size: 344. Descendant count: 3
|
| 389 |
+
Node /zk_test/snappy/test_2 found only in right tree. Descendant size: 91. Descendant count: 2
|
| 390 |
+
```
|
| 391 |
+
|
| 392 |
+
(Jump back)
|
| 393 |
+
|
| 394 |
+
```
|
| 395 |
+
Current depth is 3
|
| 396 |
+
...
|
| 397 |
+
Type a number to jump to and print all nodes at a given depth;
|
| 398 |
+
...
|
| 399 |
+
0
|
| 400 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 401 |
+
Analysis for depth 0
|
| 402 |
+
Node found in both trees. Delta: 2903 bytes, 12 descendants
|
| 403 |
+
```
|
| 404 |
+
|
| 405 |
+
Out of range depth is handled:
|
| 406 |
+
|
| 407 |
+
```
|
| 408 |
+
Current depth is 1
|
| 409 |
+
...
|
| 410 |
+
Type a number to jump to and print all nodes at a given depth;
|
| 411 |
+
...
|
| 412 |
+
10
|
| 413 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 414 |
+
Depth must be in range [0, 4]
|
| 415 |
+
```
|
| 416 |
+
|
| 417 |
+
Enter an ABSOLUTE path to print the immediate subtree of a node:
|
| 418 |
+
|
| 419 |
+
```
|
| 420 |
+
Current depth is 3
|
| 421 |
+
...
|
| 422 |
+
Enter an ABSOLUTE path to print the immediate subtree of a node.
|
| 423 |
+
/zk_test
|
| 424 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 425 |
+
Analysis for node /zk_test
|
| 426 |
+
Node /zk_test/gz found in both trees. Delta: 730 bytes, 3 descendants
|
| 427 |
+
Node /zk_test/snappy found in both trees. Delta: 2173 bytes, 9 descendants
|
| 428 |
+
```
|
| 429 |
+
|
| 430 |
+
Invalid path is handled:
|
| 431 |
+
|
| 432 |
+
```
|
| 433 |
+
Current depth is 3
|
| 434 |
+
...
|
| 435 |
+
Enter an ABSOLUTE path to print the immediate subtree of a node.
|
| 436 |
+
/non-exist-path
|
| 437 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 438 |
+
Analysis for node /non-exist-path
|
| 439 |
+
Path /non-exist-path is neither found in left tree nor right tree.
|
| 440 |
+
```
|
| 441 |
+
|
| 442 |
+
Invalid input is handled:
|
| 443 |
+
```
|
| 444 |
+
Current depth is 1
|
| 445 |
+
- Press enter to move to print current depth layer;
|
| 446 |
+
- Type a number to jump to and print all nodes at a given depth;
|
| 447 |
+
- Enter an ABSOLUTE path to print the immediate subtree of a node. Path must start with '/'.
|
| 448 |
+
12223999999999999999999999999999999999999
|
| 449 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 450 |
+
Input 12223999999999999999999999999999999999999 is not valid. Depth must be in range [0, 4]. Path must be an absolute path which starts with '/'.
|
| 451 |
+
```
|
| 452 |
+
|
| 453 |
+
Interactive mode exits automatically once all layers have been compared:
|
| 454 |
+
|
| 455 |
+
```
|
| 456 |
+
Printing analysis for nodes difference larger than 2 bytes or node count difference larger than 1.
|
| 457 |
+
Analysis for depth 4
|
| 458 |
+
Node /zk_test/gz/12345/abcdef found only in right tree. Descendant size: 9. Descendant count: 0
|
| 459 |
+
Node /zk_test/snappy/anotherTest/abc found only in right tree. Descendant size: 1738. Descendant count: 0
|
| 460 |
+
Filtered right node /zk_test/snappy/anotherTest/abcd of size 0
|
| 461 |
+
Node /zk_test/snappy/test_1/a found only in right tree. Descendant size: 93. Descendant count: 0
|
| 462 |
+
Node /zk_test/snappy/test_1/b found only in right tree. Descendant size: 251. Descendant count: 0
|
| 463 |
+
Filtered right node /zk_test/snappy/test_1/c of size 0
|
| 464 |
+
Node /zk_test/snappy/test_2/xyz found only in right tree. Descendant size: 33. Descendant count: 0
|
| 465 |
+
Node /zk_test/snappy/test_2/y found only in right tree. Descendant size: 58. Descendant count: 0
|
| 466 |
+
All layers compared.
|
| 467 |
+
```
|
| 468 |
+
|
| 469 |
+
Or use `^c` to exit interactive mode at any time.
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
<a name="Benchmark"></a>
|
| 473 |
+
|
| 474 |
+
## Benchmark
|
| 475 |
+
|
| 476 |
+
<a name="YCSB"></a>
|
| 477 |
+
|
| 478 |
+
### YCSB
|
| 479 |
+
|
| 480 |
+
#### Quick Start
|
| 481 |
+
|
| 482 |
+
This section describes how to run YCSB on ZooKeeper.
|
| 483 |
+
|
| 484 |
+
#### 1. Start ZooKeeper Server(s)
|
| 485 |
+
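A minimal sketch, assuming a standard distribution layout with a prepared `conf/zoo.cfg`:

```bash
# start a standalone ZooKeeper server and verify that it is running
bin/zkServer.sh start
bin/zkServer.sh status
```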
|
| 486 |
+
#### 2. Install Java and Maven
|
| 487 |
+
|
| 488 |
+
#### 3. Set Up YCSB
|
| 489 |
+
|
| 490 |
+
Git clone YCSB and compile:
|
| 491 |
+
|
| 492 |
+
git clone http://github.com/brianfrankcooper/YCSB.git
|
| 493 |
+
# see the landing page for instructions on downloading YCSB: https://github.com/brianfrankcooper/YCSB#getting-started
|
| 494 |
+
cd YCSB
|
| 495 |
+
mvn -pl site.ycsb:zookeeper-binding -am clean package -DskipTests
|
| 496 |
+
|
| 497 |
+
#### 4. Provide ZooKeeper Connection Parameters
|
| 498 |
+
|
| 499 |
+
Set connectString, sessionTimeout and watchFlag in the workload you plan to run:
|
| 500 |
+
|
| 501 |
+
- `zookeeper.connectString`
|
| 502 |
+
- `zookeeper.sessionTimeout`
|
| 503 |
+
- `zookeeper.watchFlag`
|
| 504 |
+
* A parameter for enabling ZooKeeper watches; valid values: true or false. The default value is false.
|
| 505 |
+
* This parameter does not benchmark watch performance itself; it is for testing what effect enabling watches has on read/write requests.
|
| 506 |
+
|
| 507 |
+
```bash
|
| 508 |
+
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p zookeeper.watchFlag=true
|
| 509 |
+
```
|
| 510 |
+
|
| 511 |
+
Or, you can set configs on the shell command line, e.g.:
|
| 512 |
+
|
| 513 |
+
# create a /benchmark namespace for the sake of cleaning up the workspace after the test.
|
| 514 |
+
# e.g. in the CLI: create /benchmark
|
| 515 |
+
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p zookeeper.sessionTimeout=30000
|
| 516 |
+
|
| 517 |
+
#### 5. Load data and run tests
|
| 518 |
+
|
| 519 |
+
Load the data:
|
| 520 |
+
|
| 521 |
+
# -p recordcount: the count of records/paths you want to insert
|
| 522 |
+
./bin/ycsb load zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p recordcount=10000 > outputLoad.txt
|
| 523 |
+
|
| 524 |
+
Run the workload test:
|
| 525 |
+
|
| 526 |
+
# YCSB workloadb (read-heavy) is the workload most representative of real-world ZooKeeper usage.
|
| 527 |
+
|
| 528 |
+
# -p fieldlength: test how the length of the value/data content affects performance
|
| 529 |
+
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p fieldlength=1000
|
| 530 |
+
|
| 531 |
+
# -p fieldcount
|
| 532 |
+
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p fieldcount=20
|
| 533 |
+
|
| 534 |
+
# -p hdrhistogram.percentiles: show the hdrhistogram benchmark result
|
| 535 |
+
./bin/ycsb run zookeeper -threads 1 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p hdrhistogram.percentiles=10,25,50,75,90,95,99,99.9 -p histogram.buckets=500
|
| 536 |
+
|
| 537 |
+
# -threads: multi-client test; increase maxClientCnxns in zoo.cfg to handle more connections.
|
| 538 |
+
./bin/ycsb run zookeeper -threads 10 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark
|
| 539 |
+
|
| 540 |
+
# show the timeseries benchmark result
|
| 541 |
+
./bin/ycsb run zookeeper -threads 1 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p measurementtype=timeseries -p timeseries.granularity=50
|
| 542 |
+
|
| 543 |
+
# cluster test
|
| 544 |
+
./bin/ycsb run zookeeper -P workloads/workloadb -p zookeeper.connectString=192.168.10.43:2181,192.168.10.45:2181,192.168.10.27:2181/benchmark
|
| 545 |
+
|
| 546 |
+
# test the leader's read/write performance by setting zookeeper.connectString to the leader's address (192.168.10.43:2181)
|
| 547 |
+
./bin/ycsb run zookeeper -P workloads/workloadb -p zookeeper.connectString=192.168.10.43:2181/benchmark
|
| 548 |
+
|
| 549 |
+
# test large znodes (by default jute.maxbuffer is 1048575 bytes / ~1 MB). Notice: jute.maxbuffer must be set to the same value on all the zk servers.
|
| 550 |
+
./bin/ycsb run zookeeper -jvm-args="-Djute.maxbuffer=4194304" -s -P workloads/workloadc -p zookeeper.connectString=127.0.0.1:2181/benchmark
|
| 551 |
+
|
| 552 |
+
# Cleaning up the workspace after finishing the benchmark.
|
| 553 |
+
# e.g. in the CLI: deleteall /benchmark
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
<a name="zk-smoketest"></a>
|
| 557 |
+
|
| 558 |
+
### zk-smoketest
|
| 559 |
+
|
| 560 |
+
**zk-smoketest** provides a simple smoketest client for a ZooKeeper ensemble. It is useful for verifying new, updated, and
|
| 561 |
+
existing installations. More details are [here](https://github.com/phunt/zk-smoketest).
|
| 562 |
+
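A hypothetical invocation sketch; the script name and flag are taken from the project's README linked above, so check the repository for the exact interface and required environment setup:

```bash
# run the smoketest against a local server (interface assumed from the zk-smoketest README)
./zk-smoketest.py --servers "localhost:2181"
```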
|
| 563 |
+
|
| 564 |
+
<a name="Testing"></a>
|
| 565 |
+
|
| 566 |
+
## Testing
|
| 567 |
+
|
| 568 |
+
<a name="fault-injection"></a>
|
| 569 |
+
|
| 570 |
+
### Fault Injection Framework
|
| 571 |
+
|
| 572 |
+
<a name="Byteman"></a>
|
| 573 |
+
|
| 574 |
+
#### Byteman
|
| 575 |
+
|
| 576 |
+
- **Byteman** is a tool which makes it easy to trace, monitor and test the behaviour of Java applications and JDK runtime code.
|
| 577 |
+
It injects Java code into your application methods or into Java runtime methods without the need for you to recompile, repackage or even redeploy your application.
|
| 578 |
+
Injection can be performed at JVM startup or after startup while the application is still running.
|
| 579 |
+
- Visit the official [website](https://byteman.jboss.org/) to download the latest release
|
| 580 |
+
- A brief tutorial can be found [here](https://developer.jboss.org/wiki/ABytemanTutorial)
|
| 581 |
+
|
| 582 |
+
```bash
|
| 583 |
+
# Preparations:
|
| 584 |
+
# attach byteman to 3 zk servers at runtime
|
| 585 |
+
# 55001,55002,55003 are the byteman binding ports; 714,740,758 are the zk server pids
|
| 586 |
+
./bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -p 55001 714
|
| 587 |
+
./bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -p 55002 740
|
| 588 |
+
./bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -p 55003 758
|
| 589 |
+
|
| 590 |
+
# load the fault injection script
|
| 591 |
+
./bmsubmit.sh -p 55002 -l my_zk_fault_injection.btm
|
| 592 |
+
# unload the fault injection script
|
| 593 |
+
./bmsubmit.sh -p 55002 -u my_zk_fault_injection.btm
|
| 594 |
+
```
|
| 595 |
+
|
| 596 |
+
Look at the examples below to customize your Byteman fault injection script.
|
| 597 |
+
|
| 598 |
+
Example 1: This script makes the leader's zxid roll over, forcing re-election.
|
| 599 |
+
|
| 600 |
+
```bash
|
| 601 |
+
cat zk_leader_zxid_roll_over.btm
|
| 602 |
+
|
| 603 |
+
RULE trace zk_leader_zxid_roll_over
|
| 604 |
+
CLASS org.apache.zookeeper.server.quorum.Leader
|
| 605 |
+
METHOD propose
|
| 606 |
+
IF true
|
| 607 |
+
DO
|
| 608 |
+
traceln("*** Leader zxid has rolled over, forcing re-election ***");
|
| 609 |
+
$1.zxid = 4294967295L
|
| 610 |
+
ENDRULE
|
| 611 |
+
```
|
| 612 |
+
|
| 613 |
+
Example 2: This script makes the leader drop the ping packet to a specific follower.
|
| 614 |
+
The leader will close the **LearnerHandler** for that follower, and the follower will enter the LOOKING state,
|
| 615 |
+
then re-enter the quorum in the FOLLOWING state.
|
| 616 |
+
|
| 617 |
+
```bash
|
| 618 |
+
cat zk_leader_drop_ping_packet.btm
|
| 619 |
+
|
| 620 |
+
RULE trace zk_leader_drop_ping_packet
|
| 621 |
+
CLASS org.apache.zookeeper.server.quorum.LearnerHandler
|
| 622 |
+
METHOD ping
|
| 623 |
+
AT ENTRY
|
| 624 |
+
IF $0.sid == 2
|
| 625 |
+
DO
|
| 626 |
+
traceln("*** Leader drops ping packet to sid: 2 ***");
|
| 627 |
+
return;
|
| 628 |
+
ENDRULE
|
| 629 |
+
```
|
| 630 |
+
|
| 631 |
+
Example 3: This script makes one follower drop ACK packets, which has little effect in the broadcast phase, since after receiving
|
| 632 |
+
ACKs from a majority of the followers, the leader can commit that proposal.
|
| 633 |
+
|
| 634 |
+
```bash
|
| 635 |
+
cat zk_follower_drop_ack_packet.btm
|
| 636 |
+
|
| 637 |
+
RULE trace zk.follower_drop_ack_packet
|
| 638 |
+
CLASS org.apache.zookeeper.server.quorum.SendAckRequestProcessor
|
| 639 |
+
METHOD processRequest
|
| 640 |
+
AT ENTRY
|
| 641 |
+
IF true
|
| 642 |
+
DO
|
| 643 |
+
traceln("*** Follower drops ACK packet ***");
|
| 644 |
+
return;
|
| 645 |
+
ENDRULE
|
| 646 |
+
```
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
<a name="jepsen-test"></a>
|
| 650 |
+
|
| 651 |
+
### Jepsen Test
|
| 652 |
+
A framework for distributed systems verification, with fault injection.
|
| 653 |
+
Jepsen has been used to verify everything from eventually-consistent commutative databases to linearizable coordination systems to distributed task schedulers.
|
| 654 |
+
More details can be found at [jepsen-io](https://github.com/jepsen-io/jepsen).
|
| 655 |
+
|
| 656 |
+
Running the [Dockerized Jepsen](https://github.com/jepsen-io/jepsen/blob/master/docker/README.md) is the simplest way to use Jepsen.
|
| 657 |
+
|
| 658 |
+
Installation:
|
| 659 |
+
|
| 660 |
+
```bash
|
| 661 |
+
git clone git@github.com:jepsen-io/jepsen.git
|
| 662 |
+
cd jepsen/docker
|
| 663 |
+
# the first init may take a long time.
|
| 664 |
+
./up.sh
|
| 665 |
+
# use docker ps to check that one control node and five db nodes are up
|
| 666 |
+
docker ps
|
| 667 |
+
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
| 668 |
+
8265f1d3f89c docker_control "/bin/sh -c /init.sh" 9 hours ago Up 4 hours 0.0.0.0:32769->8080/tcp jepsen-control
|
| 669 |
+
8a646102da44 docker_n5 "/run.sh" 9 hours ago Up 3 hours 22/tcp jepsen-n5
|
| 670 |
+
385454d7e520 docker_n1 "/run.sh" 9 hours ago Up 9 hours 22/tcp jepsen-n1
|
| 671 |
+
a62d6a9d5f8e docker_n2 "/run.sh" 9 hours ago Up 9 hours 22/tcp jepsen-n2
|
| 672 |
+
1485e89d0d9a docker_n3 "/run.sh" 9 hours ago Up 9 hours 22/tcp jepsen-n3
|
| 673 |
+
27ae01e1a0c5 docker_node "/run.sh" 9 hours ago Up 9 hours 22/tcp jepsen-node
|
| 674 |
+
53c444b00ebd docker_n4 "/run.sh" 9 hours ago Up 9 hours 22/tcp jepsen-n4
|
| 675 |
+
```
|
| 676 |
+
|
| 677 |
+
Running & Testing
|
| 678 |
+
|
| 679 |
+
```bash
|
| 680 |
+
# Enter the jepsen-control container
|
| 681 |
+
docker exec -it jepsen-control bash
|
| 682 |
+
# Test
|
| 683 |
+
cd zookeeper && lein run test --concurrency 10
|
| 684 |
+
# Output like the following indicates that ZooKeeper has passed the Jepsen test
|
| 685 |
+
INFO [2019-04-01 11:25:23,719] jepsen worker 8 - jepsen.util 8 :ok :read 2
|
| 686 |
+
INFO [2019-04-01 11:25:23,722] jepsen worker 3 - jepsen.util 3 :invoke :cas [0 4]
|
| 687 |
+
INFO [2019-04-01 11:25:23,760] jepsen worker 3 - jepsen.util 3 :fail :cas [0 4]
|
| 688 |
+
INFO [2019-04-01 11:25:23,791] jepsen worker 1 - jepsen.util 1 :invoke :read nil
|
| 689 |
+
INFO [2019-04-01 11:25:23,794] jepsen worker 1 - jepsen.util 1 :ok :read 2
|
| 690 |
+
INFO [2019-04-01 11:25:24,038] jepsen worker 0 - jepsen.util 0 :invoke :write 4
|
| 691 |
+
INFO [2019-04-01 11:25:24,073] jepsen worker 0 - jepsen.util 0 :ok :write 4
|
| 692 |
+
...............................................................................
|
| 693 |
+
Everything looks good! ヽ(‘ー`)ノ
|
| 694 |
+
|
| 695 |
+
```
|
| 696 |
+
|
| 697 |
+
Reference:
|
| 698 |
+
Read [this blog](https://aphyr.com/posts/291-call-me-maybe-zookeeper) to learn more about the Jepsen test for ZooKeeper.
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperTutorial.md
ADDED
|
@@ -0,0 +1,666 @@
|
| 1 |
+
<!--
|
| 2 |
+
Copyright 2002-2004 The Apache Software Foundation
|
| 3 |
+
|
| 4 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
you may not use this file except in compliance with the License.
|
| 6 |
+
You may obtain a copy of the License at
|
| 7 |
+
|
| 8 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
|
| 10 |
+
Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
See the License for the specific language governing permissions and
|
| 14 |
+
limitations under the License.
|
| 15 |
+
//-->
|
| 16 |
+
|
| 17 |
+
# Programming with ZooKeeper - A basic tutorial
|
| 18 |
+
|
| 19 |
+
* [Introduction](#ch_Introduction)
|
| 20 |
+
* [Barriers](#sc_barriers)
|
| 21 |
+
* [Producer-Consumer Queues](#sc_producerConsumerQueues)
|
| 22 |
+
* [Complete example](#Complete+example)
|
| 23 |
+
* [Queue test](#Queue+test)
|
| 24 |
+
* [Barrier test](#Barrier+test)
|
| 25 |
+
* [Source Listing](#sc_sourceListing)
|
| 26 |
+
|
| 27 |
+
<a name="ch_Introduction"></a>
|
| 28 |
+
|
| 29 |
+
## Introduction
|
| 30 |
+
|
| 31 |
+
In this tutorial, we show simple implementations of barriers and
|
| 32 |
+
producer-consumer queues using ZooKeeper. We call the respective classes Barrier and Queue.
|
| 33 |
+
These examples assume that you have at least one ZooKeeper server running.
|
| 34 |
+
|
| 35 |
+
Both primitives use the following common excerpt of code:
|
| 36 |
+
|
| 37 |
+
static ZooKeeper zk = null;
|
| 38 |
+
static Integer mutex;
|
| 39 |
+
|
| 40 |
+
String root;
|
| 41 |
+
|
| 42 |
+
SyncPrimitive(String address) {
|
| 43 |
+
if(zk == null){
|
| 44 |
+
try {
|
| 45 |
+
System.out.println("Starting ZK:");
|
| 46 |
+
zk = new ZooKeeper(address, 3000, this);
|
| 47 |
+
mutex = new Integer(-1);
|
| 48 |
+
System.out.println("Finished starting ZK: " + zk);
|
| 49 |
+
} catch (IOException e) {
|
| 50 |
+
System.out.println(e.toString());
|
| 51 |
+
zk = null;
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
synchronized public void process(WatchedEvent event) {
|
| 57 |
+
synchronized (mutex) {
|
| 58 |
+
mutex.notify();
|
| 59 |
+
}
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
Both classes extend SyncPrimitive. In this way, we execute steps that are
|
| 65 |
+
common to all primitives in the constructor of SyncPrimitive. To keep the examples
|
| 66 |
+
simple, we create a ZooKeeper object the first time we instantiate either a barrier
|
| 67 |
+
object or a queue object, and we declare a static variable that is a reference
|
| 68 |
+
to this object. The subsequent instances of Barrier and Queue check whether a
|
| 69 |
+
ZooKeeper object exists. Alternatively, we could have the application create a
|
| 70 |
+
ZooKeeper object and passing it to the constructor of Barrier and Queue.
|
| 71 |
+
|
| 72 |
+
We use the process() method to process notifications triggered due to watches.
|
| 73 |
+
In the following discussion, we present code that sets watches. A watch is an internal
|
| 74 |
+
structure that enables ZooKeeper to notify a client of a change to a node. For example,
|
| 75 |
+
if a client is waiting for other clients to leave a barrier, then it can set a watch and
|
| 76 |
+
wait for modifications to a particular node, which can indicate that it is the end of the wait.
|
| 77 |
+
This point becomes clear once we go over the examples.
|
| 78 |
+
|
| 79 |
+
<a name="sc_barriers"></a>
|
| 80 |
+
|
| 81 |
+
## Barriers
|
| 82 |
+
|
| 83 |
+
A barrier is a primitive that enables a group of processes to synchronize the
|
| 84 |
+
beginning and the end of a computation. The general idea of this implementation
|
| 85 |
+
is to have a barrier node that serves the purpose of being a parent for individual
|
| 86 |
+
process nodes. Suppose that we call the barrier node "/b1". Each process "p" then
|
| 87 |
+
creates a node "/b1/p". Once enough processes have created their corresponding
|
| 88 |
+
nodes, joined processes can start the computation.
|
| 89 |
+
|
| 90 |
+
In this example, each process instantiates a Barrier object, and its constructor takes as parameters:
|
| 91 |
+
|
| 92 |
+
* the address of a ZooKeeper server (e.g., "zoo1.foo.com:2181")
|
| 93 |
+
* the path of the barrier node on ZooKeeper (e.g., "/b1")
|
| 94 |
+
* the size of the group of processes
|
| 95 |
+
|
| 96 |
+
The constructor of Barrier passes the address of the Zookeeper server to the
|
| 97 |
+
constructor of the parent class. The parent class creates a ZooKeeper instance if
|
| 98 |
+
one does not exist. The constructor of Barrier then creates a
|
| 99 |
+
barrier node on ZooKeeper, which is the parent node of all process nodes, and
|
| 100 |
+
which we call root (**Note:** this is not the ZooKeeper root "/").
|
| 101 |
+
|
| 102 |
+
/**
|
| 103 |
+
* Barrier constructor
|
| 104 |
+
*
|
| 105 |
+
* @param address
|
| 106 |
+
* @param root
|
| 107 |
+
* @param size
|
| 108 |
+
*/
|
| 109 |
+
Barrier(String address, String root, int size) {
|
| 110 |
+
super(address);
|
| 111 |
+
this.root = root;
|
| 112 |
+
this.size = size;
|
| 113 |
+
// Create barrier node
|
| 114 |
+
if (zk != null) {
|
| 115 |
+
try {
|
| 116 |
+
Stat s = zk.exists(root, false);
|
| 117 |
+
if (s == null) {
|
| 118 |
+
zk.create(root, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 119 |
+
CreateMode.PERSISTENT);
|
| 120 |
+
}
|
| 121 |
+
} catch (KeeperException e) {
|
| 122 |
+
System.out
|
| 123 |
+
.println("Keeper exception when instantiating queue: "
|
| 124 |
+
+ e.toString());
|
| 125 |
+
} catch (InterruptedException e) {
|
| 126 |
+
System.out.println("Interrupted exception");
|
| 127 |
+
}
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
// My node name
|
| 131 |
+
try {
|
| 132 |
+
name = new String(InetAddress.getLocalHost().getCanonicalHostName().toString());
|
| 133 |
+
} catch (UnknownHostException e) {
|
| 134 |
+
System.out.println(e.toString());
|
| 135 |
+
}
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
To enter the barrier, a process calls enter(). The process creates a node under
|
| 140 |
+
the root to represent it, using its host name to form the node name. It then waits
|
| 141 |
+
until enough processes have entered the barrier. A process does this by checking
|
| 142 |
+
the number of children the root node has with "getChildren()", and waiting for
|
| 143 |
+
notifications in case there are not enough. To receive a notification when
|
| 144 |
+
there is a change to the root node, a process has to set a watch, and does it
|
| 145 |
+
through the call to "getChildren()". In the code, "getChildren()"
|
| 146 |
+
has two parameters. The first one states the node to read from, and the second is
|
| 147 |
+
a boolean flag that enables the process to set a watch. In the code the flag is true.
|
| 148 |
+
|
| 149 |
+
/**
|
| 150 |
+
* Join barrier
|
| 151 |
+
*
|
| 152 |
+
* @return
|
| 153 |
+
* @throws KeeperException
|
| 154 |
+
* @throws InterruptedException
|
| 155 |
+
*/
|
| 156 |
+
|
| 157 |
+
boolean enter() throws KeeperException, InterruptedException{
|
| 158 |
+
zk.create(root + "/" + name, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 159 |
+
CreateMode.EPHEMERAL);
|
| 160 |
+
while (true) {
|
| 161 |
+
synchronized (mutex) {
|
| 162 |
+
List<String> list = zk.getChildren(root, true);
|
| 163 |
+
|
| 164 |
+
if (list.size() < size) {
|
| 165 |
+
mutex.wait();
|
| 166 |
+
} else {
|
| 167 |
+
return true;
|
| 168 |
+
}
|
| 169 |
+
}
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
Note that enter() throws both KeeperException and InterruptedException, so it is
|
| 175 |
+
the responsibility of the application to catch and handle such exceptions.
|
| 176 |
+
|
| 177 |
+
Once the computation is finished, a process calls leave() to leave the barrier.
|
| 178 |
+
First it deletes its corresponding node, and then it gets the children of the root
|
| 179 |
+
node. If there is at least one child, then it waits for a notification (note
|
| 180 |
+
that the second parameter of the call to getChildren() is true, meaning that
|
| 181 |
+
ZooKeeper has to set a watch on the root node). Upon reception of a notification,
|
| 182 |
+
it checks once more whether the root node has any children.
|
| 183 |
+
|
| 184 |
+
/**
|
| 185 |
+
* Wait until all reach barrier
|
| 186 |
+
*
|
| 187 |
+
* @return
|
| 188 |
+
* @throws KeeperException
|
| 189 |
+
* @throws InterruptedException
|
| 190 |
+
*/
|
| 191 |
+
|
| 192 |
+
boolean leave() throws KeeperException, InterruptedException {
|
| 193 |
+
zk.delete(root + "/" + name, 0);
|
| 194 |
+
while (true) {
|
| 195 |
+
synchronized (mutex) {
|
| 196 |
+
List<String> list = zk.getChildren(root, true);
|
| 197 |
+
if (list.size() > 0) {
|
| 198 |
+
mutex.wait();
|
| 199 |
+
} else {
|
| 200 |
+
return true;
|
| 201 |
+
}
|
| 202 |
+
}
|
| 203 |
+
}
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
<a name="sc_producerConsumerQueues"></a>
|
| 208 |
+
|
| 209 |
+
## Producer-Consumer Queues
|
| 210 |
+
|
| 211 |
+
A producer-consumer queue is a distributed data structure that groups of processes
|
| 212 |
+
use to generate and consume items. Producer processes create new elements and add
|
| 213 |
+
them to the queue. Consumer processes remove elements from the queue, and process them.
|
| 214 |
+
In this implementation, the elements are simple integers. The queue is represented
|
| 215 |
+
by a root node, and to add an element to the queue, a producer process creates a new node,
|
| 216 |
+
a child of the root node.
|
| 217 |
+
|
| 218 |
+
The following excerpt of code corresponds to the constructor of the object. As
|
| 219 |
+
with Barrier objects, it first calls the constructor of the parent class, SyncPrimitive,
|
| 220 |
+
which creates a ZooKeeper object if one doesn't exist. It then verifies whether the root
|
| 221 |
+
node of the queue exists, and creates it if it doesn't.
|
| 222 |
+
|
| 223 |
+
/**
|
| 224 |
+
* Constructor of producer-consumer queue
|
| 225 |
+
*
|
| 226 |
+
* @param address
|
| 227 |
+
* @param name
|
| 228 |
+
*/
|
| 229 |
+
Queue(String address, String name) {
|
| 230 |
+
super(address);
|
| 231 |
+
this.root = name;
|
| 232 |
+
// Create ZK node name
|
| 233 |
+
if (zk != null) {
|
| 234 |
+
try {
|
| 235 |
+
Stat s = zk.exists(root, false);
|
| 236 |
+
if (s == null) {
|
| 237 |
+
zk.create(root, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 238 |
+
CreateMode.PERSISTENT);
|
| 239 |
+
}
|
| 240 |
+
} catch (KeeperException e) {
|
| 241 |
+
System.out
|
| 242 |
+
.println("Keeper exception when instantiating queue: "
|
| 243 |
+
+ e.toString());
|
| 244 |
+
} catch (InterruptedException e) {
|
| 245 |
+
System.out.println("Interrupted exception");
|
| 246 |
+
}
|
| 247 |
+
}
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
A producer process calls "produce()" to add an element to the queue, and passes
|
| 252 |
+
an integer as an argument. To add an element to the queue, the method creates a
|
| 253 |
+
new node using "create()", and uses the SEQUENCE flag to instruct ZooKeeper to
|
| 254 |
+
append the value of the sequence counter associated with the root node. In this way,
|
| 255 |
+
we impose a total order on the elements of the queue, thus guaranteeing that the
|
| 256 |
+
oldest element of the queue is the next one consumed.
|
| 257 |
+
|
| 258 |
+
/**
|
| 259 |
+
* Add element to the queue.
|
| 260 |
+
*
|
| 261 |
+
* @param i
|
| 262 |
+
* @return
|
| 263 |
+
*/
|
| 264 |
+
|
| 265 |
+
boolean produce(int i) throws KeeperException, InterruptedException{
|
| 266 |
+
ByteBuffer b = ByteBuffer.allocate(4);
|
| 267 |
+
byte[] value;
|
| 268 |
+
|
| 269 |
+
// Add child with value i
|
| 270 |
+
b.putInt(i);
|
| 271 |
+
value = b.array();
|
| 272 |
+
zk.create(root + "/element", value, Ids.OPEN_ACL_UNSAFE,
|
| 273 |
+
CreateMode.PERSISTENT_SEQUENTIAL);
|
| 274 |
+
|
| 275 |
+
return true;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
To consume an element, a consumer process obtains the children of the root node,
|
| 280 |
+
reads the node with the smallest counter value, and returns the element. Note that
|
| 281 |
+
if there is a conflict, then one of the two contending processes won't be able to
|
| 282 |
+
delete the node and the delete operation will throw an exception.
|
| 283 |
+
|
| 284 |
+
A call to getChildren() returns the list of children in lexicographic order.
|
| 285 |
+
As lexicographic order does not necessarily follow the numerical order of the counter
|
| 286 |
+
values, we need to decide which element is the smallest. To decide which one has
|
| 287 |
+
the smallest counter value, we traverse the list, and remove the prefix "element"
|
| 288 |
+
from each one.
|
| 289 |
+
|
| 290 |
+
/**
|
| 291 |
+
* Remove first element from the queue.
|
| 292 |
+
*
|
| 293 |
+
* @return
|
| 294 |
+
* @throws KeeperException
|
| 295 |
+
* @throws InterruptedException
|
| 296 |
+
*/
|
| 297 |
+
int consume() throws KeeperException, InterruptedException{
|
| 298 |
+
int retvalue = -1;
|
| 299 |
+
Stat stat = null;
|
| 300 |
+
|
| 301 |
+
// Get the first element available
|
| 302 |
+
while (true) {
|
| 303 |
+
synchronized (mutex) {
|
| 304 |
+
List<String> list = zk.getChildren(root, true);
|
| 305 |
+
if (list.size() == 0) {
|
| 306 |
+
System.out.println("Going to wait");
|
| 307 |
+
mutex.wait();
|
| 308 |
+
} else {
|
| 309 |
+
                    Integer min = new Integer(list.get(0).substring(7));
                    String minNode = list.get(0);
|
| 310 |
+
for(String s : list){
|
| 311 |
+
Integer tempValue = new Integer(s.substring(7));
|
| 312 |
+
//System.out.println("Temporary value: " + tempValue);
|
| 313 |
+
                        if(tempValue < min) {
                            min = tempValue;
                            minNode = s;
                        }
|
| 314 |
+
}
|
| 315 |
+
System.out.println("Temporary value: " + root + "/element" + min);
|
| 316 |
+
                    byte[] b = zk.getData(root + "/" + minNode,
|
| 317 |
+
false, stat);
|
| 318 |
+
                    zk.delete(root + "/" + minNode, 0);
|
| 319 |
+
ByteBuffer buffer = ByteBuffer.wrap(b);
|
| 320 |
+
retvalue = buffer.getInt();
|
| 321 |
+
|
| 322 |
+
return retvalue;
|
| 323 |
+
}
|
| 324 |
+
}
|
| 325 |
+
}
|
| 326 |
+
}
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
<a name="Complete+example"></a>
|
| 331 |
+
|
| 332 |
+
## Complete example
|
| 333 |
+
|
| 334 |
+
In the following section you can find a complete command line application that demonstrates the above-mentioned
|
| 335 |
+
recipes. Use the following command to run it.
|
| 336 |
+
|
| 337 |
+
ZOOBINDIR="[path_to_distro]/bin"
|
| 338 |
+
. "$ZOOBINDIR"/zkEnv.sh
|
| 339 |
+
java SyncPrimitive [Test Type] [ZK server] [No of elements] [Client type]
|
| 340 |
+
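If you need to compile the example first, a sketch like the following should work, assuming zkEnv.sh has populated CLASSPATH with the ZooKeeper client jars:

    javac -cp "$CLASSPATH" SyncPrimitive.java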
|
| 341 |
+
<a name="Queue+test"></a>
|
| 342 |
+
|
| 343 |
+
### Queue test
|
| 344 |
+
|
| 345 |
+
Start a producer to create 100 elements
|
| 346 |
+
|
| 347 |
+
java SyncPrimitive qTest localhost 100 p
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
Start a consumer to consume 100 elements
|
| 351 |
+
|
| 352 |
+
java SyncPrimitive qTest localhost 100 c
|
| 353 |
+
|
| 354 |
+
<a name="Barrier+test"></a>
|
| 355 |
+
|
| 356 |
+
### Barrier test
|
| 357 |
+
|
| 358 |
+
Start a barrier with 2 participants (run it as many times as the number of participants you'd like to enter)
|
| 359 |
+
|
| 360 |
+
java SyncPrimitive bTest localhost 2
|
| 361 |
+
|
| 362 |
+
<a name="sc_sourceListing"></a>
|
| 363 |
+
|
| 364 |
+
### Source Listing
|
| 365 |
+
|
| 366 |
+
#### SyncPrimitive.java
|
| 367 |
+
|
| 368 |
+
import java.io.IOException;
|
| 369 |
+
import java.net.InetAddress;
|
| 370 |
+
import java.net.UnknownHostException;
|
| 371 |
+
import java.nio.ByteBuffer;
|
| 372 |
+
import java.util.List;
|
| 373 |
+
import java.util.Random;
|
| 374 |
+
|
| 375 |
+
import org.apache.zookeeper.CreateMode;
|
| 376 |
+
import org.apache.zookeeper.KeeperException;
|
| 377 |
+
import org.apache.zookeeper.WatchedEvent;
|
| 378 |
+
import org.apache.zookeeper.Watcher;
|
| 379 |
+
import org.apache.zookeeper.ZooKeeper;
|
| 380 |
+
import org.apache.zookeeper.ZooDefs.Ids;
|
| 381 |
+
import org.apache.zookeeper.data.Stat;
|
| 382 |
+
|
| 383 |
+
public class SyncPrimitive implements Watcher {
|
| 384 |
+
|
| 385 |
+
static ZooKeeper zk = null;
|
| 386 |
+
static Integer mutex;
|
| 387 |
+
String root;
|
| 388 |
+
|
| 389 |
+
SyncPrimitive(String address) {
|
| 390 |
+
if(zk == null){
|
| 391 |
+
try {
|
| 392 |
+
System.out.println("Starting ZK:");
|
| 393 |
+
zk = new ZooKeeper(address, 3000, this);
|
| 394 |
+
mutex = new Integer(-1);
|
| 395 |
+
System.out.println("Finished starting ZK: " + zk);
|
| 396 |
+
} catch (IOException e) {
|
| 397 |
+
System.out.println(e.toString());
|
| 398 |
+
zk = null;
|
| 399 |
+
}
|
| 400 |
+
}
|
| 401 |
+
//else mutex = new Integer(-1);
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
synchronized public void process(WatchedEvent event) {
|
| 405 |
+
synchronized (mutex) {
|
| 406 |
+
//System.out.println("Process: " + event.getType());
|
| 407 |
+
mutex.notify();
|
| 408 |
+
}
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
/**
|
| 412 |
+
* Barrier
|
| 413 |
+
*/
|
| 414 |
+
static public class Barrier extends SyncPrimitive {
|
| 415 |
+
int size;
|
| 416 |
+
String name;
|
| 417 |
+
|
| 418 |
+
/**
|
| 419 |
+
* Barrier constructor
|
| 420 |
+
*
|
| 421 |
+
* @param address
|
| 422 |
+
* @param root
|
| 423 |
+
* @param size
|
| 424 |
+
*/
|
| 425 |
+
Barrier(String address, String root, int size) {
|
| 426 |
+
super(address);
|
| 427 |
+
this.root = root;
|
| 428 |
+
this.size = size;
|
| 429 |
+
|
| 430 |
+
// Create barrier node
|
| 431 |
+
if (zk != null) {
|
| 432 |
+
try {
|
| 433 |
+
Stat s = zk.exists(root, false);
|
| 434 |
+
if (s == null) {
|
| 435 |
+
zk.create(root, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 436 |
+
CreateMode.PERSISTENT);
|
| 437 |
+
}
|
| 438 |
+
} catch (KeeperException e) {
|
| 439 |
+
System.out
|
| 440 |
+
.println("Keeper exception when instantiating queue: "
|
| 441 |
+
+ e.toString());
|
| 442 |
+
} catch (InterruptedException e) {
|
| 443 |
+
System.out.println("Interrupted exception");
|
| 444 |
+
}
|
| 445 |
+
}
|
| 446 |
+
|
| 447 |
+
// My node name
|
| 448 |
+
try {
|
| 449 |
+
name = new String(InetAddress.getLocalHost().getCanonicalHostName().toString());
|
| 450 |
+
} catch (UnknownHostException e) {
|
| 451 |
+
System.out.println(e.toString());
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
/**
|
| 457 |
+
* Join barrier
|
| 458 |
+
*
|
| 459 |
+
* @return
|
| 460 |
+
* @throws KeeperException
|
| 461 |
+
* @throws InterruptedException
|
| 462 |
+
*/
|
| 463 |
+
|
| 464 |
+
boolean enter() throws KeeperException, InterruptedException{
|
| 465 |
+
zk.create(root + "/" + name, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 466 |
+
CreateMode.EPHEMERAL);
|
| 467 |
+
while (true) {
|
| 468 |
+
synchronized (mutex) {
|
| 469 |
+
List<String> list = zk.getChildren(root, true);
|
| 470 |
+
|
| 471 |
+
if (list.size() < size) {
|
| 472 |
+
mutex.wait();
|
| 473 |
+
} else {
|
| 474 |
+
return true;
|
| 475 |
+
}
|
| 476 |
+
}
|
| 477 |
+
}
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
/**
|
| 481 |
+
* Wait until all reach barrier
|
| 482 |
+
*
|
| 483 |
+
* @return
|
| 484 |
+
* @throws KeeperException
|
| 485 |
+
* @throws InterruptedException
|
| 486 |
+
*/
|
| 487 |
+
boolean leave() throws KeeperException, InterruptedException{
|
| 488 |
+
zk.delete(root + "/" + name, 0);
|
| 489 |
+
while (true) {
|
| 490 |
+
synchronized (mutex) {
|
| 491 |
+
List<String> list = zk.getChildren(root, true);
|
| 492 |
+
if (list.size() > 0) {
|
| 493 |
+
mutex.wait();
|
| 494 |
+
} else {
|
| 495 |
+
return true;
|
| 496 |
+
}
|
| 497 |
+
}
|
| 498 |
+
}
|
| 499 |
+
}
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
/**
|
| 503 |
+
* Producer-Consumer queue
|
| 504 |
+
*/
|
| 505 |
+
static public class Queue extends SyncPrimitive {
|
| 506 |
+
|
| 507 |
+
/**
|
| 508 |
+
* Constructor of producer-consumer queue
|
| 509 |
+
*
|
| 510 |
+
* @param address
|
| 511 |
+
* @param name
|
| 512 |
+
*/
|
| 513 |
+
Queue(String address, String name) {
|
| 514 |
+
super(address);
|
| 515 |
+
this.root = name;
|
| 516 |
+
// Create ZK node name
|
| 517 |
+
if (zk != null) {
|
| 518 |
+
try {
|
| 519 |
+
Stat s = zk.exists(root, false);
|
| 520 |
+
if (s == null) {
|
| 521 |
+
zk.create(root, new byte[0], Ids.OPEN_ACL_UNSAFE,
|
| 522 |
+
CreateMode.PERSISTENT);
|
| 523 |
+
}
|
| 524 |
+
} catch (KeeperException e) {
|
| 525 |
+
System.out
|
| 526 |
+
.println("Keeper exception when instantiating queue: "
|
| 527 |
+
+ e.toString());
|
| 528 |
+
} catch (InterruptedException e) {
|
| 529 |
+
System.out.println("Interrupted exception");
|
| 530 |
+
}
|
| 531 |
+
}
|
| 532 |
+
}
|
| 533 |
+
|
| 534 |
+
/**
|
| 535 |
+
* Add element to the queue.
|
| 536 |
+
*
|
| 537 |
+
* @param i
|
| 538 |
+
* @return
|
| 539 |
+
*/
|
| 540 |
+
|
| 541 |
+
boolean produce(int i) throws KeeperException, InterruptedException{
|
| 542 |
+
ByteBuffer b = ByteBuffer.allocate(4);
|
| 543 |
+
byte[] value;
|
| 544 |
+
|
| 545 |
+
// Add child with value i
|
| 546 |
+
b.putInt(i);
|
| 547 |
+
value = b.array();
|
| 548 |
+
zk.create(root + "/element", value, Ids.OPEN_ACL_UNSAFE,
|
| 549 |
+
CreateMode.PERSISTENT_SEQUENTIAL);
|
| 550 |
+
|
| 551 |
+
return true;
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
/**
|
| 555 |
+
* Remove first element from the queue.
|
| 556 |
+
*
|
| 557 |
+
* @return
|
| 558 |
+
* @throws KeeperException
|
| 559 |
+
* @throws InterruptedException
|
| 560 |
+
*/
|
| 561 |
+
int consume() throws KeeperException, InterruptedException{
|
| 562 |
+
int retvalue = -1;
|
| 563 |
+
Stat stat = null;
|
| 564 |
+
|
| 565 |
+
// Get the first element available
|
| 566 |
+
while (true) {
|
| 567 |
+
synchronized (mutex) {
|
| 568 |
+
List<String> list = zk.getChildren(root, true);
|
| 569 |
+
if (list.size() == 0) {
|
| 570 |
+
System.out.println("Going to wait");
|
| 571 |
+
mutex.wait();
|
| 572 |
+
} else {
|
| 573 |
+
Integer min = new Integer(list.get(0).substring(7));
|
| 574 |
+
String minNode = list.get(0);
|
| 575 |
+
for(String s : list){
|
| 576 |
+
Integer tempValue = new Integer(s.substring(7));
|
| 577 |
+
//System.out.println("Temporary value: " + tempValue);
|
| 578 |
+
if(tempValue < min) {
|
| 579 |
+
min = tempValue;
|
| 580 |
+
minNode = s;
|
| 581 |
+
}
|
| 582 |
+
}
|
| 583 |
+
System.out.println("Temporary value: " + root + "/" + minNode);
|
| 584 |
+
byte[] b = zk.getData(root + "/" + minNode,
|
| 585 |
+
false, stat);
|
| 586 |
+
zk.delete(root + "/" + minNode, 0);
|
| 587 |
+
ByteBuffer buffer = ByteBuffer.wrap(b);
|
| 588 |
+
retvalue = buffer.getInt();
|
| 589 |
+
|
| 590 |
+
return retvalue;
|
| 591 |
+
}
|
| 592 |
+
}
|
| 593 |
+
}
|
| 594 |
+
}
|
| 595 |
+
}
|
| 596 |
+
|
| 597 |
+
public static void main(String args[]) {
|
| 598 |
+
if (args[0].equals("qTest"))
|
| 599 |
+
queueTest(args);
|
| 600 |
+
else
|
| 601 |
+
barrierTest(args);
|
| 602 |
+
}
|
| 603 |
+
|
| 604 |
+
public static void queueTest(String args[]) {
|
| 605 |
+
Queue q = new Queue(args[1], "/app1");
|
| 606 |
+
|
| 607 |
+
System.out.println("Input: " + args[1]);
|
| 608 |
+
int i;
|
| 609 |
+
Integer max = new Integer(args[2]);
|
| 610 |
+
|
| 611 |
+
if (args[3].equals("p")) {
|
| 612 |
+
System.out.println("Producer");
|
| 613 |
+
for (i = 0; i < max; i++)
|
| 614 |
+
try{
|
| 615 |
+
q.produce(10 + i);
|
| 616 |
+
} catch (KeeperException e){
|
| 617 |
+
|
| 618 |
+
} catch (InterruptedException e){
|
| 619 |
+
|
| 620 |
+
}
|
| 621 |
+
} else {
|
| 622 |
+
System.out.println("Consumer");
|
| 623 |
+
|
| 624 |
+
for (i = 0; i < max; i++) {
|
| 625 |
+
try{
|
| 626 |
+
int r = q.consume();
|
| 627 |
+
System.out.println("Item: " + r);
|
| 628 |
+
} catch (KeeperException e){
|
| 629 |
+
i--;
|
| 630 |
+
} catch (InterruptedException e){
|
| 631 |
+
}
|
| 632 |
+
}
|
| 633 |
+
}
|
| 634 |
+
}
|
| 635 |
+
|
| 636 |
+
public static void barrierTest(String args[]) {
|
| 637 |
+
Barrier b = new Barrier(args[1], "/b1", new Integer(args[2]));
|
| 638 |
+
try{
|
| 639 |
+
boolean flag = b.enter();
|
| 640 |
+
System.out.println("Entered barrier: " + args[2]);
|
| 641 |
+
if(!flag) System.out.println("Error when entering the barrier");
|
| 642 |
+
} catch (KeeperException e){
|
| 643 |
+
} catch (InterruptedException e){
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
// Generate random integer
|
| 647 |
+
Random rand = new Random();
|
| 648 |
+
int r = rand.nextInt(100);
|
| 649 |
+
// Loop for rand iterations
|
| 650 |
+
for (int i = 0; i < r; i++) {
|
| 651 |
+
try {
|
| 652 |
+
Thread.sleep(100);
|
| 653 |
+
} catch (InterruptedException e) {
|
| 654 |
+
}
|
| 655 |
+
}
|
| 656 |
+
try{
|
| 657 |
+
b.leave();
|
| 658 |
+
} catch (KeeperException e){
|
| 659 |
+
|
| 660 |
+
} catch (InterruptedException e){
|
| 661 |
+
|
| 662 |
+
}
|
| 663 |
+
System.out.println("Left barrier");
|
| 664 |
+
}
|
| 665 |
+
}
|
| 666 |
+
|
local-test-zookeeper-delta-01/afc-zookeeper/zookeeper-docs/src/main/resources/markdown/zookeeperUseCases.md
ADDED
|
@@ -0,0 +1,385 @@
|
<!--
Copyright 2002-2021 The Apache Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
//-->

# ZooKeeper Use Cases

- Applications and organizations using ZooKeeper include (alphabetically) [1].
- If you would like your use case to be listed here, please do not hesitate: submit a pull request or write an email to **dev@zookeeper.apache.org**,
and your use case will be included.
- If this documentation violates your intellectual property rights or your or your company's privacy, write an email to **dev@zookeeper.apache.org**
and we will handle it in a timely manner.

## Free Software Projects

### [AdroitLogic UltraESB](http://adroitlogic.org/)
- Uses ZooKeeper to implement node coordination in its clustering support. This allows the management of the complete cluster,
or any specific node, from any other node connected via JMX. A cluster-wide command framework developed on top of the
ZooKeeper coordination allows commands that fail on some nodes to be retried, etc. We also support the automated graceful
round-robin restart of a complete cluster of nodes using the same framework [1].

### [Akka](http://akka.io/)
- Akka is a toolkit and runtime for building highly concurrent, distributed, and fault-tolerant event-driven applications on the JVM,
a platform for the next generation of event-driven, scalable and fault-tolerant architectures [1].

### [Eclipse Communication Framework](http://www.eclipse.org/ecf)
- The Eclipse ECF project provides an implementation of its Abstract Discovery services using ZooKeeper. ECF itself
is used in many projects providing base functionality for communication, all based on OSGi [1].

### [Eclipse Gyrex](http://www.eclipse.org/gyrex)
- The Eclipse Gyrex project provides a platform for building your own Java OSGi based clouds.
- ZooKeeper is used as the core cloud component for node membership and management, coordination of jobs executing among workers,
a lock service, a simple queue service, and a lot more [1].

### [GoldenOrb](http://www.goldenorbos.org/)
- Massive-scale graph analysis [1].

### [Juju](https://juju.ubuntu.com/)
- Service deployment and orchestration framework, formerly called Ensemble [1].

### [Katta](http://katta.sourceforge.net/)
- Katta serves distributed Lucene indexes in a grid environment.
- ZooKeeper is used for node, master and index management in the grid [1].

### [KeptCollections](https://github.com/anthonyu/KeptCollections)
- KeptCollections is a library of drop-in replacements for the data structures in the Java Collections framework.
- KeptCollections uses Apache ZooKeeper as a backing store, thus making its data structures distributed and scalable [1].

### [Neo4j](https://neo4j.com/)
- Neo4j is a Graph Database. It's a disk-based, ACID-compliant transactional storage engine for big graphs and fast graph traversals,
using external indices like Lucene/Solr for global searches.
- We use ZooKeeper in the Neo4j High Availability components for write-master election,
read slave coordination and other cool stuff. ZooKeeper is a great and focused project - we like! [1]

### [Norbert](http://sna-projects.com/norbert)
- Partitioned routing and cluster management [1].

### [spring-cloud-zookeeper](https://spring.io/projects/spring-cloud-zookeeper)
- Spring Cloud Zookeeper provides Apache Zookeeper integrations for Spring Boot apps through autoconfiguration
and binding to the Spring Environment and other Spring programming model idioms. With a few simple annotations
you can quickly enable and configure the common patterns inside your application and build large distributed systems with Zookeeper.
The patterns provided include Service Discovery and Distributed Configuration [38].

### [spring-statemachine](https://projects.spring.io/spring-statemachine/)
- Spring Statemachine is a framework for application developers to use state machine concepts with Spring applications.
- Spring Statemachine provides a distributed state machine based on ZooKeeper [31,32].

### [spring-xd](https://projects.spring.io/spring-xd/)
- Spring XD is a unified, distributed, and extensible system for data ingestion, real-time analytics, batch processing, and data export.
The project's goal is to simplify the development of big data applications.
- ZooKeeper provides all runtime information for the XD cluster: it tracks running containers, in which containers modules
and jobs are deployed, stream definitions, deployment manifests, and the like [30,31].

### [Talend ESB](http://www.talend.com/products-application-integration/application-integration-esb-se.php)
- Talend ESB is a versatile and flexible enterprise service bus.
- It uses ZooKeeper as an endpoint repository for both REST and SOAP Web services.
By using ZooKeeper, Talend ESB is able to provide failover and load balancing capabilities in a very lightweight manner [1].

### [redis_failover](https://github.com/ryanlecompte/redis_failover)
- Redis Failover is a ZooKeeper-based automatic master/slave failover solution for Ruby [1].

## Apache Projects

### [Apache Accumulo](https://accumulo.apache.org/)
- Accumulo is a distributed key/value store that provides expressive, cell-level access labels.
- Apache ZooKeeper plays a central role within the Accumulo architecture. Its quorum consistency model supports an overall
Accumulo architecture with no single point of failure. Beyond that, Accumulo leverages ZooKeeper to store and communicate
configuration information for users and tables, as well as operational states of processes and tablets [2].

### [Apache Atlas](http://atlas.apache.org)
- Atlas is a scalable and extensible set of core foundational governance services, enabling enterprises to effectively and efficiently meet
their compliance requirements within Hadoop and allowing integration with the whole enterprise data ecosystem.
- Atlas uses ZooKeeper for coordination to provide redundancy and high availability of HBase and Kafka [31,35].

### [Apache BookKeeper](https://bookkeeper.apache.org/)
- A scalable, fault-tolerant, and low-latency storage service optimized for real-time workloads.
- BookKeeper requires a metadata storage service to store information related to ledgers and available bookies. BookKeeper currently uses
ZooKeeper for this and other tasks [3].

### [Apache CXF DOSGi](http://cxf.apache.org/distributed-osgi.html)
- Apache CXF is an open source services framework. CXF helps you build and develop services using frontend programming
APIs, like JAX-WS and JAX-RS. These services can speak a variety of protocols such as SOAP, XML/HTTP, RESTful HTTP,
or CORBA and work over a variety of transports such as HTTP, JMS or JBI.
- The Distributed OSGi implementation at Apache CXF uses ZooKeeper for its Discovery functionality [4].

### [Apache Drill](http://drill.apache.org/)
- Schema-free SQL query engine for Hadoop, NoSQL and cloud storage.
- ZooKeeper maintains ephemeral cluster membership information. The Drillbits use ZooKeeper to find other Drillbits in the cluster,
and the client uses ZooKeeper to find Drillbits to submit a query [28].

### [Apache Druid](https://druid.apache.org/)
- Apache Druid is a high-performance real-time analytics database.
- Apache Druid uses Apache ZooKeeper (ZK) for management of current cluster state. The operations that happen over ZK are [27]:
    - Coordinator leader election
    - Segment "publishing" protocol from Historical and Realtime
    - Segment load/drop protocol between Coordinator and Historical
    - Overlord leader election
    - Overlord and MiddleManager task management

### [Apache Dubbo](http://dubbo.apache.org)
- Apache Dubbo is a high-performance, Java-based open source RPC framework.
- ZooKeeper is used for service registration discovery and configuration management in Dubbo [6].

### [Apache Flink](https://flink.apache.org/)
- Apache Flink is a framework and distributed processing engine for stateful computations over unbounded and bounded data streams.
Flink has been designed to run in all common cluster environments, and to perform computations at in-memory speed and at any scale.
- To enable JobManager High Availability you have to set the high-availability mode to zookeeper, configure a ZooKeeper quorum, and set up a masters file with all JobManager hosts and their web UI ports.
Flink leverages ZooKeeper for distributed coordination between all running JobManager instances. ZooKeeper is a separate service from Flink,
which provides highly reliable distributed coordination via leader election and lightweight consistent state storage [23].

### [Apache Flume](https://flume.apache.org/)
- Flume is a distributed, reliable, and available service for efficiently collecting, aggregating, and moving large amounts
of log data. It has a simple and flexible architecture based on streaming data flows. It is robust and fault-tolerant
with tunable reliability mechanisms and many failover and recovery mechanisms. It uses a simple extensible data model
that allows for online analytic applications.
- Flume supports agent configuration via ZooKeeper. This is an experimental feature [5].

### [Apache Fluo](https://fluo.apache.org/)
- Apache Fluo is a distributed processing system that lets users make incremental updates to large data sets.
- Apache Fluo is built on Apache Accumulo, which uses Apache ZooKeeper for consensus [31,37].

### [Apache Griffin](https://griffin.apache.org/)
- Big data quality solution for batch and streaming.
- Griffin uses ZooKeeper for coordination to provide redundancy and high availability of Kafka [31,36].

### [Apache Hadoop](http://hadoop.apache.org/)
- The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across
clusters of computers using simple programming models. It is designed to scale up from single servers to thousands of machines,
each offering local computation and storage. Rather than rely on hardware to deliver high availability,
the library itself is designed to detect and handle failures at the application layer, delivering a highly available service on top of a cluster of computers, each of which may be prone to failures.
- The implementation of automatic HDFS failover relies on ZooKeeper for the following things:
    - **Failure detection** - each of the NameNode machines in the cluster maintains a persistent session in ZooKeeper.
    If the machine crashes, the ZooKeeper session will expire, notifying the other NameNode that a failover should be triggered.
    - **Active NameNode election** - ZooKeeper provides a simple mechanism to exclusively elect a node as active. If the current active NameNode crashes,
    another node may take a special exclusive lock in ZooKeeper indicating that it should become the next active.
- The ZKFailoverController (ZKFC) is a new component: a ZooKeeper client that also monitors and manages the state of the NameNode.
Each of the machines which runs a NameNode also runs a ZKFC, and that ZKFC is responsible for:
    - **Health monitoring** - the ZKFC pings its local NameNode on a periodic basis with a health-check command.
    So long as the NameNode responds in a timely fashion with a healthy status, the ZKFC considers the node healthy.
    If the node has crashed, frozen, or otherwise entered an unhealthy state, the health monitor will mark it as unhealthy.
    - **ZooKeeper session management** - when the local NameNode is healthy, the ZKFC holds a session open in ZooKeeper.
    If the local NameNode is active, it also holds a special “lock” znode. This lock uses ZooKeeper’s support for “ephemeral” nodes;
    if the session expires, the lock node will be automatically deleted.
    - **ZooKeeper-based election** - if the local NameNode is healthy, and the ZKFC sees that no other node currently holds the lock znode,
    it will itself try to acquire the lock. If it succeeds, then it has “won the election” and is responsible for running a failover to make its local NameNode active.
    The failover process is similar to the manual failover described above: first, the previous active is fenced if necessary,
    and then the local NameNode transitions to active state [7]. The ephemeral-lock election at the heart of this scheme is sketched below.

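The “lock znode” election described above can be illustrated with nothing but the base ZooKeeper API. The following is a minimal sketch of the pattern, not the actual ZKFC code; the class name and the path `/election/active-lock` are invented for the example, and the parent znode `/election` is assumed to already exist:

```java
import org.apache.zookeeper.*;

// Minimal sketch of ephemeral-znode leader election (illustration only,
// not the real ZKFC implementation). The first client to create the
// ephemeral lock znode becomes active; if its session expires, ZooKeeper
// deletes the znode and the standby's watch fires, triggering a re-election.
public class ActiveElection implements Watcher {
    private static final String LOCK = "/election/active-lock"; // hypothetical path
    private final ZooKeeper zk;

    public ActiveElection(String hosts) throws Exception {
        zk = new ZooKeeper(hosts, 3000, this);
    }

    public void tryToBecomeActive() throws KeeperException, InterruptedException {
        try {
            // EPHEMERAL: the znode vanishes automatically if our session dies.
            zk.create(LOCK, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                    CreateMode.EPHEMERAL);
            System.out.println("Won the election: now active");
        } catch (KeeperException.NodeExistsException e) {
            // Someone else is active; leave a watch so we hear when it goes away.
            // (A production version must also handle the lock disappearing
            // between the failed create() and this exists() call.)
            zk.exists(LOCK, true);
            System.out.println("Standby: watching the current active");
        }
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
            try {
                tryToBecomeActive(); // the previous active is gone: re-elect
            } catch (KeeperException | InterruptedException ignored) {
            }
        }
    }
}
```
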
### [Apache HBase](https://hbase.apache.org/)
- HBase is the Hadoop database: an open-source, distributed, column-oriented store.
- HBase uses ZooKeeper for master election, server lease management, bootstrapping, and coordination between servers.
A distributed Apache HBase installation depends on a running ZooKeeper cluster. All participating nodes and clients
need to be able to access the running ZooKeeper ensemble [8].
- As you can see, ZooKeeper is a fundamental part of HBase. All operations that require coordination, such as Regions
assignment, Master-Failover, replication, and snapshots, are built on ZooKeeper [20].

### [Apache Helix](http://helix.apache.org/)
- A cluster management framework for partitioned and replicated distributed resources.
- We need a distributed store to maintain the state of the cluster and a notification system to notify if there is any change in the cluster state.
Helix uses Apache ZooKeeper to achieve this functionality [21].
ZooKeeper provides (see the sketch after this list):
    - A way to represent PERSISTENT state, which remains until it is deleted
    - A way to represent TRANSIENT/EPHEMERAL state, which vanishes when the process that created the state dies
    - A notification mechanism for changes in PERSISTENT and EPHEMERAL state

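These three primitives map directly onto the base ZooKeeper API. A minimal sketch follows; the paths and payloads are invented for illustration, and the parent znodes are assumed to exist:

```java
import org.apache.zookeeper.*;

// Sketch of the three primitives listed above, expressed with the raw
// ZooKeeper API. Paths and data are invented; parent znodes must exist.
public class StatePrimitives {
    public static void demo(ZooKeeper zk) throws KeeperException, InterruptedException {
        // 1. PERSISTENT state: remains until it is explicitly deleted.
        zk.create("/cluster/config", "v1".getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

        // 2. EPHEMERAL state: removed automatically when the creating
        //    session dies, so it doubles as a liveness signal.
        zk.create("/cluster/live-instances/node-1", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // 3. Notification: a one-shot watch that fires when the set of
        //    live instances changes (it must be re-registered afterwards).
        zk.getChildren("/cluster/live-instances",
                event -> System.out.println("Membership changed: " + event));
    }
}
```
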
### [Apache Hive](https://hive.apache.org)
- The Apache Hive data warehouse software facilitates reading, writing, and managing large datasets residing in distributed
storage using SQL. Structure can be projected onto data already in storage. A command line tool and JDBC driver are provided to connect users to Hive.
- Hive has been using ZooKeeper as a distributed lock manager to support concurrency in HiveServer2 [25,26] (a simplified sketch follows).

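In its simplest form, a ZooKeeper lock is a single ephemeral znode: whoever creates it holds the lock, and the znode disappearing (an explicit release, or the holder's session dying) frees it. The sketch below shows only this simplified idea; Hive's actual *ZooKeeperHiveLockManager* [26] is more elaborate and uses sequential znodes so that waiters queue fairly:

```java
import org.apache.zookeeper.*;

// Simplified distributed lock: one ephemeral znode (illustration only,
// not the ZooKeeperHiveLockManager implementation).
public class SimpleZkLock {
    private final ZooKeeper zk;
    private final String path; // e.g. "/locks/my-table" (hypothetical)

    public SimpleZkLock(ZooKeeper zk, String path) {
        this.zk = zk;
        this.path = path;
    }

    /** Try once to take the lock; returns true if we now hold it. */
    public boolean tryLock() throws KeeperException, InterruptedException {
        try {
            // EPHEMERAL: released automatically if our session dies.
            zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                    CreateMode.EPHEMERAL);
            return true;
        } catch (KeeperException.NodeExistsException e) {
            return false; // somebody else holds the lock
        }
    }

    public void unlock() throws KeeperException, InterruptedException {
        zk.delete(path, -1); // -1 means "any version"
    }
}
```
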
### [Apache Ignite](https://ignite.apache.org/)
- Ignite is a memory-centric distributed database, caching, and processing platform for
transactional, analytical, and streaming workloads, delivering in-memory speeds at petabyte scale.
- Apache Ignite's discovery mechanism ships with a ZooKeeper-based implementation, which allows scaling Ignite clusters to hundreds and thousands of nodes
while preserving linear scalability and performance [31,34].

### [Apache James Mailbox](http://james.apache.org/mailbox/)
- The Apache James Mailbox is a library providing flexible Mailbox storage accessible by mail protocols
(IMAP4, POP3, SMTP, ...) and other protocols.
- Uses ZooKeeper and the Curator Framework for generating distributed unique IDs [31].

### [Apache Kafka](https://kafka.apache.org/)
- Kafka is a distributed publish/subscribe messaging system.
- Apache Kafka relies on ZooKeeper for the following things (see the sketch after this list):
    - **Controller election**
    The controller is one of the most important broker entities in a Kafka ecosystem, and it is responsible for
    maintaining the leader-follower relationship across all the partitions. If a node shuts down for some reason,
    it is the controller's responsibility to tell the replicas on other nodes to take over as partition leaders for the
    partitions that were led by the departing node. So, whenever a node shuts down, a new controller
    can be elected, and ZooKeeper guarantees that at any given time there is only one controller, which all the follower nodes have agreed on.
    - **Configuration of topics**
    The configuration regarding all the topics, including the list of existing topics, the number of partitions for each topic,
    the location of all the replicas, the list of configuration overrides for all topics, which node is the preferred leader, etc.
    - **Access control lists**
    Access control lists (ACLs) for all the topics are also maintained within ZooKeeper.
    - **Membership of the cluster**
    ZooKeeper also maintains a list of all the brokers that are functioning at any given moment and are a part of the cluster [9].

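Because this metadata lives in ordinary znodes, any ZooKeeper client can inspect it. Below is a minimal sketch that reads the elected controller and watches broker membership; `/controller` and `/brokers/ids` are the znode paths Kafka conventionally uses, while the connect string and class name are invented for the example:

```java
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import java.util.List;

// Sketch: observing Kafka's cluster metadata directly in ZooKeeper.
public class KafkaMetadataPeek {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("localhost:2181", 3000, event -> {});

        // Current controller (a JSON payload written by the elected broker).
        byte[] controller = zk.getData("/controller", false, new Stat());
        System.out.println("Controller: " + new String(controller));

        // Live brokers: each broker registers an ephemeral child znode here,
        // so the child list shrinks automatically when a broker dies.
        List<String> brokers = zk.getChildren("/brokers/ids",
                event -> System.out.println("Broker membership changed: " + event));
        System.out.println("Live brokers: " + brokers);

        Thread.sleep(Long.MAX_VALUE); // keep the session alive to receive watches
    }
}
```
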
### [Apache Kylin](http://kylin.apache.org/)
- Apache Kylin is an open source distributed analytics engine designed to provide a SQL interface and multi-dimensional analysis (OLAP) on Hadoop/Spark, supporting extremely large datasets.
It was originally contributed by eBay Inc.
- Apache Kylin leverages ZooKeeper for job coordination [31,33].

### [Apache Mesos](http://mesos.apache.org/)
- Apache Mesos abstracts CPU, memory, storage, and other compute resources away from machines (physical or virtual),
enabling fault-tolerant and elastic distributed systems to easily be built and run effectively.
- Mesos has a high-availability mode that uses multiple Mesos masters: one active master (called the leader or leading master)
and several backups in case it fails. The masters elect the leader, with Apache ZooKeeper both coordinating the election
and handling leader detection by masters, agents, and scheduler drivers [10].

### [Apache Oozie](https://oozie.apache.org)
- Oozie is a workflow scheduler system to manage Apache Hadoop jobs.
- The Oozie servers use ZooKeeper for coordinating access to the database and communicating with each other. In order to have full HA,
there should be at least 3 ZooKeeper servers [29].

### [Apache Pulsar](https://pulsar.apache.org)
- Apache Pulsar is an open-source distributed pub-sub messaging system originally created at Yahoo and now part of the Apache Software Foundation.
- Pulsar uses Apache ZooKeeper for metadata storage, cluster configuration, and coordination. In a Pulsar instance:
    - A configuration store quorum stores configuration for tenants, namespaces, and other entities that need to be globally consistent.
    - Each cluster has its own local ZooKeeper ensemble that stores cluster-specific configuration and coordination such as ownership metadata,
    broker load reports, BookKeeper ledger metadata, and more [24].

### [Apache Solr](https://lucene.apache.org/solr/)
- Solr is the popular, blazing-fast, open source enterprise search platform built on Apache Lucene.
- In the "Cloud" edition (v4.x and up) of the enterprise search engine Apache Solr, ZooKeeper is used for configuration,
leader election and more [12,13].

### [Apache Spark](https://spark.apache.org/)
- Apache Spark is a unified analytics engine for large-scale data processing.
- Utilizing ZooKeeper to provide leader election and some state storage, you can launch multiple Masters in your cluster connected to the same ZooKeeper instance.
One will be elected “leader” and the others will remain in standby mode. If the current leader dies, another Master will be elected,
recover the old Master’s state, and then resume scheduling [14].

### [Apache Storm](http://storm.apache.org)
- Apache Storm is a free and open source distributed realtime computation system. Apache Storm makes it easy to reliably
process unbounded streams of data, doing for realtime processing what Hadoop did for batch processing.
Apache Storm is simple, can be used with any programming language, and is a lot of fun to use!
- Storm uses ZooKeeper for coordinating the cluster [22].


## Companies

### [AGETO](http://www.ageto.de/)
- The AGETO RnD team uses ZooKeeper in a variety of internal as well as external consulting projects [1].

### [Benipal Technologies](http://www.benipaltechnologies.com/)
- ZooKeeper is used for internal application development with Solr and Hadoop with HBase [1].

### [Box](http://box.net/)
- Box uses ZooKeeper for service discovery, service coordination, Solr and Hadoop support, etc. [1].

### [Deepdyve](http://www.deepdyve.com/)
- We do search for research and provide access to high-quality content using advanced search technologies. ZooKeeper is used to
manage server state, control index deployment and a myriad of other tasks [1].

### [Facebook](https://www.facebook.com/)
- Facebook uses Zeus [17,18] for configuration management, a forked version of ZooKeeper with many scalability
and performance enhancements in order to work at Facebook scale.
It runs a consensus protocol among servers distributed across multiple regions for resilience. If the leader fails,
a follower is converted into a new leader.

### [Idium Portal](http://www.idium.no/no/idium_portal/)
- Idium Portal is a hosted web-publishing system delivered by the Norwegian company Idium AS.
- ZooKeeper is used for cluster messaging, service bootstrapping, and service coordination [1].

### [Makara](http://www.makara.com/)
- Using ZooKeeper on a 2-node cluster on VMware Workstation, Amazon EC2, and Xen
- Using zkpython
- Looking into expanding into a 100-node cluster [1].

### [Midokura](http://www.midokura.com/)
- We do virtualized networking for the cloud computing era. We use ZooKeeper for various aspects of our distributed control plane [1].

### [Pinterest](https://www.pinterest.com/)
- Pinterest uses ZooKeeper for service discovery and dynamic configuration. Like many large-scale web sites, Pinterest’s infrastructure consists of servers that communicate with
backend services composed of a number of individual servers for managing load and fault tolerance. Ideally, we’d like the configuration to reflect only the active hosts,
so clients don’t need to deal with bad hosts as often. ZooKeeper provides a well-known pattern to solve this problem [19], sketched below.

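The pattern in question is ephemeral-node registration: every live server creates an ephemeral child under a per-service znode, and clients read (and watch) that child list, which therefore always reflects exactly the hosts whose sessions are alive. A minimal sketch, with the path `/discovery/serviceX` and the class name invented for the example:

```java
import org.apache.zookeeper.*;
import java.util.List;

// Sketch of the ephemeral-registration service-discovery pattern.
// The parent znode /discovery/serviceX is assumed to already exist.
public class ServiceDiscovery {
    // A server announces itself; the znode disappears if the server dies,
    // because an ephemeral znode is tied to the creating session.
    static void register(ZooKeeper zk, String hostPort)
            throws KeeperException, InterruptedException {
        zk.create("/discovery/serviceX/" + hostPort, new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }

    // A client fetches the live host list and arms a one-shot watch so it
    // learns when membership changes (the watch must then be re-registered).
    static List<String> liveHosts(ZooKeeper zk)
            throws KeeperException, InterruptedException {
        return zk.getChildren("/discovery/serviceX",
                event -> System.out.println("Host list changed: " + event));
    }
}
```
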
### [Rackspace](http://www.rackspace.com/email_hosting)
- The Email & Apps team uses ZooKeeper to coordinate sharding and responsibility changes in a distributed e-mail client
that pulls and indexes data for search. ZooKeeper also provides distributed locking for connections to prevent a cluster from overwhelming servers [1].

### [Sematext](http://sematext.com/)
- Uses ZooKeeper in SPM (which includes a ZooKeeper monitoring component, too!), Search Analytics, and Logsene [1].

### [Tubemogul](http://tubemogul.com/)
- Uses ZooKeeper for leader election, configuration management, locking, and group membership [1].

### [Twitter](https://twitter.com/)
- ZooKeeper is used at Twitter as the source of truth for storing critical metadata. It serves as a coordination kernel to
provide distributed coordination services, such as leader election and distributed locking.
Some concrete examples of ZooKeeper in action include [15,16]:
    - ZooKeeper stores the service registry, which is used by Twitter’s naming service for service discovery.
    - Manhattan (Twitter’s in-house key-value database), Nighthawk (sharded Redis), and Blobstore (in-house photo and video storage)
    store their cluster topology information in ZooKeeper.
    - EventBus, Twitter’s pub-sub messaging system, stores critical metadata in ZooKeeper and uses ZooKeeper for leader election.
    - Mesos, Twitter’s compute platform, uses ZooKeeper for leader election.

### [Vast.com](http://www.vast.com/)
- Used internally as a part of sharding services, distributed synchronization of data/index updates, configuration management and failover support [1].

### [Wealthfront](http://wealthfront.com/)
- Wealthfront uses ZooKeeper for service discovery, leader election and distributed locking among its many backend services.
ZK is an essential part of Wealthfront's continuous [deployment infrastructure](http://eng.wealthfront.com/2010/05/02/deployment-infrastructure-for-continuous-deployment/) [1].

### [Yahoo!](http://www.yahoo.com/)
- ZooKeeper is used for a myriad of services inside Yahoo! for doing leader election, configuration management, sharding, locking, group membership, etc. [1].

### [Zynga](http://www.zynga.com/)
- ZooKeeper at Zynga is used for a variety of services including configuration management, leader election, sharding and more [1].


#### References
- [1] https://cwiki.apache.org/confluence/display/ZOOKEEPER/PoweredBy
- [2] https://www.youtube.com/watch?v=Ew53T6h9oRw
- [3] https://bookkeeper.apache.org/docs/4.7.3/getting-started/concepts/#ledgers
- [4] http://cxf.apache.org/dosgi-discovery-demo-page.html
- [5] https://flume.apache.org/FlumeUserGuide.html
- [6] http://dubbo.apache.org/en-us/blog/dubbo-zk.html
- [7] https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html
- [8] https://hbase.apache.org/book.html#zookeeper
- [9] https://www.cloudkarafka.com/blog/2018-07-04-cloudkarafka_what_is_zookeeper.html
- [10] http://mesos.apache.org/documentation/latest/high-availability/
- [11] http://incubator.apache.org/projects/s4.html
- [12] https://lucene.apache.org/solr/guide/6_6/using-zookeeper-to-manage-configuration-files.html#UsingZooKeepertoManageConfigurationFiles-StartupBootstrap
- [13] https://lucene.apache.org/solr/guide/6_6/setting-up-an-external-zookeeper-ensemble.html
- [14] https://spark.apache.org/docs/latest/spark-standalone.html#standby-masters-with-zookeeper
- [15] https://blog.twitter.com/engineering/en_us/topics/infrastructure/2018/zookeeper-at-twitter.html
- [16] https://blog.twitter.com/engineering/en_us/topics/infrastructure/2018/dynamic-configuration-at-twitter.html
- [17] Tang, C., Kooburat, T., Venkatachalam, P., Chander, A., Wen, Z., Narayanan, A., Dowell, P., and Karl, R. Holistic Configuration Management at Facebook. In Proceedings of the 25th Symposium on Operating Systems Principles (SOSP '15) (Monterey, CA, USA, Oct. 2015).
- [18] https://www.youtube.com/watch?v=SeZV373gUZc
- [19] https://medium.com/@Pinterest_Engineering/zookeeper-resilience-at-pinterest-adfd8acf2a6b
- [20] https://blog.cloudera.com/what-are-hbase-znodes/
- [21] https://helix.apache.org/Architecture.html
- [22] http://storm.apache.org/releases/current/Setting-up-a-Storm-cluster.html
- [23] https://ci.apache.org/projects/flink/flink-docs-release-1.9/ops/jobmanager_high_availability.html
- [24] https://pulsar.apache.org/docs/en/concepts-architecture-overview/#metadata-store
- [25] https://cwiki.apache.org/confluence/display/Hive/Locking
- [26] *ZooKeeperHiveLockManager* implementation in the [hive](https://github.com/apache/hive/) code base
- [27] https://druid.apache.org/docs/latest/dependencies/zookeeper.html
- [28] https://mapr.com/blog/apache-drill-architecture-ultimate-guide/
- [29] https://oozie.apache.org/docs/4.1.0/AG_Install.html
- [30] https://docs.spring.io/spring-xd/docs/current/reference/html/
- [31] https://cwiki.apache.org/confluence/display/CURATOR/Powered+By
- [32] https://projects.spring.io/spring-statemachine/
- [33] https://www.tigeranalytics.com/blog/apache-kylin-architecture/
- [34] https://apacheignite.readme.io/docs/cluster-discovery
- [35] http://atlas.apache.org/HighAvailability.html
- [36] http://griffin.apache.org/docs/usecases.html
- [37] https://fluo.apache.org/
- [38] https://spring.io/projects/spring-cloud-zookeeper