From 4e295a0bef7e78f7ff210ae337dc6075dda8d43a Mon Sep 17 00:00:00 2001 From: "zhongyan.feng" Date: Tue, 23 Sep 2014 11:16:32 +0800 Subject: [PATCH] Release 0.9.6 --- README.md | Bin 2414 -> 2516 bytes history.md | 16 +- jstorm-client-extension/pom.xml | 18 +- .../jstorm/client/ConfigExtension.java | 32 + .../jstorm/client/metric/MetricCallback.java | 7 + .../jstorm/client/metric/MetricClient.java | 66 + .../jstorm/metric}/JStormHistogram.java | 74 +- .../alibaba/jstorm/metric}/JStormTimer.java | 128 +- .../com/alibaba/jstorm/metric/MetricDef.java | 43 + .../com/alibaba/jstorm/metric/MetricInfo.java | 27 + .../alibaba/jstorm/metric}/MetricJstack.java | 246 +- .../com/alibaba/jstorm/metric/Metrics.java | 330 ++ .../alibaba/jstorm/metric/UserDefMetric.java | 106 + .../jstorm/metric/UserDefMetricData.java | 126 + .../jstorm/metric/metrdata/CounterData.java | 23 + .../jstorm/metric/metrdata/GaugeData.java | 23 + .../jstorm/metric/metrdata/HistogramData.java | 112 + .../jstorm/metric/metrdata/MeterData.java | 58 + .../jstorm/metric/metrdata/TimerData.java | 149 + .../com/alibaba/jstorm/utils/JStormUtils.java | 60 + jstorm-client/pom.xml | 10 +- .../java/backtype/storm/command/list.java | 58 + .../storm/command/metrics_monitor.java | 56 + .../storm/generated/MonitorOptions.java | 320 ++ .../java/backtype/storm/generated/Nimbus.java | 3592 ++++++++++++----- .../storm/generated/TaskMetricData.java | 1135 ++++++ .../backtype/storm/generated/TaskSummary.java | 97 +- .../storm/generated/TopologyMetricInfo.java | 594 +++ .../storm/generated/TopologySummary.java | 101 +- .../storm/generated/WorkerMetricData.java | 1135 ++++++ .../src/main/py/storm/DistributedRPC.py | 6 + .../py/storm/DistributedRPCInvocations.py | 18 + jstorm-client/src/main/py/storm/Nimbus-remote | 42 + jstorm-client/src/main/py/storm/Nimbus.py | 1686 +++++++- jstorm-client/src/main/py/storm/ttypes.py | 2241 ++++++++-- jstorm-server/bin/jstorm.py | 28 +- jstorm-server/conf/jstorm.log4j.properties | 10 +- 
jstorm-server/conf/storm.yaml | 10 + jstorm-server/pom.xml | 31 +- .../impl/DelayStatusTransitionCallback.java | 96 + .../callback/impl/KillTransitionCallback.java | 70 +- .../impl/RebalanceTransitionCallback.java | 61 +- .../impl/RemoveTransitionCallback.java | 4 - .../com/alibaba/jstorm/cluster/Cluster.java | 63 + .../jstorm/cluster/StormClusterState.java | 51 + .../alibaba/jstorm/cluster/StormMonitor.java | 33 + .../alibaba/jstorm/cluster/StormStatus.java | 25 +- .../jstorm/cluster/StormZkClusterState.java | 259 +- .../jstorm/container/SystemOperation.java | 4 +- .../jstorm/daemon/nimbus/NimbusServer.java | 22 + .../jstorm/daemon/nimbus/NimbusUtils.java | 99 +- .../jstorm/daemon/nimbus/ServiceHandler.java | 154 +- .../daemon/nimbus/StatusTransition.java | 2 +- .../jstorm/daemon/nimbus/StatusType.java | 15 +- .../jstorm/daemon/nimbus/TopologyAssign.java | 25 +- .../daemon/supervisor/CgroupManager.java | 13 +- .../jstorm/daemon/supervisor/Heartbeat.java | 10 +- .../daemon/supervisor/ShutdownWork.java | 109 +- .../daemon/supervisor/StateHeartbeat.java | 8 + .../jstorm/daemon/supervisor/Supervisor.java | 11 +- .../daemon/supervisor/SupervisorManger.java | 14 +- .../daemon/supervisor/SyncProcessEvent.java | 32 +- .../daemon/worker/BatchTupleRunable.java | 16 +- .../jstorm/daemon/worker/DrainerRunable.java | 10 +- .../jstorm/daemon/worker/RefreshActive.java | 29 +- .../daemon/worker/VirtualPortDispatch.java | 11 +- .../alibaba/jstorm/daemon/worker/Worker.java | 51 +- .../jstorm/daemon/worker/WorkerData.java | 12 + .../jstorm/daemon/worker/WorkerHeartbeat.java | 8 +- .../daemon/worker/WorkerMetricInfo.java | 149 + .../jstorm/daemon/worker/WorkerShutdown.java | 56 +- .../worker/metrics/AlimonitorClient.java | 277 ++ .../daemon/worker/metrics/MetricKVMsg.java | 201 + .../daemon/worker/metrics/MetricReporter.java | 45 +- .../jstorm/daemon/worker/metrics/Metrics.java | 128 - .../worker/metrics/StormMetricReporter.java | 424 ++ .../worker/metrics/TopoCommStatsInfo.java | 
229 ++ .../worker/metrics/UploadMetricFromZK.java | 248 ++ .../worker/metrics/UploadSupervMetric.java | 183 + .../alibaba/jstorm/event/EventManagerImp.java | 5 + .../jstorm/event/EventManagerImpExecute.java | 2 +- .../alibaba/jstorm/local/LocalClusterMap.java | 7 +- .../jstorm/message/netty/ControlMessage.java | 12 +- .../jstorm/message/netty/MessageDecoder.java | 87 +- .../jstorm/message/netty/NettyClient.java | 139 +- .../message/netty/NettyClientAsync.java | 18 +- .../jstorm/message/netty/NettyClientSync.java | 34 +- .../jstorm/message/netty/NettyContext.java | 1 + .../message/netty/StormClientHandler.java | 12 +- .../message/zeroMq/ZMQSendConnection.java | 17 +- .../alibaba/jstorm/stats/CommonStatsData.java | 61 + .../com/alibaba/jstorm/task/Assignment.java | 22 + .../java/com/alibaba/jstorm/task/Task.java | 46 +- .../com/alibaba/jstorm/task/TaskInfo.java | 18 +- .../alibaba/jstorm/task/TaskMetricInfo.java | 136 + .../jstorm/task/TaskShutdownDameon.java | 24 +- .../com/alibaba/jstorm/task/TaskTransfer.java | 10 +- .../jstorm/task/execute/BaseExecutors.java | 11 +- .../jstorm/task/execute/BoltCollector.java | 10 +- .../jstorm/task/execute/BoltExecutors.java | 8 +- .../spout/MultipleThreadSpoutExecutors.java | 9 +- .../spout/SingleThreadSpoutExecutors.java | 7 +- .../task/execute/spout/SpoutCollector.java | 9 +- .../task/execute/spout/SpoutExecutors.java | 14 +- .../jstorm/task/heartbeat/TaskHeartbeat.java | 12 +- .../task/heartbeat/TaskHeartbeatRunable.java | 18 +- .../jstorm/task/heartbeat/TaskStats.java | 21 + .../jstorm/utils/DisruptorRunable.java | 15 +- .../jstorm/utils/JStormServerUtils.java | 14 + .../src/main/resources/defaults.yaml | 8 +- jstorm-ui/pom.xml | 2 +- .../java/com/alibaba/jstorm/ui/UIUtils.java | 23 +- .../alibaba/jstorm/ui/model/TaskMetrics.java | 146 + .../alibaba/jstorm/ui/model/TopologySumm.java | 9 + .../jstorm/ui/model/WorkerMetrics.java | 179 + .../alibaba/jstorm/ui/model/WorkerSumm.java | 15 +- .../jstorm/ui/model/data/BoltPage.java 
| 39 + .../alibaba/jstorm/ui/model/data/LogPage.java | 6 +- .../jstorm/ui/model/data/SpoutPage.java | 28 + .../jstorm/ui/model/data/SupervisorPage.java | 65 +- jstorm-ui/src/main/webapp/bolt.xhtml | 98 + jstorm-ui/src/main/webapp/cluster.xhtml | 64 +- jstorm-ui/src/main/webapp/spout.xhtml | 98 + jstorm-ui/src/main/webapp/supervisor.xhtml | 132 +- other/storm.thrift | 32 +- pom.xml | 2 +- version | 2 +- 127 files changed, 15234 insertions(+), 2584 deletions(-) create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricCallback.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricClient.java rename {jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics => jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric}/JStormHistogram.java (86%) rename {jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics => jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric}/JStormTimer.java (91%) create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricDef.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricInfo.java rename {jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics => jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric}/MetricJstack.java (95%) create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/Metrics.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetric.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetricData.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/CounterData.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/GaugeData.java create mode 100644 
jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/HistogramData.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/MeterData.java create mode 100644 jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/TimerData.java create mode 100644 jstorm-client/src/main/java/backtype/storm/command/list.java create mode 100644 jstorm-client/src/main/java/backtype/storm/command/metrics_monitor.java create mode 100644 jstorm-client/src/main/java/backtype/storm/generated/MonitorOptions.java create mode 100644 jstorm-client/src/main/java/backtype/storm/generated/TaskMetricData.java create mode 100644 jstorm-client/src/main/java/backtype/storm/generated/TopologyMetricInfo.java create mode 100644 jstorm-client/src/main/java/backtype/storm/generated/WorkerMetricData.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerMetricInfo.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/AlimonitorClient.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricKVMsg.java delete mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/Metrics.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/StormMetricReporter.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/TopoCommStatsInfo.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadMetricFromZK.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadSupervMetric.java create mode 100644 
jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskMetricInfo.java create mode 100644 jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskStats.java create mode 100644 jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TaskMetrics.java create mode 100644 jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerMetrics.java diff --git a/README.md b/README.md index 3bef27a040efc6017a4f4891a627c56634509cce..b17f985ebf2fe2f338d45d2b81edc42aeed05d1e 100644 GIT binary patch delta 59 zcmaDSbVYbW9-EXzkls`chG+%{h9rhWhGK>ihD?SWATDK?EWoVAk0!V|noXD)0L0Y| A*#H0l delta 11 Scmca2{7z^?9^2w8Y|H>3Yy^w| diff --git a/history.md b/history.md index 3210b402a..830b2b49c 100644 --- a/history.md +++ b/history.md @@ -1,4 +1,18 @@ -[JStorm 0.9.0 ](http://wenku.baidu.com/view/59e81017dd36a32d7375818b.html) +[JStorm 0.9.0 介绍](http://wenku.baidu.com/view/59e81017dd36a32d7375818b.html) + +#Release 0.9.6 +1. Update UI + - Display the metrics information of task and worker + - Add warning flag when errors occur for a topology + - Add link from supervisor page to task page +2. Send metrics data to Alimonitor +3. Add metrics interface for user +4. Add task.cleanup.timeout.sec setting to let task gently cleanup +5. Set the worker's log name as topologyName-worker-port.log +6. Add setting "worker.redirect.output.file", so worker can redirect System.out/System.err to one setting file +7. Add storm list command +8. Add closing channel check in netty client to avoid double close +9. Add connecting check in netty client to avoid connecting one server twice at one time #Release 0.9.5.1 1. Add netty sync mode diff --git a/jstorm-client-extension/pom.xml b/jstorm-client-extension/pom.xml index fa3fab4c7..72de9ef71 100644 --- a/jstorm-client-extension/pom.xml +++ b/jstorm-client-extension/pom.xml @@ -4,18 +4,18 @@ com.alibaba.jstorm jstorm-all - 0.9.5.1 + 0.9.6 .. 
- + + --> 4.0.0 com.alibaba.jstorm jstorm-client-extension - 0.9.5.1 + 0.9.6 jar ${project.artifactId}-${project.version} @@ -88,6 +88,16 @@ org.slf4j slf4j-log4j12 1.7.5 + + + com.codahale.metrics + metrics-core + 3.0.1 + + + com.codahale.metrics + metrics-jvm + 3.0.1 \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java index 15307b541..eb5bd797d 100644 --- a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java @@ -73,6 +73,16 @@ public static boolean getWorkerRedirectOutput(Map conf) { return true; return (Boolean) result; } + + protected static final String WOREKER_REDIRECT_OUTPUT_FILE = "worker.redirect.output.file"; + + public static void setWorkerRedirectOutputFile(Map conf, String outputPath) { + conf.put(WOREKER_REDIRECT_OUTPUT_FILE, outputPath); + } + + public static String getWorkerRedirectOutputFile(Map conf) { + return (String)conf.get(WOREKER_REDIRECT_OUTPUT_FILE); + } /** * Usually, spout finish prepare before bolt, so spout need wait several @@ -385,4 +395,26 @@ public static boolean isNettyASyncBlock(Map conf) { public static void setNettyASyncBlock(Map conf, boolean block) { conf.put(NETTY_ASYNC_BLOCK, block); } + + protected static String ALIMONITOR_METRICS_POST = "topology.alimonitor.metrics.post"; + + public static boolean isAlimonitorMetricsPost(Map conf) { + return JStormUtils.parseBoolean(conf.get(ALIMONITOR_METRICS_POST), true); + } + + public static void setAlimonitorMetricsPost(Map conf, boolean post) { + conf.put(ALIMONITOR_METRICS_POST, post); + } + + protected static String TASK_CLEANUP_TIMEOUT_SEC = "task.cleanup.timeout.sec"; + + public static int getTaskCleanupTimeoutSec(Map conf) { + return JStormUtils.parseInt(conf.get(TASK_CLEANUP_TIMEOUT_SEC), 10); + } 
+ + public static void setTaskCleanupTimeoutSec(Map conf, int timeout) { + conf.put(TASK_CLEANUP_TIMEOUT_SEC, timeout); + } + + } diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricCallback.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricCallback.java new file mode 100644 index 000000000..964913ef8 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricCallback.java @@ -0,0 +1,7 @@ +package com.alibaba.jstorm.client.metric; + +import com.codahale.metrics.Metric; + +public interface MetricCallback { + void callback(T metric); +} diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricClient.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricClient.java new file mode 100644 index 000000000..becc36510 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/client/metric/MetricClient.java @@ -0,0 +1,66 @@ +package com.alibaba.jstorm.client.metric; + +import backtype.storm.task.TopologyContext; + +import com.alibaba.jstorm.metric.Metrics; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Timer; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.JStormHistogram; + +public class MetricClient { + + private final int taskid; + + public MetricClient(TopologyContext context) { + taskid = context.getThisTaskId(); + } + + private String getMetricName(Integer taskid, String name) { + return "task-" + String.valueOf(taskid) + ":" + name; + } + + public Gauge registerGauge(String name, Gauge gauge, MetricCallback> callback) { + String userMetricName = getMetricName(taskid, name); + Gauge ret = Metrics.registerGauge(userMetricName, gauge); + Metrics.registerUserDefine(userMetricName, gauge, callback); + return ret; + } + + 
public Counter registerCounter(String name, MetricCallback callback) { + String userMetricName = getMetricName(taskid, name); + Counter ret = Metrics.registerCounter(userMetricName); + Metrics.registerUserDefine(userMetricName, ret, callback); + return ret; + } + + public Meter registerMeter(String name, MetricCallback callback) { + String userMetricName = getMetricName(taskid, name); + Meter ret = Metrics.registerMeter(userMetricName); + Metrics.registerUserDefine(userMetricName, ret, callback); + return ret; + } + + public JStormTimer registerTimer(String name, MetricCallback callback) { + String userMetricName = getMetricName(taskid, name); + JStormTimer ret = Metrics.registerTimer(userMetricName); + Metrics.registerUserDefine(userMetricName, ret, callback); + return ret; + } + + public JStormHistogram registerHistogram(String name, MetricCallback callback) { + String userMetricName = getMetricName(taskid, name); + JStormHistogram ret = Metrics.registerHistograms(userMetricName); + Metrics.registerUserDefine(userMetricName, ret, callback); + return ret; + } + + public boolean unregister(String name, Integer taskid) { + String userMetricName = getMetricName(taskid, name); + return Metrics.unregisterUserDefine(userMetricName); + } + +} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormHistogram.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormHistogram.java similarity index 86% rename from jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormHistogram.java rename to jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormHistogram.java index d4cbb8d4c..863deaa5c 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormHistogram.java +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormHistogram.java @@ -1,35 +1,39 @@ -package com.alibaba.jstorm.daemon.worker.metrics; - -import com.codahale.metrics.Histogram; - 
-public class JStormHistogram { - private static boolean isEnable = true; - - public static boolean isEnable() { - return isEnable; - } - - public static void setEnable(boolean isEnable) { - JStormHistogram.isEnable = isEnable; - } - - private Histogram instance; - private String name; - - public JStormHistogram(String name, Histogram instance) { - this.name = name; - this.instance = instance; - } - - public void update(int value) { - if (isEnable == true) { - instance.update(value); - } - } - - public void update(long value) { - if (isEnable == true) { - instance.update(value); - } - } -} +package com.alibaba.jstorm.metric; + +import com.codahale.metrics.Histogram; + +public class JStormHistogram { + private static boolean isEnable = true; + + public static boolean isEnable() { + return isEnable; + } + + public static void setEnable(boolean isEnable) { + JStormHistogram.isEnable = isEnable; + } + + private Histogram instance; + private String name; + + public JStormHistogram(String name, Histogram instance) { + this.name = name; + this.instance = instance; + } + + public void update(int value) { + if (isEnable == true) { + instance.update(value); + } + } + + public void update(long value) { + if (isEnable == true) { + instance.update(value); + } + } + + public Histogram getInstance() { + return instance; + } +} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormTimer.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormTimer.java similarity index 91% rename from jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormTimer.java rename to jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormTimer.java index 25f23856a..2927b9d7a 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/JStormTimer.java +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/JStormTimer.java @@ -1,64 +1,64 @@ -package 
com.alibaba.jstorm.daemon.worker.metrics; - - -import java.util.concurrent.atomic.AtomicReference; - -import org.apache.log4j.Logger; - -import com.codahale.metrics.Timer; - -public class JStormTimer { - private static final Logger LOG = Logger.getLogger(JStormTimer.class); - private static boolean isEnable = true; - - public static boolean isEnable() { - return isEnable; - } - - public static void setEnable(boolean isEnable) { - JStormTimer.isEnable = isEnable; - } - - - private Timer instance; - private String name; - public JStormTimer(String name, Timer instance) { - this.name = name; - this.instance = instance; - this.timerContext = new AtomicReference(); - } - - /** - * This logic isn't perfect, it will miss metrics when it is called - * in the same time. But this method performance is better than - * create a new instance wrapper Timer.Context - */ - private AtomicReference timerContext = null; - public void start() { - if (JStormTimer.isEnable == false) { - return ; - } - - if (timerContext.get() != null) { - LOG.warn("Already start timer " + name); - return ; - } - - - timerContext.set(instance.time()); - - } - - public void stop() { - Timer.Context context = timerContext.getAndSet(null); - if (context != null) { - context.stop(); - } - } - - public Timer getInstance() { - return instance; - } - - -} +package com.alibaba.jstorm.metric; + + +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.log4j.Logger; + +import com.codahale.metrics.Timer; + +public class JStormTimer { + private static final Logger LOG = Logger.getLogger(JStormTimer.class); + private static boolean isEnable = true; + + public static boolean isEnable() { + return isEnable; + } + + public static void setEnable(boolean isEnable) { + JStormTimer.isEnable = isEnable; + } + + + private Timer instance; + private String name; + public JStormTimer(String name, Timer instance) { + this.name = name; + this.instance = instance; + this.timerContext = new AtomicReference(); + } + 
+ /** + * This logic isn't perfect, it will miss metrics when it is called + * in the same time. But this method performance is better than + * create a new instance wrapper Timer.Context + */ + private AtomicReference timerContext = null; + public void start() { + if (JStormTimer.isEnable == false) { + return ; + } + + if (timerContext.get() != null) { + LOG.warn("Already start timer " + name); + return ; + } + + + timerContext.set(instance.time()); + + } + + public void stop() { + Timer.Context context = timerContext.getAndSet(null); + if (context != null) { + context.stop(); + } + } + + public Timer getInstance() { + return instance; + } + + +} diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricDef.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricDef.java new file mode 100644 index 000000000..a28c9719d --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricDef.java @@ -0,0 +1,43 @@ +package com.alibaba.jstorm.metric; + +public class MetricDef { + // metric name for task + public static final String DESERIALIZE_QUEUE = "Deserialize_Queue"; + public static final String DESERIALIZE_TIME = "Deserialize_Time"; + public static final String SERIALIZE_QUEUE = "Serialize_Queue"; + public static final String SERIALIZE_TIME = "Serialize_Time"; + public static final String EXECUTE_QUEUE = "Executor_Queue"; + public static final String EXECUTE_TIME = "Execute_Time"; + public static final String ACKER_TIME = "Acker_Time"; + public static final String EMPTY_CPU_RATIO = "Empty_Cpu_Ratio"; + public static final String PENDING_MAP = "Pending_Num"; + public static final String EMIT_TIME = "Emit_Time"; + + // metric name for worker + public static final String NETWORK_MSG_TRANS_TIME = "Network_Transmit_Time"; + public static final String NETTY_SERV_DECODE_TIME = "Netty_Server_Decode_Time"; + public static final String DISPATCH_TIME = "Virtual_Port_Dispatch_Time"; + public static final 
String DISPATCH_QUEUE = "Virtual_Port_Dispatch_Queue"; + public static final String BATCH_TUPLE_TIME = "Batch_Tuple_Time"; + public static final String BATCH_TUPLE_QUEUE = "Batch_Tuple_Queue"; + public static final String DRAINER_TIME = "Drainer_Time"; + public static final String DRAINER_QUEUE = "Drainer_Queue"; + public static final String NETTY_CLI_SEND_TIME = "Netty_Client_Send_Time"; + public static final String NETTY_CLI_BATCH_SIZE = "Netty_Client_Send_Batch_Size"; + public static final String NETTY_CLI_SEND_PENDING = "Netty_Client_Send_Pendings"; + public static final String NETTY_CLI_SYNC_BATCH_QUEUE = "Netty_Client_Sync_BatchQueue"; + public static final String NETTY_CLI_SYNC_DISR_QUEUE = "Netty_Client_Sync_DisrQueue"; + + public static final String ZMQ_SEND_TIME = "ZMQ_Send_Time"; + public static final String ZMQ_SEND_MSG_SIZE = "ZMQ_Send_MSG_Size"; + + public static final String CPU_USED_RATIO = "Used_Cpu"; + public static final String MEMORY_USED = "Used_Memory"; + + public static final String REMOTE_CLI_ADDR = "Remote_Client_Address"; + public static final String REMOTE_SERV_ADDR = "Remote_Server_Address"; + + // monitor name in Alimonitor + public static final String TASK_MONITOR_NAME = "jstorm_task_metrics"; + public static final String WORKER_MONITOR_NAME = "jstorm_worker_metrics"; +} diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricInfo.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricInfo.java new file mode 100644 index 000000000..09a2a107a --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricInfo.java @@ -0,0 +1,27 @@ +package com.alibaba.jstorm.metric; + +import com.codahale.metrics.Metric; + +public class MetricInfo { + private Metric metric; + private String prefix; + private String name; + + public MetricInfo(String prefix, String name, Metric metric) { + this.prefix = prefix; + this.name = name; + this.metric = metric; + } + + public String 
getPrefix() { + return prefix; + } + + public String getName() { + return name; + } + + public Metric getMetric() { + return metric; + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricJstack.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java similarity index 95% rename from jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricJstack.java rename to jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java index 1a169aca6..c60525a5f 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricJstack.java +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java @@ -1,123 +1,123 @@ -package com.alibaba.jstorm.daemon.worker.metrics; - -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; - -import com.codahale.metrics.Gauge; - -public class MetricJstack implements Gauge { - - private String getTaskName(long id, String name) { - if (name == null) { - return Long.toString(id); - } - return id + " (" + name + ")"; - } - - public String dumpThread() throws Exception { - StringBuilder writer = new StringBuilder(); - - ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); - - boolean contention = threadMXBean.isThreadContentionMonitoringEnabled(); - - long[] threadIds = threadMXBean.getAllThreadIds(); - writer.append(threadIds.length + " active threads:"); - for (long tid : threadIds) { - writer.append(tid).append(" "); - } - writer.append("\n"); - - long[] deadLockTids = threadMXBean.findDeadlockedThreads(); - if (deadLockTids != null) { - writer.append(threadIds.length + " deadlocked threads:"); - for (long tid : deadLockTids) { - writer.append(tid).append(" "); - } - writer.append("\n"); - } - - long[] deadLockMonitorTids = threadMXBean - .findMonitorDeadlockedThreads(); - if 
(deadLockMonitorTids != null) { - writer.append(threadIds.length + " deadlocked monitor threads:"); - for (long tid : deadLockMonitorTids) { - writer.append(tid).append(" "); - } - writer.append("\n"); - } - - for (long tid : threadIds) { - ThreadInfo info = threadMXBean - .getThreadInfo(tid, Integer.MAX_VALUE); - if (info == null) { - writer.append(" Inactive").append("\n"); - continue; - } - writer.append( - "Thread " - + getTaskName(info.getThreadId(), - info.getThreadName()) + ":").append("\n"); - Thread.State state = info.getThreadState(); - writer.append(" State: " + state).append("\n"); - writer.append(" Blocked count: " + info.getBlockedCount()).append( - "\n"); - writer.append(" Waited count: " + info.getWaitedCount()).append( - "\n"); - writer.append(" Cpu time:") - .append(threadMXBean.getThreadCpuTime(tid) / 1000000) - .append("ms").append("\n"); - writer.append(" User time:") - .append(threadMXBean.getThreadUserTime(tid) / 1000000) - .append("ms").append("\n"); - if (contention) { - writer.append(" Blocked time: " + info.getBlockedTime()) - .append("\n"); - writer.append(" Waited time: " + info.getWaitedTime()).append( - "\n"); - } - if (state == Thread.State.WAITING) { - writer.append(" Waiting on " + info.getLockName()) - .append("\n"); - } else if (state == Thread.State.BLOCKED) { - writer.append(" Blocked on " + info.getLockName()) - .append("\n"); - writer.append( - " Blocked by " - + getTaskName(info.getLockOwnerId(), - info.getLockOwnerName())).append("\n"); - } - - } - for (long tid : threadIds) { - ThreadInfo info = threadMXBean - .getThreadInfo(tid, Integer.MAX_VALUE); - if (info == null) { - writer.append(" Inactive").append("\n"); - continue; - } - - writer.append( - "Thread " - + getTaskName(info.getThreadId(), - info.getThreadName()) + ": Stack").append( - "\n"); - for (StackTraceElement frame : info.getStackTrace()) { - writer.append(" " + frame.toString()).append("\n"); - } - } - - return writer.toString(); - } - - @Override - public 
String getValue() { - try { - return dumpThread(); - } catch (Exception e) { - return "Failed to get jstack thread info"; - } - } - -} +package com.alibaba.jstorm.metric; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; + +import com.codahale.metrics.Gauge; + +public class MetricJstack implements Gauge { + + private String getTaskName(long id, String name) { + if (name == null) { + return Long.toString(id); + } + return id + " (" + name + ")"; + } + + public String dumpThread() throws Exception { + StringBuilder writer = new StringBuilder(); + + ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); + + boolean contention = threadMXBean.isThreadContentionMonitoringEnabled(); + + long[] threadIds = threadMXBean.getAllThreadIds(); + writer.append(threadIds.length + " active threads:"); + for (long tid : threadIds) { + writer.append(tid).append(" "); + } + writer.append("\n"); + + long[] deadLockTids = threadMXBean.findDeadlockedThreads(); + if (deadLockTids != null) { + writer.append(threadIds.length + " deadlocked threads:"); + for (long tid : deadLockTids) { + writer.append(tid).append(" "); + } + writer.append("\n"); + } + + long[] deadLockMonitorTids = threadMXBean + .findMonitorDeadlockedThreads(); + if (deadLockMonitorTids != null) { + writer.append(threadIds.length + " deadlocked monitor threads:"); + for (long tid : deadLockMonitorTids) { + writer.append(tid).append(" "); + } + writer.append("\n"); + } + + for (long tid : threadIds) { + ThreadInfo info = threadMXBean + .getThreadInfo(tid, Integer.MAX_VALUE); + if (info == null) { + writer.append(" Inactive").append("\n"); + continue; + } + writer.append( + "Thread " + + getTaskName(info.getThreadId(), + info.getThreadName()) + ":").append("\n"); + Thread.State state = info.getThreadState(); + writer.append(" State: " + state).append("\n"); + writer.append(" Blocked count: " + info.getBlockedCount()).append( + "\n"); 
+ writer.append(" Waited count: " + info.getWaitedCount()).append( + "\n"); + writer.append(" Cpu time:") + .append(threadMXBean.getThreadCpuTime(tid) / 1000000) + .append("ms").append("\n"); + writer.append(" User time:") + .append(threadMXBean.getThreadUserTime(tid) / 1000000) + .append("ms").append("\n"); + if (contention) { + writer.append(" Blocked time: " + info.getBlockedTime()) + .append("\n"); + writer.append(" Waited time: " + info.getWaitedTime()).append( + "\n"); + } + if (state == Thread.State.WAITING) { + writer.append(" Waiting on " + info.getLockName()) + .append("\n"); + } else if (state == Thread.State.BLOCKED) { + writer.append(" Blocked on " + info.getLockName()) + .append("\n"); + writer.append( + " Blocked by " + + getTaskName(info.getLockOwnerId(), + info.getLockOwnerName())).append("\n"); + } + + } + for (long tid : threadIds) { + ThreadInfo info = threadMXBean + .getThreadInfo(tid, Integer.MAX_VALUE); + if (info == null) { + writer.append(" Inactive").append("\n"); + continue; + } + + writer.append( + "Thread " + + getTaskName(info.getThreadId(), + info.getThreadName()) + ": Stack").append( + "\n"); + for (StackTraceElement frame : info.getStackTrace()) { + writer.append(" " + frame.toString()).append("\n"); + } + } + + return writer.toString(); + } + + @Override + public String getValue() { + try { + return dumpThread(); + } catch (Exception e) { + return "Failed to get jstack thread info"; + } + } + +} diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/Metrics.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/Metrics.java new file mode 100644 index 000000000..3e50c0ad4 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/Metrics.java @@ -0,0 +1,330 @@ +package com.alibaba.jstorm.metric; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import 
org.apache.log4j.Logger; + +import backtype.storm.utils.DisruptorQueue; + +import com.alibaba.jstorm.client.metric.MetricCallback; +//import com.alibaba.jstorm.daemon.worker.Worker; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.MetricSet; +import com.codahale.metrics.Snapshot; +import com.codahale.metrics.Timer; +import com.codahale.metrics.jvm.GarbageCollectorMetricSet; +import com.codahale.metrics.jvm.MemoryUsageGaugeSet; +import com.codahale.metrics.jvm.ThreadStatesGaugeSet; + +public class Metrics { + + public enum MetricType { + TASK, WORKER + } + + private static final Logger LOG = Logger.getLogger(Metrics.class); + //private static final Logger DEFAULT_LOG = Logger.getLogger(Worker.class); + + private static final MetricRegistry metrics = new MetricRegistry(); + + private static final MetricRegistry jstack = new MetricRegistry(); + + private static Map> taskMetricMap = new ConcurrentHashMap>(); + private static List workerMetricList = new ArrayList(); + private static UserDefMetric userDefMetric = new UserDefMetric(); + + static { + try { + registerAll("jvm-thread-state", new ThreadStatesGaugeSet()); + registerAll("jvm-mem", new MemoryUsageGaugeSet()); + registerAll("jvm-gc", new GarbageCollectorMetricSet()); + + jstack.register("jstack", new MetricJstack()); + } catch (Exception e) { + LOG.warn("Failed to regist jvm metrics"); + } + } + + public static MetricRegistry getMetrics() { + return metrics; + } + + public static MetricRegistry getJstack() { + return jstack; + } + + public static UserDefMetric getUserDefMetric() { + return userDefMetric; + } + + public static boolean unregister(String name) { + LOG.info("Unregister metric " + name); + return metrics.remove(name); + } + + public static boolean unregister(String prefix, String name, String 
id, Metrics.MetricType type) { + String MetricName; + if (prefix == null) + MetricName = name; + else + MetricName = prefix + "-" + name; + boolean ret = unregister(MetricName); + + if (ret == true) { + List metricList = null; + if (type == MetricType.WORKER) { + metricList = workerMetricList; + } else { + metricList = taskMetricMap.get(id); + } + + boolean found = false; + if (metricList != null) { + for (MetricInfo metric : metricList) { + if(metric.getName().equals(name)) { + if (prefix != null) { + if (metric.getPrefix().equals(prefix)) { + metricList.remove(metric); + found = true; + break; + } + } else { + if (metric.getPrefix() == null) { + metricList.remove(metric); + found = true; + break; + } + } + } + } + } + if (found != true) + LOG.warn("Name " + name + " is not found when unregister from metricList"); + } + return ret; + } + + public static boolean unregisterUserDefine(String name) { + boolean ret = unregister(name); + + if (ret == true) { + userDefMetric.remove(name); + userDefMetric.unregisterCallback(name); + } + + return ret; + } + + public static T register(String name, T metric) + throws IllegalArgumentException { + LOG.info("Register Metric " + name); + return metrics.register(name, metric); + } + + public static T register(String prefix, String name, T metric, + String idStr, MetricType metricType) throws IllegalArgumentException { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + T ret = register(metricName, metric); + updateMetric(prefix, name, metricType, ret, idStr); + return ret; + } + + public static void registerUserDefine(String name, Object metric, MetricCallback callback) { + if(metric instanceof Gauge) { + userDefMetric.addToGauge(name, (Gauge)metric); + } else if (metric instanceof Timer) { + userDefMetric.addToTimer(name, (Timer)metric); + } else if (metric instanceof Counter) { + userDefMetric.addToCounter(name, (Counter)metric); + } else if (metric instanceof Meter) { + 
userDefMetric.addToMeter(name, (Meter)metric); + } else if (metric instanceof Histogram) { + userDefMetric.addToHistogram(name, (Histogram)metric); + } else if (metric instanceof JStormTimer) { + userDefMetric.addToTimer(name, ((JStormTimer)metric).getInstance()); + } else if (metric instanceof JStormHistogram) { + userDefMetric.addToHistogram(name, ((JStormHistogram)metric).getInstance()); + } else { + LOG.warn("registerUserDefine, unknow Metric type, name=" + name); + } + + if (callback != null) { + userDefMetric.registerCallback(callback, name); + } + } + + + // copy from MetricRegistry + public static void registerAll(String prefix, MetricSet metrics) + throws IllegalArgumentException { + for (Map.Entry entry : metrics.getMetrics().entrySet()) { + if (entry.getValue() instanceof MetricSet) { + registerAll(MetricRegistry.name(prefix, entry.getKey()), + (MetricSet) entry.getValue()); + } else { + register(MetricRegistry.name(prefix, entry.getKey()), + entry.getValue()); + } + } + } + + private static void updateMetric(String prefix, String name, MetricType metricType, + Metric metric, String idStr) { + Map> metricMap; + List metricList; + if (metricType == MetricType.TASK) { + metricMap = taskMetricMap; + metricList = metricMap.get(idStr); + if (null == metricList) { + metricList = new ArrayList(); + metricMap.put(idStr, metricList); + } + } else if (metricType == MetricType.WORKER) { + metricList = workerMetricList; + } else { + LOG.error("updateMetricMap: unknown metric type"); + return; + } + + MetricInfo metricInfo = new MetricInfo(prefix, name, metric); + metricList.add(metricInfo); + + } + + public static Map> getTaskMetricMap() { + return taskMetricMap; + } + + public static List getWorkerMetricList() { + return workerMetricList; + } + + public static class QueueGauge implements Gauge { + DisruptorQueue queue; + String name; + + public QueueGauge(String name, DisruptorQueue queue) { + this.queue = queue; + this.name = name; + } + + @Override + public Float 
getValue() { + Float ret = queue.pctFull(); + if (ret > 0.8) { + //DEFAULT_LOG.info("Queue " + name + "is full " + ret); + } + + return ret; + } + + } + + public static Gauge registerQueue(String name, DisruptorQueue queue) { + LOG.info("Register Metric " + name); + return metrics.register(name, new QueueGauge(name, queue)); + } + + public static Gauge registerQueue(String prefix, String name, DisruptorQueue queue, + String idStr, MetricType metricType) { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + Gauge ret = registerQueue(metricName, queue); + updateMetric(prefix, name, metricType, ret, idStr); + return ret; + } + + public static Gauge registerGauge(String name, Gauge gauge) { + LOG.info("Register Metric " + name); + return metrics.register(name, gauge); + } + + public static Counter registerCounter(String name) { + LOG.info("Register Metric " + name); + return metrics.counter(name); + } + + public static Counter registerCounter(String prefix, String name, + String idStr, MetricType metricType) { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + Counter ret = registerCounter(metricName); + updateMetric(prefix, name, metricType, ret, idStr); + return ret; + } + + public static Meter registerMeter(String name) { + LOG.info("Register Metric " + name); + return metrics.meter(name); + } + + public static Meter registerMeter(String prefix, String name, + String idStr, MetricType metricType) { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + Meter ret = registerMeter(metricName); + updateMetric(prefix, name, metricType, ret, idStr); + return ret; + } + + public static JStormHistogram registerHistograms(String name) { + LOG.info("Register Metric " + name); + Histogram instance = metrics.histogram(name); + + return new JStormHistogram(name, instance); + } + + public static JStormHistogram 
registerHistograms(String prefix, String name, + String idStr, MetricType metricType) { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + JStormHistogram ret = registerHistograms(metricName); + updateMetric(prefix, name, metricType, ret.getInstance(), idStr); + return ret; + } + + public static JStormTimer registerTimer(String name) { + LOG.info("Register Metric " + name); + + Timer instance = metrics.timer(name); + return new JStormTimer(name, instance); + } + + public static JStormTimer registerTimer(String prefix, String name, + String idStr, MetricType metricType) { + String metricName; + if (prefix == null) + metricName = name; + else + metricName = prefix + "-" + name; + JStormTimer ret = registerTimer(metricName); + updateMetric(prefix, name, metricType, ret.getInstance(), idStr); + return ret; + } + +} diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetric.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetric.java new file mode 100644 index 000000000..5bc7c4d8e --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetric.java @@ -0,0 +1,106 @@ +package com.alibaba.jstorm.metric; + +import java.util.Map; +import java.util.HashMap; +import java.util.Map.Entry; +import java.io.Serializable; + +import com.codahale.metrics.Metric; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Sampling; +import com.codahale.metrics.Snapshot; +import com.codahale.metrics.Timer; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.alibaba.jstorm.client.metric.MetricCallback; +import com.alibaba.jstorm.metric.MetricInfo; + + +/** + * /storm-zk-root/Monitor/{topologyid}/UserDefMetrics/{workerid} data + */ +public class UserDefMetric implements Serializable { + + private static final long serialVersionUID = 4547327064057659279L; + + 
private Map> gaugeMap = new HashMap>(); + private Map counterMap = new HashMap(); + private Map histogramMap = new HashMap(); + private Map timerMap = new HashMap(); + private Map meterMap = new HashMap(); + private Map callbacks = new HashMap(); + + public UserDefMetric() { + } + + public Map> getGauge() { + return this.gaugeMap; + } + public void registerCallback(MetricCallback callback, String name) { + if (callbacks.containsKey(name) != true) { + callbacks.put(name, callback); + } + } + public void unregisterCallback(String name) { + callbacks.remove(name); + } + public Map getCallbacks() { + return callbacks; + } + public void addToGauge(String name, Gauge gauge) { + gaugeMap.put(name, gauge); + } + + public Map getCounter() { + return this.counterMap; + } + + public void addToCounter(String name, Counter counter) { + counterMap.put(name, counter); + } + + public Map getHistogram() { + return this.histogramMap; + } + + public void addToHistogram(String name, Histogram histogram) { + histogramMap.put(name, histogram); + } + + + public Map getTimer() { + return this.timerMap; + } + + public void addToTimer(String name, Timer timer) { + timerMap.put(name, timer); + } + + public Map getMeter() { + return this.meterMap; + } + + public void addToMeter(String name, Meter meter) { + meterMap.put(name, meter); + } + + public void remove(String name) { + if (gaugeMap.containsKey(name)) { + gaugeMap.remove(name); + } else if (counterMap.containsKey(name)) { + counterMap.remove(name); + } else if (histogramMap.containsKey(name)) { + histogramMap.remove(name); + } else if (timerMap.containsKey(name)) { + timerMap.remove(name); + } else if (meterMap.containsKey(name)) { + meterMap.remove(name); + } + + if (callbacks.containsKey(name)) { + callbacks.remove(name); + } + } + +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetricData.java 
b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetricData.java new file mode 100644 index 000000000..cab04e0d7 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetricData.java @@ -0,0 +1,126 @@ +package com.alibaba.jstorm.metric; + +import java.util.Map; +import java.util.HashMap; +import java.util.Map.Entry; +import java.io.Serializable; + +import com.codahale.metrics.Metric; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Sampling; +import com.codahale.metrics.Snapshot; +import com.codahale.metrics.Timer; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.alibaba.jstorm.client.metric.MetricCallback; +import com.alibaba.jstorm.metric.metrdata.*; + + +/** + * /storm-zk-root/Monitor/{topologyid}/user/{workerid} data + */ +public class UserDefMetricData implements Serializable { + + private static final long serialVersionUID = 954727168057659270L; + + private Map gaugeDataMap = new HashMap(); + private Map counterDataMap = new HashMap(); + private Map timerDataMap = new HashMap(); + private Map meterDataMap = new HashMap(); + private Map histogramDataMap = new HashMap(); + + public UserDefMetricData() { + } + + public Map getGaugeDataMap() { + return gaugeDataMap; + } + + public Map getCounterDataMap() { + return counterDataMap; + } + + public Map getTimerDataMap() { + return timerDataMap; + } + + public Map getMeterDataMap() { + return meterDataMap; + } + + public Map getHistogramDataMap() { + return histogramDataMap; + } + + public void updateFromGauge(Map> gaugeMap) { + for(Entry> entry : gaugeMap.entrySet()) { + GaugeData gaugeData = new GaugeData(); + gaugeData.setValue((Double)(entry.getValue().getValue())); + gaugeDataMap.put(entry.getKey(), gaugeData); + } + } + + public void updateFromCounter(Map counterMap) { + for(Entry entry : counterMap.entrySet()) { + CounterData counterData = new CounterData(); 
+ counterData.setValue(entry.getValue().getCount()); + counterDataMap.put(entry.getKey(), counterData); + } + } + + public void updateFromMeterData(Map meterMap) { + for(Entry entry : meterMap.entrySet()) { + Meter meter = entry.getValue(); + MeterData meterData = new MeterData(); + meterData.setCount(meter.getCount()); + meterData.setMeanRate(meter.getMeanRate()); + meterData.setOneMinuteRate(meter.getOneMinuteRate()); + meterData.setFiveMinuteRate(meter.getFiveMinuteRate()); + meterData.setFifteenMinuteRate(meter.getFifteenMinuteRate()); + meterDataMap.put(entry.getKey(), meterData); + } + } + + public void updateFromHistogramData(Map histogramMap) { + for(Entry entry : histogramMap.entrySet()) { + Histogram histogram = entry.getValue(); + HistogramData histogramData = new HistogramData(); + histogramData.setCount(histogram.getCount()); + histogramData.setMax(histogram.getSnapshot().getMax()); + histogramData.setMin(histogram.getSnapshot().getMin()); + histogramData.setMean(histogram.getSnapshot().getMean()); + histogramData.setMedian(histogram.getSnapshot().getMedian()); + histogramData.setStdDev(histogram.getSnapshot().getStdDev()); + histogramData.setPercent75th(histogram.getSnapshot().get75thPercentile()); + histogramData.setPercent95th(histogram.getSnapshot().get95thPercentile()); + histogramData.setPercent98th(histogram.getSnapshot().get98thPercentile()); + histogramData.setPercent99th(histogram.getSnapshot().get99thPercentile()); + histogramData.setPercent999th(histogram.getSnapshot().get999thPercentile()); + histogramDataMap.put(entry.getKey(), histogramData); + } + } + + public void updateFromTimerData(Map timerMap) { + for(Entry entry : timerMap.entrySet()) { + Timer timer = entry.getValue(); + TimerData timerData = new TimerData(); + timerData.setCount(timer.getCount()); + timerData.setMax(timer.getSnapshot().getMax()); + timerData.setMin(timer.getSnapshot().getMin()); + timerData.setMean(timer.getSnapshot().getMean()); + 
timerData.setMedian(timer.getSnapshot().getMedian()); + timerData.setStdDev(timer.getSnapshot().getStdDev()); + timerData.setPercent75th(timer.getSnapshot().get75thPercentile()); + timerData.setPercent95th(timer.getSnapshot().get95thPercentile()); + timerData.setPercent98th(timer.getSnapshot().get98thPercentile()); + timerData.setPercent99th(timer.getSnapshot().get99thPercentile()); + timerData.setPercent999th(timer.getSnapshot().get999thPercentile()); + timerData.setMeanRate(timer.getMeanRate()); + timerData.setOneMinuteRate(timer.getOneMinuteRate()); + timerData.setFiveMinuteRate(timer.getFiveMinuteRate()); + timerData.setFifteenMinuteRate(timer.getFifteenMinuteRate()); + timerDataMap.put(entry.getKey(), timerData); + } + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/CounterData.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/CounterData.java new file mode 100644 index 000000000..727cb9da3 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/CounterData.java @@ -0,0 +1,23 @@ +package com.alibaba.jstorm.metric.metrdata; + +import java.io.Serializable; + + +public class CounterData implements Serializable { + + private static final long serialVersionUID = 954627168057659219L; + + private long value; + + public CounterData () { + value = 0l; + } + + public long getValue() { + return value; + } + + public void setValue(long value) { + this.value = value; + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/GaugeData.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/GaugeData.java new file mode 100644 index 000000000..9f64bf3db --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/GaugeData.java @@ -0,0 +1,23 @@ +package com.alibaba.jstorm.metric.metrdata; + +import java.io.Serializable; + + 
+public class GaugeData implements Serializable { + + private static final long serialVersionUID = 954627168057659279L; + + private double value; + + public GaugeData () { + value = 0.0; + } + + public double getValue() { + return value; + } + + public void setValue(double value) { + this.value = value; + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/HistogramData.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/HistogramData.java new file mode 100644 index 000000000..ec3985148 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/HistogramData.java @@ -0,0 +1,112 @@ +package com.alibaba.jstorm.metric.metrdata; + +import java.io.Serializable; + + +public class HistogramData implements Serializable { + + private static final long serialVersionUID = 954627168057639289L; + + private long count; + private long min; + private long max; + private double mean; + private double stdDev; + private double median; + private double percent75th; + private double percent95th; + private double percent98th; + private double percent99th; + private double percent999th; + + public HistogramData() { + } + + public long getCount() { + return count; + } + + public void setCount(long count) { + this.count = count; + } + + public long getMin() { + return min; + } + + public void setMin(long min) { + this.min = min; + } + + public long getMax() { + return max; + } + + public void setMax(long max) { + this.max = max; + } + + public double getMean() { + return mean; + } + + public void setMean(double mean) { + this.mean = mean; + } + + public double getStdDev() { + return stdDev; + } + + public void setStdDev(double stdDev) { + this.stdDev = stdDev; + } + + public double getMedian() { + return median; + } + + public void setMedian(double median) { + this.median = median; + } + + public double getPercent75th() { + return percent75th; + } + + public void 
setPercent75th(double percent75th) { + this.percent75th = percent75th; + } + + public double getPercent95th() { + return percent95th; + } + + public void setPercent95th(double percent95th) { + this.percent95th = percent95th; + } + + public double getPercent98th() { + return percent98th; + } + + public void setPercent98th(double percent98th) { + this.percent98th = percent98th; + } + + public double getPercent99th() { + return percent99th; + } + + public void setPercent99th(double percent99th) { + this.percent99th = percent99th; + } + + public double getPercent999th() { + return percent999th; + } + + public void setPercent999th(double percent999th) { + this.percent999th = percent999th; + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/MeterData.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/MeterData.java new file mode 100644 index 000000000..865a3c418 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/MeterData.java @@ -0,0 +1,58 @@ +package com.alibaba.jstorm.metric.metrdata; + +import java.io.Serializable; + + +public class MeterData implements Serializable { + + private static final long serialVersionUID = 954627168057659269L; + + private long count; + private double meanRate; + private double oneMinuteRate; + private double fiveMinuteRate; + private double fifteenMinuteRate; + + public MeterData() { + } + + public void setCount(long count) { + this.count = count; + } + + public long getCount() { + return this.count; + } + + public void setMeanRate(double meanRate) { + this.meanRate = meanRate; + } + + public double getMeanRate() { + return this.meanRate; + } + + public void setOneMinuteRate(double oneMinuteRate) { + this.oneMinuteRate = oneMinuteRate; + } + + public double getOneMinuteRate() { + return this.oneMinuteRate; + } + + public void setFiveMinuteRate(double fiveMinuteRate) { + this.fiveMinuteRate = 
fiveMinuteRate; + } + + public double getFiveMinuteRate() { + return this.fiveMinuteRate; + } + + public void setFifteenMinuteRate(double fifteenMinuteRate) { + this.fifteenMinuteRate = fifteenMinuteRate; + } + + public double getFifteenMinuteRate() { + return this.fifteenMinuteRate; + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/TimerData.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/TimerData.java new file mode 100644 index 000000000..5aaab01b4 --- /dev/null +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/metrdata/TimerData.java @@ -0,0 +1,149 @@ +package com.alibaba.jstorm.metric.metrdata; + +import java.io.Serializable; + + +public class TimerData implements Serializable { + + private static final long serialVersionUID = 954627168057659239L; + + private long count; + private double meanRate; + private double oneMinuteRate; + private double fiveMinuteRate; + private double fifteenMinuteRate; + private long min; + private long max; + private double mean; + private double stdDev; + private double median; + private double percent75th; + private double percent95th; + private double percent98th; + private double percent99th; + private double percent999th; + + public TimerData() { + + } + + public long getCount() { + return count; + } + + public void setCount(long count) { + this.count = count; + } + + public long getMin() { + return min; + } + + public void setMin(long min) { + this.min = min; + } + + public long getMax() { + return max; + } + + public void setMax(long max) { + this.max = max; + } + + public double getMean() { + return mean; + } + + public void setMean(double mean) { + this.mean = mean; + } + + public double getStdDev() { + return stdDev; + } + + public void setStdDev(double stdDev) { + this.stdDev = stdDev; + } + + public double getMedian() { + return median; + } + + public void setMedian(double median) { + this.median = 
median; + } + + public double getPercent75th() { + return percent75th; + } + + public void setPercent75th(double percent75th) { + this.percent75th = percent75th; + } + + public double getPercent95th() { + return percent95th; + } + + public void setPercent95th(double percent95th) { + this.percent95th = percent95th; + } + + public double getPercent98th() { + return percent98th; + } + + public void setPercent98th(double percent98th) { + this.percent98th = percent98th; + } + + public double getPercent99th() { + return percent99th; + } + + public void setPercent99th(double percent99th) { + this.percent99th = percent99th; + } + + public double getPercent999th() { + return percent999th; + } + + public void setPercent999th(double percent999th) { + this.percent999th = percent999th; + } + + public void setMeanRate(double meanRate) { + this.meanRate = meanRate; + } + + public double getMeanRate() { + return this.meanRate; + } + + public void setOneMinuteRate(double oneMinuteRate) { + this.oneMinuteRate = oneMinuteRate; + } + + public double getOneMinuteRate() { + return this.oneMinuteRate; + } + + public void setFiveMinuteRate(double fiveMinuteRate) { + this.fiveMinuteRate = fiveMinuteRate; + } + + public double getFiveMinuteRate() { + return this.fiveMinuteRate; + } + + public void setFifteenMinuteRate(double fifteenMinuteRate) { + this.fifteenMinuteRate = fifteenMinuteRate; + } + + public double getFifteenMinuteRate() { + return this.fifteenMinuteRate; + } +} \ No newline at end of file diff --git a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java index a4172670d..da72be493 100644 --- a/jstorm-client-extension/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java +++ b/jstorm-client-extension/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java @@ -258,6 +258,18 @@ public static void kill(Integer pid) { ensure_process_killed(pid); } + + public static void 
kill_signal(Integer pid, String signal) { + String cmd = "kill " + signal + " " + pid; + try { + exec_command(cmd); + LOG.info(cmd); + } catch (ExecuteException e) { + LOG.info("Error when run " + cmd + ". Process has been killed. "); + } catch (Exception e) { + LOG.info("Error when run " + cmd + ". Exception ", e); + } + } public static java.lang.Process launch_process(String command, Map environment) throws IOException { @@ -653,6 +665,50 @@ public static String formatSimpleDouble(Double value) { } } + + public static double formatDoubleDecPoint2(Double value) { + try { + java.text.DecimalFormat form = new java.text.DecimalFormat( + "##.00"); + String s = form.format(value); + return Double.valueOf(s); + } catch (Exception e) { + return 0.0; + } + } + + public static double formatDoubleDecPoint4(Double value) { + try { + java.text.DecimalFormat form = new java.text.DecimalFormat( + "###.0000"); + String s = form.format(value); + return Double.valueOf(s); + } catch (Exception e) { + return 0.0; + } + } + + public static Double convertToDouble(Object value) { + Double ret; + + if (value == null) { + ret = null; + } else { + if (value instanceof Integer) { + ret = ((Integer) value).doubleValue(); + } else if (value instanceof Long) { + ret = ((Long) value).doubleValue(); + } else if (value instanceof Float) { + ret = ((Float) value).doubleValue(); + } else if (value instanceof Double) { + ret = (Double) value; + } else { + ret = null; + } + } + + return ret; + } public static String formatValue(Object value) { if (value == null) { @@ -728,6 +784,10 @@ public static Long getPhysicMemorySize() { return ret; } + + public static String genLogName(String topology, Integer port) { + return topology + "-worker-" + port + ".log"; + } public static String getLogFileName() { Enumeration enumAppender = Logger.getRootLogger() diff --git a/jstorm-client/pom.xml b/jstorm-client/pom.xml index c07152e6c..852932f4c 100644 --- a/jstorm-client/pom.xml +++ b/jstorm-client/pom.xml @@ 
-5,18 +5,18 @@ com.alibaba.jstorm jstorm-all - 0.9.5.1 + 0.9.6 .. - + --> 4.0.0 com.alibaba.jstorm jstorm-client - 0.9.5.1 + 0.9.6 jar ${project.artifactId}-${project.version} @@ -76,7 +76,7 @@ org.apache.httpcomponents httpclient - 4.1.1 + 4.3.2 storm diff --git a/jstorm-client/src/main/java/backtype/storm/command/list.java b/jstorm-client/src/main/java/backtype/storm/command/list.java new file mode 100644 index 000000000..3176be8d9 --- /dev/null +++ b/jstorm-client/src/main/java/backtype/storm/command/list.java @@ -0,0 +1,58 @@ +package backtype.storm.command; + +import java.util.Map; + +import org.apache.commons.lang.StringUtils; + +import backtype.storm.generated.ClusterSummary; +import backtype.storm.generated.TopologyInfo; +import backtype.storm.utils.NimbusClient; +import backtype.storm.utils.Utils; + +/** + * Activate topology + * + * @author longda + * + */ +public class list { + + + /** + * @param args + */ + public static void main(String[] args) { + + NimbusClient client = null; + try { + + Map conf = Utils.readStormConfig(); + client = NimbusClient.getConfiguredClient(conf); + + if (args.length > 0 && StringUtils.isBlank(args[0]) == false) { + String topologyId = args[0]; + TopologyInfo info = client.getClient().getTopologyInfo(topologyId); + + System.out.println("Successfully get topology info \n" + + info.toString()); + }else { + ClusterSummary clusterSummary = client.getClient().getClusterInfo(); + + + System.out.println("Successfully get cluster info \n" + + clusterSummary.toString()); + } + + + } catch (Exception e) { + System.out.println(e.getMessage()); + e.printStackTrace(); + throw new RuntimeException(e); + } finally { + if (client != null) { + client.close(); + } + } + } + +} diff --git a/jstorm-client/src/main/java/backtype/storm/command/metrics_monitor.java b/jstorm-client/src/main/java/backtype/storm/command/metrics_monitor.java new file mode 100644 index 000000000..bb339d462 --- /dev/null +++ 
b/jstorm-client/src/main/java/backtype/storm/command/metrics_monitor.java @@ -0,0 +1,56 @@ +package backtype.storm.command; + +import java.util.Map; +import java.security.InvalidParameterException; + +import backtype.storm.generated.MonitorOptions; +import backtype.storm.utils.NimbusClient; +import backtype.storm.utils.Utils; + +/** + * Monitor topology + * + * @author Basti + * + */ +public class metrics_monitor { + + /** + * @param args + */ + public static void main(String[] args) { + // TODO Auto-generated method stub + if (args == null || args.length <= 1) { + throw new InvalidParameterException("Should input topology name and enable flag"); + } + + String topologyName = args[0]; + + NimbusClient client = null; + try { + + Map conf = Utils.readStormConfig(); + client = NimbusClient.getConfiguredClient(conf); + + boolean isEnable = Boolean.valueOf(args[1]).booleanValue(); + + MonitorOptions options = new MonitorOptions(); + options.set_isEnable(isEnable); + + client.getClient().metricMonitor(topologyName, options); + + String str = (isEnable) ? 
/**
 * Autogenerated by Thrift Compiler (0.7.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 */
package backtype.storm.generated;

import org.apache.commons.lang.builder.HashCodeBuilder;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Thrift struct carrying a single OPTIONAL boolean flag. It is the argument of
// Nimbus.metricMonitor and switches a topology's metrics monitoring on or off.
// Being optional, 'isEnable' is only serialized when is_set_isEnable() is true.
public class MonitorOptions implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable {
  private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("MonitorOptions");

  private static final org.apache.thrift7.protocol.TField IS_ENABLE_FIELD_DESC = new org.apache.thrift7.protocol.TField("isEnable", org.apache.thrift7.protocol.TType.BOOL, (short)1);

  private boolean isEnable; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift7.TFieldIdEnum {
    IS_ENABLE((short)1, "isEnable");

    private static final Map byName = new HashMap();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // IS_ENABLE
          return IS_ENABLE;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  private static final int __ISENABLE_ISSET_ID = 0;
  // Bit i of this vector records whether the primitive field with isset id i has
  // been explicitly assigned (bit 0 = isEnable). Rebuilt in readObject() because
  // Java deserialization bypasses the default constructor.
  private BitSet __isset_bit_vector = new BitSet(1);

  public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.IS_ENABLE, new org.apache.thrift7.meta_data.FieldMetaData("isEnable", org.apache.thrift7.TFieldRequirementType.OPTIONAL,
        new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.BOOL)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(MonitorOptions.class, metaDataMap);
  }

  public MonitorOptions() {
  }

  /**
   * Performs a deep copy on other.
   */
  public MonitorOptions(MonitorOptions other) {
    __isset_bit_vector.clear();
    __isset_bit_vector.or(other.__isset_bit_vector);
    this.isEnable = other.isEnable;
  }

  public MonitorOptions deepCopy() {
    return new MonitorOptions(this);
  }

  @Override
  public void clear() {
    set_isEnable_isSet(false);
    this.isEnable = false;
  }

  public boolean is_isEnable() {
    return this.isEnable;
  }

  public void set_isEnable(boolean isEnable) {
    this.isEnable = isEnable;
    set_isEnable_isSet(true);
  }

  public void unset_isEnable() {
    __isset_bit_vector.clear(__ISENABLE_ISSET_ID);
  }

  /** Returns true if field isEnable is set (has been assigned a value) and false otherwise */
  public boolean is_set_isEnable() {
    return __isset_bit_vector.get(__ISENABLE_ISSET_ID);
  }

  public void set_isEnable_isSet(boolean value) {
    __isset_bit_vector.set(__ISENABLE_ISSET_ID, value);
  }

  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case IS_ENABLE:
      if (value == null) {
        unset_isEnable();
      } else {
        set_isEnable((Boolean)value);
      }
      break;

    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    case IS_ENABLE:
      return Boolean.valueOf(is_isEnable());

    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    case IS_ENABLE:
      return is_set_isEnable();
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof MonitorOptions)
      return this.equals((MonitorOptions)that);
    return false;
  }

  // Two instances are equal when both have (or both lack) an assigned flag,
  // and the assigned values match.
  public boolean equals(MonitorOptions that) {
    if (that == null)
      return false;

    boolean this_present_isEnable = true && this.is_set_isEnable();
    boolean that_present_isEnable = true && that.is_set_isEnable();
    if (this_present_isEnable || that_present_isEnable) {
      if (!(this_present_isEnable && that_present_isEnable))
        return false;
      if (this.isEnable != that.isEnable)
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    HashCodeBuilder builder = new HashCodeBuilder();

    boolean present_isEnable = true && (is_set_isEnable());
    builder.append(present_isEnable);
    if (present_isEnable)
      builder.append(isEnable);

    return builder.toHashCode();
  }

  // Orders first by whether isEnable is set, then by its value.
  public int compareTo(MonitorOptions other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;
    MonitorOptions typedOther = (MonitorOptions)other;

    lastComparison = Boolean.valueOf(is_set_isEnable()).compareTo(typedOther.is_set_isEnable());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_isEnable()) {
      lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.isEnable, typedOther.isEnable);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  // Deserializes this struct from the given protocol; unknown fields are skipped.
  public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException {
    org.apache.thrift7.protocol.TField field;
    iprot.readStructBegin();
    while (true)
    {
      field = iprot.readFieldBegin();
      if (field.type == org.apache.thrift7.protocol.TType.STOP) {
        break;
      }
      switch (field.id) {
        case 1: // IS_ENABLE
          if (field.type == org.apache.thrift7.protocol.TType.BOOL) {
            this.isEnable = iprot.readBool();
            set_isEnable_isSet(true);
          } else {
            org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type);
          }
          break;
        default:
          org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    validate();
  }

  // Serializes this struct; the optional flag is only written when assigned.
  public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException {
    validate();

    oprot.writeStructBegin(STRUCT_DESC);
    if (is_set_isEnable()) {
      oprot.writeFieldBegin(IS_ENABLE_FIELD_DESC);
      oprot.writeBool(this.isEnable);
      oprot.writeFieldEnd();
    }
    oprot.writeFieldStop();
    oprot.writeStructEnd();
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("MonitorOptions(");
    boolean first = true;

    if (is_set_isEnable()) {
      sb.append("isEnable:");
      sb.append(this.isEnable);
      first = false;
    }
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift7.TException {
    // check for required fields
  }

  // Java serialization delegates to the Thrift compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift7.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bit_vector = new BitSet(1);
      read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift7.TException te) {
      throw new java.io.IOException(te);
    }
  }

}
    /**
     * Synchronous client call: enables/disables metrics monitoring of the
     * named topology on Nimbus. Blocks until the server replies.
     *
     * @param name    topology name
     * @param options carries the isEnable flag
     * @throws NotAliveException if the topology is not running
     */
    public void metricMonitor(String name, MonitorOptions options) throws NotAliveException, org.apache.thrift7.TException
    {
      send_metricMonitor(name, options);
      recv_metricMonitor();
    }

    /** Serializes and sends the metricMonitor request without waiting for the reply. */
    public void send_metricMonitor(String name, MonitorOptions options) throws org.apache.thrift7.TException
    {
      metricMonitor_args args = new metricMonitor_args();
      args.set_name(name);
      args.set_options(options);
      sendBase("metricMonitor", args);
    }

    /**
     * Reads the metricMonitor reply; rethrows the NotAliveException if the
     * server populated one. The call has no return value.
     */
    public void recv_metricMonitor() throws NotAliveException, org.apache.thrift7.TException
    {
      metricMonitor_result result = new metricMonitor_result();
      receiveBase(result, "metricMonitor");
      if (result.e != null) {
        throw result.e;
      }
      return;
    }
    /**
     * Asynchronous variant of metricMonitor; completion (or failure) is
     * delivered to resultHandler.
     */
    public void metricMonitor(String name, MonitorOptions options, org.apache.thrift7.async.AsyncMethodCallback resultHandler) throws org.apache.thrift7.TException {
      checkReady();
      metricMonitor_call method_call = new metricMonitor_call(name, options, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }

    /** Encapsulates one in-flight async metricMonitor request (args + completion state). */
    public static class metricMonitor_call extends org.apache.thrift7.async.TAsyncMethodCall {
      private String name;
      private MonitorOptions options;
      public metricMonitor_call(String name, MonitorOptions options, org.apache.thrift7.async.AsyncMethodCallback resultHandler, org.apache.thrift7.async.TAsyncClient client, org.apache.thrift7.protocol.TProtocolFactory protocolFactory, org.apache.thrift7.transport.TNonblockingTransport transport) throws org.apache.thrift7.TException {
        // 'false' = this is a normal call, not a oneway message.
        super(client, protocolFactory, transport, resultHandler, false);
        this.name = name;
        this.options = options;
      }

      /** Writes the metricMonitor request frame onto the protocol. */
      public void write_args(org.apache.thrift7.protocol.TProtocol prot) throws org.apache.thrift7.TException {
        prot.writeMessageBegin(new org.apache.thrift7.protocol.TMessage("metricMonitor", org.apache.thrift7.protocol.TMessageType.CALL, 0));
        metricMonitor_args args = new metricMonitor_args();
        args.set_name(name);
        args.set_options(options);
        args.write(prot);
        prot.writeMessageEnd();
      }

      /**
       * Decodes the buffered response by replaying it through a synchronous
       * client; must only be called once the response has been fully read.
       */
      public void getResult() throws NotAliveException, org.apache.thrift7.TException {
        if (getState() != org.apache.thrift7.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift7.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift7.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift7.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        (new Client(prot)).recv_metricMonitor();
      }
    }
    /** Encapsulates one in-flight async getTopologyMetric request. */
    public static class getTopologyMetric_call extends org.apache.thrift7.async.TAsyncMethodCall {
      private String id;
      public getTopologyMetric_call(String id, org.apache.thrift7.async.AsyncMethodCallback resultHandler, org.apache.thrift7.async.TAsyncClient client, org.apache.thrift7.protocol.TProtocolFactory protocolFactory, org.apache.thrift7.transport.TNonblockingTransport transport) throws org.apache.thrift7.TException {
        // 'false' = this is a normal call, not a oneway message.
        super(client, protocolFactory, transport, resultHandler, false);
        this.id = id;
      }

      /** Writes the getTopologyMetric request frame onto the protocol. */
      public void write_args(org.apache.thrift7.protocol.TProtocol prot) throws org.apache.thrift7.TException {
        prot.writeMessageBegin(new org.apache.thrift7.protocol.TMessage("getTopologyMetric", org.apache.thrift7.protocol.TMessageType.CALL, 0));
        getTopologyMetric_args args = new getTopologyMetric_args();
        args.set_id(id);
        args.write(prot);
        prot.writeMessageEnd();
      }

      /**
       * Decodes the buffered response by replaying it through a synchronous
       * client; must only be called once the response has been fully read.
       *
       * @return the topology's metric snapshot
       * @throws NotAliveException if the topology is not running
       */
      public TopologyMetricInfo getResult() throws NotAliveException, org.apache.thrift7.TException {
        if (getState() != org.apache.thrift7.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift7.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift7.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift7.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getTopologyMetric();
      }
    }
    /**
     * Server-side dispatch for getTopologyMetric: unpacks the args, invokes
     * the Iface handler, and packs either the result or a NotAliveException
     * into the reply struct.
     */
    private static class getTopologyMetric extends org.apache.thrift7.ProcessFunction {
      public getTopologyMetric() {
        super("getTopologyMetric");
      }

      protected getTopologyMetric_args getEmptyArgsInstance() {
        return new getTopologyMetric_args();
      }

      protected getTopologyMetric_result getResult(I iface, getTopologyMetric_args args) throws org.apache.thrift7.TException {
        getTopologyMetric_result result = new getTopologyMetric_result();
        try {
          result.success = iface.getTopologyMetric(args.id);
        } catch (NotAliveException e) {
          // Declared exceptions travel back to the client inside the result struct.
          result.e = e;
        }
        return result;
      }
    }
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - LIB_NAME((short)1, "libName"); + NAME((short)1, "name"), + OPTIONS((short)2, "options"); private static final Map byName = new HashMap(); @@ -7259,8 +7429,10 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // LIB_NAME - return LIB_NAME; + case 1: // NAME + return NAME; + case 2: // OPTIONS + return OPTIONS; default: return null; } @@ -7305,70 +7477,109 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.LIB_NAME, new org.apache.thrift7.meta_data.FieldMetaData("libName", org.apache.thrift7.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.NAME, new org.apache.thrift7.meta_data.FieldMetaData("name", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + tmpMap.put(_Fields.OPTIONS, new org.apache.thrift7.meta_data.FieldMetaData("options", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, MonitorOptions.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginLibUpload_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(metricMonitor_args.class, metaDataMap); } - public beginLibUpload_args() { + public metricMonitor_args() { } - public beginLibUpload_args( - String libName) + public metricMonitor_args( + String name, + MonitorOptions options) { this(); - this.libName = libName; + this.name = name; + this.options = options; } /** * Performs a deep copy on 
other. */ - public beginLibUpload_args(beginLibUpload_args other) { - if (other.is_set_libName()) { - this.libName = other.libName; + public metricMonitor_args(metricMonitor_args other) { + if (other.is_set_name()) { + this.name = other.name; + } + if (other.is_set_options()) { + this.options = new MonitorOptions(other.options); } } - public beginLibUpload_args deepCopy() { - return new beginLibUpload_args(this); + public metricMonitor_args deepCopy() { + return new metricMonitor_args(this); } @Override public void clear() { - this.libName = null; + this.name = null; + this.options = null; } - public String get_libName() { - return this.libName; + public String get_name() { + return this.name; } - public void set_libName(String libName) { - this.libName = libName; + public void set_name(String name) { + this.name = name; } - public void unset_libName() { - this.libName = null; + public void unset_name() { + this.name = null; } - /** Returns true if field libName is set (has been assigned a value) and false otherwise */ - public boolean is_set_libName() { - return this.libName != null; + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean is_set_name() { + return this.name != null; } - public void set_libName_isSet(boolean value) { + public void set_name_isSet(boolean value) { if (!value) { - this.libName = null; + this.name = null; + } + } + + public MonitorOptions get_options() { + return this.options; + } + + public void set_options(MonitorOptions options) { + this.options = options; + } + + public void unset_options() { + this.options = null; + } + + /** Returns true if field options is set (has been assigned a value) and false otherwise */ + public boolean is_set_options() { + return this.options != null; + } + + public void set_options_isSet(boolean value) { + if (!value) { + this.options = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case LIB_NAME: + case NAME: if 
(value == null) { - unset_libName(); + unset_name(); } else { - set_libName((String)value); + set_name((String)value); + } + break; + + case OPTIONS: + if (value == null) { + unset_options(); + } else { + set_options((MonitorOptions)value); } break; @@ -7377,8 +7588,11 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case LIB_NAME: - return get_libName(); + case NAME: + return get_name(); + + case OPTIONS: + return get_options(); } throw new IllegalStateException(); @@ -7391,8 +7605,10 @@ public boolean isSet(_Fields field) { } switch (field) { - case LIB_NAME: - return is_set_libName(); + case NAME: + return is_set_name(); + case OPTIONS: + return is_set_options(); } throw new IllegalStateException(); } @@ -7401,21 +7617,30 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof beginLibUpload_args) - return this.equals((beginLibUpload_args)that); + if (that instanceof metricMonitor_args) + return this.equals((metricMonitor_args)that); return false; } - public boolean equals(beginLibUpload_args that) { + public boolean equals(metricMonitor_args that) { if (that == null) return false; - boolean this_present_libName = true && this.is_set_libName(); - boolean that_present_libName = true && that.is_set_libName(); - if (this_present_libName || that_present_libName) { - if (!(this_present_libName && that_present_libName)) + boolean this_present_name = true && this.is_set_name(); + boolean that_present_name = true && that.is_set_name(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) return false; - if (!this.libName.equals(that.libName)) + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_options = true && this.is_set_options(); + boolean that_present_options = true && that.is_set_options(); + if (this_present_options || that_present_options) { 
+ if (!(this_present_options && that_present_options)) + return false; + if (!this.options.equals(that.options)) return false; } @@ -7426,28 +7651,43 @@ public boolean equals(beginLibUpload_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_libName = true && (is_set_libName()); - builder.append(present_libName); - if (present_libName) - builder.append(libName); + boolean present_name = true && (is_set_name()); + builder.append(present_name); + if (present_name) + builder.append(name); + + boolean present_options = true && (is_set_options()); + builder.append(present_options); + if (present_options) + builder.append(options); return builder.toHashCode(); } - public int compareTo(beginLibUpload_args other) { + public int compareTo(metricMonitor_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginLibUpload_args typedOther = (beginLibUpload_args)other; + metricMonitor_args typedOther = (metricMonitor_args)other; - lastComparison = Boolean.valueOf(is_set_libName()).compareTo(typedOther.is_set_libName()); + lastComparison = Boolean.valueOf(is_set_name()).compareTo(typedOther.is_set_name()); if (lastComparison != 0) { return lastComparison; } - if (is_set_libName()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.libName, typedOther.libName); + if (is_set_name()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.name, typedOther.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_options()).compareTo(typedOther.is_set_options()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_options()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.options, typedOther.options); if (lastComparison != 0) { return lastComparison; } @@ -7469,9 +7709,17 @@ public void 
read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 1: // LIB_NAME + case 1: // NAME if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.libName = iprot.readString(); + this.name = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // OPTIONS + if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { + this.options = new MonitorOptions(); + this.options.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -7489,9 +7737,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.libName != null) { - oprot.writeFieldBegin(LIB_NAME_FIELD_DESC); - oprot.writeString(this.libName); + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); + oprot.writeFieldEnd(); + } + if (this.options != null) { + oprot.writeFieldBegin(OPTIONS_FIELD_DESC); + this.options.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -7500,14 +7753,22 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("beginLibUpload_args("); + StringBuilder sb = new StringBuilder("metricMonitor_args("); boolean first = true; - sb.append("libName:"); - if (this.libName == null) { + sb.append("name:"); + if (this.name == null) { sb.append("null"); } else { - sb.append(this.libName); + sb.append(this.name); + } + first = false; + if (!first) sb.append(", "); + sb.append("options:"); + if (this.options == null) { + sb.append("null"); + } else { + sb.append(this.options); } first = false; sb.append(")"); @@ -7536,14 +7797,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class beginLibUpload_result implements 
org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginLibUpload_result"); + public static class metricMonitor_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("metricMonitor_result"); + private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); + private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { -; + E((short)1, "e"); private static final Map byName = new HashMap(); @@ -7558,6 +7821,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // E + return E; default: return null; } @@ -7596,37 +7861,87 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginLibUpload_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(metricMonitor_result.class, metaDataMap); } - public 
beginLibUpload_result() { + public metricMonitor_result() { + } + + public metricMonitor_result( + NotAliveException e) + { + this(); + this.e = e; } /** * Performs a deep copy on other. */ - public beginLibUpload_result(beginLibUpload_result other) { + public metricMonitor_result(metricMonitor_result other) { + if (other.is_set_e()) { + this.e = new NotAliveException(other.e); + } } - public beginLibUpload_result deepCopy() { - return new beginLibUpload_result(this); + public metricMonitor_result deepCopy() { + return new metricMonitor_result(this); } @Override public void clear() { + this.e = null; + } + + public NotAliveException get_e() { + return this.e; + } + + public void set_e(NotAliveException e) { + this.e = e; + } + + public void unset_e() { + this.e = null; + } + + /** Returns true if field e is set (has been assigned a value) and false otherwise */ + public boolean is_set_e() { + return this.e != null; + } + + public void set_e_isSet(boolean value) { + if (!value) { + this.e = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case E: + if (value == null) { + unset_e(); + } else { + set_e((NotAliveException)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case E: + return get_e(); + } throw new IllegalStateException(); } @@ -7638,6 +7953,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case E: + return is_set_e(); } throw new IllegalStateException(); } @@ -7646,15 +7963,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof beginLibUpload_result) - return this.equals((beginLibUpload_result)that); + if (that instanceof metricMonitor_result) + return this.equals((metricMonitor_result)that); return false; } - public boolean equals(beginLibUpload_result that) { + public boolean equals(metricMonitor_result that) { if (that == null) return false; + boolean this_present_e = true && 
this.is_set_e(); + boolean that_present_e = true && that.is_set_e(); + if (this_present_e || that_present_e) { + if (!(this_present_e && that_present_e)) + return false; + if (!this.e.equals(that.e)) + return false; + } + return true; } @@ -7662,17 +7988,32 @@ public boolean equals(beginLibUpload_result that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); + boolean present_e = true && (is_set_e()); + builder.append(present_e); + if (present_e) + builder.append(e); + return builder.toHashCode(); } - public int compareTo(beginLibUpload_result other) { + public int compareTo(metricMonitor_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginLibUpload_result typedOther = (beginLibUpload_result)other; + metricMonitor_result typedOther = (metricMonitor_result)other; + lastComparison = Boolean.valueOf(is_set_e()).compareTo(typedOther.is_set_e()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_e()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.e, typedOther.e); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -7690,6 +8031,14 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { + case 1: // E + if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { + this.e = new NotAliveException(); + this.e.read(iprot); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -7702,15 +8051,27 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { oprot.writeStructBegin(STRUCT_DESC); + if (this.is_set_e()) { + oprot.writeFieldBegin(E_FIELD_DESC); + this.e.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("beginLibUpload_result("); + StringBuilder sb = new StringBuilder("metricMonitor_result("); boolean first = true; + sb.append("e:"); + if (this.e == null) { + sb.append("null"); + } else { + sb.append(this.e); + } + first = false; sb.append(")"); return sb.toString(); } @@ -7737,14 +8098,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class beginFileUpload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileUpload_args"); + public static class beginLibUpload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginLibUpload_args"); + private static final org.apache.thrift7.protocol.TField LIB_NAME_FIELD_DESC = new org.apache.thrift7.protocol.TField("libName", org.apache.thrift7.protocol.TType.STRING, (short)1); + private String libName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { -; + LIB_NAME((short)1, "libName"); private static final Map byName = new HashMap(); @@ -7759,6 +8122,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // LIB_NAME + return LIB_NAME; default: return null; } @@ -7797,37 +8162,87 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LIB_NAME, new org.apache.thrift7.meta_data.FieldMetaData("libName", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileUpload_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginLibUpload_args.class, metaDataMap); } - public beginFileUpload_args() { + public beginLibUpload_args() { + } + + public beginLibUpload_args( + String libName) + { + this(); + this.libName = libName; } /** * Performs a deep copy on other. 
*/ - public beginFileUpload_args(beginFileUpload_args other) { + public beginLibUpload_args(beginLibUpload_args other) { + if (other.is_set_libName()) { + this.libName = other.libName; + } } - public beginFileUpload_args deepCopy() { - return new beginFileUpload_args(this); + public beginLibUpload_args deepCopy() { + return new beginLibUpload_args(this); } @Override public void clear() { + this.libName = null; + } + + public String get_libName() { + return this.libName; + } + + public void set_libName(String libName) { + this.libName = libName; + } + + public void unset_libName() { + this.libName = null; + } + + /** Returns true if field libName is set (has been assigned a value) and false otherwise */ + public boolean is_set_libName() { + return this.libName != null; + } + + public void set_libName_isSet(boolean value) { + if (!value) { + this.libName = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case LIB_NAME: + if (value == null) { + unset_libName(); + } else { + set_libName((String)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case LIB_NAME: + return get_libName(); + } throw new IllegalStateException(); } @@ -7839,6 +8254,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case LIB_NAME: + return is_set_libName(); } throw new IllegalStateException(); } @@ -7847,15 +8264,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof beginFileUpload_args) - return this.equals((beginFileUpload_args)that); + if (that instanceof beginLibUpload_args) + return this.equals((beginLibUpload_args)that); return false; } - public boolean equals(beginFileUpload_args that) { + public boolean equals(beginLibUpload_args that) { if (that == null) return false; + boolean this_present_libName = true && this.is_set_libName(); + boolean that_present_libName = true && that.is_set_libName(); + if 
(this_present_libName || that_present_libName) { + if (!(this_present_libName && that_present_libName)) + return false; + if (!this.libName.equals(that.libName)) + return false; + } + return true; } @@ -7863,17 +8289,32 @@ public boolean equals(beginFileUpload_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - return builder.toHashCode(); - } - - public int compareTo(beginFileUpload_args other) { + boolean present_libName = true && (is_set_libName()); + builder.append(present_libName); + if (present_libName) + builder.append(libName); + + return builder.toHashCode(); + } + + public int compareTo(beginLibUpload_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginFileUpload_args typedOther = (beginFileUpload_args)other; + beginLibUpload_args typedOther = (beginLibUpload_args)other; + lastComparison = Boolean.valueOf(is_set_libName()).compareTo(typedOther.is_set_libName()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_libName()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.libName, typedOther.libName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -7891,6 +8332,13 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
break; } switch (field.id) { + case 1: // LIB_NAME + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.libName = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -7904,15 +8352,27 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); + if (this.libName != null) { + oprot.writeFieldBegin(LIB_NAME_FIELD_DESC); + oprot.writeString(this.libName); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("beginFileUpload_args("); + StringBuilder sb = new StringBuilder("beginLibUpload_args("); boolean first = true; + sb.append("libName:"); + if (this.libName == null) { + sb.append("null"); + } else { + sb.append(this.libName); + } + first = false; sb.append(")"); return sb.toString(); } @@ -7939,16 +8399,14 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class beginFileUpload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileUpload_result"); + public static class beginLibUpload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginLibUpload_result"); - private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); - private String success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - SUCCESS((short)0, "success"); +; private static final Map byName = new HashMap(); @@ -7963,8 +8421,6 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; default: return null; } @@ -8003,87 +8459,1144 @@ public String getFieldName() { return _fieldName; } } + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginLibUpload_result.class, metaDataMap); + } - // isset id assignments + public beginLibUpload_result() { + } + + /** + * Performs a deep copy on other. + */ + public beginLibUpload_result(beginLibUpload_result other) { + } + + public beginLibUpload_result deepCopy() { + return new beginLibUpload_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof beginLibUpload_result) + return this.equals((beginLibUpload_result)that); + return false; + } + + public boolean equals(beginLibUpload_result that) { + if (that == null) + return false; + + return true; + } + + 
@Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + return builder.toHashCode(); + } + + public int compareTo(beginLibUpload_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + beginLibUpload_result typedOther = (beginLibUpload_result)other; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + oprot.writeStructBegin(STRUCT_DESC); + + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("beginLibUpload_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift7.protocol.TCompactProtocol(new 
org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class beginFileUpload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileUpload_args"); + + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileUpload_args.class, metaDataMap); + } + + public beginFileUpload_args() { + } + + /** + * Performs a deep copy on other. + */ + public beginFileUpload_args(beginFileUpload_args other) { + } + + public beginFileUpload_args deepCopy() { + return new beginFileUpload_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof beginFileUpload_args) + return this.equals((beginFileUpload_args)that); + return false; + } + + public boolean equals(beginFileUpload_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + return 
builder.toHashCode(); + } + + public int compareTo(beginFileUpload_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + beginFileUpload_args typedOther = (beginFileUpload_args)other; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("beginFileUpload_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new 
java.io.IOException(te); + } + } + + } + + public static class beginFileUpload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileUpload_result"); + + private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); + + private String success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileUpload_result.class, metaDataMap); + } + + public beginFileUpload_result() { + } + + public beginFileUpload_result( + String success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public beginFileUpload_result(beginFileUpload_result other) { + if (other.is_set_success()) { + this.success = other.success; + } + } + + public beginFileUpload_result deepCopy() { + return new beginFileUpload_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public String get_success() { + return this.success; + } + + public void set_success(String success) { + this.success = success; + } + + public void unset_success() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean is_set_success() { + return this.success != null; + } + + public void set_success_isSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unset_success(); + } else { + set_success((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return get_success(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return is_set_success(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof beginFileUpload_result) + return this.equals((beginFileUpload_result)that); + return false; + } + + public boolean equals(beginFileUpload_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.is_set_success(); + boolean that_present_success = true && that.is_set_success(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return 
false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (is_set_success()); + builder.append(present_success); + if (present_success) + builder.append(success); + + return builder.toHashCode(); + } + + public int compareTo(beginFileUpload_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + beginFileUpload_result typedOther = (beginFileUpload_result)other; + + lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_success()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 0: // SUCCESS + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.success = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.is_set_success()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + 
oprot.writeString(this.success); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("beginFileUpload_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class uploadChunk_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("uploadChunk_args"); + + private static final org.apache.thrift7.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift7.protocol.TField("location", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField CHUNK_FIELD_DESC = new org.apache.thrift7.protocol.TField("chunk", org.apache.thrift7.protocol.TType.STRING, (short)2); + + private String location; // required + private ByteBuffer chunk; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { + LOCATION((short)1, "location"), + CHUNK((short)2, "chunk"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // LOCATION + return LOCATION; + case 2: // CHUNK + return CHUNK; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LOCATION, new org.apache.thrift7.meta_data.FieldMetaData("location", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + tmpMap.put(_Fields.CHUNK, new org.apache.thrift7.meta_data.FieldMetaData("chunk", 
org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING , true))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_args.class, metaDataMap); + } + + public uploadChunk_args() { + } + + public uploadChunk_args( + String location, + ByteBuffer chunk) + { + this(); + this.location = location; + this.chunk = chunk; + } + + /** + * Performs a deep copy on other. + */ + public uploadChunk_args(uploadChunk_args other) { + if (other.is_set_location()) { + this.location = other.location; + } + if (other.is_set_chunk()) { + this.chunk = org.apache.thrift7.TBaseHelper.copyBinary(other.chunk); +; + } + } + + public uploadChunk_args deepCopy() { + return new uploadChunk_args(this); + } + + @Override + public void clear() { + this.location = null; + this.chunk = null; + } + + public String get_location() { + return this.location; + } + + public void set_location(String location) { + this.location = location; + } + + public void unset_location() { + this.location = null; + } + + /** Returns true if field location is set (has been assigned a value) and false otherwise */ + public boolean is_set_location() { + return this.location != null; + } + + public void set_location_isSet(boolean value) { + if (!value) { + this.location = null; + } + } + + public byte[] get_chunk() { + set_chunk(org.apache.thrift7.TBaseHelper.rightSize(chunk)); + return chunk == null ? null : chunk.array(); + } + + public ByteBuffer buffer_for_chunk() { + return chunk; + } + + public void set_chunk(byte[] chunk) { + set_chunk(chunk == null ? 
(ByteBuffer)null : ByteBuffer.wrap(chunk)); + } + + public void set_chunk(ByteBuffer chunk) { + this.chunk = chunk; + } + + public void unset_chunk() { + this.chunk = null; + } + + /** Returns true if field chunk is set (has been assigned a value) and false otherwise */ + public boolean is_set_chunk() { + return this.chunk != null; + } + + public void set_chunk_isSet(boolean value) { + if (!value) { + this.chunk = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOCATION: + if (value == null) { + unset_location(); + } else { + set_location((String)value); + } + break; + + case CHUNK: + if (value == null) { + unset_chunk(); + } else { + set_chunk((ByteBuffer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LOCATION: + return get_location(); + + case CHUNK: + return get_chunk(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOCATION: + return is_set_location(); + case CHUNK: + return is_set_chunk(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof uploadChunk_args) + return this.equals((uploadChunk_args)that); + return false; + } + + public boolean equals(uploadChunk_args that) { + if (that == null) + return false; + + boolean this_present_location = true && this.is_set_location(); + boolean that_present_location = true && that.is_set_location(); + if (this_present_location || that_present_location) { + if (!(this_present_location && that_present_location)) + return false; + if (!this.location.equals(that.location)) + return false; + } + + boolean this_present_chunk = true && this.is_set_chunk(); + boolean 
that_present_chunk = true && that.is_set_chunk(); + if (this_present_chunk || that_present_chunk) { + if (!(this_present_chunk && that_present_chunk)) + return false; + if (!this.chunk.equals(that.chunk)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_location = true && (is_set_location()); + builder.append(present_location); + if (present_location) + builder.append(location); + + boolean present_chunk = true && (is_set_chunk()); + builder.append(present_chunk); + if (present_chunk) + builder.append(chunk); + + return builder.toHashCode(); + } + + public int compareTo(uploadChunk_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + uploadChunk_args typedOther = (uploadChunk_args)other; + + lastComparison = Boolean.valueOf(is_set_location()).compareTo(typedOther.is_set_location()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_location()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.location, typedOther.location); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_chunk()).compareTo(typedOther.is_set_chunk()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_chunk()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.chunk, typedOther.chunk); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch 
(field.id) { + case 1: // LOCATION + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.location = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // CHUNK + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.chunk = iprot.readBinary(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.location != null) { + oprot.writeFieldBegin(LOCATION_FIELD_DESC); + oprot.writeString(this.location); + oprot.writeFieldEnd(); + } + if (this.chunk != null) { + oprot.writeFieldBegin(CHUNK_FIELD_DESC); + oprot.writeBinary(this.chunk); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("uploadChunk_args("); + boolean first = true; + + sb.append("location:"); + if (this.location == null) { + sb.append("null"); + } else { + sb.append(this.location); + } + first = false; + if (!first) sb.append(", "); + sb.append("chunk:"); + if (this.chunk == null) { + sb.append("null"); + } else { + org.apache.thrift7.TBaseHelper.toString(this.chunk, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } 
+ } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class uploadChunk_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("uploadChunk_result"); + + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileUpload_result.class, metaDataMap); - } - - public beginFileUpload_result() { + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_result.class, metaDataMap); } - public beginFileUpload_result( - String success) - { - this(); - this.success = success; + public uploadChunk_result() { } /** * Performs a deep copy on other. 
*/ - public beginFileUpload_result(beginFileUpload_result other) { - if (other.is_set_success()) { - this.success = other.success; - } + public uploadChunk_result(uploadChunk_result other) { } - public beginFileUpload_result deepCopy() { - return new beginFileUpload_result(this); + public uploadChunk_result deepCopy() { + return new uploadChunk_result(this); } @Override public void clear() { - this.success = null; - } - - public String get_success() { - return this.success; - } - - public void set_success(String success) { - this.success = success; - } - - public void unset_success() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean is_set_success() { - return this.success != null; - } - - public void set_success_isSet(boolean value) { - if (!value) { - this.success = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unset_success(); - } else { - set_success((String)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return get_success(); - } throw new IllegalStateException(); } @@ -8095,8 +9608,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return is_set_success(); } throw new IllegalStateException(); } @@ -8105,24 +9616,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof beginFileUpload_result) - return this.equals((beginFileUpload_result)that); + if (that instanceof uploadChunk_result) + return this.equals((uploadChunk_result)that); return false; } - public boolean equals(beginFileUpload_result that) { + public boolean equals(uploadChunk_result that) { if (that == null) return false; - boolean this_present_success = true && this.is_set_success(); - boolean that_present_success = true && that.is_set_success(); - if 
(this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - return true; } @@ -8130,32 +9632,17 @@ public boolean equals(beginFileUpload_result that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_success = true && (is_set_success()); - builder.append(present_success); - if (present_success) - builder.append(success); - return builder.toHashCode(); } - public int compareTo(beginFileUpload_result other) { + public int compareTo(uploadChunk_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginFileUpload_result typedOther = (beginFileUpload_result)other; + uploadChunk_result typedOther = (uploadChunk_result)other; - lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); - if (lastComparison != 0) { - return lastComparison; - } - if (is_set_success()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -8173,13 +9660,6 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 0: // SUCCESS - if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.success = iprot.readString(); - } else { - org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); - } - break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -8192,27 +9672,15 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { oprot.writeStructBegin(STRUCT_DESC); - if (this.is_set_success()) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(this.success); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("beginFileUpload_result("); + StringBuilder sb = new StringBuilder("uploadChunk_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; sb.append(")"); return sb.toString(); } @@ -8239,19 +9707,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class uploadChunk_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("uploadChunk_args"); + public static class finishFileUpload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("finishFileUpload_args"); private static final org.apache.thrift7.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift7.protocol.TField("location", org.apache.thrift7.protocol.TType.STRING, (short)1); - private static final org.apache.thrift7.protocol.TField CHUNK_FIELD_DESC = new org.apache.thrift7.protocol.TField("chunk", org.apache.thrift7.protocol.TType.STRING, (short)2); private String location; // required - private ByteBuffer chunk; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - LOCATION((short)1, "location"), - CHUNK((short)2, "chunk"); + LOCATION((short)1, "location"); private static final Map byName = new HashMap(); @@ -8268,8 +9733,6 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // LOCATION return LOCATION; - case 2: // CHUNK - return CHUNK; default: return null; } @@ -8316,45 +9779,36 @@ public String getFieldName() { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.LOCATION, new org.apache.thrift7.meta_data.FieldMetaData("location", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); - tmpMap.put(_Fields.CHUNK, new org.apache.thrift7.meta_data.FieldMetaData("chunk", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING , true))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_args.class, metaDataMap); } - public uploadChunk_args() { + public finishFileUpload_args() { } - public uploadChunk_args( - String location, - ByteBuffer chunk) + public finishFileUpload_args( + String location) { this(); this.location = location; - this.chunk = chunk; } /** * Performs a deep copy on other. 
*/ - public uploadChunk_args(uploadChunk_args other) { + public finishFileUpload_args(finishFileUpload_args other) { if (other.is_set_location()) { this.location = other.location; } - if (other.is_set_chunk()) { - this.chunk = org.apache.thrift7.TBaseHelper.copyBinary(other.chunk); -; - } } - public uploadChunk_args deepCopy() { - return new uploadChunk_args(this); + public finishFileUpload_args deepCopy() { + return new finishFileUpload_args(this); } @Override public void clear() { this.location = null; - this.chunk = null; } public String get_location() { @@ -8380,38 +9834,6 @@ public void set_location_isSet(boolean value) { } } - public byte[] get_chunk() { - set_chunk(org.apache.thrift7.TBaseHelper.rightSize(chunk)); - return chunk == null ? null : chunk.array(); - } - - public ByteBuffer buffer_for_chunk() { - return chunk; - } - - public void set_chunk(byte[] chunk) { - set_chunk(chunk == null ? (ByteBuffer)null : ByteBuffer.wrap(chunk)); - } - - public void set_chunk(ByteBuffer chunk) { - this.chunk = chunk; - } - - public void unset_chunk() { - this.chunk = null; - } - - /** Returns true if field chunk is set (has been assigned a value) and false otherwise */ - public boolean is_set_chunk() { - return this.chunk != null; - } - - public void set_chunk_isSet(boolean value) { - if (!value) { - this.chunk = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case LOCATION: @@ -8422,14 +9844,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case CHUNK: - if (value == null) { - unset_chunk(); - } else { - set_chunk((ByteBuffer)value); - } - break; - } } @@ -8438,9 +9852,6 @@ public Object getFieldValue(_Fields field) { case LOCATION: return get_location(); - case CHUNK: - return get_chunk(); - } throw new IllegalStateException(); } @@ -8454,8 +9865,6 @@ public boolean isSet(_Fields field) { switch (field) { case LOCATION: return is_set_location(); - case CHUNK: - return is_set_chunk(); } throw new 
IllegalStateException(); } @@ -8464,12 +9873,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof uploadChunk_args) - return this.equals((uploadChunk_args)that); + if (that instanceof finishFileUpload_args) + return this.equals((finishFileUpload_args)that); return false; } - public boolean equals(uploadChunk_args that) { + public boolean equals(finishFileUpload_args that) { if (that == null) return false; @@ -8482,15 +9891,6 @@ public boolean equals(uploadChunk_args that) { return false; } - boolean this_present_chunk = true && this.is_set_chunk(); - boolean that_present_chunk = true && that.is_set_chunk(); - if (this_present_chunk || that_present_chunk) { - if (!(this_present_chunk && that_present_chunk)) - return false; - if (!this.chunk.equals(that.chunk)) - return false; - } - return true; } @@ -8503,21 +9903,16 @@ public int hashCode() { if (present_location) builder.append(location); - boolean present_chunk = true && (is_set_chunk()); - builder.append(present_chunk); - if (present_chunk) - builder.append(chunk); - return builder.toHashCode(); } - public int compareTo(uploadChunk_args other) { + public int compareTo(finishFileUpload_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - uploadChunk_args typedOther = (uploadChunk_args)other; + finishFileUpload_args typedOther = (finishFileUpload_args)other; lastComparison = Boolean.valueOf(is_set_location()).compareTo(typedOther.is_set_location()); if (lastComparison != 0) { @@ -8529,16 +9924,6 @@ public int compareTo(uploadChunk_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(is_set_chunk()).compareTo(typedOther.is_set_chunk()); - if (lastComparison != 0) { - return lastComparison; - } - if (is_set_chunk()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.chunk, typedOther.chunk); - if 
(lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -8563,13 +9948,6 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } break; - case 2: // CHUNK - if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.chunk = iprot.readBinary(); - } else { - org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); - } - break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -8588,18 +9966,13 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache oprot.writeString(this.location); oprot.writeFieldEnd(); } - if (this.chunk != null) { - oprot.writeFieldBegin(CHUNK_FIELD_DESC); - oprot.writeBinary(this.chunk); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("uploadChunk_args("); + StringBuilder sb = new StringBuilder("finishFileUpload_args("); boolean first = true; sb.append("location:"); @@ -8609,14 +9982,6 @@ public String toString() { sb.append(this.location); } first = false; - if (!first) sb.append(", "); - sb.append("chunk:"); - if (this.chunk == null) { - sb.append("null"); - } else { - org.apache.thrift7.TBaseHelper.toString(this.chunk, sb); - } - first = false; sb.append(")"); return sb.toString(); } @@ -8643,8 +10008,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class uploadChunk_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("uploadChunk_result"); + public static class finishFileUpload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift7.protocol.TStruct("finishFileUpload_result"); @@ -8707,20 +10072,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_result.class, metaDataMap); } - public uploadChunk_result() { + public finishFileUpload_result() { } /** * Performs a deep copy on other. */ - public uploadChunk_result(uploadChunk_result other) { + public finishFileUpload_result(finishFileUpload_result other) { } - public uploadChunk_result deepCopy() { - return new uploadChunk_result(this); + public finishFileUpload_result deepCopy() { + return new finishFileUpload_result(this); } @Override @@ -8753,12 +10118,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof uploadChunk_result) - return this.equals((uploadChunk_result)that); + if (that instanceof finishFileUpload_result) + return this.equals((finishFileUpload_result)that); return false; } - public boolean equals(uploadChunk_result that) { + public boolean equals(finishFileUpload_result that) { if (that == null) return false; @@ -8772,13 +10137,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(uploadChunk_result other) { + public int compareTo(finishFileUpload_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - uploadChunk_result typedOther = (uploadChunk_result)other; + finishFileUpload_result typedOther = (finishFileUpload_result)other; return 0; } @@ -8815,7 +10180,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) 
throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("uploadChunk_result("); + StringBuilder sb = new StringBuilder("finishFileUpload_result("); boolean first = true; sb.append(")"); @@ -8844,16 +10209,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class finishFileUpload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("finishFileUpload_args"); + public static class beginFileDownload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileDownload_args"); - private static final org.apache.thrift7.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift7.protocol.TField("location", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField FILE_FIELD_DESC = new org.apache.thrift7.protocol.TField("file", org.apache.thrift7.protocol.TType.STRING, (short)1); - private String location; // required + private String file; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - LOCATION((short)1, "location"); + FILE((short)1, "file"); private static final Map byName = new HashMap(); @@ -8868,8 +10233,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // LOCATION - return LOCATION; + case 1: // FILE + return FILE; default: return null; } @@ -8914,70 +10279,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.LOCATION, new org.apache.thrift7.meta_data.FieldMetaData("location", org.apache.thrift7.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.FILE, new org.apache.thrift7.meta_data.FieldMetaData("file", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_args.class, metaDataMap); } - public finishFileUpload_args() { + public beginFileDownload_args() { } - public finishFileUpload_args( - String location) + public beginFileDownload_args( + String file) { this(); - this.location = location; + this.file = file; } /** * Performs a deep copy on other. 
*/ - public finishFileUpload_args(finishFileUpload_args other) { - if (other.is_set_location()) { - this.location = other.location; + public beginFileDownload_args(beginFileDownload_args other) { + if (other.is_set_file()) { + this.file = other.file; } } - public finishFileUpload_args deepCopy() { - return new finishFileUpload_args(this); + public beginFileDownload_args deepCopy() { + return new beginFileDownload_args(this); } @Override public void clear() { - this.location = null; + this.file = null; } - public String get_location() { - return this.location; + public String get_file() { + return this.file; } - public void set_location(String location) { - this.location = location; + public void set_file(String file) { + this.file = file; } - public void unset_location() { - this.location = null; + public void unset_file() { + this.file = null; } - /** Returns true if field location is set (has been assigned a value) and false otherwise */ - public boolean is_set_location() { - return this.location != null; + /** Returns true if field file is set (has been assigned a value) and false otherwise */ + public boolean is_set_file() { + return this.file != null; } - public void set_location_isSet(boolean value) { + public void set_file_isSet(boolean value) { if (!value) { - this.location = null; + this.file = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case LOCATION: + case FILE: if (value == null) { - unset_location(); + unset_file(); } else { - set_location((String)value); + set_file((String)value); } break; @@ -8986,8 +10351,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case LOCATION: - return get_location(); + case FILE: + return get_file(); } throw new IllegalStateException(); @@ -9000,8 +10365,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case LOCATION: - return is_set_location(); + case FILE: + return is_set_file(); } throw 
new IllegalStateException(); } @@ -9010,21 +10375,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof finishFileUpload_args) - return this.equals((finishFileUpload_args)that); + if (that instanceof beginFileDownload_args) + return this.equals((beginFileDownload_args)that); return false; } - public boolean equals(finishFileUpload_args that) { + public boolean equals(beginFileDownload_args that) { if (that == null) return false; - boolean this_present_location = true && this.is_set_location(); - boolean that_present_location = true && that.is_set_location(); - if (this_present_location || that_present_location) { - if (!(this_present_location && that_present_location)) + boolean this_present_file = true && this.is_set_file(); + boolean that_present_file = true && that.is_set_file(); + if (this_present_file || that_present_file) { + if (!(this_present_file && that_present_file)) return false; - if (!this.location.equals(that.location)) + if (!this.file.equals(that.file)) return false; } @@ -9035,28 +10400,28 @@ public boolean equals(finishFileUpload_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_location = true && (is_set_location()); - builder.append(present_location); - if (present_location) - builder.append(location); + boolean present_file = true && (is_set_file()); + builder.append(present_file); + if (present_file) + builder.append(file); return builder.toHashCode(); } - public int compareTo(finishFileUpload_args other) { + public int compareTo(beginFileDownload_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - finishFileUpload_args typedOther = (finishFileUpload_args)other; + beginFileDownload_args typedOther = (beginFileDownload_args)other; - lastComparison = 
Boolean.valueOf(is_set_location()).compareTo(typedOther.is_set_location()); + lastComparison = Boolean.valueOf(is_set_file()).compareTo(typedOther.is_set_file()); if (lastComparison != 0) { return lastComparison; } - if (is_set_location()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.location, typedOther.location); + if (is_set_file()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.file, typedOther.file); if (lastComparison != 0) { return lastComparison; } @@ -9078,9 +10443,9 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 1: // LOCATION + case 1: // FILE if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.location = iprot.readString(); + this.file = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -9098,9 +10463,9 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.location != null) { - oprot.writeFieldBegin(LOCATION_FIELD_DESC); - oprot.writeString(this.location); + if (this.file != null) { + oprot.writeFieldBegin(FILE_FIELD_DESC); + oprot.writeString(this.file); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -9109,14 +10474,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("finishFileUpload_args("); + StringBuilder sb = new StringBuilder("beginFileDownload_args("); boolean first = true; - sb.append("location:"); - if (this.location == null) { + sb.append("file:"); + if (this.file == null) { sb.append("null"); } else { - sb.append(this.location); + sb.append(this.file); } first = false; sb.append(")"); @@ -9145,14 +10510,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class finishFileUpload_result implements 
org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("finishFileUpload_result"); + public static class beginFileDownload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileDownload_result"); + private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); + private String success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { -; + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -9167,6 +10534,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -9205,37 +10574,87 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_result.class, metaDataMap); + 
org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_result.class, metaDataMap); } - public finishFileUpload_result() { + public beginFileDownload_result() { + } + + public beginFileDownload_result( + String success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. */ - public finishFileUpload_result(finishFileUpload_result other) { + public beginFileDownload_result(beginFileDownload_result other) { + if (other.is_set_success()) { + this.success = other.success; + } } - public finishFileUpload_result deepCopy() { - return new finishFileUpload_result(this); + public beginFileDownload_result deepCopy() { + return new beginFileDownload_result(this); } @Override public void clear() { + this.success = null; + } + + public String get_success() { + return this.success; + } + + public void set_success(String success) { + this.success = success; + } + + public void unset_success() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean is_set_success() { + return this.success != null; + } + + public void set_success_isSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unset_success(); + } else { + set_success((String)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return get_success(); + } throw new IllegalStateException(); } @@ -9247,6 +10666,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return is_set_success(); } throw new IllegalStateException(); } @@ -9255,15 +10676,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof finishFileUpload_result) - return this.equals((finishFileUpload_result)that); + if (that instanceof 
beginFileDownload_result) + return this.equals((beginFileDownload_result)that); return false; } - public boolean equals(finishFileUpload_result that) { + public boolean equals(beginFileDownload_result that) { if (that == null) return false; + boolean this_present_success = true && this.is_set_success(); + boolean that_present_success = true && that.is_set_success(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -9271,17 +10701,32 @@ public boolean equals(finishFileUpload_result that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); + boolean present_success = true && (is_set_success()); + builder.append(present_success); + if (present_success) + builder.append(success); + return builder.toHashCode(); } - public int compareTo(finishFileUpload_result other) { + public int compareTo(beginFileDownload_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - finishFileUpload_result typedOther = (finishFileUpload_result)other; + beginFileDownload_result typedOther = (beginFileDownload_result)other; + lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_success()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -9299,6 +10744,13 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
break; } switch (field.id) { + case 0: // SUCCESS + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.success = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -9311,15 +10763,27 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { oprot.writeStructBegin(STRUCT_DESC); + if (this.is_set_success()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(this.success); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("finishFileUpload_result("); + StringBuilder sb = new StringBuilder("beginFileDownload_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -9346,16 +10810,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class beginFileDownload_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileDownload_args"); + public static class downloadChunk_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("downloadChunk_args"); - private static final org.apache.thrift7.protocol.TField FILE_FIELD_DESC = new org.apache.thrift7.protocol.TField("file", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new 
org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); - private String file; // required + private String id; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - FILE((short)1, "file"); + ID((short)1, "id"); private static final Map byName = new HashMap(); @@ -9370,8 +10834,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // FILE - return FILE; + case 1: // ID + return ID; default: return null; } @@ -9416,70 +10880,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.FILE, new org.apache.thrift7.meta_data.FieldMetaData("file", org.apache.thrift7.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(downloadChunk_args.class, metaDataMap); } - public beginFileDownload_args() { + public downloadChunk_args() { } - public beginFileDownload_args( - String file) + public downloadChunk_args( + String id) { this(); - this.file = file; + this.id = id; } /** * Performs a deep copy on other. 
*/ - public beginFileDownload_args(beginFileDownload_args other) { - if (other.is_set_file()) { - this.file = other.file; + public downloadChunk_args(downloadChunk_args other) { + if (other.is_set_id()) { + this.id = other.id; } } - public beginFileDownload_args deepCopy() { - return new beginFileDownload_args(this); + public downloadChunk_args deepCopy() { + return new downloadChunk_args(this); } @Override public void clear() { - this.file = null; + this.id = null; } - public String get_file() { - return this.file; + public String get_id() { + return this.id; } - public void set_file(String file) { - this.file = file; + public void set_id(String id) { + this.id = id; } - public void unset_file() { - this.file = null; + public void unset_id() { + this.id = null; } - /** Returns true if field file is set (has been assigned a value) and false otherwise */ - public boolean is_set_file() { - return this.file != null; + /** Returns true if field id is set (has been assigned a value) and false otherwise */ + public boolean is_set_id() { + return this.id != null; } - public void set_file_isSet(boolean value) { + public void set_id_isSet(boolean value) { if (!value) { - this.file = null; + this.id = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case FILE: + case ID: if (value == null) { - unset_file(); + unset_id(); } else { - set_file((String)value); + set_id((String)value); } break; @@ -9488,8 +10952,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case FILE: - return get_file(); + case ID: + return get_id(); } throw new IllegalStateException(); @@ -9502,8 +10966,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case FILE: - return is_set_file(); + case ID: + return is_set_id(); } throw new IllegalStateException(); } @@ -9512,21 +10976,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return 
false; - if (that instanceof beginFileDownload_args) - return this.equals((beginFileDownload_args)that); + if (that instanceof downloadChunk_args) + return this.equals((downloadChunk_args)that); return false; } - public boolean equals(beginFileDownload_args that) { + public boolean equals(downloadChunk_args that) { if (that == null) return false; - boolean this_present_file = true && this.is_set_file(); - boolean that_present_file = true && that.is_set_file(); - if (this_present_file || that_present_file) { - if (!(this_present_file && that_present_file)) + boolean this_present_id = true && this.is_set_id(); + boolean that_present_id = true && that.is_set_id(); + if (this_present_id || that_present_id) { + if (!(this_present_id && that_present_id)) return false; - if (!this.file.equals(that.file)) + if (!this.id.equals(that.id)) return false; } @@ -9537,28 +11001,28 @@ public boolean equals(beginFileDownload_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_file = true && (is_set_file()); - builder.append(present_file); - if (present_file) - builder.append(file); + boolean present_id = true && (is_set_id()); + builder.append(present_id); + if (present_id) + builder.append(id); return builder.toHashCode(); } - public int compareTo(beginFileDownload_args other) { + public int compareTo(downloadChunk_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginFileDownload_args typedOther = (beginFileDownload_args)other; + downloadChunk_args typedOther = (downloadChunk_args)other; - lastComparison = Boolean.valueOf(is_set_file()).compareTo(typedOther.is_set_file()); + lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); if (lastComparison != 0) { return lastComparison; } - if (is_set_file()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.file, typedOther.file); + if 
(is_set_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.id, typedOther.id); if (lastComparison != 0) { return lastComparison; } @@ -9580,9 +11044,9 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 1: // FILE + case 1: // ID if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.file = iprot.readString(); + this.id = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -9600,9 +11064,9 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.file != null) { - oprot.writeFieldBegin(FILE_FIELD_DESC); - oprot.writeString(this.file); + if (this.id != null) { + oprot.writeFieldBegin(ID_FIELD_DESC); + oprot.writeString(this.id); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -9611,14 +11075,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("beginFileDownload_args("); + StringBuilder sb = new StringBuilder("downloadChunk_args("); boolean first = true; - sb.append("file:"); - if (this.file == null) { + sb.append("id:"); + if (this.id == null) { sb.append("null"); } else { - sb.append(this.file); + sb.append(this.id); } first = false; sb.append(")"); @@ -9647,12 +11111,12 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class beginFileDownload_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("beginFileDownload_result"); + public static class downloadChunk_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift7.protocol.TStruct("downloadChunk_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); - private String success; // required + private ByteBuffer success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { @@ -9718,16 +11182,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING , true))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(downloadChunk_result.class, metaDataMap); } - public beginFileDownload_result() { + public downloadChunk_result() { } - public beginFileDownload_result( - String success) + public downloadChunk_result( + ByteBuffer success) { this(); this.success = success; @@ -9736,14 +11200,15 @@ public beginFileDownload_result( /** * Performs a deep copy on other. 
*/ - public beginFileDownload_result(beginFileDownload_result other) { + public downloadChunk_result(downloadChunk_result other) { if (other.is_set_success()) { - this.success = other.success; + this.success = org.apache.thrift7.TBaseHelper.copyBinary(other.success); +; } } - public beginFileDownload_result deepCopy() { - return new beginFileDownload_result(this); + public downloadChunk_result deepCopy() { + return new downloadChunk_result(this); } @Override @@ -9751,11 +11216,20 @@ public void clear() { this.success = null; } - public String get_success() { - return this.success; + public byte[] get_success() { + set_success(org.apache.thrift7.TBaseHelper.rightSize(success)); + return success == null ? null : success.array(); } - public void set_success(String success) { + public ByteBuffer buffer_for_success() { + return success; + } + + public void set_success(byte[] success) { + set_success(success == null ? (ByteBuffer)null : ByteBuffer.wrap(success)); + } + + public void set_success(ByteBuffer success) { this.success = success; } @@ -9780,7 +11254,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((String)value); + set_success((ByteBuffer)value); } break; @@ -9813,12 +11287,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof beginFileDownload_result) - return this.equals((beginFileDownload_result)that); + if (that instanceof downloadChunk_result) + return this.equals((downloadChunk_result)that); return false; } - public boolean equals(beginFileDownload_result that) { + public boolean equals(downloadChunk_result that) { if (that == null) return false; @@ -9846,13 +11320,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(beginFileDownload_result other) { + public int compareTo(downloadChunk_result other) { if (!getClass().equals(other.getClass())) { return 
getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - beginFileDownload_result typedOther = (beginFileDownload_result)other; + downloadChunk_result typedOther = (downloadChunk_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -9883,7 +11357,7 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.success = iprot.readString(); + this.success = iprot.readBinary(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -9902,7 +11376,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache if (this.is_set_success()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(this.success); + oprot.writeBinary(this.success); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -9911,14 +11385,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("beginFileDownload_result("); + StringBuilder sb = new StringBuilder("downloadChunk_result("); boolean first = true; sb.append("success:"); if (this.success == null) { sb.append("null"); } else { - sb.append(this.success); + org.apache.thrift7.TBaseHelper.toString(this.success, sb); } first = false; sb.append(")"); @@ -9947,16 +11421,14 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class downloadChunk_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("downloadChunk_args"); + public static class getNimbusConf_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final 
org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getNimbusConf_args"); - private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); - private String id; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - ID((short)1, "id"); +; private static final Map byName = new HashMap(); @@ -9971,8 +11443,6 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // ID - return ID; default: return null; } @@ -10007,91 +11477,41 @@ public short getThriftFieldId() { return _thriftId; } - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - - public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(downloadChunk_args.class, metaDataMap); - } - - public downloadChunk_args() { - } - - public downloadChunk_args( - String id) - { - this(); - this.id = id; - } - - /** - * Performs a deep copy on other. 
- */ - public downloadChunk_args(downloadChunk_args other) { - if (other.is_set_id()) { - this.id = other.id; - } - } - - public downloadChunk_args deepCopy() { - return new downloadChunk_args(this); - } - - @Override - public void clear() { - this.id = null; - } - - public String get_id() { - return this.id; + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getNimbusConf_args.class, metaDataMap); } - public void set_id(String id) { - this.id = id; + public getNimbusConf_args() { } - public void unset_id() { - this.id = null; + /** + * Performs a deep copy on other. + */ + public getNimbusConf_args(getNimbusConf_args other) { } - /** Returns true if field id is set (has been assigned a value) and false otherwise */ - public boolean is_set_id() { - return this.id != null; + public getNimbusConf_args deepCopy() { + return new getNimbusConf_args(this); } - public void set_id_isSet(boolean value) { - if (!value) { - this.id = null; - } + @Override + public void clear() { } public void setFieldValue(_Fields field, Object value) { switch (field) { - case ID: - if (value == null) { - unset_id(); - } else { - set_id((String)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case ID: - return get_id(); - } throw new IllegalStateException(); } @@ -10103,8 +11523,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case ID: - return is_set_id(); } throw new IllegalStateException(); } @@ -10113,24 +11531,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof 
downloadChunk_args) - return this.equals((downloadChunk_args)that); + if (that instanceof getNimbusConf_args) + return this.equals((getNimbusConf_args)that); return false; } - public boolean equals(downloadChunk_args that) { + public boolean equals(getNimbusConf_args that) { if (that == null) return false; - boolean this_present_id = true && this.is_set_id(); - boolean that_present_id = true && that.is_set_id(); - if (this_present_id || that_present_id) { - if (!(this_present_id && that_present_id)) - return false; - if (!this.id.equals(that.id)) - return false; - } - return true; } @@ -10138,32 +11547,17 @@ public boolean equals(downloadChunk_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_id = true && (is_set_id()); - builder.append(present_id); - if (present_id) - builder.append(id); - return builder.toHashCode(); } - public int compareTo(downloadChunk_args other) { + public int compareTo(getNimbusConf_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - downloadChunk_args typedOther = (downloadChunk_args)other; + getNimbusConf_args typedOther = (getNimbusConf_args)other; - lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); - if (lastComparison != 0) { - return lastComparison; - } - if (is_set_id()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.id, typedOther.id); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -10181,13 +11575,6 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
break; } switch (field.id) { - case 1: // ID - if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.id = iprot.readString(); - } else { - org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); - } - break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -10201,27 +11588,15 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.id != null) { - oprot.writeFieldBegin(ID_FIELD_DESC); - oprot.writeString(this.id); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("downloadChunk_args("); + StringBuilder sb = new StringBuilder("getNimbusConf_args("); boolean first = true; - sb.append("id:"); - if (this.id == null) { - sb.append("null"); - } else { - sb.append(this.id); - } - first = false; sb.append(")"); return sb.toString(); } @@ -10248,12 +11623,12 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class downloadChunk_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("downloadChunk_result"); + public static class getNimbusConf_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getNimbusConf_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); - private ByteBuffer success; // required + private String success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { @@ -10319,16 +11694,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING , true))); + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(downloadChunk_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getNimbusConf_result.class, metaDataMap); } - public downloadChunk_result() { + public getNimbusConf_result() { } - public downloadChunk_result( - ByteBuffer success) + public getNimbusConf_result( + String success) { this(); this.success = success; @@ -10337,15 +11712,14 @@ public downloadChunk_result( /** * Performs a deep copy on other. */ - public downloadChunk_result(downloadChunk_result other) { + public getNimbusConf_result(getNimbusConf_result other) { if (other.is_set_success()) { - this.success = org.apache.thrift7.TBaseHelper.copyBinary(other.success); -; + this.success = other.success; } } - public downloadChunk_result deepCopy() { - return new downloadChunk_result(this); + public getNimbusConf_result deepCopy() { + return new getNimbusConf_result(this); } @Override @@ -10353,20 +11727,11 @@ public void clear() { this.success = null; } - public byte[] get_success() { - set_success(org.apache.thrift7.TBaseHelper.rightSize(success)); - return success == null ? 
null : success.array(); - } - - public ByteBuffer buffer_for_success() { - return success; - } - - public void set_success(byte[] success) { - set_success(success == null ? (ByteBuffer)null : ByteBuffer.wrap(success)); + public String get_success() { + return this.success; } - public void set_success(ByteBuffer success) { + public void set_success(String success) { this.success = success; } @@ -10391,7 +11756,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((ByteBuffer)value); + set_success((String)value); } break; @@ -10424,12 +11789,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof downloadChunk_result) - return this.equals((downloadChunk_result)that); + if (that instanceof getNimbusConf_result) + return this.equals((getNimbusConf_result)that); return false; } - public boolean equals(downloadChunk_result that) { + public boolean equals(getNimbusConf_result that) { if (that == null) return false; @@ -10457,13 +11822,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(downloadChunk_result other) { + public int compareTo(getNimbusConf_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - downloadChunk_result typedOther = (downloadChunk_result)other; + getNimbusConf_result typedOther = (getNimbusConf_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -10494,7 +11859,7 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.success = iprot.readBinary(); + this.success = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -10513,7 +11878,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache if (this.is_set_success()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBinary(this.success); + oprot.writeString(this.success); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -10522,14 +11887,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("downloadChunk_result("); + StringBuilder sb = new StringBuilder("getNimbusConf_result("); boolean first = true; sb.append("success:"); if (this.success == null) { sb.append("null"); } else { - org.apache.thrift7.TBaseHelper.toString(this.success, sb); + sb.append(this.success); } first = false; sb.append(")"); @@ -10558,8 +11923,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getNimbusConf_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getNimbusConf_args"); + public static class getClusterInfo_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getClusterInfo_args"); @@ -10622,20 +11987,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getNimbusConf_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getClusterInfo_args.class, metaDataMap); } - public getNimbusConf_args() { + public getClusterInfo_args() { } /** * Performs a deep copy on other. */ - public getNimbusConf_args(getNimbusConf_args other) { + public getClusterInfo_args(getClusterInfo_args other) { } - public getNimbusConf_args deepCopy() { - return new getNimbusConf_args(this); + public getClusterInfo_args deepCopy() { + return new getClusterInfo_args(this); } @Override @@ -10668,12 +12033,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getNimbusConf_args) - return this.equals((getNimbusConf_args)that); + if (that instanceof getClusterInfo_args) + return this.equals((getClusterInfo_args)that); return false; } - public boolean equals(getNimbusConf_args that) { + public boolean equals(getClusterInfo_args that) { if (that == null) return false; @@ -10687,13 +12052,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getNimbusConf_args other) { + public int compareTo(getClusterInfo_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getNimbusConf_args typedOther = (getNimbusConf_args)other; + getClusterInfo_args typedOther = (getClusterInfo_args)other; return 0; } @@ -10731,7 +12096,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getNimbusConf_args("); + StringBuilder sb = new StringBuilder("getClusterInfo_args("); boolean first = true; sb.append(")"); @@ -10760,12 +12125,12 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getNimbusConf_result 
implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getNimbusConf_result"); + public static class getClusterInfo_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getClusterInfo_result"); - private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); + private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); - private String success; // required + private ClusterSummary success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { @@ -10831,16 +12196,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, ClusterSummary.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getNimbusConf_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getClusterInfo_result.class, metaDataMap); } - public getNimbusConf_result() { + public getClusterInfo_result() { } - public getNimbusConf_result( - String success) + public getClusterInfo_result( + ClusterSummary success) { this(); this.success = success; @@ -10849,14 +12214,14 @@ public getNimbusConf_result( /** * Performs a deep copy on other. 
*/ - public getNimbusConf_result(getNimbusConf_result other) { + public getClusterInfo_result(getClusterInfo_result other) { if (other.is_set_success()) { - this.success = other.success; + this.success = new ClusterSummary(other.success); } } - public getNimbusConf_result deepCopy() { - return new getNimbusConf_result(this); + public getClusterInfo_result deepCopy() { + return new getClusterInfo_result(this); } @Override @@ -10864,11 +12229,11 @@ public void clear() { this.success = null; } - public String get_success() { + public ClusterSummary get_success() { return this.success; } - public void set_success(String success) { + public void set_success(ClusterSummary success) { this.success = success; } @@ -10893,7 +12258,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((String)value); + set_success((ClusterSummary)value); } break; @@ -10926,12 +12291,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getNimbusConf_result) - return this.equals((getNimbusConf_result)that); + if (that instanceof getClusterInfo_result) + return this.equals((getClusterInfo_result)that); return false; } - public boolean equals(getNimbusConf_result that) { + public boolean equals(getClusterInfo_result that) { if (that == null) return false; @@ -10959,13 +12324,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getNimbusConf_result other) { + public int compareTo(getClusterInfo_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getNimbusConf_result typedOther = (getNimbusConf_result)other; + getClusterInfo_result typedOther = (getClusterInfo_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -10995,8 +12360,9 @@ 
public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. } switch (field.id) { case 0: // SUCCESS - if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.success = iprot.readString(); + if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { + this.success = new ClusterSummary(); + this.success.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -11015,7 +12381,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache if (this.is_set_success()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(this.success); + this.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -11024,7 +12390,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getNimbusConf_result("); + StringBuilder sb = new StringBuilder("getClusterInfo_result("); boolean first = true; sb.append("success:"); @@ -11060,14 +12426,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getClusterInfo_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getClusterInfo_args"); + public static class getTopologyInfo_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyInfo_args"); + private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); + private String id; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { -; + ID((short)1, "id"); private static final Map byName = new HashMap(); @@ -11082,6 +12450,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // ID + return ID; default: return null; } @@ -11120,37 +12490,87 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getClusterInfo_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyInfo_args.class, metaDataMap); + } + + public getTopologyInfo_args() { } - public getClusterInfo_args() { + public getTopologyInfo_args( + String id) + { + this(); + this.id = id; } /** * Performs a deep copy on other. 
*/ - public getClusterInfo_args(getClusterInfo_args other) { + public getTopologyInfo_args(getTopologyInfo_args other) { + if (other.is_set_id()) { + this.id = other.id; + } } - public getClusterInfo_args deepCopy() { - return new getClusterInfo_args(this); + public getTopologyInfo_args deepCopy() { + return new getTopologyInfo_args(this); } @Override public void clear() { + this.id = null; + } + + public String get_id() { + return this.id; + } + + public void set_id(String id) { + this.id = id; + } + + public void unset_id() { + this.id = null; + } + + /** Returns true if field id is set (has been assigned a value) and false otherwise */ + public boolean is_set_id() { + return this.id != null; + } + + public void set_id_isSet(boolean value) { + if (!value) { + this.id = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case ID: + if (value == null) { + unset_id(); + } else { + set_id((String)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case ID: + return get_id(); + } throw new IllegalStateException(); } @@ -11162,6 +12582,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case ID: + return is_set_id(); } throw new IllegalStateException(); } @@ -11170,15 +12592,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getClusterInfo_args) - return this.equals((getClusterInfo_args)that); + if (that instanceof getTopologyInfo_args) + return this.equals((getTopologyInfo_args)that); return false; } - public boolean equals(getClusterInfo_args that) { + public boolean equals(getTopologyInfo_args that) { if (that == null) return false; + boolean this_present_id = true && this.is_set_id(); + boolean that_present_id = true && that.is_set_id(); + if (this_present_id || that_present_id) { + if (!(this_present_id && that_present_id)) + return false; + if (!this.id.equals(that.id)) + return false; + } + 
return true; } @@ -11186,17 +12617,32 @@ public boolean equals(getClusterInfo_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); + boolean present_id = true && (is_set_id()); + builder.append(present_id); + if (present_id) + builder.append(id); + return builder.toHashCode(); } - public int compareTo(getClusterInfo_args other) { + public int compareTo(getTopologyInfo_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getClusterInfo_args typedOther = (getClusterInfo_args)other; + getTopologyInfo_args typedOther = (getTopologyInfo_args)other; + lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.id, typedOther.id); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -11214,6 +12660,13 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
break; } switch (field.id) { + case 1: // ID + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.id = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -11227,15 +12680,27 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); + if (this.id != null) { + oprot.writeFieldBegin(ID_FIELD_DESC); + oprot.writeString(this.id); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @Override public String toString() { - StringBuilder sb = new StringBuilder("getClusterInfo_args("); + StringBuilder sb = new StringBuilder("getTopologyInfo_args("); boolean first = true; + sb.append("id:"); + if (this.id == null) { + sb.append("null"); + } else { + sb.append(this.id); + } + first = false; sb.append(")"); return sb.toString(); } @@ -11262,16 +12727,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getClusterInfo_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getClusterInfo_result"); + public static class getTopologyInfo_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyInfo_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); - private ClusterSummary success; // 
required + private TopologyInfo success; // required + private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - SUCCESS((short)0, "success"); + SUCCESS((short)0, "success"), + E((short)1, "e"); private static final Map byName = new HashMap(); @@ -11288,6 +12756,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; + case 1: // E + return E; default: return null; } @@ -11333,44 +12803,52 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, ClusterSummary.class))); + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, TopologyInfo.class))); + tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getClusterInfo_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyInfo_result.class, metaDataMap); } - public getClusterInfo_result() { + public getTopologyInfo_result() { } - public getClusterInfo_result( - ClusterSummary success) + public getTopologyInfo_result( + TopologyInfo success, + NotAliveException e) { this(); this.success = success; + this.e = e; } /** * Performs a deep copy on other. 
*/ - public getClusterInfo_result(getClusterInfo_result other) { + public getTopologyInfo_result(getTopologyInfo_result other) { if (other.is_set_success()) { - this.success = new ClusterSummary(other.success); + this.success = new TopologyInfo(other.success); + } + if (other.is_set_e()) { + this.e = new NotAliveException(other.e); } } - public getClusterInfo_result deepCopy() { - return new getClusterInfo_result(this); + public getTopologyInfo_result deepCopy() { + return new getTopologyInfo_result(this); } @Override public void clear() { this.success = null; + this.e = null; } - public ClusterSummary get_success() { + public TopologyInfo get_success() { return this.success; } - public void set_success(ClusterSummary success) { + public void set_success(TopologyInfo success) { this.success = success; } @@ -11389,13 +12867,44 @@ public void set_success_isSet(boolean value) { } } + public NotAliveException get_e() { + return this.e; + } + + public void set_e(NotAliveException e) { + this.e = e; + } + + public void unset_e() { + this.e = null; + } + + /** Returns true if field e is set (has been assigned a value) and false otherwise */ + public boolean is_set_e() { + return this.e != null; + } + + public void set_e_isSet(boolean value) { + if (!value) { + this.e = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unset_success(); } else { - set_success((ClusterSummary)value); + set_success((TopologyInfo)value); + } + break; + + case E: + if (value == null) { + unset_e(); + } else { + set_e((NotAliveException)value); } break; @@ -11407,6 +12916,9 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return get_success(); + case E: + return get_e(); + } throw new IllegalStateException(); } @@ -11420,6 +12932,8 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return is_set_success(); + case E: + return is_set_e(); } throw new IllegalStateException(); } @@ -11428,12 
+12942,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getClusterInfo_result) - return this.equals((getClusterInfo_result)that); + if (that instanceof getTopologyInfo_result) + return this.equals((getTopologyInfo_result)that); return false; } - public boolean equals(getClusterInfo_result that) { + public boolean equals(getTopologyInfo_result that) { if (that == null) return false; @@ -11446,6 +12960,15 @@ public boolean equals(getClusterInfo_result that) { return false; } + boolean this_present_e = true && this.is_set_e(); + boolean that_present_e = true && that.is_set_e(); + if (this_present_e || that_present_e) { + if (!(this_present_e && that_present_e)) + return false; + if (!this.e.equals(that.e)) + return false; + } + return true; } @@ -11458,16 +12981,21 @@ public int hashCode() { if (present_success) builder.append(success); + boolean present_e = true && (is_set_e()); + builder.append(present_e); + if (present_e) + builder.append(e); + return builder.toHashCode(); } - public int compareTo(getClusterInfo_result other) { + public int compareTo(getTopologyInfo_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getClusterInfo_result typedOther = (getClusterInfo_result)other; + getTopologyInfo_result typedOther = (getTopologyInfo_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -11479,6 +13007,16 @@ public int compareTo(getClusterInfo_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(is_set_e()).compareTo(typedOther.is_set_e()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_e()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.e, typedOther.e); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ 
-11498,12 +13036,20 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { - this.success = new ClusterSummary(); + this.success = new TopologyInfo(); this.success.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } break; + case 1: // E + if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { + this.e = new NotAliveException(); + this.e.read(iprot); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -11520,6 +13066,10 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache oprot.writeFieldBegin(SUCCESS_FIELD_DESC); this.success.write(oprot); oprot.writeFieldEnd(); + } else if (this.is_set_e()) { + oprot.writeFieldBegin(E_FIELD_DESC); + this.e.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -11527,7 +13077,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getClusterInfo_result("); + StringBuilder sb = new StringBuilder("getTopologyInfo_result("); boolean first = true; sb.append("success:"); @@ -11537,6 +13087,14 @@ public String toString() { sb.append(this.success); } first = false; + if (!first) sb.append(", "); + sb.append("e:"); + if (this.e == null) { + sb.append("null"); + } else { + sb.append(this.e); + } + first = false; sb.append(")"); return sb.toString(); } @@ -11563,16 +13121,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopologyInfo_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift7.protocol.TStruct("getTopologyInfo_args"); + public static class getSupervisorWorkers_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getSupervisorWorkers_args"); - private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField HOST_FIELD_DESC = new org.apache.thrift7.protocol.TField("host", org.apache.thrift7.protocol.TType.STRING, (short)1); - private String id; // required + private String host; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - ID((short)1, "id"); + HOST((short)1, "host"); private static final Map byName = new HashMap(); @@ -11587,8 +13145,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // ID - return ID; + case 1: // HOST + return HOST; default: return null; } @@ -11633,70 +13191,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.HOST, new org.apache.thrift7.meta_data.FieldMetaData("host", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyInfo_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getSupervisorWorkers_args.class, metaDataMap); } - public getTopologyInfo_args() { + public getSupervisorWorkers_args() { } - public getTopologyInfo_args( - String id) + public getSupervisorWorkers_args( + String host) { this(); - this.id = id; + this.host = host; } /** * Performs a deep copy on other. */ - public getTopologyInfo_args(getTopologyInfo_args other) { - if (other.is_set_id()) { - this.id = other.id; + public getSupervisorWorkers_args(getSupervisorWorkers_args other) { + if (other.is_set_host()) { + this.host = other.host; } } - public getTopologyInfo_args deepCopy() { - return new getTopologyInfo_args(this); + public getSupervisorWorkers_args deepCopy() { + return new getSupervisorWorkers_args(this); } @Override public void clear() { - this.id = null; + this.host = null; } - public String get_id() { - return this.id; + public String get_host() { + return this.host; } - public void set_id(String id) { - this.id = id; + public void set_host(String host) { + this.host = host; } - public void unset_id() { - this.id = null; + public void unset_host() { + this.host = null; } - /** Returns true if field id is set (has been assigned a value) and false otherwise */ - public boolean is_set_id() { - return this.id != null; + /** Returns true if field host is set (has been assigned a value) and false otherwise */ + public boolean is_set_host() { + return this.host != null; } - public void set_id_isSet(boolean value) { + public void set_host_isSet(boolean value) { if (!value) { - this.id = null; + this.host = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case ID: + case HOST: if (value == null) { - unset_id(); + unset_host(); } else { - set_id((String)value); + set_host((String)value); } break; @@ -11705,8 +13263,8 @@ public void setFieldValue(_Fields 
field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case ID: - return get_id(); + case HOST: + return get_host(); } throw new IllegalStateException(); @@ -11719,8 +13277,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case ID: - return is_set_id(); + case HOST: + return is_set_host(); } throw new IllegalStateException(); } @@ -11729,21 +13287,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopologyInfo_args) - return this.equals((getTopologyInfo_args)that); + if (that instanceof getSupervisorWorkers_args) + return this.equals((getSupervisorWorkers_args)that); return false; } - public boolean equals(getTopologyInfo_args that) { + public boolean equals(getSupervisorWorkers_args that) { if (that == null) return false; - boolean this_present_id = true && this.is_set_id(); - boolean that_present_id = true && that.is_set_id(); - if (this_present_id || that_present_id) { - if (!(this_present_id && that_present_id)) + boolean this_present_host = true && this.is_set_host(); + boolean that_present_host = true && that.is_set_host(); + if (this_present_host || that_present_host) { + if (!(this_present_host && that_present_host)) return false; - if (!this.id.equals(that.id)) + if (!this.host.equals(that.host)) return false; } @@ -11754,28 +13312,28 @@ public boolean equals(getTopologyInfo_args that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_id = true && (is_set_id()); - builder.append(present_id); - if (present_id) - builder.append(id); + boolean present_host = true && (is_set_host()); + builder.append(present_host); + if (present_host) + builder.append(host); return builder.toHashCode(); } - public int compareTo(getTopologyInfo_args other) { + public int compareTo(getSupervisorWorkers_args other) { if (!getClass().equals(other.getClass())) { return 
getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopologyInfo_args typedOther = (getTopologyInfo_args)other; + getSupervisorWorkers_args typedOther = (getSupervisorWorkers_args)other; - lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); + lastComparison = Boolean.valueOf(is_set_host()).compareTo(typedOther.is_set_host()); if (lastComparison != 0) { return lastComparison; } - if (is_set_id()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.id, typedOther.id); + if (is_set_host()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.host, typedOther.host); if (lastComparison != 0) { return lastComparison; } @@ -11797,9 +13355,9 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 1: // ID + case 1: // HOST if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.id = iprot.readString(); + this.host = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -11817,9 +13375,9 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.id != null) { - oprot.writeFieldBegin(ID_FIELD_DESC); - oprot.writeString(this.id); + if (this.host != null) { + oprot.writeFieldBegin(HOST_FIELD_DESC); + oprot.writeString(this.host); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -11828,14 +13386,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopologyInfo_args("); + StringBuilder sb = new StringBuilder("getSupervisorWorkers_args("); boolean first = true; - sb.append("id:"); - if (this.id == null) { + sb.append("host:"); + if (this.host == null) { sb.append("null"); } else { - sb.append(this.id); + sb.append(this.host); } first = false; sb.append(")"); @@ 
-11864,13 +13422,13 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopologyInfo_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyInfo_result"); + public static class getSupervisorWorkers_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getSupervisorWorkers_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); - private TopologyInfo success; // required + private SupervisorWorkers success; // required private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -11940,18 +13498,18 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, TopologyInfo.class))); + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, SupervisorWorkers.class))); tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyInfo_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getSupervisorWorkers_result.class, metaDataMap); } - public getTopologyInfo_result() { + public getSupervisorWorkers_result() { } - public getTopologyInfo_result( - TopologyInfo success, + public getSupervisorWorkers_result( + SupervisorWorkers success, NotAliveException e) { this(); @@ -11962,17 +13520,17 @@ public getTopologyInfo_result( /** * Performs a deep copy on other. 
*/ - public getTopologyInfo_result(getTopologyInfo_result other) { + public getSupervisorWorkers_result(getSupervisorWorkers_result other) { if (other.is_set_success()) { - this.success = new TopologyInfo(other.success); + this.success = new SupervisorWorkers(other.success); } if (other.is_set_e()) { this.e = new NotAliveException(other.e); } } - public getTopologyInfo_result deepCopy() { - return new getTopologyInfo_result(this); + public getSupervisorWorkers_result deepCopy() { + return new getSupervisorWorkers_result(this); } @Override @@ -11981,11 +13539,11 @@ public void clear() { this.e = null; } - public TopologyInfo get_success() { + public SupervisorWorkers get_success() { return this.success; } - public void set_success(TopologyInfo success) { + public void set_success(SupervisorWorkers success) { this.success = success; } @@ -12033,7 +13591,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((TopologyInfo)value); + set_success((SupervisorWorkers)value); } break; @@ -12079,12 +13637,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopologyInfo_result) - return this.equals((getTopologyInfo_result)that); + if (that instanceof getSupervisorWorkers_result) + return this.equals((getSupervisorWorkers_result)that); return false; } - public boolean equals(getTopologyInfo_result that) { + public boolean equals(getSupervisorWorkers_result that) { if (that == null) return false; @@ -12126,13 +13684,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getTopologyInfo_result other) { + public int compareTo(getSupervisorWorkers_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopologyInfo_result typedOther = (getTopologyInfo_result)other; + getSupervisorWorkers_result 
typedOther = (getSupervisorWorkers_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -12173,7 +13731,7 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { - this.success = new TopologyInfo(); + this.success = new SupervisorWorkers(); this.success.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); @@ -12214,7 +13772,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopologyInfo_result("); + StringBuilder sb = new StringBuilder("getSupervisorWorkers_result("); boolean first = true; sb.append("success:"); @@ -12258,16 +13816,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getSupervisorWorkers_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getSupervisorWorkers_args"); + public static class getTopologyConf_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyConf_args"); - private static final org.apache.thrift7.protocol.TField HOST_FIELD_DESC = new org.apache.thrift7.protocol.TField("host", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); - private String host; // required + private String id; // required /** The set of fields this struct contains, along with convenience methods for 
finding and manipulating them. */ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { - HOST((short)1, "host"); + ID((short)1, "id"); private static final Map byName = new HashMap(); @@ -12282,8 +13840,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // HOST - return HOST; + case 1: // ID + return ID; default: return null; } @@ -12328,70 +13886,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.HOST, new org.apache.thrift7.meta_data.FieldMetaData("host", org.apache.thrift7.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getSupervisorWorkers_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyConf_args.class, metaDataMap); } - public getSupervisorWorkers_args() { + public getTopologyConf_args() { } - public getSupervisorWorkers_args( - String host) + public getTopologyConf_args( + String id) { this(); - this.host = host; + this.id = id; } /** * Performs a deep copy on other. 
*/ - public getSupervisorWorkers_args(getSupervisorWorkers_args other) { - if (other.is_set_host()) { - this.host = other.host; + public getTopologyConf_args(getTopologyConf_args other) { + if (other.is_set_id()) { + this.id = other.id; } } - public getSupervisorWorkers_args deepCopy() { - return new getSupervisorWorkers_args(this); + public getTopologyConf_args deepCopy() { + return new getTopologyConf_args(this); } @Override public void clear() { - this.host = null; + this.id = null; } - public String get_host() { - return this.host; + public String get_id() { + return this.id; } - public void set_host(String host) { - this.host = host; + public void set_id(String id) { + this.id = id; } - public void unset_host() { - this.host = null; + public void unset_id() { + this.id = null; } - /** Returns true if field host is set (has been assigned a value) and false otherwise */ - public boolean is_set_host() { - return this.host != null; + /** Returns true if field id is set (has been assigned a value) and false otherwise */ + public boolean is_set_id() { + return this.id != null; } - public void set_host_isSet(boolean value) { + public void set_id_isSet(boolean value) { if (!value) { - this.host = null; + this.id = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case HOST: + case ID: if (value == null) { - unset_host(); + unset_id(); } else { - set_host((String)value); + set_id((String)value); } break; @@ -12400,8 +13958,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case HOST: - return get_host(); + case ID: + return get_id(); } throw new IllegalStateException(); @@ -12414,8 +13972,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case HOST: - return is_set_host(); + case ID: + return is_set_id(); } throw new IllegalStateException(); } @@ -12424,21 +13982,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if 
(that == null) return false; - if (that instanceof getSupervisorWorkers_args) - return this.equals((getSupervisorWorkers_args)that); + if (that instanceof getTopologyConf_args) + return this.equals((getTopologyConf_args)that); return false; } - public boolean equals(getSupervisorWorkers_args that) { + public boolean equals(getTopologyConf_args that) { if (that == null) return false; - boolean this_present_host = true && this.is_set_host(); - boolean that_present_host = true && that.is_set_host(); - if (this_present_host || that_present_host) { - if (!(this_present_host && that_present_host)) + boolean this_present_id = true && this.is_set_id(); + boolean that_present_id = true && that.is_set_id(); + if (this_present_id || that_present_id) { + if (!(this_present_id && that_present_id)) return false; - if (!this.host.equals(that.host)) + if (!this.id.equals(that.id)) return false; } @@ -12447,30 +14005,30 @@ public boolean equals(getSupervisorWorkers_args that) { @Override public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_host = true && (is_set_host()); - builder.append(present_host); - if (present_host) - builder.append(host); + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_id = true && (is_set_id()); + builder.append(present_id); + if (present_id) + builder.append(id); return builder.toHashCode(); } - public int compareTo(getSupervisorWorkers_args other) { + public int compareTo(getTopologyConf_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getSupervisorWorkers_args typedOther = (getSupervisorWorkers_args)other; + getTopologyConf_args typedOther = (getTopologyConf_args)other; - lastComparison = Boolean.valueOf(is_set_host()).compareTo(typedOther.is_set_host()); + lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); if (lastComparison != 0) { return lastComparison; } 
- if (is_set_host()) { - lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.host, typedOther.host); + if (is_set_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.id, typedOther.id); if (lastComparison != 0) { return lastComparison; } @@ -12492,9 +14050,9 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. break; } switch (field.id) { - case 1: // HOST + case 1: // ID if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.host = iprot.readString(); + this.id = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -12512,9 +14070,9 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache validate(); oprot.writeStructBegin(STRUCT_DESC); - if (this.host != null) { - oprot.writeFieldBegin(HOST_FIELD_DESC); - oprot.writeString(this.host); + if (this.id != null) { + oprot.writeFieldBegin(ID_FIELD_DESC); + oprot.writeString(this.id); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -12523,14 +14081,14 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getSupervisorWorkers_args("); + StringBuilder sb = new StringBuilder("getTopologyConf_args("); boolean first = true; - sb.append("host:"); - if (this.host == null) { + sb.append("id:"); + if (this.id == null) { sb.append("null"); } else { - sb.append(this.host); + sb.append(this.id); } first = false; sb.append(")"); @@ -12559,13 +14117,13 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getSupervisorWorkers_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getSupervisorWorkers_result"); + public static class getTopologyConf_result implements 
org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyConf_result"); - private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); - private SupervisorWorkers success; // required + private String success; // required private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -12635,18 +14193,18 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, SupervisorWorkers.class))); + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getSupervisorWorkers_result.class, metaDataMap); + 
org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyConf_result.class, metaDataMap); } - public getSupervisorWorkers_result() { + public getTopologyConf_result() { } - public getSupervisorWorkers_result( - SupervisorWorkers success, + public getTopologyConf_result( + String success, NotAliveException e) { this(); @@ -12657,17 +14215,17 @@ public getSupervisorWorkers_result( /** * Performs a deep copy on other. */ - public getSupervisorWorkers_result(getSupervisorWorkers_result other) { + public getTopologyConf_result(getTopologyConf_result other) { if (other.is_set_success()) { - this.success = new SupervisorWorkers(other.success); + this.success = other.success; } if (other.is_set_e()) { this.e = new NotAliveException(other.e); } } - public getSupervisorWorkers_result deepCopy() { - return new getSupervisorWorkers_result(this); + public getTopologyConf_result deepCopy() { + return new getTopologyConf_result(this); } @Override @@ -12676,11 +14234,11 @@ public void clear() { this.e = null; } - public SupervisorWorkers get_success() { + public String get_success() { return this.success; } - public void set_success(SupervisorWorkers success) { + public void set_success(String success) { this.success = success; } @@ -12728,7 +14286,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((SupervisorWorkers)value); + set_success((String)value); } break; @@ -12774,12 +14332,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getSupervisorWorkers_result) - return this.equals((getSupervisorWorkers_result)that); + if (that instanceof getTopologyConf_result) + return this.equals((getTopologyConf_result)that); return false; } - public boolean equals(getSupervisorWorkers_result that) { + public boolean equals(getTopologyConf_result that) { if (that == null) return false; @@ -12821,13 +14379,13 @@ public int 
hashCode() { return builder.toHashCode(); } - public int compareTo(getSupervisorWorkers_result other) { + public int compareTo(getTopologyConf_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getSupervisorWorkers_result typedOther = (getSupervisorWorkers_result)other; + getTopologyConf_result typedOther = (getTopologyConf_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -12867,9 +14425,8 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. } switch (field.id) { case 0: // SUCCESS - if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { - this.success = new SupervisorWorkers(); - this.success.read(iprot); + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.success = iprot.readString(); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -12896,7 +14453,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache if (this.is_set_success()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - this.success.write(oprot); + oprot.writeString(this.success); oprot.writeFieldEnd(); } else if (this.is_set_e()) { oprot.writeFieldBegin(E_FIELD_DESC); @@ -12909,7 +14466,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getSupervisorWorkers_result("); + StringBuilder sb = new StringBuilder("getTopologyConf_result("); boolean first = true; sb.append("success:"); @@ -12953,8 +14510,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopologyConf_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift7.protocol.TStruct("getTopologyConf_args"); + public static class getTopology_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopology_args"); private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); @@ -13026,13 +14583,13 @@ public String getFieldName() { tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyConf_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopology_args.class, metaDataMap); } - public getTopologyConf_args() { + public getTopology_args() { } - public getTopologyConf_args( + public getTopology_args( String id) { this(); @@ -13042,14 +14599,14 @@ public getTopologyConf_args( /** * Performs a deep copy on other. 
*/ - public getTopologyConf_args(getTopologyConf_args other) { + public getTopology_args(getTopology_args other) { if (other.is_set_id()) { this.id = other.id; } } - public getTopologyConf_args deepCopy() { - return new getTopologyConf_args(this); + public getTopology_args deepCopy() { + return new getTopology_args(this); } @Override @@ -13119,12 +14676,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopologyConf_args) - return this.equals((getTopologyConf_args)that); + if (that instanceof getTopology_args) + return this.equals((getTopology_args)that); return false; } - public boolean equals(getTopologyConf_args that) { + public boolean equals(getTopology_args that) { if (that == null) return false; @@ -13152,13 +14709,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getTopologyConf_args other) { + public int compareTo(getTopology_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopologyConf_args typedOther = (getTopologyConf_args)other; + getTopology_args typedOther = (getTopology_args)other; lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); if (lastComparison != 0) { @@ -13218,7 +14775,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopologyConf_args("); + StringBuilder sb = new StringBuilder("getTopology_args("); boolean first = true; sb.append("id:"); @@ -13254,13 +14811,13 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopologyConf_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift7.protocol.TStruct("getTopologyConf_result"); + public static class getTopology_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopology_result"); - private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRING, (short)0); + private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); - private String success; // required + private StormTopology success; // required private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -13330,18 +14887,18 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, StormTopology.class))); tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyConf_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopology_result.class, metaDataMap); } - public getTopologyConf_result() { + public getTopology_result() { } - public getTopologyConf_result( - String success, + public getTopology_result( + StormTopology success, NotAliveException e) { this(); @@ -13352,17 +14909,17 @@ public getTopologyConf_result( /** * Performs a deep copy on other. 
*/ - public getTopologyConf_result(getTopologyConf_result other) { + public getTopology_result(getTopology_result other) { if (other.is_set_success()) { - this.success = other.success; + this.success = new StormTopology(other.success); } if (other.is_set_e()) { this.e = new NotAliveException(other.e); } } - public getTopologyConf_result deepCopy() { - return new getTopologyConf_result(this); + public getTopology_result deepCopy() { + return new getTopology_result(this); } @Override @@ -13371,11 +14928,11 @@ public void clear() { this.e = null; } - public String get_success() { + public StormTopology get_success() { return this.success; } - public void set_success(String success) { + public void set_success(StormTopology success) { this.success = success; } @@ -13423,7 +14980,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((String)value); + set_success((StormTopology)value); } break; @@ -13469,12 +15026,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopologyConf_result) - return this.equals((getTopologyConf_result)that); + if (that instanceof getTopology_result) + return this.equals((getTopology_result)that); return false; } - public boolean equals(getTopologyConf_result that) { + public boolean equals(getTopology_result that) { if (that == null) return false; @@ -13516,13 +15073,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getTopologyConf_result other) { + public int compareTo(getTopology_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopologyConf_result typedOther = (getTopologyConf_result)other; + getTopology_result typedOther = (getTopology_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if 
(lastComparison != 0) { @@ -13562,8 +15119,9 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. } switch (field.id) { case 0: // SUCCESS - if (field.type == org.apache.thrift7.protocol.TType.STRING) { - this.success = iprot.readString(); + if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { + this.success = new StormTopology(); + this.success.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -13590,7 +15148,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache if (this.is_set_success()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(this.success); + this.success.write(oprot); oprot.writeFieldEnd(); } else if (this.is_set_e()) { oprot.writeFieldBegin(E_FIELD_DESC); @@ -13603,7 +15161,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopologyConf_result("); + StringBuilder sb = new StringBuilder("getTopology_result("); boolean first = true; sb.append("success:"); @@ -13647,8 +15205,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopology_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopology_args"); + public static class getUserTopology_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getUserTopology_args"); private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); @@ -13720,13 +15278,13 @@ public String getFieldName() { tmpMap.put(_Fields.ID, new 
org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopology_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getUserTopology_args.class, metaDataMap); } - public getTopology_args() { + public getUserTopology_args() { } - public getTopology_args( + public getUserTopology_args( String id) { this(); @@ -13736,14 +15294,14 @@ public getTopology_args( /** * Performs a deep copy on other. */ - public getTopology_args(getTopology_args other) { + public getUserTopology_args(getUserTopology_args other) { if (other.is_set_id()) { this.id = other.id; } } - public getTopology_args deepCopy() { - return new getTopology_args(this); + public getUserTopology_args deepCopy() { + return new getUserTopology_args(this); } @Override @@ -13813,12 +15371,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopology_args) - return this.equals((getTopology_args)that); + if (that instanceof getUserTopology_args) + return this.equals((getUserTopology_args)that); return false; } - public boolean equals(getTopology_args that) { + public boolean equals(getUserTopology_args that) { if (that == null) return false; @@ -13846,13 +15404,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getTopology_args other) { + public int compareTo(getUserTopology_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopology_args typedOther = (getTopology_args)other; + getUserTopology_args typedOther = (getUserTopology_args)other; lastComparison = 
Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); if (lastComparison != 0) { @@ -13912,7 +15470,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopology_args("); + StringBuilder sb = new StringBuilder("getUserTopology_args("); boolean first = true; sb.append("id:"); @@ -13948,8 +15506,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getTopology_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopology_result"); + public static class getUserTopology_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getUserTopology_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); @@ -14028,13 +15586,13 @@ public String getFieldName() { tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopology_result.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getUserTopology_result.class, metaDataMap); } - public getTopology_result() { + public getUserTopology_result() { } - public 
getTopology_result( + public getUserTopology_result( StormTopology success, NotAliveException e) { @@ -14046,7 +15604,7 @@ public getTopology_result( /** * Performs a deep copy on other. */ - public getTopology_result(getTopology_result other) { + public getUserTopology_result(getUserTopology_result other) { if (other.is_set_success()) { this.success = new StormTopology(other.success); } @@ -14055,8 +15613,8 @@ public getTopology_result(getTopology_result other) { } } - public getTopology_result deepCopy() { - return new getTopology_result(this); + public getUserTopology_result deepCopy() { + return new getUserTopology_result(this); } @Override @@ -14163,12 +15721,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getTopology_result) - return this.equals((getTopology_result)that); + if (that instanceof getUserTopology_result) + return this.equals((getUserTopology_result)that); return false; } - public boolean equals(getTopology_result that) { + public boolean equals(getUserTopology_result that) { if (that == null) return false; @@ -14210,13 +15768,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getTopology_result other) { + public int compareTo(getUserTopology_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getTopology_result typedOther = (getTopology_result)other; + getUserTopology_result typedOther = (getUserTopology_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -14298,7 +15856,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getTopology_result("); + StringBuilder sb = new StringBuilder("getUserTopology_result("); boolean first = true; 
sb.append("success:"); @@ -14342,8 +15900,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getUserTopology_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getUserTopology_args"); + public static class getTopologyMetric_args implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyMetric_args"); private static final org.apache.thrift7.protocol.TField ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("id", org.apache.thrift7.protocol.TType.STRING, (short)1); @@ -14415,13 +15973,13 @@ public String getFieldName() { tmpMap.put(_Fields.ID, new org.apache.thrift7.meta_data.FieldMetaData("id", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getUserTopology_args.class, metaDataMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyMetric_args.class, metaDataMap); } - public getUserTopology_args() { + public getTopologyMetric_args() { } - public getUserTopology_args( + public getTopologyMetric_args( String id) { this(); @@ -14431,14 +15989,14 @@ public getUserTopology_args( /** * Performs a deep copy on other. 
*/ - public getUserTopology_args(getUserTopology_args other) { + public getTopologyMetric_args(getTopologyMetric_args other) { if (other.is_set_id()) { this.id = other.id; } } - public getUserTopology_args deepCopy() { - return new getUserTopology_args(this); + public getTopologyMetric_args deepCopy() { + return new getTopologyMetric_args(this); } @Override @@ -14508,12 +16066,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getUserTopology_args) - return this.equals((getUserTopology_args)that); + if (that instanceof getTopologyMetric_args) + return this.equals((getTopologyMetric_args)that); return false; } - public boolean equals(getUserTopology_args that) { + public boolean equals(getTopologyMetric_args that) { if (that == null) return false; @@ -14541,13 +16099,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getUserTopology_args other) { + public int compareTo(getTopologyMetric_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getUserTopology_args typedOther = (getUserTopology_args)other; + getTopologyMetric_args typedOther = (getTopologyMetric_args)other; lastComparison = Boolean.valueOf(is_set_id()).compareTo(typedOther.is_set_id()); if (lastComparison != 0) { @@ -14607,7 +16165,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getUserTopology_args("); + StringBuilder sb = new StringBuilder("getTopologyMetric_args("); boolean first = true; sb.append("id:"); @@ -14643,13 +16201,13 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } - public static class getUserTopology_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { - private static final 
org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getUserTopology_result"); + public static class getTopologyMetric_result implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("getTopologyMetric_result"); private static final org.apache.thrift7.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift7.protocol.TField("success", org.apache.thrift7.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift7.protocol.TField E_FIELD_DESC = new org.apache.thrift7.protocol.TField("e", org.apache.thrift7.protocol.TType.STRUCT, (short)1); - private StormTopology success; // required + private TopologyMetricInfo success; // required private NotAliveException e; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -14719,18 +16277,18 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift7.meta_data.FieldMetaData("success", org.apache.thrift7.TFieldRequirementType.DEFAULT, - new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, StormTopology.class))); + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, TopologyMetricInfo.class))); tmpMap.put(_Fields.E, new org.apache.thrift7.meta_data.FieldMetaData("e", org.apache.thrift7.TFieldRequirementType.DEFAULT, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getUserTopology_result.class, metaDataMap); + 
org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(getTopologyMetric_result.class, metaDataMap); } - public getUserTopology_result() { + public getTopologyMetric_result() { } - public getUserTopology_result( - StormTopology success, + public getTopologyMetric_result( + TopologyMetricInfo success, NotAliveException e) { this(); @@ -14741,17 +16299,17 @@ public getUserTopology_result( /** * Performs a deep copy on other. */ - public getUserTopology_result(getUserTopology_result other) { + public getTopologyMetric_result(getTopologyMetric_result other) { if (other.is_set_success()) { - this.success = new StormTopology(other.success); + this.success = new TopologyMetricInfo(other.success); } if (other.is_set_e()) { this.e = new NotAliveException(other.e); } } - public getUserTopology_result deepCopy() { - return new getUserTopology_result(this); + public getTopologyMetric_result deepCopy() { + return new getTopologyMetric_result(this); } @Override @@ -14760,11 +16318,11 @@ public void clear() { this.e = null; } - public StormTopology get_success() { + public TopologyMetricInfo get_success() { return this.success; } - public void set_success(StormTopology success) { + public void set_success(TopologyMetricInfo success) { this.success = success; } @@ -14812,7 +16370,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unset_success(); } else { - set_success((StormTopology)value); + set_success((TopologyMetricInfo)value); } break; @@ -14858,12 +16416,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getUserTopology_result) - return this.equals((getUserTopology_result)that); + if (that instanceof getTopologyMetric_result) + return this.equals((getTopologyMetric_result)that); return false; } - public boolean equals(getUserTopology_result that) { + public boolean equals(getTopologyMetric_result that) { if (that == null) return false; @@ -14905,13 
+16463,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(getUserTopology_result other) { + public int compareTo(getTopologyMetric_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getUserTopology_result typedOther = (getUserTopology_result)other; + getTopologyMetric_result typedOther = (getTopologyMetric_result)other; lastComparison = Boolean.valueOf(is_set_success()).compareTo(typedOther.is_set_success()); if (lastComparison != 0) { @@ -14952,7 +16510,7 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift7.protocol.TType.STRUCT) { - this.success = new StormTopology(); + this.success = new TopologyMetricInfo(); this.success.read(iprot); } else { org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); @@ -14993,7 +16551,7 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache @Override public String toString() { - StringBuilder sb = new StringBuilder("getUserTopology_result("); + StringBuilder sb = new StringBuilder("getTopologyMetric_result("); boolean first = true; sb.append("success:"); diff --git a/jstorm-client/src/main/java/backtype/storm/generated/TaskMetricData.java b/jstorm-client/src/main/java/backtype/storm/generated/TaskMetricData.java new file mode 100644 index 000000000..f5311e43b --- /dev/null +++ b/jstorm-client/src/main/java/backtype/storm/generated/TaskMetricData.java @@ -0,0 +1,1135 @@ +/** + * Autogenerated by Thrift Compiler (0.7.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + */ +package backtype.storm.generated; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import 
java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TaskMetricData implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("TaskMetricData"); + + private static final org.apache.thrift7.protocol.TField TASK_ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("task_id", org.apache.thrift7.protocol.TType.I32, (short)1); + private static final org.apache.thrift7.protocol.TField COMPONENT_ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("component_id", org.apache.thrift7.protocol.TType.STRING, (short)2); + private static final org.apache.thrift7.protocol.TField GAUGE_FIELD_DESC = new org.apache.thrift7.protocol.TField("gauge", org.apache.thrift7.protocol.TType.MAP, (short)3); + private static final org.apache.thrift7.protocol.TField COUNTER_FIELD_DESC = new org.apache.thrift7.protocol.TField("counter", org.apache.thrift7.protocol.TType.MAP, (short)4); + private static final org.apache.thrift7.protocol.TField METER_FIELD_DESC = new org.apache.thrift7.protocol.TField("meter", org.apache.thrift7.protocol.TType.MAP, (short)5); + private static final org.apache.thrift7.protocol.TField TIMER_FIELD_DESC = new org.apache.thrift7.protocol.TField("timer", org.apache.thrift7.protocol.TType.MAP, (short)6); + private static final org.apache.thrift7.protocol.TField HISTOGRAM_FIELD_DESC = new org.apache.thrift7.protocol.TField("histogram", org.apache.thrift7.protocol.TType.MAP, (short)7); + + private int task_id; // required + private String component_id; // required + private Map gauge; // required + private Map counter; // required + private Map meter; // required + private Map timer; // required + private Map histogram; // required + + /** The set of fields this struct 
contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { + TASK_ID((short)1, "task_id"), + COMPONENT_ID((short)2, "component_id"), + GAUGE((short)3, "gauge"), + COUNTER((short)4, "counter"), + METER((short)5, "meter"), + TIMER((short)6, "timer"), + HISTOGRAM((short)7, "histogram"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TASK_ID + return TASK_ID; + case 2: // COMPONENT_ID + return COMPONENT_ID; + case 3: // GAUGE + return GAUGE; + case 4: // COUNTER + return COUNTER; + case 5: // METER + return METER; + case 6: // TIMER + return TIMER; + case 7: // HISTOGRAM + return HISTOGRAM; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TASK_ID_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TASK_ID, new org.apache.thrift7.meta_data.FieldMetaData("task_id", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.I32))); + tmpMap.put(_Fields.COMPONENT_ID, new org.apache.thrift7.meta_data.FieldMetaData("component_id", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + tmpMap.put(_Fields.GAUGE, new org.apache.thrift7.meta_data.FieldMetaData("gauge", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.COUNTER, new org.apache.thrift7.meta_data.FieldMetaData("counter", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new 
org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.METER, new org.apache.thrift7.meta_data.FieldMetaData("meter", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.TIMER, new org.apache.thrift7.meta_data.FieldMetaData("timer", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.HISTOGRAM, new org.apache.thrift7.meta_data.FieldMetaData("histogram", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(TaskMetricData.class, metaDataMap); + } + + public TaskMetricData() { + } + + public TaskMetricData( + int task_id, + String component_id, + Map gauge, + Map counter, + Map meter, + Map timer, + Map histogram) + { + this(); + this.task_id = task_id; + set_task_id_isSet(true); + this.component_id = component_id; + this.gauge = gauge; + this.counter = counter; + this.meter = meter; + this.timer = timer; + this.histogram = histogram; + } + + /** + * Performs a deep copy on other. 
+ */ + public TaskMetricData(TaskMetricData other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + this.task_id = other.task_id; + if (other.is_set_component_id()) { + this.component_id = other.component_id; + } + if (other.is_set_gauge()) { + Map __this__gauge = new HashMap(); + for (Map.Entry other_element : other.gauge.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__gauge_copy_key = other_element_key; + + Double __this__gauge_copy_value = other_element_value; + + __this__gauge.put(__this__gauge_copy_key, __this__gauge_copy_value); + } + this.gauge = __this__gauge; + } + if (other.is_set_counter()) { + Map __this__counter = new HashMap(); + for (Map.Entry other_element : other.counter.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__counter_copy_key = other_element_key; + + Double __this__counter_copy_value = other_element_value; + + __this__counter.put(__this__counter_copy_key, __this__counter_copy_value); + } + this.counter = __this__counter; + } + if (other.is_set_meter()) { + Map __this__meter = new HashMap(); + for (Map.Entry other_element : other.meter.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__meter_copy_key = other_element_key; + + Double __this__meter_copy_value = other_element_value; + + __this__meter.put(__this__meter_copy_key, __this__meter_copy_value); + } + this.meter = __this__meter; + } + if (other.is_set_timer()) { + Map __this__timer = new HashMap(); + for (Map.Entry other_element : other.timer.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__timer_copy_key = other_element_key; + + Double __this__timer_copy_value = 
other_element_value; + + __this__timer.put(__this__timer_copy_key, __this__timer_copy_value); + } + this.timer = __this__timer; + } + if (other.is_set_histogram()) { + Map __this__histogram = new HashMap(); + for (Map.Entry other_element : other.histogram.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__histogram_copy_key = other_element_key; + + Double __this__histogram_copy_value = other_element_value; + + __this__histogram.put(__this__histogram_copy_key, __this__histogram_copy_value); + } + this.histogram = __this__histogram; + } + } + + public TaskMetricData deepCopy() { + return new TaskMetricData(this); + } + + @Override + public void clear() { + set_task_id_isSet(false); + this.task_id = 0; + this.component_id = null; + this.gauge = null; + this.counter = null; + this.meter = null; + this.timer = null; + this.histogram = null; + } + + public int get_task_id() { + return this.task_id; + } + + public void set_task_id(int task_id) { + this.task_id = task_id; + set_task_id_isSet(true); + } + + public void unset_task_id() { + __isset_bit_vector.clear(__TASK_ID_ISSET_ID); + } + + /** Returns true if field task_id is set (has been assigned a value) and false otherwise */ + public boolean is_set_task_id() { + return __isset_bit_vector.get(__TASK_ID_ISSET_ID); + } + + public void set_task_id_isSet(boolean value) { + __isset_bit_vector.set(__TASK_ID_ISSET_ID, value); + } + + public String get_component_id() { + return this.component_id; + } + + public void set_component_id(String component_id) { + this.component_id = component_id; + } + + public void unset_component_id() { + this.component_id = null; + } + + /** Returns true if field component_id is set (has been assigned a value) and false otherwise */ + public boolean is_set_component_id() { + return this.component_id != null; + } + + public void set_component_id_isSet(boolean value) { + if (!value) { + this.component_id 
= null; + } + } + + public int get_gauge_size() { + return (this.gauge == null) ? 0 : this.gauge.size(); + } + + public void put_to_gauge(String key, double val) { + if (this.gauge == null) { + this.gauge = new HashMap(); + } + this.gauge.put(key, val); + } + + public Map get_gauge() { + return this.gauge; + } + + public void set_gauge(Map gauge) { + this.gauge = gauge; + } + + public void unset_gauge() { + this.gauge = null; + } + + /** Returns true if field gauge is set (has been assigned a value) and false otherwise */ + public boolean is_set_gauge() { + return this.gauge != null; + } + + public void set_gauge_isSet(boolean value) { + if (!value) { + this.gauge = null; + } + } + + public int get_counter_size() { + return (this.counter == null) ? 0 : this.counter.size(); + } + + public void put_to_counter(String key, double val) { + if (this.counter == null) { + this.counter = new HashMap(); + } + this.counter.put(key, val); + } + + public Map get_counter() { + return this.counter; + } + + public void set_counter(Map counter) { + this.counter = counter; + } + + public void unset_counter() { + this.counter = null; + } + + /** Returns true if field counter is set (has been assigned a value) and false otherwise */ + public boolean is_set_counter() { + return this.counter != null; + } + + public void set_counter_isSet(boolean value) { + if (!value) { + this.counter = null; + } + } + + public int get_meter_size() { + return (this.meter == null) ? 
0 : this.meter.size(); + } + + public void put_to_meter(String key, double val) { + if (this.meter == null) { + this.meter = new HashMap(); + } + this.meter.put(key, val); + } + + public Map get_meter() { + return this.meter; + } + + public void set_meter(Map meter) { + this.meter = meter; + } + + public void unset_meter() { + this.meter = null; + } + + /** Returns true if field meter is set (has been assigned a value) and false otherwise */ + public boolean is_set_meter() { + return this.meter != null; + } + + public void set_meter_isSet(boolean value) { + if (!value) { + this.meter = null; + } + } + + public int get_timer_size() { + return (this.timer == null) ? 0 : this.timer.size(); + } + + public void put_to_timer(String key, double val) { + if (this.timer == null) { + this.timer = new HashMap(); + } + this.timer.put(key, val); + } + + public Map get_timer() { + return this.timer; + } + + public void set_timer(Map timer) { + this.timer = timer; + } + + public void unset_timer() { + this.timer = null; + } + + /** Returns true if field timer is set (has been assigned a value) and false otherwise */ + public boolean is_set_timer() { + return this.timer != null; + } + + public void set_timer_isSet(boolean value) { + if (!value) { + this.timer = null; + } + } + + public int get_histogram_size() { + return (this.histogram == null) ? 
0 : this.histogram.size(); + } + + public void put_to_histogram(String key, double val) { + if (this.histogram == null) { + this.histogram = new HashMap(); + } + this.histogram.put(key, val); + } + + public Map get_histogram() { + return this.histogram; + } + + public void set_histogram(Map histogram) { + this.histogram = histogram; + } + + public void unset_histogram() { + this.histogram = null; + } + + /** Returns true if field histogram is set (has been assigned a value) and false otherwise */ + public boolean is_set_histogram() { + return this.histogram != null; + } + + public void set_histogram_isSet(boolean value) { + if (!value) { + this.histogram = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TASK_ID: + if (value == null) { + unset_task_id(); + } else { + set_task_id((Integer)value); + } + break; + + case COMPONENT_ID: + if (value == null) { + unset_component_id(); + } else { + set_component_id((String)value); + } + break; + + case GAUGE: + if (value == null) { + unset_gauge(); + } else { + set_gauge((Map)value); + } + break; + + case COUNTER: + if (value == null) { + unset_counter(); + } else { + set_counter((Map)value); + } + break; + + case METER: + if (value == null) { + unset_meter(); + } else { + set_meter((Map)value); + } + break; + + case TIMER: + if (value == null) { + unset_timer(); + } else { + set_timer((Map)value); + } + break; + + case HISTOGRAM: + if (value == null) { + unset_histogram(); + } else { + set_histogram((Map)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TASK_ID: + return Integer.valueOf(get_task_id()); + + case COMPONENT_ID: + return get_component_id(); + + case GAUGE: + return get_gauge(); + + case COUNTER: + return get_counter(); + + case METER: + return get_meter(); + + case TIMER: + return get_timer(); + + case HISTOGRAM: + return get_histogram(); + + } + throw new IllegalStateException(); + } + + /** Returns 
true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TASK_ID: + return is_set_task_id(); + case COMPONENT_ID: + return is_set_component_id(); + case GAUGE: + return is_set_gauge(); + case COUNTER: + return is_set_counter(); + case METER: + return is_set_meter(); + case TIMER: + return is_set_timer(); + case HISTOGRAM: + return is_set_histogram(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TaskMetricData) + return this.equals((TaskMetricData)that); + return false; + } + + public boolean equals(TaskMetricData that) { + if (that == null) + return false; + + boolean this_present_task_id = true; + boolean that_present_task_id = true; + if (this_present_task_id || that_present_task_id) { + if (!(this_present_task_id && that_present_task_id)) + return false; + if (this.task_id != that.task_id) + return false; + } + + boolean this_present_component_id = true && this.is_set_component_id(); + boolean that_present_component_id = true && that.is_set_component_id(); + if (this_present_component_id || that_present_component_id) { + if (!(this_present_component_id && that_present_component_id)) + return false; + if (!this.component_id.equals(that.component_id)) + return false; + } + + boolean this_present_gauge = true && this.is_set_gauge(); + boolean that_present_gauge = true && that.is_set_gauge(); + if (this_present_gauge || that_present_gauge) { + if (!(this_present_gauge && that_present_gauge)) + return false; + if (!this.gauge.equals(that.gauge)) + return false; + } + + boolean this_present_counter = true && this.is_set_counter(); + boolean that_present_counter = true && that.is_set_counter(); + if (this_present_counter || that_present_counter) { + if (!(this_present_counter && 
that_present_counter)) + return false; + if (!this.counter.equals(that.counter)) + return false; + } + + boolean this_present_meter = true && this.is_set_meter(); + boolean that_present_meter = true && that.is_set_meter(); + if (this_present_meter || that_present_meter) { + if (!(this_present_meter && that_present_meter)) + return false; + if (!this.meter.equals(that.meter)) + return false; + } + + boolean this_present_timer = true && this.is_set_timer(); + boolean that_present_timer = true && that.is_set_timer(); + if (this_present_timer || that_present_timer) { + if (!(this_present_timer && that_present_timer)) + return false; + if (!this.timer.equals(that.timer)) + return false; + } + + boolean this_present_histogram = true && this.is_set_histogram(); + boolean that_present_histogram = true && that.is_set_histogram(); + if (this_present_histogram || that_present_histogram) { + if (!(this_present_histogram && that_present_histogram)) + return false; + if (!this.histogram.equals(that.histogram)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_task_id = true; + builder.append(present_task_id); + if (present_task_id) + builder.append(task_id); + + boolean present_component_id = true && (is_set_component_id()); + builder.append(present_component_id); + if (present_component_id) + builder.append(component_id); + + boolean present_gauge = true && (is_set_gauge()); + builder.append(present_gauge); + if (present_gauge) + builder.append(gauge); + + boolean present_counter = true && (is_set_counter()); + builder.append(present_counter); + if (present_counter) + builder.append(counter); + + boolean present_meter = true && (is_set_meter()); + builder.append(present_meter); + if (present_meter) + builder.append(meter); + + boolean present_timer = true && (is_set_timer()); + builder.append(present_timer); + if (present_timer) + builder.append(timer); + + boolean 
present_histogram = true && (is_set_histogram()); + builder.append(present_histogram); + if (present_histogram) + builder.append(histogram); + + return builder.toHashCode(); + } + + public int compareTo(TaskMetricData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TaskMetricData typedOther = (TaskMetricData)other; + + lastComparison = Boolean.valueOf(is_set_task_id()).compareTo(typedOther.is_set_task_id()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_task_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.task_id, typedOther.task_id); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_component_id()).compareTo(typedOther.is_set_component_id()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_component_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.component_id, typedOther.component_id); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_gauge()).compareTo(typedOther.is_set_gauge()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_gauge()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.gauge, typedOther.gauge); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_counter()).compareTo(typedOther.is_set_counter()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_counter()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.counter, typedOther.counter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_meter()).compareTo(typedOther.is_set_meter()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_meter()) { + lastComparison = 
org.apache.thrift7.TBaseHelper.compareTo(this.meter, typedOther.meter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_timer()).compareTo(typedOther.is_set_timer()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_timer()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.timer, typedOther.timer); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_histogram()).compareTo(typedOther.is_set_histogram()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_histogram()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.histogram, typedOther.histogram); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // TASK_ID + if (field.type == org.apache.thrift7.protocol.TType.I32) { + this.task_id = iprot.readI32(); + set_task_id_isSet(true); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // COMPONENT_ID + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.component_id = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // GAUGE + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map205 = iprot.readMapBegin(); + this.gauge = new HashMap(2*_map205.size); + for (int _i206 = 0; _i206 < _map205.size; ++_i206) + { + String _key207; // required + double _val208; // 
required + _key207 = iprot.readString(); + _val208 = iprot.readDouble(); + this.gauge.put(_key207, _val208); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // COUNTER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map209 = iprot.readMapBegin(); + this.counter = new HashMap(2*_map209.size); + for (int _i210 = 0; _i210 < _map209.size; ++_i210) + { + String _key211; // required + double _val212; // required + _key211 = iprot.readString(); + _val212 = iprot.readDouble(); + this.counter.put(_key211, _val212); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 5: // METER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map213 = iprot.readMapBegin(); + this.meter = new HashMap(2*_map213.size); + for (int _i214 = 0; _i214 < _map213.size; ++_i214) + { + String _key215; // required + double _val216; // required + _key215 = iprot.readString(); + _val216 = iprot.readDouble(); + this.meter.put(_key215, _val216); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 6: // TIMER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map217 = iprot.readMapBegin(); + this.timer = new HashMap(2*_map217.size); + for (int _i218 = 0; _i218 < _map217.size; ++_i218) + { + String _key219; // required + double _val220; // required + _key219 = iprot.readString(); + _val220 = iprot.readDouble(); + this.timer.put(_key219, _val220); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 7: // HISTOGRAM + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map221 = iprot.readMapBegin(); + 
this.histogram = new HashMap(2*_map221.size); + for (int _i222 = 0; _i222 < _map221.size; ++_i222) + { + String _key223; // required + double _val224; // required + _key223 = iprot.readString(); + _val224 = iprot.readDouble(); + this.histogram.put(_key223, _val224); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(TASK_ID_FIELD_DESC); + oprot.writeI32(this.task_id); + oprot.writeFieldEnd(); + if (this.component_id != null) { + oprot.writeFieldBegin(COMPONENT_ID_FIELD_DESC); + oprot.writeString(this.component_id); + oprot.writeFieldEnd(); + } + if (this.gauge != null) { + oprot.writeFieldBegin(GAUGE_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.gauge.size())); + for (Map.Entry _iter225 : this.gauge.entrySet()) + { + oprot.writeString(_iter225.getKey()); + oprot.writeDouble(_iter225.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.counter != null) { + oprot.writeFieldBegin(COUNTER_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.counter.size())); + for (Map.Entry _iter226 : this.counter.entrySet()) + { + oprot.writeString(_iter226.getKey()); + oprot.writeDouble(_iter226.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.meter != null) { + oprot.writeFieldBegin(METER_FIELD_DESC); + { + oprot.writeMapBegin(new 
org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.meter.size())); + for (Map.Entry _iter227 : this.meter.entrySet()) + { + oprot.writeString(_iter227.getKey()); + oprot.writeDouble(_iter227.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.timer != null) { + oprot.writeFieldBegin(TIMER_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.timer.size())); + for (Map.Entry _iter228 : this.timer.entrySet()) + { + oprot.writeString(_iter228.getKey()); + oprot.writeDouble(_iter228.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.histogram != null) { + oprot.writeFieldBegin(HISTOGRAM_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.histogram.size())); + for (Map.Entry _iter229 : this.histogram.entrySet()) + { + oprot.writeString(_iter229.getKey()); + oprot.writeDouble(_iter229.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TaskMetricData("); + boolean first = true; + + sb.append("task_id:"); + sb.append(this.task_id); + first = false; + if (!first) sb.append(", "); + sb.append("component_id:"); + if (this.component_id == null) { + sb.append("null"); + } else { + sb.append(this.component_id); + } + first = false; + if (!first) sb.append(", "); + sb.append("gauge:"); + if (this.gauge == null) { + sb.append("null"); + } else { + sb.append(this.gauge); + } + first = false; + if (!first) sb.append(", "); + sb.append("counter:"); + if (this.counter == null) { + sb.append("null"); + } else { + sb.append(this.counter); + } + first = false; + if (!first) 
sb.append(", "); + sb.append("meter:"); + if (this.meter == null) { + sb.append("null"); + } else { + sb.append(this.meter); + } + first = false; + if (!first) sb.append(", "); + sb.append("timer:"); + if (this.timer == null) { + sb.append("null"); + } else { + sb.append(this.timer); + } + first = false; + if (!first) sb.append(", "); + sb.append("histogram:"); + if (this.histogram == null) { + sb.append("null"); + } else { + sb.append(this.histogram); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + if (!is_set_task_id()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'task_id' is unset! Struct:" + toString()); + } + + if (!is_set_component_id()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'component_id' is unset! Struct:" + toString()); + } + + if (!is_set_gauge()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'gauge' is unset! Struct:" + toString()); + } + + if (!is_set_counter()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'counter' is unset! Struct:" + toString()); + } + + if (!is_set_meter()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'meter' is unset! Struct:" + toString()); + } + + if (!is_set_timer()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'timer' is unset! Struct:" + toString()); + } + + if (!is_set_histogram()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'histogram' is unset! 
Struct:" + toString()); + } + + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + +} + diff --git a/jstorm-client/src/main/java/backtype/storm/generated/TaskSummary.java b/jstorm-client/src/main/java/backtype/storm/generated/TaskSummary.java index 5cc690989..00832fbc0 100644 --- a/jstorm-client/src/main/java/backtype/storm/generated/TaskSummary.java +++ b/jstorm-client/src/main/java/backtype/storm/generated/TaskSummary.java @@ -31,6 +31,7 @@ public class TaskSummary implements org.apache.thrift7.TBase errors; // required private TaskStats stats; // required + private String component_type; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { @@ -48,7 +50,8 @@ public enum _Fields implements org.apache.thrift7.TFieldIdEnum { PORT((short)4, "port"), UPTIME_SECS((short)5, "uptime_secs"), ERRORS((short)6, "errors"), - STATS((short)7, "stats"); + STATS((short)7, "stats"), + COMPONENT_TYPE((short)8, "component_type"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return ERRORS; case 7: // STATS return STATS; + case 8: // COMPONENT_TYPE + return COMPONENT_TYPE; default: return null; } @@ -140,6 +145,8 @@ public String getFieldName() { new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, ErrorInfo.class)))); tmpMap.put(_Fields.STATS, new org.apache.thrift7.meta_data.FieldMetaData("stats", org.apache.thrift7.TFieldRequirementType.OPTIONAL, new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, TaskStats.class))); + tmpMap.put(_Fields.COMPONENT_TYPE, new org.apache.thrift7.meta_data.FieldMetaData("component_type", org.apache.thrift7.TFieldRequirementType.OPTIONAL, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(TaskSummary.class, metaDataMap); } @@ -192,6 +199,9 @@ public TaskSummary(TaskSummary other) { if (other.is_set_stats()) { this.stats = new TaskStats(other.stats); } + if (other.is_set_component_type()) { + this.component_type = other.component_type; + } } public TaskSummary deepCopy() { @@ -210,6 +220,7 @@ public void clear() { this.uptime_secs = 0; this.errors = null; this.stats = null; + this.component_type = null; } public int get_task_id() { @@ -385,6 +396,29 @@ public void set_stats_isSet(boolean value) { } } + public String get_component_type() { + return this.component_type; + } + + public void set_component_type(String 
component_type) { + this.component_type = component_type; + } + + public void unset_component_type() { + this.component_type = null; + } + + /** Returns true if field component_type is set (has been assigned a value) and false otherwise */ + public boolean is_set_component_type() { + return this.component_type != null; + } + + public void set_component_type_isSet(boolean value) { + if (!value) { + this.component_type = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TASK_ID: @@ -443,6 +477,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case COMPONENT_TYPE: + if (value == null) { + unset_component_type(); + } else { + set_component_type((String)value); + } + break; + } } @@ -469,6 +511,9 @@ public Object getFieldValue(_Fields field) { case STATS: return get_stats(); + case COMPONENT_TYPE: + return get_component_type(); + } throw new IllegalStateException(); } @@ -494,6 +539,8 @@ public boolean isSet(_Fields field) { return is_set_errors(); case STATS: return is_set_stats(); + case COMPONENT_TYPE: + return is_set_component_type(); } throw new IllegalStateException(); } @@ -574,6 +621,15 @@ public boolean equals(TaskSummary that) { return false; } + boolean this_present_component_type = true && this.is_set_component_type(); + boolean that_present_component_type = true && that.is_set_component_type(); + if (this_present_component_type || that_present_component_type) { + if (!(this_present_component_type && that_present_component_type)) + return false; + if (!this.component_type.equals(that.component_type)) + return false; + } + return true; } @@ -616,6 +672,11 @@ public int hashCode() { if (present_stats) builder.append(stats); + boolean present_component_type = true && (is_set_component_type()); + builder.append(present_component_type); + if (present_component_type) + builder.append(component_type); + return builder.toHashCode(); } @@ -697,6 +758,16 @@ public int compareTo(TaskSummary other) { 
return lastComparison; } } + lastComparison = Boolean.valueOf(is_set_component_type()).compareTo(typedOther.is_set_component_type()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_component_type()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.component_type, typedOther.component_type); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -778,6 +849,13 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } break; + case 8: // COMPONENT_TYPE + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.component_type = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -829,6 +907,13 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache oprot.writeFieldEnd(); } } + if (this.component_type != null) { + if (is_set_component_type()) { + oprot.writeFieldBegin(COMPONENT_TYPE_FIELD_DESC); + oprot.writeString(this.component_type); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -883,6 +968,16 @@ public String toString() { } first = false; } + if (is_set_component_type()) { + if (!first) sb.append(", "); + sb.append("component_type:"); + if (this.component_type == null) { + sb.append("null"); + } else { + sb.append(this.component_type); + } + first = false; + } sb.append(")"); return sb.toString(); } diff --git a/jstorm-client/src/main/java/backtype/storm/generated/TopologyMetricInfo.java b/jstorm-client/src/main/java/backtype/storm/generated/TopologyMetricInfo.java new file mode 100644 index 000000000..bcfab1d5a --- /dev/null +++ b/jstorm-client/src/main/java/backtype/storm/generated/TopologyMetricInfo.java @@ -0,0 +1,594 @@ +/** + * Autogenerated by Thrift Compiler (0.7.0) + * + * DO NOT 
EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + */ +package backtype.storm.generated; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TopologyMetricInfo implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("TopologyMetricInfo"); + + private static final org.apache.thrift7.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift7.protocol.TField("topology_id", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField TASK_METRIC_LIST_FIELD_DESC = new org.apache.thrift7.protocol.TField("task_metric_list", org.apache.thrift7.protocol.TType.LIST, (short)2); + private static final org.apache.thrift7.protocol.TField WORKER_METRIC_LIST_FIELD_DESC = new org.apache.thrift7.protocol.TField("worker_metric_list", org.apache.thrift7.protocol.TType.LIST, (short)3); + + private String topology_id; // required + private List task_metric_list; // required + private List worker_metric_list; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { + TOPOLOGY_ID((short)1, "topology_id"), + TASK_METRIC_LIST((short)2, "task_metric_list"), + WORKER_METRIC_LIST((short)3, "worker_metric_list"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TOPOLOGY_ID + return TOPOLOGY_ID; + case 2: // TASK_METRIC_LIST + return TASK_METRIC_LIST; + case 3: // WORKER_METRIC_LIST + return WORKER_METRIC_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift7.meta_data.FieldMetaData("topology_id", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + tmpMap.put(_Fields.TASK_METRIC_LIST, new org.apache.thrift7.meta_data.FieldMetaData("task_metric_list", org.apache.thrift7.TFieldRequirementType.OPTIONAL, + new org.apache.thrift7.meta_data.ListMetaData(org.apache.thrift7.protocol.TType.LIST, + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, TaskMetricData.class)))); + tmpMap.put(_Fields.WORKER_METRIC_LIST, new org.apache.thrift7.meta_data.FieldMetaData("worker_metric_list", org.apache.thrift7.TFieldRequirementType.OPTIONAL, + new org.apache.thrift7.meta_data.ListMetaData(org.apache.thrift7.protocol.TType.LIST, + new org.apache.thrift7.meta_data.StructMetaData(org.apache.thrift7.protocol.TType.STRUCT, WorkerMetricData.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(TopologyMetricInfo.class, metaDataMap); + } + + public TopologyMetricInfo() { + } + + public TopologyMetricInfo( + String topology_id) + { + this(); + this.topology_id = topology_id; + } + + /** + * Performs a deep copy on other. 
+ */ + public TopologyMetricInfo(TopologyMetricInfo other) { + if (other.is_set_topology_id()) { + this.topology_id = other.topology_id; + } + if (other.is_set_task_metric_list()) { + List __this__task_metric_list = new ArrayList(); + for (TaskMetricData other_element : other.task_metric_list) { + __this__task_metric_list.add(new TaskMetricData(other_element)); + } + this.task_metric_list = __this__task_metric_list; + } + if (other.is_set_worker_metric_list()) { + List __this__worker_metric_list = new ArrayList(); + for (WorkerMetricData other_element : other.worker_metric_list) { + __this__worker_metric_list.add(new WorkerMetricData(other_element)); + } + this.worker_metric_list = __this__worker_metric_list; + } + } + + public TopologyMetricInfo deepCopy() { + return new TopologyMetricInfo(this); + } + + @Override + public void clear() { + this.topology_id = null; + this.task_metric_list = null; + this.worker_metric_list = null; + } + + public String get_topology_id() { + return this.topology_id; + } + + public void set_topology_id(String topology_id) { + this.topology_id = topology_id; + } + + public void unset_topology_id() { + this.topology_id = null; + } + + /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */ + public boolean is_set_topology_id() { + return this.topology_id != null; + } + + public void set_topology_id_isSet(boolean value) { + if (!value) { + this.topology_id = null; + } + } + + public int get_task_metric_list_size() { + return (this.task_metric_list == null) ? 0 : this.task_metric_list.size(); + } + + public java.util.Iterator get_task_metric_list_iterator() { + return (this.task_metric_list == null) ? 
null : this.task_metric_list.iterator(); + } + + public void add_to_task_metric_list(TaskMetricData elem) { + if (this.task_metric_list == null) { + this.task_metric_list = new ArrayList(); + } + this.task_metric_list.add(elem); + } + + public List get_task_metric_list() { + return this.task_metric_list; + } + + public void set_task_metric_list(List task_metric_list) { + this.task_metric_list = task_metric_list; + } + + public void unset_task_metric_list() { + this.task_metric_list = null; + } + + /** Returns true if field task_metric_list is set (has been assigned a value) and false otherwise */ + public boolean is_set_task_metric_list() { + return this.task_metric_list != null; + } + + public void set_task_metric_list_isSet(boolean value) { + if (!value) { + this.task_metric_list = null; + } + } + + public int get_worker_metric_list_size() { + return (this.worker_metric_list == null) ? 0 : this.worker_metric_list.size(); + } + + public java.util.Iterator get_worker_metric_list_iterator() { + return (this.worker_metric_list == null) ? 
null : this.worker_metric_list.iterator(); + } + + public void add_to_worker_metric_list(WorkerMetricData elem) { + if (this.worker_metric_list == null) { + this.worker_metric_list = new ArrayList(); + } + this.worker_metric_list.add(elem); + } + + public List get_worker_metric_list() { + return this.worker_metric_list; + } + + public void set_worker_metric_list(List worker_metric_list) { + this.worker_metric_list = worker_metric_list; + } + + public void unset_worker_metric_list() { + this.worker_metric_list = null; + } + + /** Returns true if field worker_metric_list is set (has been assigned a value) and false otherwise */ + public boolean is_set_worker_metric_list() { + return this.worker_metric_list != null; + } + + public void set_worker_metric_list_isSet(boolean value) { + if (!value) { + this.worker_metric_list = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TOPOLOGY_ID: + if (value == null) { + unset_topology_id(); + } else { + set_topology_id((String)value); + } + break; + + case TASK_METRIC_LIST: + if (value == null) { + unset_task_metric_list(); + } else { + set_task_metric_list((List)value); + } + break; + + case WORKER_METRIC_LIST: + if (value == null) { + unset_worker_metric_list(); + } else { + set_worker_metric_list((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TOPOLOGY_ID: + return get_topology_id(); + + case TASK_METRIC_LIST: + return get_task_metric_list(); + + case WORKER_METRIC_LIST: + return get_worker_metric_list(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TOPOLOGY_ID: + return is_set_topology_id(); + case TASK_METRIC_LIST: + return is_set_task_metric_list(); + case 
WORKER_METRIC_LIST: + return is_set_worker_metric_list(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TopologyMetricInfo) + return this.equals((TopologyMetricInfo)that); + return false; + } + + public boolean equals(TopologyMetricInfo that) { + if (that == null) + return false; + + boolean this_present_topology_id = true && this.is_set_topology_id(); + boolean that_present_topology_id = true && that.is_set_topology_id(); + if (this_present_topology_id || that_present_topology_id) { + if (!(this_present_topology_id && that_present_topology_id)) + return false; + if (!this.topology_id.equals(that.topology_id)) + return false; + } + + boolean this_present_task_metric_list = true && this.is_set_task_metric_list(); + boolean that_present_task_metric_list = true && that.is_set_task_metric_list(); + if (this_present_task_metric_list || that_present_task_metric_list) { + if (!(this_present_task_metric_list && that_present_task_metric_list)) + return false; + if (!this.task_metric_list.equals(that.task_metric_list)) + return false; + } + + boolean this_present_worker_metric_list = true && this.is_set_worker_metric_list(); + boolean that_present_worker_metric_list = true && that.is_set_worker_metric_list(); + if (this_present_worker_metric_list || that_present_worker_metric_list) { + if (!(this_present_worker_metric_list && that_present_worker_metric_list)) + return false; + if (!this.worker_metric_list.equals(that.worker_metric_list)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_topology_id = true && (is_set_topology_id()); + builder.append(present_topology_id); + if (present_topology_id) + builder.append(topology_id); + + boolean present_task_metric_list = true && (is_set_task_metric_list()); + builder.append(present_task_metric_list); + if 
(present_task_metric_list) + builder.append(task_metric_list); + + boolean present_worker_metric_list = true && (is_set_worker_metric_list()); + builder.append(present_worker_metric_list); + if (present_worker_metric_list) + builder.append(worker_metric_list); + + return builder.toHashCode(); + } + + public int compareTo(TopologyMetricInfo other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + TopologyMetricInfo typedOther = (TopologyMetricInfo)other; + + lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(typedOther.is_set_topology_id()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_topology_id()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.topology_id, typedOther.topology_id); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_task_metric_list()).compareTo(typedOther.is_set_task_metric_list()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_task_metric_list()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.task_metric_list, typedOther.task_metric_list); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_worker_metric_list()).compareTo(typedOther.is_set_worker_metric_list()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_worker_metric_list()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.worker_metric_list, typedOther.worker_metric_list); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = 
iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // TOPOLOGY_ID + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.topology_id = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // TASK_METRIC_LIST + if (field.type == org.apache.thrift7.protocol.TType.LIST) { + { + org.apache.thrift7.protocol.TList _list255 = iprot.readListBegin(); + this.task_metric_list = new ArrayList(_list255.size); + for (int _i256 = 0; _i256 < _list255.size; ++_i256) + { + TaskMetricData _elem257; // required + _elem257 = new TaskMetricData(); + _elem257.read(iprot); + this.task_metric_list.add(_elem257); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // WORKER_METRIC_LIST + if (field.type == org.apache.thrift7.protocol.TType.LIST) { + { + org.apache.thrift7.protocol.TList _list258 = iprot.readListBegin(); + this.worker_metric_list = new ArrayList(_list258.size); + for (int _i259 = 0; _i259 < _list258.size; ++_i259) + { + WorkerMetricData _elem260; // required + _elem260 = new WorkerMetricData(); + _elem260.read(iprot); + this.worker_metric_list.add(_elem260); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.topology_id != null) { + oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC); + oprot.writeString(this.topology_id); + oprot.writeFieldEnd(); + } + if (this.task_metric_list != null) { + if (is_set_task_metric_list()) { + 
oprot.writeFieldBegin(TASK_METRIC_LIST_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift7.protocol.TList(org.apache.thrift7.protocol.TType.STRUCT, this.task_metric_list.size())); + for (TaskMetricData _iter261 : this.task_metric_list) + { + _iter261.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (this.worker_metric_list != null) { + if (is_set_worker_metric_list()) { + oprot.writeFieldBegin(WORKER_METRIC_LIST_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift7.protocol.TList(org.apache.thrift7.protocol.TType.STRUCT, this.worker_metric_list.size())); + for (WorkerMetricData _iter262 : this.worker_metric_list) + { + _iter262.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TopologyMetricInfo("); + boolean first = true; + + sb.append("topology_id:"); + if (this.topology_id == null) { + sb.append("null"); + } else { + sb.append(this.topology_id); + } + first = false; + if (is_set_task_metric_list()) { + if (!first) sb.append(", "); + sb.append("task_metric_list:"); + if (this.task_metric_list == null) { + sb.append("null"); + } else { + sb.append(this.task_metric_list); + } + first = false; + } + if (is_set_worker_metric_list()) { + if (!first) sb.append(", "); + sb.append("worker_metric_list:"); + if (this.worker_metric_list == null) { + sb.append("null"); + } else { + sb.append(this.worker_metric_list); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + if (!is_set_topology_id()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'topology_id' is unset! 
Struct:" + toString()); + } + + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + +} + diff --git a/jstorm-client/src/main/java/backtype/storm/generated/TopologySummary.java b/jstorm-client/src/main/java/backtype/storm/generated/TopologySummary.java index ee4a6da70..e2ee70892 100644 --- a/jstorm-client/src/main/java/backtype/storm/generated/TopologySummary.java +++ b/jstorm-client/src/main/java/backtype/storm/generated/TopologySummary.java @@ -30,6 +30,7 @@ public class TopologySummary implements org.apache.thrift7.TBase byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return NUM_TASKS; case 6: // NUM_WORKERS return NUM_WORKERS; + case 7: // ERROR_INFO + return ERROR_INFO; default: return null; } @@ -132,6 +137,8 @@ public String getFieldName() { new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.I32))); tmpMap.put(_Fields.NUM_WORKERS, new org.apache.thrift7.meta_data.FieldMetaData("num_workers", org.apache.thrift7.TFieldRequirementType.REQUIRED, new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.I32))); + tmpMap.put(_Fields.ERROR_INFO, new org.apache.thrift7.meta_data.FieldMetaData("error_info", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); metaDataMap = 
Collections.unmodifiableMap(tmpMap); org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(TopologySummary.class, metaDataMap); } @@ -145,7 +152,8 @@ public TopologySummary( String status, int uptime_secs, int num_tasks, - int num_workers) + int num_workers, + String error_info) { this(); this.id = id; @@ -157,6 +165,7 @@ public TopologySummary( set_num_tasks_isSet(true); this.num_workers = num_workers; set_num_workers_isSet(true); + this.error_info = error_info; } /** @@ -177,6 +186,9 @@ public TopologySummary(TopologySummary other) { this.uptime_secs = other.uptime_secs; this.num_tasks = other.num_tasks; this.num_workers = other.num_workers; + if (other.is_set_error_info()) { + this.error_info = other.error_info; + } } public TopologySummary deepCopy() { @@ -194,6 +206,7 @@ public void clear() { this.num_tasks = 0; set_num_workers_isSet(false); this.num_workers = 0; + this.error_info = null; } public String get_id() { @@ -331,6 +344,29 @@ public void set_num_workers_isSet(boolean value) { __isset_bit_vector.set(__NUM_WORKERS_ISSET_ID, value); } + public String get_error_info() { + return this.error_info; + } + + public void set_error_info(String error_info) { + this.error_info = error_info; + } + + public void unset_error_info() { + this.error_info = null; + } + + /** Returns true if field error_info is set (has been assigned a value) and false otherwise */ + public boolean is_set_error_info() { + return this.error_info != null; + } + + public void set_error_info_isSet(boolean value) { + if (!value) { + this.error_info = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case ID: @@ -381,6 +417,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ERROR_INFO: + if (value == null) { + unset_error_info(); + } else { + set_error_info((String)value); + } + break; + } } @@ -404,6 +448,9 @@ public Object getFieldValue(_Fields field) { case NUM_WORKERS: return Integer.valueOf(get_num_workers()); + 
case ERROR_INFO: + return get_error_info(); + } throw new IllegalStateException(); } @@ -427,6 +474,8 @@ public boolean isSet(_Fields field) { return is_set_num_tasks(); case NUM_WORKERS: return is_set_num_workers(); + case ERROR_INFO: + return is_set_error_info(); } throw new IllegalStateException(); } @@ -498,6 +547,15 @@ public boolean equals(TopologySummary that) { return false; } + boolean this_present_error_info = true && this.is_set_error_info(); + boolean that_present_error_info = true && that.is_set_error_info(); + if (this_present_error_info || that_present_error_info) { + if (!(this_present_error_info && that_present_error_info)) + return false; + if (!this.error_info.equals(that.error_info)) + return false; + } + return true; } @@ -535,6 +593,11 @@ public int hashCode() { if (present_num_workers) builder.append(num_workers); + boolean present_error_info = true && (is_set_error_info()); + builder.append(present_error_info); + if (present_error_info) + builder.append(error_info); + return builder.toHashCode(); } @@ -606,6 +669,16 @@ public int compareTo(TopologySummary other) { return lastComparison; } } + lastComparison = Boolean.valueOf(is_set_error_info()).compareTo(typedOther.is_set_error_info()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_error_info()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.error_info, typedOther.error_info); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -668,6 +741,13 @@ public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache. 
org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } break; + case 7: // ERROR_INFO + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.error_info = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -705,6 +785,11 @@ public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache oprot.writeFieldBegin(NUM_WORKERS_FIELD_DESC); oprot.writeI32(this.num_workers); oprot.writeFieldEnd(); + if (this.error_info != null) { + oprot.writeFieldBegin(ERROR_INFO_FIELD_DESC); + oprot.writeString(this.error_info); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -749,6 +834,14 @@ public String toString() { sb.append("num_workers:"); sb.append(this.num_workers); first = false; + if (!first) sb.append(", "); + sb.append("error_info:"); + if (this.error_info == null) { + sb.append("null"); + } else { + sb.append(this.error_info); + } + first = false; sb.append(")"); return sb.toString(); } @@ -779,6 +872,10 @@ public void validate() throws org.apache.thrift7.TException { throw new org.apache.thrift7.protocol.TProtocolException("Required field 'num_workers' is unset! Struct:" + toString()); } + if (!is_set_error_info()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'error_info' is unset! 
Struct:" + toString()); + } + } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { diff --git a/jstorm-client/src/main/java/backtype/storm/generated/WorkerMetricData.java b/jstorm-client/src/main/java/backtype/storm/generated/WorkerMetricData.java new file mode 100644 index 000000000..148a6a431 --- /dev/null +++ b/jstorm-client/src/main/java/backtype/storm/generated/WorkerMetricData.java @@ -0,0 +1,1135 @@ +/** + * Autogenerated by Thrift Compiler (0.7.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + */ +package backtype.storm.generated; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class WorkerMetricData implements org.apache.thrift7.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift7.protocol.TStruct STRUCT_DESC = new org.apache.thrift7.protocol.TStruct("WorkerMetricData"); + + private static final org.apache.thrift7.protocol.TField HOSTNAME_FIELD_DESC = new org.apache.thrift7.protocol.TField("hostname", org.apache.thrift7.protocol.TType.STRING, (short)1); + private static final org.apache.thrift7.protocol.TField PORT_FIELD_DESC = new org.apache.thrift7.protocol.TField("port", org.apache.thrift7.protocol.TType.I32, (short)2); + private static final org.apache.thrift7.protocol.TField GAUGE_FIELD_DESC = new org.apache.thrift7.protocol.TField("gauge", org.apache.thrift7.protocol.TType.MAP, (short)3); + private static final org.apache.thrift7.protocol.TField COUNTER_FIELD_DESC = new org.apache.thrift7.protocol.TField("counter", org.apache.thrift7.protocol.TType.MAP, (short)4); + 
private static final org.apache.thrift7.protocol.TField METER_FIELD_DESC = new org.apache.thrift7.protocol.TField("meter", org.apache.thrift7.protocol.TType.MAP, (short)5); + private static final org.apache.thrift7.protocol.TField TIMER_FIELD_DESC = new org.apache.thrift7.protocol.TField("timer", org.apache.thrift7.protocol.TType.MAP, (short)6); + private static final org.apache.thrift7.protocol.TField HISTOGRAM_FIELD_DESC = new org.apache.thrift7.protocol.TField("histogram", org.apache.thrift7.protocol.TType.MAP, (short)7); + + private String hostname; // required + private int port; // required + private Map gauge; // required + private Map counter; // required + private Map meter; // required + private Map timer; // required + private Map histogram; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift7.TFieldIdEnum { + HOSTNAME((short)1, "hostname"), + PORT((short)2, "port"), + GAUGE((short)3, "gauge"), + COUNTER((short)4, "counter"), + METER((short)5, "meter"), + TIMER((short)6, "timer"), + HISTOGRAM((short)7, "histogram"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // HOSTNAME + return HOSTNAME; + case 2: // PORT + return PORT; + case 3: // GAUGE + return GAUGE; + case 4: // COUNTER + return COUNTER; + case 5: // METER + return METER; + case 6: // TIMER + return TIMER; + case 7: // HISTOGRAM + return HISTOGRAM; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __PORT_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift7.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift7.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.HOSTNAME, new org.apache.thrift7.meta_data.FieldMetaData("hostname", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING))); + tmpMap.put(_Fields.PORT, new org.apache.thrift7.meta_data.FieldMetaData("port", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.I32))); + tmpMap.put(_Fields.GAUGE, new org.apache.thrift7.meta_data.FieldMetaData("gauge", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.COUNTER, new 
org.apache.thrift7.meta_data.FieldMetaData("counter", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.METER, new org.apache.thrift7.meta_data.FieldMetaData("meter", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.TIMER, new org.apache.thrift7.meta_data.FieldMetaData("timer", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + tmpMap.put(_Fields.HISTOGRAM, new org.apache.thrift7.meta_data.FieldMetaData("histogram", org.apache.thrift7.TFieldRequirementType.REQUIRED, + new org.apache.thrift7.meta_data.MapMetaData(org.apache.thrift7.protocol.TType.MAP, + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.STRING), + new org.apache.thrift7.meta_data.FieldValueMetaData(org.apache.thrift7.protocol.TType.DOUBLE)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift7.meta_data.FieldMetaData.addStructMetaDataMap(WorkerMetricData.class, metaDataMap); + } + + public WorkerMetricData() { + } + + public WorkerMetricData( + String hostname, + int port, + Map gauge, + Map counter, + Map meter, + Map timer, + Map histogram) + { + this(); + 
this.hostname = hostname; + this.port = port; + set_port_isSet(true); + this.gauge = gauge; + this.counter = counter; + this.meter = meter; + this.timer = timer; + this.histogram = histogram; + } + + /** + * Performs a deep copy on other. + */ + public WorkerMetricData(WorkerMetricData other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + if (other.is_set_hostname()) { + this.hostname = other.hostname; + } + this.port = other.port; + if (other.is_set_gauge()) { + Map __this__gauge = new HashMap(); + for (Map.Entry other_element : other.gauge.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__gauge_copy_key = other_element_key; + + Double __this__gauge_copy_value = other_element_value; + + __this__gauge.put(__this__gauge_copy_key, __this__gauge_copy_value); + } + this.gauge = __this__gauge; + } + if (other.is_set_counter()) { + Map __this__counter = new HashMap(); + for (Map.Entry other_element : other.counter.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__counter_copy_key = other_element_key; + + Double __this__counter_copy_value = other_element_value; + + __this__counter.put(__this__counter_copy_key, __this__counter_copy_value); + } + this.counter = __this__counter; + } + if (other.is_set_meter()) { + Map __this__meter = new HashMap(); + for (Map.Entry other_element : other.meter.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__meter_copy_key = other_element_key; + + Double __this__meter_copy_value = other_element_value; + + __this__meter.put(__this__meter_copy_key, __this__meter_copy_value); + } + this.meter = __this__meter; + } + if (other.is_set_timer()) { + Map __this__timer = new HashMap(); + for (Map.Entry other_element : 
other.timer.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__timer_copy_key = other_element_key; + + Double __this__timer_copy_value = other_element_value; + + __this__timer.put(__this__timer_copy_key, __this__timer_copy_value); + } + this.timer = __this__timer; + } + if (other.is_set_histogram()) { + Map __this__histogram = new HashMap(); + for (Map.Entry other_element : other.histogram.entrySet()) { + + String other_element_key = other_element.getKey(); + Double other_element_value = other_element.getValue(); + + String __this__histogram_copy_key = other_element_key; + + Double __this__histogram_copy_value = other_element_value; + + __this__histogram.put(__this__histogram_copy_key, __this__histogram_copy_value); + } + this.histogram = __this__histogram; + } + } + + public WorkerMetricData deepCopy() { + return new WorkerMetricData(this); + } + + @Override + public void clear() { + this.hostname = null; + set_port_isSet(false); + this.port = 0; + this.gauge = null; + this.counter = null; + this.meter = null; + this.timer = null; + this.histogram = null; + } + + public String get_hostname() { + return this.hostname; + } + + public void set_hostname(String hostname) { + this.hostname = hostname; + } + + public void unset_hostname() { + this.hostname = null; + } + + /** Returns true if field hostname is set (has been assigned a value) and false otherwise */ + public boolean is_set_hostname() { + return this.hostname != null; + } + + public void set_hostname_isSet(boolean value) { + if (!value) { + this.hostname = null; + } + } + + public int get_port() { + return this.port; + } + + public void set_port(int port) { + this.port = port; + set_port_isSet(true); + } + + public void unset_port() { + __isset_bit_vector.clear(__PORT_ISSET_ID); + } + + /** Returns true if field port is set (has been assigned a value) and false otherwise */ + public boolean is_set_port() { + return 
__isset_bit_vector.get(__PORT_ISSET_ID); + } + + public void set_port_isSet(boolean value) { + __isset_bit_vector.set(__PORT_ISSET_ID, value); + } + + public int get_gauge_size() { + return (this.gauge == null) ? 0 : this.gauge.size(); + } + + public void put_to_gauge(String key, double val) { + if (this.gauge == null) { + this.gauge = new HashMap(); + } + this.gauge.put(key, val); + } + + public Map get_gauge() { + return this.gauge; + } + + public void set_gauge(Map gauge) { + this.gauge = gauge; + } + + public void unset_gauge() { + this.gauge = null; + } + + /** Returns true if field gauge is set (has been assigned a value) and false otherwise */ + public boolean is_set_gauge() { + return this.gauge != null; + } + + public void set_gauge_isSet(boolean value) { + if (!value) { + this.gauge = null; + } + } + + public int get_counter_size() { + return (this.counter == null) ? 0 : this.counter.size(); + } + + public void put_to_counter(String key, double val) { + if (this.counter == null) { + this.counter = new HashMap(); + } + this.counter.put(key, val); + } + + public Map get_counter() { + return this.counter; + } + + public void set_counter(Map counter) { + this.counter = counter; + } + + public void unset_counter() { + this.counter = null; + } + + /** Returns true if field counter is set (has been assigned a value) and false otherwise */ + public boolean is_set_counter() { + return this.counter != null; + } + + public void set_counter_isSet(boolean value) { + if (!value) { + this.counter = null; + } + } + + public int get_meter_size() { + return (this.meter == null) ? 
0 : this.meter.size(); + } + + public void put_to_meter(String key, double val) { + if (this.meter == null) { + this.meter = new HashMap(); + } + this.meter.put(key, val); + } + + public Map get_meter() { + return this.meter; + } + + public void set_meter(Map meter) { + this.meter = meter; + } + + public void unset_meter() { + this.meter = null; + } + + /** Returns true if field meter is set (has been assigned a value) and false otherwise */ + public boolean is_set_meter() { + return this.meter != null; + } + + public void set_meter_isSet(boolean value) { + if (!value) { + this.meter = null; + } + } + + public int get_timer_size() { + return (this.timer == null) ? 0 : this.timer.size(); + } + + public void put_to_timer(String key, double val) { + if (this.timer == null) { + this.timer = new HashMap(); + } + this.timer.put(key, val); + } + + public Map get_timer() { + return this.timer; + } + + public void set_timer(Map timer) { + this.timer = timer; + } + + public void unset_timer() { + this.timer = null; + } + + /** Returns true if field timer is set (has been assigned a value) and false otherwise */ + public boolean is_set_timer() { + return this.timer != null; + } + + public void set_timer_isSet(boolean value) { + if (!value) { + this.timer = null; + } + } + + public int get_histogram_size() { + return (this.histogram == null) ? 
0 : this.histogram.size(); + } + + public void put_to_histogram(String key, double val) { + if (this.histogram == null) { + this.histogram = new HashMap(); + } + this.histogram.put(key, val); + } + + public Map get_histogram() { + return this.histogram; + } + + public void set_histogram(Map histogram) { + this.histogram = histogram; + } + + public void unset_histogram() { + this.histogram = null; + } + + /** Returns true if field histogram is set (has been assigned a value) and false otherwise */ + public boolean is_set_histogram() { + return this.histogram != null; + } + + public void set_histogram_isSet(boolean value) { + if (!value) { + this.histogram = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case HOSTNAME: + if (value == null) { + unset_hostname(); + } else { + set_hostname((String)value); + } + break; + + case PORT: + if (value == null) { + unset_port(); + } else { + set_port((Integer)value); + } + break; + + case GAUGE: + if (value == null) { + unset_gauge(); + } else { + set_gauge((Map)value); + } + break; + + case COUNTER: + if (value == null) { + unset_counter(); + } else { + set_counter((Map)value); + } + break; + + case METER: + if (value == null) { + unset_meter(); + } else { + set_meter((Map)value); + } + break; + + case TIMER: + if (value == null) { + unset_timer(); + } else { + set_timer((Map)value); + } + break; + + case HISTOGRAM: + if (value == null) { + unset_histogram(); + } else { + set_histogram((Map)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case HOSTNAME: + return get_hostname(); + + case PORT: + return Integer.valueOf(get_port()); + + case GAUGE: + return get_gauge(); + + case COUNTER: + return get_counter(); + + case METER: + return get_meter(); + + case TIMER: + return get_timer(); + + case HISTOGRAM: + return get_histogram(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID 
is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case HOSTNAME: + return is_set_hostname(); + case PORT: + return is_set_port(); + case GAUGE: + return is_set_gauge(); + case COUNTER: + return is_set_counter(); + case METER: + return is_set_meter(); + case TIMER: + return is_set_timer(); + case HISTOGRAM: + return is_set_histogram(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WorkerMetricData) + return this.equals((WorkerMetricData)that); + return false; + } + + public boolean equals(WorkerMetricData that) { + if (that == null) + return false; + + boolean this_present_hostname = true && this.is_set_hostname(); + boolean that_present_hostname = true && that.is_set_hostname(); + if (this_present_hostname || that_present_hostname) { + if (!(this_present_hostname && that_present_hostname)) + return false; + if (!this.hostname.equals(that.hostname)) + return false; + } + + boolean this_present_port = true; + boolean that_present_port = true; + if (this_present_port || that_present_port) { + if (!(this_present_port && that_present_port)) + return false; + if (this.port != that.port) + return false; + } + + boolean this_present_gauge = true && this.is_set_gauge(); + boolean that_present_gauge = true && that.is_set_gauge(); + if (this_present_gauge || that_present_gauge) { + if (!(this_present_gauge && that_present_gauge)) + return false; + if (!this.gauge.equals(that.gauge)) + return false; + } + + boolean this_present_counter = true && this.is_set_counter(); + boolean that_present_counter = true && that.is_set_counter(); + if (this_present_counter || that_present_counter) { + if (!(this_present_counter && that_present_counter)) + return false; + if (!this.counter.equals(that.counter)) + return false; + } + + boolean 
this_present_meter = true && this.is_set_meter(); + boolean that_present_meter = true && that.is_set_meter(); + if (this_present_meter || that_present_meter) { + if (!(this_present_meter && that_present_meter)) + return false; + if (!this.meter.equals(that.meter)) + return false; + } + + boolean this_present_timer = true && this.is_set_timer(); + boolean that_present_timer = true && that.is_set_timer(); + if (this_present_timer || that_present_timer) { + if (!(this_present_timer && that_present_timer)) + return false; + if (!this.timer.equals(that.timer)) + return false; + } + + boolean this_present_histogram = true && this.is_set_histogram(); + boolean that_present_histogram = true && that.is_set_histogram(); + if (this_present_histogram || that_present_histogram) { + if (!(this_present_histogram && that_present_histogram)) + return false; + if (!this.histogram.equals(that.histogram)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_hostname = true && (is_set_hostname()); + builder.append(present_hostname); + if (present_hostname) + builder.append(hostname); + + boolean present_port = true; + builder.append(present_port); + if (present_port) + builder.append(port); + + boolean present_gauge = true && (is_set_gauge()); + builder.append(present_gauge); + if (present_gauge) + builder.append(gauge); + + boolean present_counter = true && (is_set_counter()); + builder.append(present_counter); + if (present_counter) + builder.append(counter); + + boolean present_meter = true && (is_set_meter()); + builder.append(present_meter); + if (present_meter) + builder.append(meter); + + boolean present_timer = true && (is_set_timer()); + builder.append(present_timer); + if (present_timer) + builder.append(timer); + + boolean present_histogram = true && (is_set_histogram()); + builder.append(present_histogram); + if (present_histogram) + builder.append(histogram); + + return 
builder.toHashCode(); + } + + public int compareTo(WorkerMetricData other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + WorkerMetricData typedOther = (WorkerMetricData)other; + + lastComparison = Boolean.valueOf(is_set_hostname()).compareTo(typedOther.is_set_hostname()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_hostname()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.hostname, typedOther.hostname); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_port()).compareTo(typedOther.is_set_port()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_port()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.port, typedOther.port); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_gauge()).compareTo(typedOther.is_set_gauge()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_gauge()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.gauge, typedOther.gauge); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_counter()).compareTo(typedOther.is_set_counter()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_counter()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.counter, typedOther.counter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_meter()).compareTo(typedOther.is_set_meter()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_meter()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.meter, typedOther.meter); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(is_set_timer()).compareTo(typedOther.is_set_timer()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_timer()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.timer, typedOther.timer); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(is_set_histogram()).compareTo(typedOther.is_set_histogram()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_histogram()) { + lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.histogram, typedOther.histogram); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift7.protocol.TProtocol iprot) throws org.apache.thrift7.TException { + org.apache.thrift7.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift7.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // HOSTNAME + if (field.type == org.apache.thrift7.protocol.TType.STRING) { + this.hostname = iprot.readString(); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // PORT + if (field.type == org.apache.thrift7.protocol.TType.I32) { + this.port = iprot.readI32(); + set_port_isSet(true); + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // GAUGE + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map230 = iprot.readMapBegin(); + this.gauge = new HashMap(2*_map230.size); + for (int _i231 = 0; _i231 < _map230.size; ++_i231) + { + String _key232; // required + double _val233; // required + _key232 = iprot.readString(); + _val233 = iprot.readDouble(); + this.gauge.put(_key232, _val233); + } + iprot.readMapEnd(); + } + } else { + 
org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // COUNTER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map234 = iprot.readMapBegin(); + this.counter = new HashMap(2*_map234.size); + for (int _i235 = 0; _i235 < _map234.size; ++_i235) + { + String _key236; // required + double _val237; // required + _key236 = iprot.readString(); + _val237 = iprot.readDouble(); + this.counter.put(_key236, _val237); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 5: // METER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map238 = iprot.readMapBegin(); + this.meter = new HashMap(2*_map238.size); + for (int _i239 = 0; _i239 < _map238.size; ++_i239) + { + String _key240; // required + double _val241; // required + _key240 = iprot.readString(); + _val241 = iprot.readDouble(); + this.meter.put(_key240, _val241); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 6: // TIMER + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map242 = iprot.readMapBegin(); + this.timer = new HashMap(2*_map242.size); + for (int _i243 = 0; _i243 < _map242.size; ++_i243) + { + String _key244; // required + double _val245; // required + _key244 = iprot.readString(); + _val245 = iprot.readDouble(); + this.timer.put(_key244, _val245); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 7: // HISTOGRAM + if (field.type == org.apache.thrift7.protocol.TType.MAP) { + { + org.apache.thrift7.protocol.TMap _map246 = iprot.readMapBegin(); + this.histogram = new HashMap(2*_map246.size); + for (int _i247 = 0; _i247 < _map246.size; ++_i247) + { + String _key248; // required + double _val249; 
// required + _key248 = iprot.readString(); + _val249 = iprot.readDouble(); + this.histogram.put(_key248, _val249); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift7.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift7.protocol.TProtocol oprot) throws org.apache.thrift7.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.hostname != null) { + oprot.writeFieldBegin(HOSTNAME_FIELD_DESC); + oprot.writeString(this.hostname); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(PORT_FIELD_DESC); + oprot.writeI32(this.port); + oprot.writeFieldEnd(); + if (this.gauge != null) { + oprot.writeFieldBegin(GAUGE_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.gauge.size())); + for (Map.Entry _iter250 : this.gauge.entrySet()) + { + oprot.writeString(_iter250.getKey()); + oprot.writeDouble(_iter250.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.counter != null) { + oprot.writeFieldBegin(COUNTER_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.counter.size())); + for (Map.Entry _iter251 : this.counter.entrySet()) + { + oprot.writeString(_iter251.getKey()); + oprot.writeDouble(_iter251.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.meter != null) { + oprot.writeFieldBegin(METER_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.meter.size())); + for (Map.Entry _iter252 : this.meter.entrySet()) + { + 
oprot.writeString(_iter252.getKey()); + oprot.writeDouble(_iter252.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.timer != null) { + oprot.writeFieldBegin(TIMER_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.timer.size())); + for (Map.Entry _iter253 : this.timer.entrySet()) + { + oprot.writeString(_iter253.getKey()); + oprot.writeDouble(_iter253.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + if (this.histogram != null) { + oprot.writeFieldBegin(HISTOGRAM_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift7.protocol.TMap(org.apache.thrift7.protocol.TType.STRING, org.apache.thrift7.protocol.TType.DOUBLE, this.histogram.size())); + for (Map.Entry _iter254 : this.histogram.entrySet()) + { + oprot.writeString(_iter254.getKey()); + oprot.writeDouble(_iter254.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WorkerMetricData("); + boolean first = true; + + sb.append("hostname:"); + if (this.hostname == null) { + sb.append("null"); + } else { + sb.append(this.hostname); + } + first = false; + if (!first) sb.append(", "); + sb.append("port:"); + sb.append(this.port); + first = false; + if (!first) sb.append(", "); + sb.append("gauge:"); + if (this.gauge == null) { + sb.append("null"); + } else { + sb.append(this.gauge); + } + first = false; + if (!first) sb.append(", "); + sb.append("counter:"); + if (this.counter == null) { + sb.append("null"); + } else { + sb.append(this.counter); + } + first = false; + if (!first) sb.append(", "); + sb.append("meter:"); + if (this.meter == null) { + sb.append("null"); + } else { + sb.append(this.meter); + } + first = false; + if (!first) sb.append(", "); + sb.append("timer:"); + if 
(this.timer == null) { + sb.append("null"); + } else { + sb.append(this.timer); + } + first = false; + if (!first) sb.append(", "); + sb.append("histogram:"); + if (this.histogram == null) { + sb.append("null"); + } else { + sb.append(this.histogram); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift7.TException { + // check for required fields + if (!is_set_hostname()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'hostname' is unset! Struct:" + toString()); + } + + if (!is_set_port()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'port' is unset! Struct:" + toString()); + } + + if (!is_set_gauge()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'gauge' is unset! Struct:" + toString()); + } + + if (!is_set_counter()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'counter' is unset! Struct:" + toString()); + } + + if (!is_set_meter()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'meter' is unset! Struct:" + toString()); + } + + if (!is_set_timer()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'timer' is unset! Struct:" + toString()); + } + + if (!is_set_histogram()) { + throw new org.apache.thrift7.protocol.TProtocolException("Required field 'histogram' is unset! 
Struct:" + toString()); + } + + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift7.protocol.TCompactProtocol(new org.apache.thrift7.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift7.TException te) { + throw new java.io.IOException(te); + } + } + +} + diff --git a/jstorm-client/src/main/py/storm/DistributedRPC.py b/jstorm-client/src/main/py/storm/DistributedRPC.py index 373d4eda5..a7e6ef9d2 100644 --- a/jstorm-client/src/main/py/storm/DistributedRPC.py +++ b/jstorm-client/src/main/py/storm/DistributedRPC.py @@ -118,6 +118,9 @@ class execute_args: (2, TType.STRING, 'funcArgs', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.functionName) + hash(self.funcArgs) + def __init__(self, functionName=None, funcArgs=None,): self.functionName = functionName self.funcArgs = funcArgs @@ -189,6 +192,9 @@ class execute_result: (1, TType.STRUCT, 'e', (DRPCExecutionException, DRPCExecutionException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + def __init__(self, success=None, e=None,): self.success = success self.e = e diff --git a/jstorm-client/src/main/py/storm/DistributedRPCInvocations.py b/jstorm-client/src/main/py/storm/DistributedRPCInvocations.py index 15797b280..4f951a943 100644 --- a/jstorm-client/src/main/py/storm/DistributedRPCInvocations.py +++ b/jstorm-client/src/main/py/storm/DistributedRPCInvocations.py @@ -207,6 
+207,9 @@ class result_args: (2, TType.STRING, 'result', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.id) + hash(self.result) + def __init__(self, id=None, result=None,): self.id = id self.result = result @@ -271,6 +274,9 @@ class result_result: thrift_spec = ( ) + def __hash__(self): + return 0 + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -319,6 +325,9 @@ class fetchRequest_args: (1, TType.STRING, 'functionName', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.functionName) + def __init__(self, functionName=None,): self.functionName = functionName @@ -378,6 +387,9 @@ class fetchRequest_result: (0, TType.STRUCT, 'success', (DRPCRequest, DRPCRequest.thrift_spec), None, ), # 0 ) + def __hash__(self): + return 0 + hash(self.success) + def __init__(self, success=None,): self.success = success @@ -439,6 +451,9 @@ class failRequest_args: (1, TType.STRING, 'id', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.id) + def __init__(self, id=None,): self.id = id @@ -493,6 +508,9 @@ class failRequest_result: thrift_spec = ( ) + def __hash__(self): + return 0 + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) diff --git a/jstorm-client/src/main/py/storm/Nimbus-remote b/jstorm-client/src/main/py/storm/Nimbus-remote index dad54adc9..1fe705144 100644 --- a/jstorm-client/src/main/py/storm/Nimbus-remote +++ b/jstorm-client/src/main/py/storm/Nimbus-remote @@ -22,21 +22,27 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print '' print 'Functions:' 
print ' void submitTopology(string name, string uploadedJarLocation, string jsonConf, StormTopology topology)' + print ' void submitTopologyWithOpts(string name, string uploadedJarLocation, string jsonConf, StormTopology topology, SubmitOptions options)' print ' void killTopology(string name)' print ' void killTopologyWithOpts(string name, KillOptions options)' print ' void activate(string name)' print ' void deactivate(string name)' print ' void rebalance(string name, RebalanceOptions options)' + print ' void metricMonitor(string name, MonitorOptions options)' + print ' void beginLibUpload(string libName)' print ' string beginFileUpload()' print ' void uploadChunk(string location, string chunk)' print ' void finishFileUpload(string location)' print ' string beginFileDownload(string file)' print ' string downloadChunk(string id)' + print ' string getNimbusConf()' print ' ClusterSummary getClusterInfo()' print ' TopologyInfo getTopologyInfo(string id)' print ' SupervisorWorkers getSupervisorWorkers(string host)' print ' string getTopologyConf(string id)' print ' StormTopology getTopology(string id)' + print ' StormTopology getUserTopology(string id)' + print ' TopologyMetricInfo getTopologyMetric(string id)' print '' sys.exit(0) @@ -93,6 +99,12 @@ if cmd == 'submitTopology': sys.exit(1) pp.pprint(client.submitTopology(args[0],args[1],args[2],eval(args[3]),)) +elif cmd == 'submitTopologyWithOpts': + if len(args) != 5: + print 'submitTopologyWithOpts requires 5 args' + sys.exit(1) + pp.pprint(client.submitTopologyWithOpts(args[0],args[1],args[2],eval(args[3]),eval(args[4]),)) + elif cmd == 'killTopology': if len(args) != 1: print 'killTopology requires 1 args' @@ -123,6 +135,18 @@ elif cmd == 'rebalance': sys.exit(1) pp.pprint(client.rebalance(args[0],eval(args[1]),)) +elif cmd == 'metricMonitor': + if len(args) != 2: + print 'metricMonitor requires 2 args' + sys.exit(1) + pp.pprint(client.metricMonitor(args[0],eval(args[1]),)) + +elif cmd == 'beginLibUpload': + if 
len(args) != 1: + print 'beginLibUpload requires 1 args' + sys.exit(1) + pp.pprint(client.beginLibUpload(args[0],)) + elif cmd == 'beginFileUpload': if len(args) != 0: print 'beginFileUpload requires 0 args' @@ -153,6 +177,12 @@ elif cmd == 'downloadChunk': sys.exit(1) pp.pprint(client.downloadChunk(args[0],)) +elif cmd == 'getNimbusConf': + if len(args) != 0: + print 'getNimbusConf requires 0 args' + sys.exit(1) + pp.pprint(client.getNimbusConf()) + elif cmd == 'getClusterInfo': if len(args) != 0: print 'getClusterInfo requires 0 args' @@ -183,6 +213,18 @@ elif cmd == 'getTopology': sys.exit(1) pp.pprint(client.getTopology(args[0],)) +elif cmd == 'getUserTopology': + if len(args) != 1: + print 'getUserTopology requires 1 args' + sys.exit(1) + pp.pprint(client.getUserTopology(args[0],)) + +elif cmd == 'getTopologyMetric': + if len(args) != 1: + print 'getTopologyMetric requires 1 args' + sys.exit(1) + pp.pprint(client.getTopologyMetric(args[0],)) + else: print 'Unrecognized method %s' % cmd sys.exit(1) diff --git a/jstorm-client/src/main/py/storm/Nimbus.py b/jstorm-client/src/main/py/storm/Nimbus.py index 32c325e51..9f382a5ac 100644 --- a/jstorm-client/src/main/py/storm/Nimbus.py +++ b/jstorm-client/src/main/py/storm/Nimbus.py @@ -26,6 +26,17 @@ def submitTopology(self, name, uploadedJarLocation, jsonConf, topology): """ pass + def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options): + """ + Parameters: + - name + - uploadedJarLocation + - jsonConf + - topology + - options + """ + pass + def killTopology(self, name): """ Parameters: @@ -63,6 +74,21 @@ def rebalance(self, name, options): """ pass + def metricMonitor(self, name, options): + """ + Parameters: + - name + - options + """ + pass + + def beginLibUpload(self, libName): + """ + Parameters: + - libName + """ + pass + def beginFileUpload(self, ): pass @@ -95,6 +121,9 @@ def downloadChunk(self, id): """ pass + def getNimbusConf(self, ): + pass + def getClusterInfo(self, ): pass 
@@ -126,6 +155,20 @@ def getTopology(self, id): """ pass + def getUserTopology(self, id): + """ + Parameters: + - id + """ + pass + + def getTopologyMetric(self, id): + """ + Parameters: + - id + """ + pass + class Client(Iface): def __init__(self, iprot, oprot=None): @@ -170,6 +213,50 @@ def recv_submitTopology(self, ): raise result.e if result.ite is not None: raise result.ite + if result.tae is not None: + raise result.tae + return + + def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options): + """ + Parameters: + - name + - uploadedJarLocation + - jsonConf + - topology + - options + """ + self.send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options) + self.recv_submitTopologyWithOpts() + + def send_submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options): + self._oprot.writeMessageBegin('submitTopologyWithOpts', TMessageType.CALL, self._seqid) + args = submitTopologyWithOpts_args() + args.name = name + args.uploadedJarLocation = uploadedJarLocation + args.jsonConf = jsonConf + args.topology = topology + args.options = options + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_submitTopologyWithOpts(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = submitTopologyWithOpts_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.e is not None: + raise result.e + if result.ite is not None: + raise result.ite + if result.tae is not None: + raise result.tae return def killTopology(self, name): @@ -324,6 +411,68 @@ def recv_rebalance(self, ): self._iprot.readMessageEnd() if result.e is not None: raise result.e + if result.ite is not None: + raise result.ite + return + + def metricMonitor(self, name, options): + """ + Parameters: + - name + - options + """ + 
self.send_metricMonitor(name, options) + self.recv_metricMonitor() + + def send_metricMonitor(self, name, options): + self._oprot.writeMessageBegin('metricMonitor', TMessageType.CALL, self._seqid) + args = metricMonitor_args() + args.name = name + args.options = options + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_metricMonitor(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = metricMonitor_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.e is not None: + raise result.e + return + + def beginLibUpload(self, libName): + """ + Parameters: + - libName + """ + self.send_beginLibUpload(libName) + self.recv_beginLibUpload() + + def send_beginLibUpload(self, libName): + self._oprot.writeMessageBegin('beginLibUpload', TMessageType.CALL, self._seqid) + args = beginLibUpload_args() + args.libName = libName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_beginLibUpload(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = beginLibUpload_result() + result.read(self._iprot) + self._iprot.readMessageEnd() return def beginFileUpload(self, ): @@ -469,6 +618,31 @@ def recv_downloadChunk(self, ): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "downloadChunk failed: unknown result"); + def getNimbusConf(self, ): + self.send_getNimbusConf() + return self.recv_getNimbusConf() + + def send_getNimbusConf(self, ): + self._oprot.writeMessageBegin('getNimbusConf', TMessageType.CALL, self._seqid) + args = getNimbusConf_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() 
+ + def recv_getNimbusConf(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = getNimbusConf_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getNimbusConf failed: unknown result"); + def getClusterInfo(self, ): self.send_getClusterInfo() return self.recv_getClusterInfo() @@ -622,27 +796,97 @@ def recv_getTopology(self, ): raise result.e raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopology failed: unknown result"); + def getUserTopology(self, id): + """ + Parameters: + - id + """ + self.send_getUserTopology(id) + return self.recv_getUserTopology() + + def send_getUserTopology(self, id): + self._oprot.writeMessageBegin('getUserTopology', TMessageType.CALL, self._seqid) + args = getUserTopology_args() + args.id = id + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getUserTopology(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = getUserTopology_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.e is not None: + raise result.e + raise TApplicationException(TApplicationException.MISSING_RESULT, "getUserTopology failed: unknown result"); + + def getTopologyMetric(self, id): + """ + Parameters: + - id + """ + self.send_getTopologyMetric(id) + return self.recv_getTopologyMetric() + + def send_getTopologyMetric(self, id): + self._oprot.writeMessageBegin('getTopologyMetric', TMessageType.CALL, self._seqid) + args = getTopologyMetric_args() + args.id = id + 
args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getTopologyMetric(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = getTopologyMetric_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.e is not None: + raise result.e + raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyMetric failed: unknown result"); + class Processor(Iface, TProcessor): def __init__(self, handler): self._handler = handler self._processMap = {} self._processMap["submitTopology"] = Processor.process_submitTopology + self._processMap["submitTopologyWithOpts"] = Processor.process_submitTopologyWithOpts self._processMap["killTopology"] = Processor.process_killTopology self._processMap["killTopologyWithOpts"] = Processor.process_killTopologyWithOpts self._processMap["activate"] = Processor.process_activate self._processMap["deactivate"] = Processor.process_deactivate self._processMap["rebalance"] = Processor.process_rebalance + self._processMap["metricMonitor"] = Processor.process_metricMonitor + self._processMap["beginLibUpload"] = Processor.process_beginLibUpload self._processMap["beginFileUpload"] = Processor.process_beginFileUpload self._processMap["uploadChunk"] = Processor.process_uploadChunk self._processMap["finishFileUpload"] = Processor.process_finishFileUpload self._processMap["beginFileDownload"] = Processor.process_beginFileDownload self._processMap["downloadChunk"] = Processor.process_downloadChunk + self._processMap["getNimbusConf"] = Processor.process_getNimbusConf self._processMap["getClusterInfo"] = Processor.process_getClusterInfo self._processMap["getTopologyInfo"] = Processor.process_getTopologyInfo self._processMap["getSupervisorWorkers"] = 
Processor.process_getSupervisorWorkers self._processMap["getTopologyConf"] = Processor.process_getTopologyConf self._processMap["getTopology"] = Processor.process_getTopology + self._processMap["getUserTopology"] = Processor.process_getUserTopology + self._processMap["getTopologyMetric"] = Processor.process_getTopologyMetric def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -670,11 +914,31 @@ def process_submitTopology(self, seqid, iprot, oprot): result.e = e except InvalidTopologyException, ite: result.ite = ite + except TopologyAssignException, tae: + result.tae = tae oprot.writeMessageBegin("submitTopology", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() + def process_submitTopologyWithOpts(self, seqid, iprot, oprot): + args = submitTopologyWithOpts_args() + args.read(iprot) + iprot.readMessageEnd() + result = submitTopologyWithOpts_result() + try: + self._handler.submitTopologyWithOpts(args.name, args.uploadedJarLocation, args.jsonConf, args.topology, args.options) + except AlreadyAliveException, e: + result.e = e + except InvalidTopologyException, ite: + result.ite = ite + except TopologyAssignException, tae: + result.tae = tae + oprot.writeMessageBegin("submitTopologyWithOpts", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_killTopology(self, seqid, iprot, oprot): args = killTopology_args() args.read(iprot) @@ -740,11 +1004,38 @@ def process_rebalance(self, seqid, iprot, oprot): self._handler.rebalance(args.name, args.options) except NotAliveException, e: result.e = e + except InvalidTopologyException, ite: + result.ite = ite oprot.writeMessageBegin("rebalance", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() + def process_metricMonitor(self, seqid, iprot, oprot): + args = metricMonitor_args() + args.read(iprot) + iprot.readMessageEnd() + result = metricMonitor_result() + try: + 
self._handler.metricMonitor(args.name, args.options) + except NotAliveException, e: + result.e = e + oprot.writeMessageBegin("metricMonitor", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_beginLibUpload(self, seqid, iprot, oprot): + args = beginLibUpload_args() + args.read(iprot) + iprot.readMessageEnd() + result = beginLibUpload_result() + self._handler.beginLibUpload(args.libName) + oprot.writeMessageBegin("beginLibUpload", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_beginFileUpload(self, seqid, iprot, oprot): args = beginFileUpload_args() args.read(iprot) @@ -800,6 +1091,17 @@ def process_downloadChunk(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_getNimbusConf(self, seqid, iprot, oprot): + args = getNimbusConf_args() + args.read(iprot) + iprot.readMessageEnd() + result = getNimbusConf_result() + result.success = self._handler.getNimbusConf() + oprot.writeMessageBegin("getNimbusConf", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_getClusterInfo(self, seqid, iprot, oprot): args = getClusterInfo_args() args.read(iprot) @@ -867,6 +1169,34 @@ def process_getTopology(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_getUserTopology(self, seqid, iprot, oprot): + args = getUserTopology_args() + args.read(iprot) + iprot.readMessageEnd() + result = getUserTopology_result() + try: + result.success = self._handler.getUserTopology(args.id) + except NotAliveException, e: + result.e = e + oprot.writeMessageBegin("getUserTopology", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getTopologyMetric(self, seqid, iprot, oprot): + args = getTopologyMetric_args() + args.read(iprot) + iprot.readMessageEnd() + result = getTopologyMetric_result() + 
try: + result.success = self._handler.getTopologyMetric(args.id) + except NotAliveException, e: + result.e = e + oprot.writeMessageBegin("getTopologyMetric", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -887,6 +1217,9 @@ class submitTopology_args: (4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4 ) + def __hash__(self): + return 0 + hash(self.name) + hash(self.uploadedJarLocation) + hash(self.jsonConf) + hash(self.topology) + def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None,): self.name = name self.uploadedJarLocation = uploadedJarLocation @@ -972,17 +1305,23 @@ class submitTopology_result: Attributes: - e - ite + - tae """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'tae', (TopologyAssignException, TopologyAssignException.thrift_spec), None, ), # 3 ) - def __init__(self, e=None, ite=None,): + def __hash__(self): + return 0 + hash(self.e) + hash(self.ite) + hash(self.tae) + + def __init__(self, e=None, ite=None, tae=None,): self.e = e self.ite = ite + self.tae = tae def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1005,6 +1344,12 @@ def read(self, iprot): self.ite.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.tae = TopologyAssignException() + self.tae.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1023,6 +1368,10 @@ def write(self, oprot): oprot.writeFieldBegin('ite', TType.STRUCT, 2) self.ite.write(oprot) oprot.writeFieldEnd() + if self.tae is not None: + 
oprot.writeFieldBegin('tae', TType.STRUCT, 3) + self.tae.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1041,19 +1390,34 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class killTopology_args: +class submitTopologyWithOpts_args: """ Attributes: - name + - uploadedJarLocation + - jsonConf + - topology + - options """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'uploadedJarLocation', None, None, ), # 2 + (3, TType.STRING, 'jsonConf', None, None, ), # 3 + (4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4 + (5, TType.STRUCT, 'options', (SubmitOptions, SubmitOptions.thrift_spec), None, ), # 5 ) - def __init__(self, name=None,): + def __hash__(self): + return 0 + hash(self.name) + hash(self.uploadedJarLocation) + hash(self.jsonConf) + hash(self.topology) + hash(self.options) + + def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None, options=None,): self.name = name + self.uploadedJarLocation = uploadedJarLocation + self.jsonConf = jsonConf + self.topology = topology + self.options = options def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1069,20 +1433,58 @@ def read(self, iprot): self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + elif fid == 2: + if ftype == TType.STRING: + self.uploadedJarLocation = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: + if ftype 
== TType.STRING: + self.jsonConf = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.topology = StormTopology() + self.topology.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.options = SubmitOptions() + self.options.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('killTopology_args') + oprot.writeStructBegin('submitTopologyWithOpts_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() + if self.uploadedJarLocation is not None: + oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2) + oprot.writeString(self.uploadedJarLocation.encode('utf-8')) + oprot.writeFieldEnd() + if self.jsonConf is not None: + oprot.writeFieldBegin('jsonConf', TType.STRING, 3) + oprot.writeString(self.jsonConf.encode('utf-8')) + oprot.writeFieldEnd() + if self.topology is not None: + oprot.writeFieldBegin('topology', TType.STRUCT, 4) + self.topology.write(oprot) + oprot.writeFieldEnd() + if self.options is not None: + oprot.writeFieldBegin('options', TType.STRUCT, 5) + self.options.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1101,19 +1503,28 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class killTopology_result: +class submitTopologyWithOpts_result: """ Attributes: - e + - ite + - tae """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'e', (AlreadyAliveException, 
AlreadyAliveException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'tae', (TopologyAssignException, TopologyAssignException.thrift_spec), None, ), # 3 ) - def __init__(self, e=None,): + def __hash__(self): + return 0 + hash(self.e) + hash(self.ite) + hash(self.tae) + + def __init__(self, e=None, ite=None, tae=None,): self.e = e + self.ite = ite + self.tae = tae def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1126,10 +1537,22 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.e = NotAliveException() + self.e = AlreadyAliveException() self.e.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.ite = InvalidTopologyException() + self.ite.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.tae = TopologyAssignException() + self.tae.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1139,11 +1562,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('killTopology_result') + oprot.writeStructBegin('submitTopologyWithOpts_result') if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() + if self.ite is not None: + oprot.writeFieldBegin('ite', TType.STRUCT, 2) + self.ite.write(oprot) + oprot.writeFieldEnd() + if self.tae is not None: + oprot.writeFieldBegin('tae', TType.STRUCT, 3) + self.tae.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1162,22 +1593,22 @@ def 
__eq__(self, other): def __ne__(self, other): return not (self == other) -class killTopologyWithOpts_args: +class killTopology_args: """ Attributes: - name - - options """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRUCT, 'options', (KillOptions, KillOptions.thrift_spec), None, ), # 2 ) - def __init__(self, name=None, options=None,): + def __hash__(self): + return 0 + hash(self.name) + + def __init__(self, name=None,): self.name = name - self.options = options def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1193,12 +1624,6 @@ def read(self, iprot): self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.options = KillOptions() - self.options.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1208,15 +1633,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('killTopologyWithOpts_args') + oprot.writeStructBegin('killTopology_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() - if self.options is not None: - oprot.writeFieldBegin('options', TType.STRUCT, 2) - self.options.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1235,7 +1656,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class killTopologyWithOpts_result: +class killTopology_result: """ Attributes: - e @@ -1246,6 +1667,9 @@ class killTopologyWithOpts_result: (1, TType.STRUCT, 'e', (NotAliveException, 
NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.e) + def __init__(self, e=None,): self.e = e @@ -1273,7 +1697,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('killTopologyWithOpts_result') + oprot.writeStructBegin('killTopology_result') if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) @@ -1296,19 +1720,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class activate_args: +class killTopologyWithOpts_args: """ Attributes: - name + - options """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRUCT, 'options', (KillOptions, KillOptions.thrift_spec), None, ), # 2 ) - def __init__(self, name=None,): + def __hash__(self): + return 0 + hash(self.name) + hash(self.options) + + def __init__(self, name=None, options=None,): self.name = name + self.options = options def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1324,6 +1754,12 @@ def read(self, iprot): self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.options = KillOptions() + self.options.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1333,11 +1769,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('activate_args') + 
oprot.writeStructBegin('killTopologyWithOpts_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() + if self.options is not None: + oprot.writeFieldBegin('options', TType.STRUCT, 2) + self.options.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1356,7 +1796,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class activate_result: +class killTopologyWithOpts_result: """ Attributes: - e @@ -1367,6 +1807,9 @@ class activate_result: (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.e) + def __init__(self, e=None,): self.e = e @@ -1394,7 +1837,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('activate_result') + oprot.writeStructBegin('killTopologyWithOpts_result') if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) @@ -1417,7 +1860,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class deactivate_args: +class activate_args: """ Attributes: - name @@ -1428,6 +1871,9 @@ class deactivate_args: (1, TType.STRING, 'name', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.name) + def __init__(self, name=None,): self.name = name @@ -1454,7 +1900,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('deactivate_args') + oprot.writeStructBegin('activate_args') if self.name is not None: oprot.writeFieldBegin('name', 
TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) @@ -1477,7 +1923,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class deactivate_result: +class activate_result: """ Attributes: - e @@ -1488,6 +1934,9 @@ class deactivate_result: (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.e) + def __init__(self, e=None,): self.e = e @@ -1515,7 +1964,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('deactivate_result') + oprot.writeStructBegin('activate_result') if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) @@ -1538,22 +1987,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class rebalance_args: +class deactivate_args: """ Attributes: - name - - options """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRUCT, 'options', (RebalanceOptions, RebalanceOptions.thrift_spec), None, ), # 2 ) - def __init__(self, name=None, options=None,): + def __hash__(self): + return 0 + hash(self.name) + + def __init__(self, name=None,): self.name = name - self.options = options def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1569,12 +2018,6 @@ def read(self, iprot): self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.options = RebalanceOptions() - self.options.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1584,15 +2027,11 @@ def write(self, oprot): if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('rebalance_args') + oprot.writeStructBegin('deactivate_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() - if self.options is not None: - oprot.writeFieldBegin('options', TType.STRUCT, 2) - self.options.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1611,7 +2050,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class rebalance_result: +class deactivate_result: """ Attributes: - e @@ -1622,6 +2061,9 @@ class rebalance_result: (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.e) + def __init__(self, e=None,): self.e = e @@ -1649,7 +2091,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('rebalance_result') + oprot.writeStructBegin('deactivate_result') if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) @@ -1672,11 +2114,26 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class beginFileUpload_args: +class rebalance_args: + """ + Attributes: + - name + - options + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRUCT, 'options', (RebalanceOptions, RebalanceOptions.thrift_spec), None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.name) + hash(self.options) + + def __init__(self, name=None, options=None,): + self.name = name + self.options = options 
+ def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -1686,6 +2143,17 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.options = RebalanceOptions() + self.options.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1695,7 +2163,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('beginFileUpload_args') + oprot.writeStructBegin('rebalance_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name.encode('utf-8')) + oprot.writeFieldEnd() + if self.options is not None: + oprot.writeFieldBegin('options', TType.STRUCT, 2) + self.options.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1714,18 +2190,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class beginFileUpload_result: +class rebalance_result: """ Attributes: - - success + - e + - ite """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __hash__(self): + return 0 + hash(self.e) + 
hash(self.ite) + + def __init__(self, e=None, ite=None,): + self.e = e + self.ite = ite def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1736,9 +2219,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString().decode('utf-8') + if fid == 1: + if ftype == TType.STRUCT: + self.e = NotAliveException() + self.e.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.ite = InvalidTopologyException() + self.ite.read(iprot) else: iprot.skip(ftype) else: @@ -1750,10 +2240,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('beginFileUpload_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success.encode('utf-8')) + oprot.writeStructBegin('rebalance_result') + if self.e is not None: + oprot.writeFieldBegin('e', TType.STRUCT, 1) + self.e.write(oprot) + oprot.writeFieldEnd() + if self.ite is not None: + oprot.writeFieldBegin('ite', TType.STRUCT, 2) + self.ite.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1773,22 +2267,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class uploadChunk_args: +class metricMonitor_args: """ Attributes: - - location - - chunk + - name + - options """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'location', None, None, ), # 1 - (2, TType.STRING, 'chunk', None, None, ), # 2 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRUCT, 'options', (MonitorOptions, 
MonitorOptions.thrift_spec), None, ), # 2 ) - def __init__(self, location=None, chunk=None,): - self.location = location - self.chunk = chunk + def __hash__(self): + return 0 + hash(self.name) + hash(self.options) + + def __init__(self, name=None, options=None,): + self.name = name + self.options = options def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1801,12 +2298,13 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.location = iprot.readString().decode('utf-8') + self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: - if ftype == TType.STRING: - self.chunk = iprot.readString(); + if ftype == TType.STRUCT: + self.options = MonitorOptions() + self.options.read(iprot) else: iprot.skip(ftype) else: @@ -1818,14 +2316,584 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('uploadChunk_args') - if self.location is not None: - oprot.writeFieldBegin('location', TType.STRING, 1) - oprot.writeString(self.location.encode('utf-8')) + oprot.writeStructBegin('metricMonitor_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name.encode('utf-8')) + oprot.writeFieldEnd() + if self.options is not None: + oprot.writeFieldBegin('options', TType.STRUCT, 2) + self.options.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class metricMonitor_result: + """ + Attributes: + - e + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.e) + + def __init__(self, e=None,): + self.e = e + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.e = NotAliveException() + self.e.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('metricMonitor_result') + if self.e is not None: + oprot.writeFieldBegin('e', TType.STRUCT, 1) + self.e.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class beginLibUpload_args: + """ + Attributes: + - libName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'libName', 
None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.libName) + + def __init__(self, libName=None,): + self.libName = libName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.libName = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginLibUpload_args') + if self.libName is not None: + oprot.writeFieldBegin('libName', TType.STRING, 1) + oprot.writeString(self.libName.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class beginLibUpload_result: + + thrift_spec = ( + ) + + def __hash__(self): + return 0 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, 
(self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginLibUpload_result') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class beginFileUpload_args: + + thrift_spec = ( + ) + + def __hash__(self): + return 0 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginFileUpload_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, 
value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class beginFileUpload_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 + ) + + def __hash__(self): + return 0 + hash(self.success) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginFileUpload_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + 
return not (self == other) + +class uploadChunk_args: + """ + Attributes: + - location + - chunk + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'location', None, None, ), # 1 + (2, TType.STRING, 'chunk', None, None, ), # 2 + ) + + def __hash__(self): + return 0 + hash(self.location) + hash(self.chunk) + + def __init__(self, location=None, chunk=None,): + self.location = location + self.chunk = chunk + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.location = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.chunk = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('uploadChunk_args') + if self.location is not None: + oprot.writeFieldBegin('location', TType.STRING, 1) + oprot.writeString(self.location.encode('utf-8')) + oprot.writeFieldEnd() + if self.chunk is not None: + oprot.writeFieldBegin('chunk', TType.STRING, 2) + oprot.writeString(self.chunk) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class uploadChunk_result: + + thrift_spec = ( + ) + + def __hash__(self): + return 0 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('uploadChunk_result') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class finishFileUpload_args: + """ + Attributes: + - location + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'location', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.location) + + def __init__(self, location=None,): + self.location = location + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, 
iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.location = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('finishFileUpload_args') + if self.location is not None: + oprot.writeFieldBegin('location', TType.STRING, 1) + oprot.writeString(self.location.encode('utf-8')) oprot.writeFieldEnd() - if self.chunk is not None: - oprot.writeFieldBegin('chunk', TType.STRING, 2) - oprot.writeString(self.chunk) + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class finishFileUpload_result: + + thrift_spec = ( + ) + + def __hash__(self): + return 0 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('finishFileUpload_result') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class beginFileDownload_args: + """ + Attributes: + - file + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'file', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.file) + + def __init__(self, file=None,): + self.file = file + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.file = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginFileDownload_args') + if self.file is not None: + oprot.writeFieldBegin('file', TType.STRING, 1) + oprot.writeString(self.file.encode('utf-8')) 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1845,11 +2913,147 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class uploadChunk_result: +class beginFileDownload_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 + ) + + def __hash__(self): + return 0 + hash(self.success) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('beginFileDownload_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class downloadChunk_args: + """ + 
Attributes: + - id + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'id', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.id) + + def __init__(self, id=None,): + self.id = id + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('downloadChunk_args') + if self.id is not None: + oprot.writeFieldBegin('id', TType.STRING, 1) + oprot.writeString(self.id.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class downloadChunk_result: + """ + Attributes: + - success + """ thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 ) + def __hash__(self): + return 0 + hash(self.success) + + def __init__(self, success=None,): + self.success = success + def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -1859,6 +3063,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1868,7 +3077,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('uploadChunk_result') + oprot.writeStructBegin('downloadChunk_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1887,19 +3100,13 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class finishFileUpload_args: - """ - Attributes: - - location - """ +class getNimbusConf_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'location', None, None, ), # 1 ) - def __init__(self, location=None,): - self.location = location + def __hash__(self): + return 0 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1910,11 +3117,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.location = iprot.readString().decode('utf-8') - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1924,11 +3126,7 @@ def write(self, oprot): if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('finishFileUpload_args') - if self.location is not None: - oprot.writeFieldBegin('location', TType.STRING, 1) - oprot.writeString(self.location.encode('utf-8')) - oprot.writeFieldEnd() + oprot.writeStructBegin('getNimbusConf_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -1947,11 +3145,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class finishFileUpload_result: +class getNimbusConf_result: + """ + Attributes: + - success + """ thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 ) + def __hash__(self): + return 0 + hash(self.success) + + def __init__(self, success=None,): + self.success = success + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -1961,6 +3170,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1970,7 +3184,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('finishFileUpload_result') + oprot.writeStructBegin('getNimbusConf_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success.encode('utf-8')) + 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1989,19 +3207,13 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class beginFileDownload_args: - """ - Attributes: - - file - """ +class getClusterInfo_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'file', None, None, ), # 1 ) - def __init__(self, file=None,): - self.file = file + def __hash__(self): + return 0 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2012,11 +3224,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.file = iprot.readString().decode('utf-8') - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2026,11 +3233,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('beginFileDownload_args') - if self.file is not None: - oprot.writeFieldBegin('file', TType.STRING, 1) - oprot.writeString(self.file.encode('utf-8')) - oprot.writeFieldEnd() + oprot.writeStructBegin('getClusterInfo_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -2049,16 +3252,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class beginFileDownload_result: +class getClusterInfo_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (ClusterSummary, ClusterSummary.thrift_spec), None, ), # 0 ) + def __hash__(self): + return 0 + hash(self.success) + def __init__(self, success=None,): self.success = success @@ -2072,8 +3278,9 @@ def read(self, iprot): if ftype 
== TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString().decode('utf-8') + if ftype == TType.STRUCT: + self.success = ClusterSummary() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -2085,10 +3292,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('beginFileDownload_result') + oprot.writeStructBegin('getClusterInfo_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success.encode('utf-8')) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2108,7 +3315,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class downloadChunk_args: +class getTopologyInfo_args: """ Attributes: - id @@ -2119,6 +3326,9 @@ class downloadChunk_args: (1, TType.STRING, 'id', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.id) + def __init__(self, id=None,): self.id = id @@ -2145,7 +3355,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('downloadChunk_args') + oprot.writeStructBegin('getTopologyInfo_args') if self.id is not None: oprot.writeFieldBegin('id', TType.STRING, 1) oprot.writeString(self.id.encode('utf-8')) @@ -2168,18 +3378,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class downloadChunk_result: +class getTopologyInfo_result: """ Attributes: - success + - e """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, 
TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + + def __init__(self, success=None, e=None,): self.success = success + self.e = e def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2191,8 +3407,15 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString(); + if ftype == TType.STRUCT: + self.success = TopologyInfo() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.e = NotAliveException() + self.e.read(iprot) else: iprot.skip(ftype) else: @@ -2204,10 +3427,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('downloadChunk_result') + oprot.writeStructBegin('getTopologyInfo_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.e is not None: + oprot.writeFieldBegin('e', TType.STRUCT, 1) + self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2227,11 +3454,23 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getClusterInfo_args: +class getSupervisorWorkers_args: + """ + Attributes: + - host + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'host', None, None, ), # 1 ) + def 
__hash__(self): + return 0 + hash(self.host) + + def __init__(self, host=None,): + self.host = host + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -2241,6 +3480,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.host = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2250,7 +3494,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getClusterInfo_args') + oprot.writeStructBegin('getSupervisorWorkers_args') + if self.host is not None: + oprot.writeFieldBegin('host', TType.STRING, 1) + oprot.writeString(self.host.encode('utf-8')) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2269,18 +3517,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getClusterInfo_result: +class getSupervisorWorkers_result: """ Attributes: - success + - e """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ClusterSummary, ClusterSummary.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (SupervisorWorkers, SupervisorWorkers.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + + def __init__(self, success=None, e=None,): self.success = success + self.e = e def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2293,10 +3547,16 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ClusterSummary() + self.success = SupervisorWorkers() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.e = NotAliveException() + self.e.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2306,11 +3566,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getClusterInfo_result') + oprot.writeStructBegin('getSupervisorWorkers_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.e is not None: + oprot.writeFieldBegin('e', TType.STRUCT, 1) + self.e.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2329,7 +3593,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getTopologyInfo_args: +class getTopologyConf_args: """ Attributes: - id @@ -2340,6 +3604,9 @@ class getTopologyInfo_args: (1, TType.STRING, 'id', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.id) + def __init__(self, id=None,): self.id = id @@ -2366,7 +3633,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopologyInfo_args') + oprot.writeStructBegin('getTopologyConf_args') if self.id is not None: oprot.writeFieldBegin('id', 
TType.STRING, 1) oprot.writeString(self.id.encode('utf-8')) @@ -2389,7 +3656,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getTopologyInfo_result: +class getTopologyConf_result: """ Attributes: - success @@ -2397,10 +3664,13 @@ class getTopologyInfo_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0 + (0, TType.STRING, 'success', None, None, ), # 0 (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + def __init__(self, success=None, e=None,): self.success = success self.e = e @@ -2415,9 +3685,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = TopologyInfo() - self.success.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 1: @@ -2435,10 +3704,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopologyInfo_result') + oprot.writeStructBegin('getTopologyConf_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success.encode('utf-8')) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) @@ -2462,19 +3731,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getSupervisorWorkers_args: +class getTopology_args: """ Attributes: - - host + - id """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'host', None, None, ), # 1 + (1, TType.STRING, 'id', None, None, ), # 1 ) - def __init__(self, 
host=None,): - self.host = host + def __hash__(self): + return 0 + hash(self.id) + + def __init__(self, id=None,): + self.id = id def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2487,7 +3759,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.host = iprot.readString().decode('utf-8') + self.id = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: @@ -2499,10 +3771,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getSupervisorWorkers_args') - if self.host is not None: - oprot.writeFieldBegin('host', TType.STRING, 1) - oprot.writeString(self.host.encode('utf-8')) + oprot.writeStructBegin('getTopology_args') + if self.id is not None: + oprot.writeFieldBegin('id', TType.STRING, 1) + oprot.writeString(self.id.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2522,7 +3794,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getSupervisorWorkers_result: +class getTopology_result: """ Attributes: - success @@ -2530,10 +3802,13 @@ class getSupervisorWorkers_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (SupervisorWorkers, SupervisorWorkers.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + def __init__(self, success=None, e=None,): self.success = success self.e = e @@ -2549,7 +3824,7 @@ def read(self, iprot): break if fid == 0: if ftype == 
TType.STRUCT: - self.success = SupervisorWorkers() + self.success = StormTopology() self.success.read(iprot) else: iprot.skip(ftype) @@ -2568,7 +3843,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getSupervisorWorkers_result') + oprot.writeStructBegin('getTopology_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -2595,7 +3870,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getTopologyConf_args: +class getUserTopology_args: """ Attributes: - id @@ -2606,6 +3881,9 @@ class getTopologyConf_args: (1, TType.STRING, 'id', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.id) + def __init__(self, id=None,): self.id = id @@ -2632,7 +3910,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopologyConf_args') + oprot.writeStructBegin('getUserTopology_args') if self.id is not None: oprot.writeFieldBegin('id', TType.STRING, 1) oprot.writeString(self.id.encode('utf-8')) @@ -2655,7 +3933,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getTopologyConf_result: +class getUserTopology_result: """ Attributes: - success @@ -2663,10 +3941,13 @@ class getTopologyConf_result: """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.success) + 
hash(self.e) + def __init__(self, success=None, e=None,): self.success = success self.e = e @@ -2681,8 +3962,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString().decode('utf-8') + if ftype == TType.STRUCT: + self.success = StormTopology() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -2700,10 +3982,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopologyConf_result') + oprot.writeStructBegin('getUserTopology_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success.encode('utf-8')) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) @@ -2727,7 +4009,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class getTopology_args: +class getTopologyMetric_args: """ Attributes: - id @@ -2738,6 +4020,9 @@ class getTopology_args: (1, TType.STRING, 'id', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.id) + def __init__(self, id=None,): self.id = id @@ -2764,7 +4049,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopology_args') + oprot.writeStructBegin('getTopologyMetric_args') if self.id is not None: oprot.writeFieldBegin('id', TType.STRING, 1) oprot.writeString(self.id.encode('utf-8')) @@ -2787,7 +4072,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class 
getTopology_result: +class getTopologyMetric_result: """ Attributes: - success @@ -2795,10 +4080,13 @@ class getTopology_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (TopologyMetricInfo, TopologyMetricInfo.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.success) + hash(self.e) + def __init__(self, success=None, e=None,): self.success = success self.e = e @@ -2814,7 +4102,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = StormTopology() + self.success = TopologyMetricInfo() self.success.read(iprot) else: iprot.skip(ftype) @@ -2833,7 +4121,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('getTopology_result') + oprot.writeStructBegin('getTopologyMetric_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) diff --git a/jstorm-client/src/main/py/storm/ttypes.py b/jstorm-client/src/main/py/storm/ttypes.py index bec9f1e22..64a20e8f1 100644 --- a/jstorm-client/src/main/py/storm/ttypes.py +++ b/jstorm-client/src/main/py/storm/ttypes.py @@ -14,6 +14,20 @@ fastbinary = None +class TopologyInitialStatus: + ACTIVE = 1 + INACTIVE = 2 + + _VALUES_TO_NAMES = { + 1: "ACTIVE", + 2: "INACTIVE", + } + + _NAMES_TO_VALUES = { + "ACTIVE": 1, + "INACTIVE": 2, + } + class JavaObjectArg: """ @@ -36,6 +50,9 @@ class JavaObjectArg: (6, TType.DOUBLE, 'double_arg', None, None, ), # 6 ) + def __hash__(self): + return 0 + hash(self.int_arg) + hash(self.long_arg) + hash(self.string_arg) + hash(self.bool_arg) + hash(self.binary_arg) + hash(self.double_arg) + def 
__init__(self, int_arg=None, long_arg=None, string_arg=None, bool_arg=None, binary_arg=None, double_arg=None,): self.int_arg = int_arg self.long_arg = long_arg @@ -148,6 +165,9 @@ class JavaObject: (2, TType.LIST, 'args_list', (TType.STRUCT,(JavaObjectArg, JavaObjectArg.thrift_spec)), None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.full_class_name) + hash(self.args_list) + def __init__(self, full_class_name=None, args_list=None,): self.full_class_name = full_class_name self.args_list = args_list @@ -225,6 +245,9 @@ class NullStruct: thrift_spec = ( ) + def __hash__(self): + return 0 + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -275,6 +298,9 @@ class GlobalStreamId: (2, TType.STRING, 'streamId', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.componentId) + hash(self.streamId) + def __init__(self, componentId=None, streamId=None,): self.componentId = componentId self.streamId = streamId @@ -349,6 +375,7 @@ class Grouping: - custom_object - custom_serialized - local_or_shuffle + - localFirst """ thrift_spec = ( @@ -361,9 +388,13 @@ class Grouping: (6, TType.STRUCT, 'custom_object', (JavaObject, JavaObject.thrift_spec), None, ), # 6 (7, TType.STRING, 'custom_serialized', None, None, ), # 7 (8, TType.STRUCT, 'local_or_shuffle', (NullStruct, NullStruct.thrift_spec), None, ), # 8 + (9, TType.STRUCT, 'localFirst', (NullStruct, NullStruct.thrift_spec), None, ), # 9 ) - def __init__(self, fields=None, shuffle=None, all=None, none=None, direct=None, custom_object=None, custom_serialized=None, local_or_shuffle=None,): + def __hash__(self): + return 0 + hash(self.fields) + hash(self.shuffle) + hash(self.all) + hash(self.none) + hash(self.direct) + hash(self.custom_object) + hash(self.custom_serialized) + 
hash(self.local_or_shuffle) + hash(self.localFirst) + + def __init__(self, fields=None, shuffle=None, all=None, none=None, direct=None, custom_object=None, custom_serialized=None, local_or_shuffle=None, localFirst=None,): self.fields = fields self.shuffle = shuffle self.all = all @@ -372,6 +403,7 @@ def __init__(self, fields=None, shuffle=None, all=None, none=None, direct=None, self.custom_object = custom_object self.custom_serialized = custom_serialized self.local_or_shuffle = local_or_shuffle + self.localFirst = localFirst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -433,6 +465,12 @@ def read(self, iprot): self.local_or_shuffle.read(iprot) else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRUCT: + self.localFirst = NullStruct() + self.localFirst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -478,6 +516,10 @@ def write(self, oprot): oprot.writeFieldBegin('local_or_shuffle', TType.STRUCT, 8) self.local_or_shuffle.write(oprot) oprot.writeFieldEnd() + if self.localFirst is not None: + oprot.writeFieldBegin('localFirst', TType.STRUCT, 9) + self.localFirst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -509,6 +551,9 @@ class StreamInfo: (2, TType.BOOL, 'direct', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.output_fields) + hash(self.direct) + def __init__(self, output_fields=None, direct=None,): self.output_fields = output_fields self.direct = direct @@ -593,6 +638,9 @@ class ShellComponent: (2, TType.STRING, 'script', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.execution_command) + hash(self.script) + def __init__(self, execution_command=None, script=None,): self.execution_command = execution_command self.script = script @@ -667,6 +715,9 @@ class ComponentObject: (3, 
TType.STRUCT, 'java_object', (JavaObject, JavaObject.thrift_spec), None, ), # 3 ) + def __hash__(self): + return 0 + hash(self.serialized_java) + hash(self.shell) + hash(self.java_object) + def __init__(self, serialized_java=None, shell=None, java_object=None,): self.serialized_java = serialized_java self.shell = shell @@ -755,6 +806,9 @@ class ComponentCommon: (4, TType.STRING, 'json_conf', None, None, ), # 4 ) + def __hash__(self): + return 0 + hash(self.inputs) + hash(self.streams) + hash(self.parallelism_hint) + hash(self.json_conf) + def __init__(self, inputs=None, streams=None, parallelism_hint=None, json_conf=None,): self.inputs = inputs self.streams = streams @@ -874,6 +928,9 @@ class SpoutSpec: (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.spout_object) + hash(self.common) + def __init__(self, spout_object=None, common=None,): self.spout_object = spout_object self.common = common @@ -952,6 +1009,9 @@ class Bolt: (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.bolt_object) + hash(self.common) + def __init__(self, bolt_object=None, common=None,): self.bolt_object = bolt_object self.common = common @@ -1030,6 +1090,9 @@ class StateSpoutSpec: (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.state_spout_object) + hash(self.common) + def __init__(self, state_spout_object=None, common=None,): self.state_spout_object = state_spout_object self.common = common @@ -1110,6 +1173,9 @@ class StormTopology: (3, TType.MAP, 'state_spouts', (TType.STRING,None,TType.STRUCT,(StateSpoutSpec, StateSpoutSpec.thrift_spec)), None, ), # 3 ) + def __hash__(self): + return 0 + hash(self.spouts) + hash(self.bolts) + hash(self.state_spouts) + def __init__(self, spouts=None, bolts=None, state_spouts=None,): self.spouts = 
spouts self.bolts = bolts @@ -1218,6 +1284,74 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class TopologyAssignException(Exception): + """ + Attributes: + - msg + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'msg', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.msg) + + def __init__(self, msg=None,): + self.msg = msg + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.msg = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TopologyAssignException') + if self.msg is not None: + oprot.writeFieldBegin('msg', TType.STRING, 1) + oprot.writeString(self.msg.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.msg is None: + raise TProtocol.TProtocolException(message='Required field msg is unset!') + return + + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not 
(self == other) + class AlreadyAliveException(Exception): """ Attributes: @@ -1229,6 +1363,9 @@ class AlreadyAliveException(Exception): (1, TType.STRING, 'msg', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.msg) + def __init__(self, msg=None,): self.msg = msg @@ -1294,6 +1431,9 @@ class NotAliveException(Exception): (1, TType.STRING, 'msg', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.msg) + def __init__(self, msg=None,): self.msg = msg @@ -1359,6 +1499,9 @@ class InvalidTopologyException(Exception): (1, TType.STRING, 'msg', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.msg) + def __init__(self, msg=None,): self.msg = msg @@ -1418,29 +1561,35 @@ class TopologySummary: Attributes: - id - name + - status + - uptime_secs - num_tasks - num_workers - - uptime_secs - - status + - error_info """ thrift_spec = ( None, # 0 (1, TType.STRING, 'id', None, None, ), # 1 (2, TType.STRING, 'name', None, None, ), # 2 - (3, TType.I32, 'num_tasks', None, None, ), # 3 - (4, TType.I32, 'num_workers', None, None, ), # 4 - (5, TType.I32, 'uptime_secs', None, None, ), # 5 - (6, TType.STRING, 'status', None, None, ), # 6 + (3, TType.STRING, 'status', None, None, ), # 3 + (4, TType.I32, 'uptime_secs', None, None, ), # 4 + (5, TType.I32, 'num_tasks', None, None, ), # 5 + (6, TType.I32, 'num_workers', None, None, ), # 6 + (7, TType.STRING, 'error_info', None, None, ), # 7 ) - def __init__(self, id=None, name=None, num_tasks=None, num_workers=None, uptime_secs=None, status=None,): + def __hash__(self): + return 0 + hash(self.id) + hash(self.name) + hash(self.status) + hash(self.uptime_secs) + hash(self.num_tasks) + hash(self.num_workers) + hash(self.error_info) + + def __init__(self, id=None, name=None, status=None, uptime_secs=None, num_tasks=None, num_workers=None, error_info=None,): self.id = id self.name = name + self.status = status + self.uptime_secs = uptime_secs self.num_tasks = num_tasks self.num_workers = num_workers - 
self.uptime_secs = uptime_secs - self.status = status + self.error_info = error_info def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1462,23 +1611,28 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 3: - if ftype == TType.I32: - self.num_tasks = iprot.readI32(); + if ftype == TType.STRING: + self.status = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: - self.num_workers = iprot.readI32(); + self.uptime_secs = iprot.readI32(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I32: - self.uptime_secs = iprot.readI32(); + self.num_tasks = iprot.readI32(); else: iprot.skip(ftype) elif fid == 6: + if ftype == TType.I32: + self.num_workers = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 7: if ftype == TType.STRING: - self.status = iprot.readString().decode('utf-8') + self.error_info = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: @@ -1499,21 +1653,25 @@ def write(self, oprot): oprot.writeFieldBegin('name', TType.STRING, 2) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() + if self.status is not None: + oprot.writeFieldBegin('status', TType.STRING, 3) + oprot.writeString(self.status.encode('utf-8')) + oprot.writeFieldEnd() + if self.uptime_secs is not None: + oprot.writeFieldBegin('uptime_secs', TType.I32, 4) + oprot.writeI32(self.uptime_secs) + oprot.writeFieldEnd() if self.num_tasks is not None: - oprot.writeFieldBegin('num_tasks', TType.I32, 3) + oprot.writeFieldBegin('num_tasks', TType.I32, 5) oprot.writeI32(self.num_tasks) oprot.writeFieldEnd() if self.num_workers is not None: - oprot.writeFieldBegin('num_workers', TType.I32, 4) + oprot.writeFieldBegin('num_workers', TType.I32, 6) oprot.writeI32(self.num_workers) oprot.writeFieldEnd() - if self.uptime_secs is not None: - 
oprot.writeFieldBegin('uptime_secs', TType.I32, 5) - oprot.writeI32(self.uptime_secs) - oprot.writeFieldEnd() - if self.status is not None: - oprot.writeFieldBegin('status', TType.STRING, 6) - oprot.writeString(self.status.encode('utf-8')) + if self.error_info is not None: + oprot.writeFieldBegin('error_info', TType.STRING, 7) + oprot.writeString(self.error_info.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1523,14 +1681,16 @@ def validate(self): raise TProtocol.TProtocolException(message='Required field id is unset!') if self.name is None: raise TProtocol.TProtocolException(message='Required field name is unset!') + if self.status is None: + raise TProtocol.TProtocolException(message='Required field status is unset!') + if self.uptime_secs is None: + raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') if self.num_tasks is None: raise TProtocol.TProtocolException(message='Required field num_tasks is unset!') if self.num_workers is None: raise TProtocol.TProtocolException(message='Required field num_workers is unset!') - if self.uptime_secs is None: - raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') - if self.status is None: - raise TProtocol.TProtocolException(message='Required field status is unset!') + if self.error_info is None: + raise TProtocol.TProtocolException(message='Required field error_info is unset!') return @@ -1549,6 +1709,7 @@ class SupervisorSummary: """ Attributes: - host + - supervisor_id - uptime_secs - num_workers - num_used_workers @@ -1557,13 +1718,18 @@ class SupervisorSummary: thrift_spec = ( None, # 0 (1, TType.STRING, 'host', None, None, ), # 1 - (2, TType.I32, 'uptime_secs', None, None, ), # 2 - (3, TType.I32, 'num_workers', None, None, ), # 3 - (4, TType.I32, 'num_used_workers', None, None, ), # 4 + (2, TType.STRING, 'supervisor_id', None, None, ), # 2 + (3, TType.I32, 'uptime_secs', None, None, ), # 3 + (4, TType.I32, 
'num_workers', None, None, ), # 4 + (5, TType.I32, 'num_used_workers', None, None, ), # 5 ) - def __init__(self, host=None, uptime_secs=None, num_workers=None, num_used_workers=None,): + def __hash__(self): + return 0 + hash(self.host) + hash(self.supervisor_id) + hash(self.uptime_secs) + hash(self.num_workers) + hash(self.num_used_workers) + + def __init__(self, host=None, supervisor_id=None, uptime_secs=None, num_workers=None, num_used_workers=None,): self.host = host + self.supervisor_id = supervisor_id self.uptime_secs = uptime_secs self.num_workers = num_workers self.num_used_workers = num_used_workers @@ -1583,16 +1749,21 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 2: + if ftype == TType.STRING: + self.supervisor_id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: if ftype == TType.I32: self.uptime_secs = iprot.readI32(); else: iprot.skip(ftype) - elif fid == 3: + elif fid == 4: if ftype == TType.I32: self.num_workers = iprot.readI32(); else: iprot.skip(ftype) - elif fid == 4: + elif fid == 5: if ftype == TType.I32: self.num_used_workers = iprot.readI32(); else: @@ -1611,16 +1782,20 @@ def write(self, oprot): oprot.writeFieldBegin('host', TType.STRING, 1) oprot.writeString(self.host.encode('utf-8')) oprot.writeFieldEnd() + if self.supervisor_id is not None: + oprot.writeFieldBegin('supervisor_id', TType.STRING, 2) + oprot.writeString(self.supervisor_id.encode('utf-8')) + oprot.writeFieldEnd() if self.uptime_secs is not None: - oprot.writeFieldBegin('uptime_secs', TType.I32, 2) + oprot.writeFieldBegin('uptime_secs', TType.I32, 3) oprot.writeI32(self.uptime_secs) oprot.writeFieldEnd() if self.num_workers is not None: - oprot.writeFieldBegin('num_workers', TType.I32, 3) + oprot.writeFieldBegin('num_workers', TType.I32, 4) oprot.writeI32(self.num_workers) oprot.writeFieldEnd() if self.num_used_workers is not None: - oprot.writeFieldBegin('num_used_workers', TType.I32, 4) + 
oprot.writeFieldBegin('num_used_workers', TType.I32, 5) oprot.writeI32(self.num_used_workers) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1629,6 +1804,8 @@ def write(self, oprot): def validate(self): if self.host is None: raise TProtocol.TProtocolException(message='Required field host is unset!') + if self.supervisor_id is None: + raise TProtocol.TProtocolException(message='Required field supervisor_id is unset!') if self.uptime_secs is None: raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') if self.num_workers is None: @@ -1664,6 +1841,9 @@ class ClusterSummary: (3, TType.LIST, 'topologies', (TType.STRUCT,(TopologySummary, TopologySummary.thrift_spec)), None, ), # 3 ) + def __hash__(self): + return 0 + hash(self.supervisors) + hash(self.nimbus_uptime_secs) + hash(self.topologies) + def __init__(self, supervisors=None, nimbus_uptime_secs=None, topologies=None,): self.supervisors = supervisors self.nimbus_uptime_secs = nimbus_uptime_secs @@ -1770,6 +1950,9 @@ class ErrorInfo: (2, TType.I32, 'error_time_secs', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.error) + hash(self.error_time_secs) + def __init__(self, error=None, error_time_secs=None,): self.error = error self.error_time_secs = error_time_secs @@ -1833,34 +2016,34 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class TaskStats: +class BoltStats: """ Attributes: - - emitted - - send_tps - - recv_tps - acked - failed - process_ms_avg + - executed + - execute_ms_avg """ thrift_spec = ( None, # 0 - (1, TType.MAP, 'emitted', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.I64,None)), None, ), # 1 - (2, TType.MAP, 'send_tps', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.DOUBLE,None)), None, ), # 2 - (3, TType.MAP, 'recv_tps', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 3 - (4, TType.MAP, 'acked', 
(TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 4 - (5, TType.MAP, 'failed', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 5 - (6, TType.MAP, 'process_ms_avg', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 6 + (1, TType.MAP, 'acked', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 1 + (2, TType.MAP, 'failed', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 2 + (3, TType.MAP, 'process_ms_avg', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 3 + (4, TType.MAP, 'executed', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 4 + (5, TType.MAP, 'execute_ms_avg', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 5 ) - def __init__(self, emitted=None, send_tps=None, recv_tps=None, acked=None, failed=None, process_ms_avg=None,): - self.emitted = emitted - self.send_tps = send_tps - self.recv_tps = recv_tps + def __hash__(self): + return 0 + hash(self.acked) + hash(self.failed) + hash(self.process_ms_avg) + hash(self.executed) + hash(self.execute_ms_avg) + + def __init__(self, acked=None, failed=None, process_ms_avg=None, executed=None, execute_ms_avg=None,): self.acked = acked self.failed = failed self.process_ms_avg = process_ms_avg + self.executed = executed + self.execute_ms_avg = execute_ms_avg def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1873,41 +2056,43 
@@ def read(self, iprot): break if fid == 1: if ftype == TType.MAP: - self.emitted = {} + self.acked = {} (_ktype81, _vtype82, _size80 ) = iprot.readMapBegin() for _i84 in xrange(_size80): _key85 = iprot.readString().decode('utf-8') _val86 = {} (_ktype88, _vtype89, _size87 ) = iprot.readMapBegin() for _i91 in xrange(_size87): - _key92 = iprot.readString().decode('utf-8') + _key92 = GlobalStreamId() + _key92.read(iprot) _val93 = iprot.readI64(); _val86[_key92] = _val93 iprot.readMapEnd() - self.emitted[_key85] = _val86 + self.acked[_key85] = _val86 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: - self.send_tps = {} + self.failed = {} (_ktype95, _vtype96, _size94 ) = iprot.readMapBegin() for _i98 in xrange(_size94): _key99 = iprot.readString().decode('utf-8') _val100 = {} (_ktype102, _vtype103, _size101 ) = iprot.readMapBegin() for _i105 in xrange(_size101): - _key106 = iprot.readString().decode('utf-8') - _val107 = iprot.readDouble(); + _key106 = GlobalStreamId() + _key106.read(iprot) + _val107 = iprot.readI64(); _val100[_key106] = _val107 iprot.readMapEnd() - self.send_tps[_key99] = _val100 + self.failed[_key99] = _val100 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: - self.recv_tps = {} + self.process_ms_avg = {} (_ktype109, _vtype110, _size108 ) = iprot.readMapBegin() for _i112 in xrange(_size108): _key113 = iprot.readString().decode('utf-8') @@ -1919,13 +2104,13 @@ def read(self, iprot): _val121 = iprot.readDouble(); _val114[_key120] = _val121 iprot.readMapEnd() - self.recv_tps[_key113] = _val114 + self.process_ms_avg[_key113] = _val114 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP: - self.acked = {} + self.executed = {} (_ktype123, _vtype124, _size122 ) = iprot.readMapBegin() for _i126 in xrange(_size122): _key127 = iprot.readString().decode('utf-8') @@ -1937,13 +2122,13 @@ def read(self, iprot): _val135 = iprot.readI64(); _val128[_key134] = _val135 
iprot.readMapEnd() - self.acked[_key127] = _val128 + self.executed[_key127] = _val128 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.MAP: - self.failed = {} + self.execute_ms_avg = {} (_ktype137, _vtype138, _size136 ) = iprot.readMapBegin() for _i140 in xrange(_size136): _key141 = iprot.readString().decode('utf-8') @@ -1952,28 +2137,10 @@ def read(self, iprot): for _i147 in xrange(_size143): _key148 = GlobalStreamId() _key148.read(iprot) - _val149 = iprot.readI64(); + _val149 = iprot.readDouble(); _val142[_key148] = _val149 iprot.readMapEnd() - self.failed[_key141] = _val142 - iprot.readMapEnd() - else: - iprot.skip(ftype) - elif fid == 6: - if ftype == TType.MAP: - self.process_ms_avg = {} - (_ktype151, _vtype152, _size150 ) = iprot.readMapBegin() - for _i154 in xrange(_size150): - _key155 = iprot.readString().decode('utf-8') - _val156 = {} - (_ktype158, _vtype159, _size157 ) = iprot.readMapBegin() - for _i161 in xrange(_size157): - _key162 = GlobalStreamId() - _key162.read(iprot) - _val163 = iprot.readDouble(); - _val156[_key162] = _val163 - iprot.readMapEnd() - self.process_ms_avg[_key155] = _val156 + self.execute_ms_avg[_key141] = _val142 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1986,76 +2153,64 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('TaskStats') - if self.emitted is not None: - oprot.writeFieldBegin('emitted', TType.MAP, 1) - oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.emitted)) - for kiter164,viter165 in self.emitted.items(): - oprot.writeString(kiter164.encode('utf-8')) - oprot.writeMapBegin(TType.STRING, TType.I64, len(viter165)) - for kiter166,viter167 in viter165.items(): - oprot.writeString(kiter166.encode('utf-8')) - oprot.writeI64(viter167) - oprot.writeMapEnd() - 
oprot.writeMapEnd() - oprot.writeFieldEnd() - if self.send_tps is not None: - oprot.writeFieldBegin('send_tps', TType.MAP, 2) - oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.send_tps)) - for kiter168,viter169 in self.send_tps.items(): - oprot.writeString(kiter168.encode('utf-8')) - oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(viter169)) - for kiter170,viter171 in viter169.items(): - oprot.writeString(kiter170.encode('utf-8')) - oprot.writeDouble(viter171) - oprot.writeMapEnd() - oprot.writeMapEnd() - oprot.writeFieldEnd() - if self.recv_tps is not None: - oprot.writeFieldBegin('recv_tps', TType.MAP, 3) - oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.recv_tps)) - for kiter172,viter173 in self.recv_tps.items(): - oprot.writeString(kiter172.encode('utf-8')) - oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter173)) - for kiter174,viter175 in viter173.items(): - kiter174.write(oprot) - oprot.writeDouble(viter175) - oprot.writeMapEnd() - oprot.writeMapEnd() - oprot.writeFieldEnd() + oprot.writeStructBegin('BoltStats') if self.acked is not None: - oprot.writeFieldBegin('acked', TType.MAP, 4) + oprot.writeFieldBegin('acked', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.acked)) - for kiter176,viter177 in self.acked.items(): - oprot.writeString(kiter176.encode('utf-8')) - oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter177)) - for kiter178,viter179 in viter177.items(): - kiter178.write(oprot) - oprot.writeI64(viter179) + for kiter150,viter151 in self.acked.items(): + oprot.writeString(kiter150.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter151)) + for kiter152,viter153 in viter151.items(): + kiter152.write(oprot) + oprot.writeI64(viter153) oprot.writeMapEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.failed is not None: - oprot.writeFieldBegin('failed', TType.MAP, 5) + oprot.writeFieldBegin('failed', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.failed)) - for 
kiter180,viter181 in self.failed.items(): - oprot.writeString(kiter180.encode('utf-8')) - oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter181)) - for kiter182,viter183 in viter181.items(): - kiter182.write(oprot) - oprot.writeI64(viter183) + for kiter154,viter155 in self.failed.items(): + oprot.writeString(kiter154.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter155)) + for kiter156,viter157 in viter155.items(): + kiter156.write(oprot) + oprot.writeI64(viter157) oprot.writeMapEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.process_ms_avg is not None: - oprot.writeFieldBegin('process_ms_avg', TType.MAP, 6) + oprot.writeFieldBegin('process_ms_avg', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.process_ms_avg)) - for kiter184,viter185 in self.process_ms_avg.items(): - oprot.writeString(kiter184.encode('utf-8')) - oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter185)) - for kiter186,viter187 in viter185.items(): - kiter186.write(oprot) - oprot.writeDouble(viter187) + for kiter158,viter159 in self.process_ms_avg.items(): + oprot.writeString(kiter158.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter159)) + for kiter160,viter161 in viter159.items(): + kiter160.write(oprot) + oprot.writeDouble(viter161) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.executed is not None: + oprot.writeFieldBegin('executed', TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.executed)) + for kiter162,viter163 in self.executed.items(): + oprot.writeString(kiter162.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter163)) + for kiter164,viter165 in viter163.items(): + kiter164.write(oprot) + oprot.writeI64(viter165) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.execute_ms_avg is not None: + oprot.writeFieldBegin('execute_ms_avg', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.MAP, 
len(self.execute_ms_avg)) + for kiter166,viter167 in self.execute_ms_avg.items(): + oprot.writeString(kiter166.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter167)) + for kiter168,viter169 in viter167.items(): + kiter168.write(oprot) + oprot.writeDouble(viter169) oprot.writeMapEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -2063,18 +2218,16 @@ def write(self, oprot): oprot.writeStructEnd() def validate(self): - if self.emitted is None: - raise TProtocol.TProtocolException(message='Required field emitted is unset!') - if self.send_tps is None: - raise TProtocol.TProtocolException(message='Required field send_tps is unset!') - if self.recv_tps is None: - raise TProtocol.TProtocolException(message='Required field recv_tps is unset!') if self.acked is None: raise TProtocol.TProtocolException(message='Required field acked is unset!') if self.failed is None: raise TProtocol.TProtocolException(message='Required field failed is unset!') if self.process_ms_avg is None: raise TProtocol.TProtocolException(message='Required field process_ms_avg is unset!') + if self.executed is None: + raise TProtocol.TProtocolException(message='Required field executed is unset!') + if self.execute_ms_avg is None: + raise TProtocol.TProtocolException(message='Required field execute_ms_avg is unset!') return @@ -2089,37 +2242,28 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class TaskSummary: +class SpoutStats: """ Attributes: - - task_id - - component_id - - host - - port - - uptime_secs - - errors - - stats + - acked + - failed + - complete_ms_avg """ thrift_spec = ( None, # 0 - (1, TType.I32, 'task_id', None, None, ), # 1 - (2, TType.STRING, 'component_id', None, None, ), # 2 - (3, TType.STRING, 'host', None, None, ), # 3 - (4, TType.I32, 'port', None, None, ), # 4 - (5, TType.I32, 'uptime_secs', None, None, ), # 5 - (6, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, ErrorInfo.thrift_spec)), None, ), # 6 - (7, TType.STRUCT, 
'stats', (TaskStats, TaskStats.thrift_spec), None, ), # 7 + (1, TType.MAP, 'acked', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.I64,None)), None, ), # 1 + (2, TType.MAP, 'failed', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.I64,None)), None, ), # 2 + (3, TType.MAP, 'complete_ms_avg', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.DOUBLE,None)), None, ), # 3 ) - def __init__(self, task_id=None, component_id=None, host=None, port=None, uptime_secs=None, errors=None, stats=None,): - self.task_id = task_id - self.component_id = component_id - self.host = host - self.port = port - self.uptime_secs = uptime_secs - self.errors = errors - self.stats = stats + def __hash__(self): + return 0 + hash(self.acked) + hash(self.failed) + hash(self.complete_ms_avg) + + def __init__(self, acked=None, failed=None, complete_ms_avg=None,): + self.acked = acked + self.failed = failed + self.complete_ms_avg = complete_ms_avg def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2131,47 +2275,1011 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.task_id = iprot.readI32(); + if ftype == TType.MAP: + self.acked = {} + (_ktype171, _vtype172, _size170 ) = iprot.readMapBegin() + for _i174 in xrange(_size170): + _key175 = iprot.readString().decode('utf-8') + _val176 = {} + (_ktype178, _vtype179, _size177 ) = iprot.readMapBegin() + for _i181 in xrange(_size177): + _key182 = iprot.readString().decode('utf-8') + _val183 = iprot.readI64(); + _val176[_key182] = _val183 + iprot.readMapEnd() + self.acked[_key175] = _val176 + iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 2: - if ftype == TType.STRING: - self.component_id = iprot.readString().decode('utf-8') + if ftype == TType.MAP: + self.failed = {} + (_ktype185, _vtype186, _size184 ) = iprot.readMapBegin() + 
for _i188 in xrange(_size184): + _key189 = iprot.readString().decode('utf-8') + _val190 = {} + (_ktype192, _vtype193, _size191 ) = iprot.readMapBegin() + for _i195 in xrange(_size191): + _key196 = iprot.readString().decode('utf-8') + _val197 = iprot.readI64(); + _val190[_key196] = _val197 + iprot.readMapEnd() + self.failed[_key189] = _val190 + iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: - if ftype == TType.STRING: - self.host = iprot.readString().decode('utf-8') - else: - iprot.skip(ftype) + if ftype == TType.MAP: + self.complete_ms_avg = {} + (_ktype199, _vtype200, _size198 ) = iprot.readMapBegin() + for _i202 in xrange(_size198): + _key203 = iprot.readString().decode('utf-8') + _val204 = {} + (_ktype206, _vtype207, _size205 ) = iprot.readMapBegin() + for _i209 in xrange(_size205): + _key210 = iprot.readString().decode('utf-8') + _val211 = iprot.readDouble(); + _val204[_key210] = _val211 + iprot.readMapEnd() + self.complete_ms_avg[_key203] = _val204 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SpoutStats') + if self.acked is not None: + oprot.writeFieldBegin('acked', TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.acked)) + for kiter212,viter213 in self.acked.items(): + oprot.writeString(kiter212.encode('utf-8')) + oprot.writeMapBegin(TType.STRING, TType.I64, len(viter213)) + for kiter214,viter215 in viter213.items(): + oprot.writeString(kiter214.encode('utf-8')) + oprot.writeI64(viter215) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.failed is not None: + oprot.writeFieldBegin('failed', TType.MAP, 2) + oprot.writeMapBegin(TType.STRING, 
TType.MAP, len(self.failed)) + for kiter216,viter217 in self.failed.items(): + oprot.writeString(kiter216.encode('utf-8')) + oprot.writeMapBegin(TType.STRING, TType.I64, len(viter217)) + for kiter218,viter219 in viter217.items(): + oprot.writeString(kiter218.encode('utf-8')) + oprot.writeI64(viter219) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.complete_ms_avg is not None: + oprot.writeFieldBegin('complete_ms_avg', TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.complete_ms_avg)) + for kiter220,viter221 in self.complete_ms_avg.items(): + oprot.writeString(kiter220.encode('utf-8')) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(viter221)) + for kiter222,viter223 in viter221.items(): + oprot.writeString(kiter222.encode('utf-8')) + oprot.writeDouble(viter223) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.acked is None: + raise TProtocol.TProtocolException(message='Required field acked is unset!') + if self.failed is None: + raise TProtocol.TProtocolException(message='Required field failed is unset!') + if self.complete_ms_avg is None: + raise TProtocol.TProtocolException(message='Required field complete_ms_avg is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ExecutorSpecificStats: + """ + Attributes: + - bolt + - spout + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'bolt', (BoltStats, BoltStats.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'spout', (SpoutStats, SpoutStats.thrift_spec), None, ), # 2 + ) + + def __hash__(self): + return 0 + hash(self.bolt) + hash(self.spout) + + def 
__init__(self, bolt=None, spout=None,): + self.bolt = bolt + self.spout = spout + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.bolt = BoltStats() + self.bolt.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.spout = SpoutStats() + self.spout.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ExecutorSpecificStats') + if self.bolt is not None: + oprot.writeFieldBegin('bolt', TType.STRUCT, 1) + self.bolt.write(oprot) + oprot.writeFieldEnd() + if self.spout is not None: + oprot.writeFieldBegin('spout', TType.STRUCT, 2) + self.spout.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TaskStats: + """ + Attributes: + - emitted + - send_tps + - recv_tps + - acked + - failed + - process_ms_avg + """ + + thrift_spec = ( + None, # 0 + (1, TType.MAP, 'emitted', 
(TType.STRING,None,TType.MAP,(TType.STRING,None,TType.I64,None)), None, ), # 1 + (2, TType.MAP, 'send_tps', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.DOUBLE,None)), None, ), # 2 + (3, TType.MAP, 'recv_tps', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 3 + (4, TType.MAP, 'acked', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 4 + (5, TType.MAP, 'failed', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.I64,None)), None, ), # 5 + (6, TType.MAP, 'process_ms_avg', (TType.STRING,None,TType.MAP,(TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.DOUBLE,None)), None, ), # 6 + ) + + def __hash__(self): + return 0 + hash(self.emitted) + hash(self.send_tps) + hash(self.recv_tps) + hash(self.acked) + hash(self.failed) + hash(self.process_ms_avg) + + def __init__(self, emitted=None, send_tps=None, recv_tps=None, acked=None, failed=None, process_ms_avg=None,): + self.emitted = emitted + self.send_tps = send_tps + self.recv_tps = recv_tps + self.acked = acked + self.failed = failed + self.process_ms_avg = process_ms_avg + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.emitted = {} + (_ktype225, _vtype226, _size224 ) = iprot.readMapBegin() + for _i228 in xrange(_size224): + _key229 = iprot.readString().decode('utf-8') + _val230 = {} + (_ktype232, _vtype233, _size231 ) = iprot.readMapBegin() + for _i235 in xrange(_size231): + _key236 = 
iprot.readString().decode('utf-8') + _val237 = iprot.readI64(); + _val230[_key236] = _val237 + iprot.readMapEnd() + self.emitted[_key229] = _val230 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.MAP: + self.send_tps = {} + (_ktype239, _vtype240, _size238 ) = iprot.readMapBegin() + for _i242 in xrange(_size238): + _key243 = iprot.readString().decode('utf-8') + _val244 = {} + (_ktype246, _vtype247, _size245 ) = iprot.readMapBegin() + for _i249 in xrange(_size245): + _key250 = iprot.readString().decode('utf-8') + _val251 = iprot.readDouble(); + _val244[_key250] = _val251 + iprot.readMapEnd() + self.send_tps[_key243] = _val244 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.recv_tps = {} + (_ktype253, _vtype254, _size252 ) = iprot.readMapBegin() + for _i256 in xrange(_size252): + _key257 = iprot.readString().decode('utf-8') + _val258 = {} + (_ktype260, _vtype261, _size259 ) = iprot.readMapBegin() + for _i263 in xrange(_size259): + _key264 = GlobalStreamId() + _key264.read(iprot) + _val265 = iprot.readDouble(); + _val258[_key264] = _val265 + iprot.readMapEnd() + self.recv_tps[_key257] = _val258 + iprot.readMapEnd() + else: + iprot.skip(ftype) elif fid == 4: + if ftype == TType.MAP: + self.acked = {} + (_ktype267, _vtype268, _size266 ) = iprot.readMapBegin() + for _i270 in xrange(_size266): + _key271 = iprot.readString().decode('utf-8') + _val272 = {} + (_ktype274, _vtype275, _size273 ) = iprot.readMapBegin() + for _i277 in xrange(_size273): + _key278 = GlobalStreamId() + _key278.read(iprot) + _val279 = iprot.readI64(); + _val272[_key278] = _val279 + iprot.readMapEnd() + self.acked[_key271] = _val272 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.failed = {} + (_ktype281, _vtype282, _size280 ) = iprot.readMapBegin() + for _i284 in xrange(_size280): + _key285 = iprot.readString().decode('utf-8') + _val286 = {} + (_ktype288, _vtype289, 
_size287 ) = iprot.readMapBegin() + for _i291 in xrange(_size287): + _key292 = GlobalStreamId() + _key292.read(iprot) + _val293 = iprot.readI64(); + _val286[_key292] = _val293 + iprot.readMapEnd() + self.failed[_key285] = _val286 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.MAP: + self.process_ms_avg = {} + (_ktype295, _vtype296, _size294 ) = iprot.readMapBegin() + for _i298 in xrange(_size294): + _key299 = iprot.readString().decode('utf-8') + _val300 = {} + (_ktype302, _vtype303, _size301 ) = iprot.readMapBegin() + for _i305 in xrange(_size301): + _key306 = GlobalStreamId() + _key306.read(iprot) + _val307 = iprot.readDouble(); + _val300[_key306] = _val307 + iprot.readMapEnd() + self.process_ms_avg[_key299] = _val300 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TaskStats') + if self.emitted is not None: + oprot.writeFieldBegin('emitted', TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.emitted)) + for kiter308,viter309 in self.emitted.items(): + oprot.writeString(kiter308.encode('utf-8')) + oprot.writeMapBegin(TType.STRING, TType.I64, len(viter309)) + for kiter310,viter311 in viter309.items(): + oprot.writeString(kiter310.encode('utf-8')) + oprot.writeI64(viter311) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.send_tps is not None: + oprot.writeFieldBegin('send_tps', TType.MAP, 2) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.send_tps)) + for kiter312,viter313 in self.send_tps.items(): + oprot.writeString(kiter312.encode('utf-8')) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(viter313)) + for 
kiter314,viter315 in viter313.items(): + oprot.writeString(kiter314.encode('utf-8')) + oprot.writeDouble(viter315) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.recv_tps is not None: + oprot.writeFieldBegin('recv_tps', TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.recv_tps)) + for kiter316,viter317 in self.recv_tps.items(): + oprot.writeString(kiter316.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter317)) + for kiter318,viter319 in viter317.items(): + kiter318.write(oprot) + oprot.writeDouble(viter319) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.acked is not None: + oprot.writeFieldBegin('acked', TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.acked)) + for kiter320,viter321 in self.acked.items(): + oprot.writeString(kiter320.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter321)) + for kiter322,viter323 in viter321.items(): + kiter322.write(oprot) + oprot.writeI64(viter323) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.failed is not None: + oprot.writeFieldBegin('failed', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.failed)) + for kiter324,viter325 in self.failed.items(): + oprot.writeString(kiter324.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter325)) + for kiter326,viter327 in viter325.items(): + kiter326.write(oprot) + oprot.writeI64(viter327) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.process_ms_avg is not None: + oprot.writeFieldBegin('process_ms_avg', TType.MAP, 6) + oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.process_ms_avg)) + for kiter328,viter329 in self.process_ms_avg.items(): + oprot.writeString(kiter328.encode('utf-8')) + oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter329)) + for kiter330,viter331 in viter329.items(): + kiter330.write(oprot) + 
oprot.writeDouble(viter331) + oprot.writeMapEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.emitted is None: + raise TProtocol.TProtocolException(message='Required field emitted is unset!') + if self.send_tps is None: + raise TProtocol.TProtocolException(message='Required field send_tps is unset!') + if self.recv_tps is None: + raise TProtocol.TProtocolException(message='Required field recv_tps is unset!') + if self.acked is None: + raise TProtocol.TProtocolException(message='Required field acked is unset!') + if self.failed is None: + raise TProtocol.TProtocolException(message='Required field failed is unset!') + if self.process_ms_avg is None: + raise TProtocol.TProtocolException(message='Required field process_ms_avg is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ExecutorInfo: + """ + Attributes: + - task_start + - task_end + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'task_start', None, None, ), # 1 + (2, TType.I32, 'task_end', None, None, ), # 2 + ) + + def __hash__(self): + return 0 + hash(self.task_start) + hash(self.task_end) + + def __init__(self, task_start=None, task_end=None,): + self.task_start = task_start + self.task_end = task_end + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + 
break + if fid == 1: + if ftype == TType.I32: + self.task_start = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.task_end = iprot.readI32(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ExecutorInfo') + if self.task_start is not None: + oprot.writeFieldBegin('task_start', TType.I32, 1) + oprot.writeI32(self.task_start) + oprot.writeFieldEnd() + if self.task_end is not None: + oprot.writeFieldBegin('task_end', TType.I32, 2) + oprot.writeI32(self.task_end) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.task_start is None: + raise TProtocol.TProtocolException(message='Required field task_start is unset!') + if self.task_end is None: + raise TProtocol.TProtocolException(message='Required field task_end is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TaskSummary: + """ + Attributes: + - task_id + - component_id + - host + - port + - uptime_secs + - errors + - stats + - component_type + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'task_id', None, None, ), # 1 + (2, TType.STRING, 'component_id', None, None, ), # 2 + (3, TType.STRING, 'host', None, None, ), # 3 + (4, TType.I32, 'port', None, None, ), # 4 + (5, TType.I32, 'uptime_secs', None, None, ), # 5 + (6, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, 
ErrorInfo.thrift_spec)), None, ), # 6 + (7, TType.STRUCT, 'stats', (TaskStats, TaskStats.thrift_spec), None, ), # 7 + (8, TType.STRING, 'component_type', None, None, ), # 8 + ) + + def __hash__(self): + return 0 + hash(self.task_id) + hash(self.component_id) + hash(self.host) + hash(self.port) + hash(self.uptime_secs) + hash(self.errors) + hash(self.stats) + hash(self.component_type) + + def __init__(self, task_id=None, component_id=None, host=None, port=None, uptime_secs=None, errors=None, stats=None, component_type=None,): + self.task_id = task_id + self.component_id = component_id + self.host = host + self.port = port + self.uptime_secs = uptime_secs + self.errors = errors + self.stats = stats + self.component_type = component_type + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.task_id = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.component_id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.host = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.port = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.uptime_secs = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.errors = [] + (_etype335, _size332) = iprot.readListBegin() + for _i336 in xrange(_size332): + _elem337 = ErrorInfo() + _elem337.read(iprot) + self.errors.append(_elem337) + iprot.readListEnd() + else: + 
iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.stats = TaskStats() + self.stats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.component_type = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TaskSummary') + if self.task_id is not None: + oprot.writeFieldBegin('task_id', TType.I32, 1) + oprot.writeI32(self.task_id) + oprot.writeFieldEnd() + if self.component_id is not None: + oprot.writeFieldBegin('component_id', TType.STRING, 2) + oprot.writeString(self.component_id.encode('utf-8')) + oprot.writeFieldEnd() + if self.host is not None: + oprot.writeFieldBegin('host', TType.STRING, 3) + oprot.writeString(self.host.encode('utf-8')) + oprot.writeFieldEnd() + if self.port is not None: + oprot.writeFieldBegin('port', TType.I32, 4) + oprot.writeI32(self.port) + oprot.writeFieldEnd() + if self.uptime_secs is not None: + oprot.writeFieldBegin('uptime_secs', TType.I32, 5) + oprot.writeI32(self.uptime_secs) + oprot.writeFieldEnd() + if self.errors is not None: + oprot.writeFieldBegin('errors', TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.errors)) + for iter338 in self.errors: + iter338.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.stats is not None: + oprot.writeFieldBegin('stats', TType.STRUCT, 7) + self.stats.write(oprot) + oprot.writeFieldEnd() + if self.component_type is not None: + oprot.writeFieldBegin('component_type', TType.STRING, 8) + oprot.writeString(self.component_type.encode('utf-8')) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if 
self.task_id is None: + raise TProtocol.TProtocolException(message='Required field task_id is unset!') + if self.component_id is None: + raise TProtocol.TProtocolException(message='Required field component_id is unset!') + if self.host is None: + raise TProtocol.TProtocolException(message='Required field host is unset!') + if self.port is None: + raise TProtocol.TProtocolException(message='Required field port is unset!') + if self.uptime_secs is None: + raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') + if self.errors is None: + raise TProtocol.TProtocolException(message='Required field errors is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WorkerSummary: + """ + Attributes: + - port + - topology + - tasks + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'port', None, None, ), # 1 + (2, TType.STRING, 'topology', None, None, ), # 2 + (3, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 3 + ) + + def __hash__(self): + return 0 + hash(self.port) + hash(self.topology) + hash(self.tasks) + + def __init__(self, port=None, topology=None, tasks=None,): + self.port = port + self.topology = topology + self.tasks = tasks + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: if ftype == TType.I32: self.port = 
iprot.readI32(); else: iprot.skip(ftype) - elif fid == 5: + elif fid == 2: + if ftype == TType.STRING: + self.topology = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.tasks = [] + (_etype342, _size339) = iprot.readListBegin() + for _i343 in xrange(_size339): + _elem344 = TaskSummary() + _elem344.read(iprot) + self.tasks.append(_elem344) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WorkerSummary') + if self.port is not None: + oprot.writeFieldBegin('port', TType.I32, 1) + oprot.writeI32(self.port) + oprot.writeFieldEnd() + if self.topology is not None: + oprot.writeFieldBegin('topology', TType.STRING, 2) + oprot.writeString(self.topology.encode('utf-8')) + oprot.writeFieldEnd() + if self.tasks is not None: + oprot.writeFieldBegin('tasks', TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.tasks)) + for iter345 in self.tasks: + iter345.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.port is None: + raise TProtocol.TProtocolException(message='Required field port is unset!') + if self.topology is None: + raise TProtocol.TProtocolException(message='Required field topology is unset!') + if self.tasks is None: + raise TProtocol.TProtocolException(message='Required field tasks is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == 
other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TopologyInfo: + """ + Attributes: + - id + - name + - uptime_secs + - workers + - status + - tasks + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'id', None, None, ), # 1 + (2, TType.STRING, 'name', None, None, ), # 2 + (3, TType.I32, 'uptime_secs', None, None, ), # 3 + (4, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 4 + (5, TType.STRING, 'status', None, None, ), # 5 + (6, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 6 + ) + + def __hash__(self): + return 0 + hash(self.id) + hash(self.name) + hash(self.uptime_secs) + hash(self.workers) + hash(self.status) + hash(self.tasks) + + def __init__(self, id=None, name=None, uptime_secs=None, workers=None, status=None, tasks=None,): + self.id = id + self.name = name + self.uptime_secs = uptime_secs + self.workers = workers + self.status = status + self.tasks = tasks + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.name = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: if ftype == TType.I32: self.uptime_secs = iprot.readI32(); else: iprot.skip(ftype) - elif fid == 6: + elif fid == 4: + if ftype == TType.LIST: + self.workers = [] + (_etype349, _size346) = iprot.readListBegin() + for _i350 in xrange(_size346): + _elem351 = WorkerSummary() + _elem351.read(iprot) + 
self.workers.append(_elem351) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.status = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.tasks = [] + (_etype355, _size352) = iprot.readListBegin() + for _i356 in xrange(_size352): + _elem357 = TaskSummary() + _elem357.read(iprot) + self.tasks.append(_elem357) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TopologyInfo') + if self.id is not None: + oprot.writeFieldBegin('id', TType.STRING, 1) + oprot.writeString(self.id.encode('utf-8')) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 2) + oprot.writeString(self.name.encode('utf-8')) + oprot.writeFieldEnd() + if self.uptime_secs is not None: + oprot.writeFieldBegin('uptime_secs', TType.I32, 3) + oprot.writeI32(self.uptime_secs) + oprot.writeFieldEnd() + if self.workers is not None: + oprot.writeFieldBegin('workers', TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.workers)) + for iter358 in self.workers: + iter358.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.status is not None: + oprot.writeFieldBegin('status', TType.STRING, 5) + oprot.writeString(self.status.encode('utf-8')) + oprot.writeFieldEnd() + if self.tasks is not None: + oprot.writeFieldBegin('tasks', TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.tasks)) + for iter359 in self.tasks: + iter359.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.id is None: 
+ raise TProtocol.TProtocolException(message='Required field id is unset!') + if self.name is None: + raise TProtocol.TProtocolException(message='Required field name is unset!') + if self.uptime_secs is None: + raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') + if self.workers is None: + raise TProtocol.TProtocolException(message='Required field workers is unset!') + if self.status is None: + raise TProtocol.TProtocolException(message='Required field status is unset!') + if self.tasks is None: + raise TProtocol.TProtocolException(message='Required field tasks is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class SupervisorWorkers: + """ + Attributes: + - supervisor + - workers + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'supervisor', (SupervisorSummary, SupervisorSummary.thrift_spec), None, ), # 1 + (2, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 2 + ) + + def __hash__(self): + return 0 + hash(self.supervisor) + hash(self.workers) + + def __init__(self, supervisor=None, workers=None,): + self.supervisor = supervisor + self.workers = workers + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.supervisor = SupervisorSummary() + self.supervisor.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 2: if ftype == TType.LIST: - self.errors = [] - (_etype191, _size188) = iprot.readListBegin() - for _i192 in xrange(_size188): - _elem193 = ErrorInfo() - _elem193.read(iprot) - self.errors.append(_elem193) + self.workers = [] + (_etype363, _size360) = iprot.readListBegin() + for _i364 in xrange(_size360): + _elem365 = WorkerSummary() + _elem365.read(iprot) + self.workers.append(_elem365) iprot.readListEnd() else: iprot.skip(ftype) - elif fid == 7: - if ftype == TType.STRUCT: - self.stats = TaskStats() - self.stats.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2181,54 +3289,89 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('TaskSummary') - if self.task_id is not None: - oprot.writeFieldBegin('task_id', TType.I32, 1) - oprot.writeI32(self.task_id) - oprot.writeFieldEnd() - if self.component_id is not None: - oprot.writeFieldBegin('component_id', TType.STRING, 2) - oprot.writeString(self.component_id.encode('utf-8')) - oprot.writeFieldEnd() - if self.host is not None: - oprot.writeFieldBegin('host', TType.STRING, 3) - oprot.writeString(self.host.encode('utf-8')) - oprot.writeFieldEnd() - if self.port is not None: - oprot.writeFieldBegin('port', TType.I32, 4) - oprot.writeI32(self.port) - oprot.writeFieldEnd() - if self.uptime_secs is not None: - oprot.writeFieldBegin('uptime_secs', TType.I32, 5) - oprot.writeI32(self.uptime_secs) + oprot.writeStructBegin('SupervisorWorkers') + if self.supervisor is not None: + oprot.writeFieldBegin('supervisor', TType.STRUCT, 1) + self.supervisor.write(oprot) oprot.writeFieldEnd() - if self.errors is not None: - oprot.writeFieldBegin('errors', TType.LIST, 6) - oprot.writeListBegin(TType.STRUCT, len(self.errors)) - for iter194 in 
self.errors: - iter194.write(oprot) + if self.workers is not None: + oprot.writeFieldBegin('workers', TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.workers)) + for iter366 in self.workers: + iter366.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() - if self.stats is not None: - oprot.writeFieldBegin('stats', TType.STRUCT, 7) - self.stats.write(oprot) + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.supervisor is None: + raise TProtocol.TProtocolException(message='Required field supervisor is unset!') + if self.workers is None: + raise TProtocol.TProtocolException(message='Required field workers is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class KillOptions: + """ + Attributes: + - wait_secs + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'wait_secs', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.wait_secs) + + def __init__(self, wait_secs=None,): + self.wait_secs = wait_secs + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.wait_secs = iprot.readI32(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('KillOptions') + if self.wait_secs is not None: + oprot.writeFieldBegin('wait_secs', TType.I32, 1) + oprot.writeI32(self.wait_secs) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.task_id is None: - raise TProtocol.TProtocolException(message='Required field task_id is unset!') - if self.component_id is None: - raise TProtocol.TProtocolException(message='Required field component_id is unset!') - if self.host is None: - raise TProtocol.TProtocolException(message='Required field host is unset!') - if self.port is None: - raise TProtocol.TProtocolException(message='Required field port is unset!') - if self.uptime_secs is None: - raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') - if self.errors is None: - raise TProtocol.TProtocolException(message='Required field errors is unset!') return @@ -2243,31 +3386,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class TopologyInfo: +class RebalanceOptions: """ Attributes: - - id - - name - - uptime_secs - - tasks - - status + - wait_secs + - num_workers """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'id', None, None, ), # 1 - (2, TType.STRING, 'name', None, None, ), # 2 - (3, TType.I32, 'uptime_secs', None, None, ), # 3 - (4, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 4 - (5, TType.STRING, 'status', None, None, ), # 5 + (1, TType.I32, 'wait_secs', None, None, ), # 1 + (2, TType.I32, 'num_workers', None, None, ), # 2 ) - def __init__(self, id=None, name=None, uptime_secs=None, tasks=None, status=None,): - self.id = id - self.name = name - self.uptime_secs = uptime_secs - self.tasks = tasks - self.status = status + def __hash__(self): + return 0 + hash(self.wait_secs) + 
hash(self.num_workers) + + def __init__(self, wait_secs=None, num_workers=None,): + self.wait_secs = wait_secs + self.num_workers = num_workers def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2279,34 +3416,80 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.id = iprot.readString().decode('utf-8') + if ftype == TType.I32: + self.wait_secs = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: - if ftype == TType.STRING: - self.name = iprot.readString().decode('utf-8') + if ftype == TType.I32: + self.num_workers = iprot.readI32(); else: iprot.skip(ftype) - elif fid == 3: + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('RebalanceOptions') + if self.wait_secs is not None: + oprot.writeFieldBegin('wait_secs', TType.I32, 1) + oprot.writeI32(self.wait_secs) + oprot.writeFieldEnd() + if self.num_workers is not None: + oprot.writeFieldBegin('num_workers', TType.I32, 2) + oprot.writeI32(self.num_workers) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class SubmitOptions: + """ + Attributes: + - initial_status + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 
'initial_status', None, None, ), # 1 + ) + + def __hash__(self): + return 0 + hash(self.initial_status) + + def __init__(self, initial_status=None,): + self.initial_status = initial_status + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: if ftype == TType.I32: - self.uptime_secs = iprot.readI32(); - else: - iprot.skip(ftype) - elif fid == 4: - if ftype == TType.LIST: - self.tasks = [] - (_etype198, _size195) = iprot.readListBegin() - for _i199 in xrange(_size195): - _elem200 = TaskSummary() - _elem200.read(iprot) - self.tasks.append(_elem200) - iprot.readListEnd() - else: - iprot.skip(ftype) - elif fid == 5: - if ftype == TType.STRING: - self.status = iprot.readString().decode('utf-8') + self.initial_status = iprot.readI32(); else: iprot.skip(ftype) else: @@ -2318,44 +3501,17 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('TopologyInfo') - if self.id is not None: - oprot.writeFieldBegin('id', TType.STRING, 1) - oprot.writeString(self.id.encode('utf-8')) - oprot.writeFieldEnd() - if self.name is not None: - oprot.writeFieldBegin('name', TType.STRING, 2) - oprot.writeString(self.name.encode('utf-8')) - oprot.writeFieldEnd() - if self.uptime_secs is not None: - oprot.writeFieldBegin('uptime_secs', TType.I32, 3) - oprot.writeI32(self.uptime_secs) - oprot.writeFieldEnd() - if self.tasks is not None: - oprot.writeFieldBegin('tasks', TType.LIST, 4) - 
oprot.writeListBegin(TType.STRUCT, len(self.tasks)) - for iter201 in self.tasks: - iter201.write(oprot) - oprot.writeListEnd() - oprot.writeFieldEnd() - if self.status is not None: - oprot.writeFieldBegin('status', TType.STRING, 5) - oprot.writeString(self.status.encode('utf-8')) + oprot.writeStructBegin('SubmitOptions') + if self.initial_status is not None: + oprot.writeFieldBegin('initial_status', TType.I32, 1) + oprot.writeI32(self.initial_status) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.id is None: - raise TProtocol.TProtocolException(message='Required field id is unset!') - if self.name is None: - raise TProtocol.TProtocolException(message='Required field name is unset!') - if self.uptime_secs is None: - raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!') - if self.tasks is None: - raise TProtocol.TProtocolException(message='Required field tasks is unset!') - if self.status is None: - raise TProtocol.TProtocolException(message='Required field status is unset!') + if self.initial_status is None: + raise TProtocol.TProtocolException(message='Required field initial_status is unset!') return @@ -2370,25 +3526,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class WorkerSummary: +class MonitorOptions: """ Attributes: - - port - - topology - - tasks + - isEnable """ thrift_spec = ( None, # 0 - (1, TType.I32, 'port', None, None, ), # 1 - (2, TType.STRING, 'topology', None, None, ), # 2 - (3, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 3 + (1, TType.BOOL, 'isEnable', None, None, ), # 1 ) - def __init__(self, port=None, topology=None, tasks=None,): - self.port = port - self.topology = topology - self.tasks = tasks + def __hash__(self): + return 0 + hash(self.isEnable) + + def __init__(self, isEnable=None,): + self.isEnable = isEnable def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2400,24 +3553,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.port = iprot.readI32(); - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.topology = iprot.readString().decode('utf-8') - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.LIST: - self.tasks = [] - (_etype205, _size202) = iprot.readListBegin() - for _i206 in xrange(_size202): - _elem207 = TaskSummary() - _elem207.read(iprot) - self.tasks.append(_elem207) - iprot.readListEnd() + if ftype == TType.BOOL: + self.isEnable = iprot.readBool(); else: iprot.skip(ftype) else: @@ -2429,32 +3566,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('WorkerSummary') - if self.port is not None: - oprot.writeFieldBegin('port', TType.I32, 1) - oprot.writeI32(self.port) - oprot.writeFieldEnd() - if self.topology is not None: - oprot.writeFieldBegin('topology', TType.STRING, 2) - oprot.writeString(self.topology.encode('utf-8')) - oprot.writeFieldEnd() - if self.tasks is not None: - oprot.writeFieldBegin('tasks', TType.LIST, 3) - oprot.writeListBegin(TType.STRUCT, len(self.tasks)) - for iter208 in self.tasks: - iter208.write(oprot) - oprot.writeListEnd() + oprot.writeStructBegin('MonitorOptions') + if self.isEnable is not None: + oprot.writeFieldBegin('isEnable', TType.BOOL, 1) + oprot.writeBool(self.isEnable) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.port is None: - raise TProtocol.TProtocolException(message='Required field port is unset!') - if self.topology is None: - raise 
TProtocol.TProtocolException(message='Required field topology is unset!') - if self.tasks is None: - raise TProtocol.TProtocolException(message='Required field tasks is unset!') return @@ -2469,22 +3589,40 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class SupervisorWorkers: +class TaskMetricData: """ Attributes: - - supervisor - - workers + - task_id + - component_id + - gauge + - counter + - meter + - timer + - histogram """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'supervisor', (SupervisorSummary, SupervisorSummary.thrift_spec), None, ), # 1 - (2, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 2 + (1, TType.I32, 'task_id', None, None, ), # 1 + (2, TType.STRING, 'component_id', None, None, ), # 2 + (3, TType.MAP, 'gauge', (TType.STRING,None,TType.DOUBLE,None), None, ), # 3 + (4, TType.MAP, 'counter', (TType.STRING,None,TType.DOUBLE,None), None, ), # 4 + (5, TType.MAP, 'meter', (TType.STRING,None,TType.DOUBLE,None), None, ), # 5 + (6, TType.MAP, 'timer', (TType.STRING,None,TType.DOUBLE,None), None, ), # 6 + (7, TType.MAP, 'histogram', (TType.STRING,None,TType.DOUBLE,None), None, ), # 7 ) - def __init__(self, supervisor=None, workers=None,): - self.supervisor = supervisor - self.workers = workers + def __hash__(self): + return 0 + hash(self.task_id) + hash(self.component_id) + hash(self.gauge) + hash(self.counter) + hash(self.meter) + hash(self.timer) + hash(self.histogram) + + def __init__(self, task_id=None, component_id=None, gauge=None, counter=None, meter=None, timer=None, histogram=None,): + self.task_id = task_id + self.component_id = component_id + self.gauge = gauge + self.counter = counter + self.meter = meter + self.timer = timer + self.histogram = histogram def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ 
-2496,20 +3634,68 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.supervisor = SupervisorSummary() - self.supervisor.read(iprot) + if ftype == TType.I32: + self.task_id = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: - if ftype == TType.LIST: - self.workers = [] - (_etype212, _size209) = iprot.readListBegin() - for _i213 in xrange(_size209): - _elem214 = WorkerSummary() - _elem214.read(iprot) - self.workers.append(_elem214) - iprot.readListEnd() + if ftype == TType.STRING: + self.component_id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.gauge = {} + (_ktype368, _vtype369, _size367 ) = iprot.readMapBegin() + for _i371 in xrange(_size367): + _key372 = iprot.readString().decode('utf-8') + _val373 = iprot.readDouble(); + self.gauge[_key372] = _val373 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.MAP: + self.counter = {} + (_ktype375, _vtype376, _size374 ) = iprot.readMapBegin() + for _i378 in xrange(_size374): + _key379 = iprot.readString().decode('utf-8') + _val380 = iprot.readDouble(); + self.counter[_key379] = _val380 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.meter = {} + (_ktype382, _vtype383, _size381 ) = iprot.readMapBegin() + for _i385 in xrange(_size381): + _key386 = iprot.readString().decode('utf-8') + _val387 = iprot.readDouble(); + self.meter[_key386] = _val387 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.MAP: + self.timer = {} + (_ktype389, _vtype390, _size388 ) = iprot.readMapBegin() + for _i392 in xrange(_size388): + _key393 = iprot.readString().decode('utf-8') + _val394 = iprot.readDouble(); + self.timer[_key393] = _val394 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.MAP: + self.histogram = {} + (_ktype396, _vtype397, _size395 ) = 
iprot.readMapBegin() + for _i399 in xrange(_size395): + _key400 = iprot.readString().decode('utf-8') + _val401 = iprot.readDouble(); + self.histogram[_key400] = _val401 + iprot.readMapEnd() else: iprot.skip(ftype) else: @@ -2521,26 +3707,73 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('SupervisorWorkers') - if self.supervisor is not None: - oprot.writeFieldBegin('supervisor', TType.STRUCT, 1) - self.supervisor.write(oprot) + oprot.writeStructBegin('TaskMetricData') + if self.task_id is not None: + oprot.writeFieldBegin('task_id', TType.I32, 1) + oprot.writeI32(self.task_id) oprot.writeFieldEnd() - if self.workers is not None: - oprot.writeFieldBegin('workers', TType.LIST, 2) - oprot.writeListBegin(TType.STRUCT, len(self.workers)) - for iter215 in self.workers: - iter215.write(oprot) - oprot.writeListEnd() + if self.component_id is not None: + oprot.writeFieldBegin('component_id', TType.STRING, 2) + oprot.writeString(self.component_id.encode('utf-8')) + oprot.writeFieldEnd() + if self.gauge is not None: + oprot.writeFieldBegin('gauge', TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.gauge)) + for kiter402,viter403 in self.gauge.items(): + oprot.writeString(kiter402.encode('utf-8')) + oprot.writeDouble(viter403) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.counter is not None: + oprot.writeFieldBegin('counter', TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.counter)) + for kiter404,viter405 in self.counter.items(): + oprot.writeString(kiter404.encode('utf-8')) + oprot.writeDouble(viter405) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.meter is not None: + oprot.writeFieldBegin('meter', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.meter)) + for 
kiter406,viter407 in self.meter.items(): + oprot.writeString(kiter406.encode('utf-8')) + oprot.writeDouble(viter407) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.timer is not None: + oprot.writeFieldBegin('timer', TType.MAP, 6) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.timer)) + for kiter408,viter409 in self.timer.items(): + oprot.writeString(kiter408.encode('utf-8')) + oprot.writeDouble(viter409) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.histogram is not None: + oprot.writeFieldBegin('histogram', TType.MAP, 7) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.histogram)) + for kiter410,viter411 in self.histogram.items(): + oprot.writeString(kiter410.encode('utf-8')) + oprot.writeDouble(viter411) + oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.supervisor is None: - raise TProtocol.TProtocolException(message='Required field supervisor is unset!') - if self.workers is None: - raise TProtocol.TProtocolException(message='Required field workers is unset!') + if self.task_id is None: + raise TProtocol.TProtocolException(message='Required field task_id is unset!') + if self.component_id is None: + raise TProtocol.TProtocolException(message='Required field component_id is unset!') + if self.gauge is None: + raise TProtocol.TProtocolException(message='Required field gauge is unset!') + if self.counter is None: + raise TProtocol.TProtocolException(message='Required field counter is unset!') + if self.meter is None: + raise TProtocol.TProtocolException(message='Required field meter is unset!') + if self.timer is None: + raise TProtocol.TProtocolException(message='Required field timer is unset!') + if self.histogram is None: + raise TProtocol.TProtocolException(message='Required field histogram is unset!') return @@ -2555,19 +3788,40 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class KillOptions: +class WorkerMetricData: 
""" Attributes: - - wait_secs + - hostname + - port + - gauge + - counter + - meter + - timer + - histogram """ thrift_spec = ( None, # 0 - (1, TType.I32, 'wait_secs', None, None, ), # 1 + (1, TType.STRING, 'hostname', None, None, ), # 1 + (2, TType.I32, 'port', None, None, ), # 2 + (3, TType.MAP, 'gauge', (TType.STRING,None,TType.DOUBLE,None), None, ), # 3 + (4, TType.MAP, 'counter', (TType.STRING,None,TType.DOUBLE,None), None, ), # 4 + (5, TType.MAP, 'meter', (TType.STRING,None,TType.DOUBLE,None), None, ), # 5 + (6, TType.MAP, 'timer', (TType.STRING,None,TType.DOUBLE,None), None, ), # 6 + (7, TType.MAP, 'histogram', (TType.STRING,None,TType.DOUBLE,None), None, ), # 7 ) - def __init__(self, wait_secs=None,): - self.wait_secs = wait_secs + def __hash__(self): + return 0 + hash(self.hostname) + hash(self.port) + hash(self.gauge) + hash(self.counter) + hash(self.meter) + hash(self.timer) + hash(self.histogram) + + def __init__(self, hostname=None, port=None, gauge=None, counter=None, meter=None, timer=None, histogram=None,): + self.hostname = hostname + self.port = port + self.gauge = gauge + self.counter = counter + self.meter = meter + self.timer = timer + self.histogram = histogram def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2579,8 +3833,68 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: + if ftype == TType.STRING: + self.hostname = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 2: if ftype == TType.I32: - self.wait_secs = iprot.readI32(); + self.port = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.gauge = {} + (_ktype413, _vtype414, _size412 ) = iprot.readMapBegin() + for _i416 in xrange(_size412): + _key417 = iprot.readString().decode('utf-8') + _val418 = iprot.readDouble(); + self.gauge[_key417] = 
_val418 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.MAP: + self.counter = {} + (_ktype420, _vtype421, _size419 ) = iprot.readMapBegin() + for _i423 in xrange(_size419): + _key424 = iprot.readString().decode('utf-8') + _val425 = iprot.readDouble(); + self.counter[_key424] = _val425 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.meter = {} + (_ktype427, _vtype428, _size426 ) = iprot.readMapBegin() + for _i430 in xrange(_size426): + _key431 = iprot.readString().decode('utf-8') + _val432 = iprot.readDouble(); + self.meter[_key431] = _val432 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.MAP: + self.timer = {} + (_ktype434, _vtype435, _size433 ) = iprot.readMapBegin() + for _i437 in xrange(_size433): + _key438 = iprot.readString().decode('utf-8') + _val439 = iprot.readDouble(); + self.timer[_key438] = _val439 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.MAP: + self.histogram = {} + (_ktype441, _vtype442, _size440 ) = iprot.readMapBegin() + for _i444 in xrange(_size440): + _key445 = iprot.readString().decode('utf-8') + _val446 = iprot.readDouble(); + self.histogram[_key445] = _val446 + iprot.readMapEnd() else: iprot.skip(ftype) else: @@ -2592,15 +3906,73 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('KillOptions') - if self.wait_secs is not None: - oprot.writeFieldBegin('wait_secs', TType.I32, 1) - oprot.writeI32(self.wait_secs) + oprot.writeStructBegin('WorkerMetricData') + if self.hostname is not None: + oprot.writeFieldBegin('hostname', TType.STRING, 1) + oprot.writeString(self.hostname.encode('utf-8')) + oprot.writeFieldEnd() + if self.port is not None: + 
oprot.writeFieldBegin('port', TType.I32, 2) + oprot.writeI32(self.port) + oprot.writeFieldEnd() + if self.gauge is not None: + oprot.writeFieldBegin('gauge', TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.gauge)) + for kiter447,viter448 in self.gauge.items(): + oprot.writeString(kiter447.encode('utf-8')) + oprot.writeDouble(viter448) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.counter is not None: + oprot.writeFieldBegin('counter', TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.counter)) + for kiter449,viter450 in self.counter.items(): + oprot.writeString(kiter449.encode('utf-8')) + oprot.writeDouble(viter450) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.meter is not None: + oprot.writeFieldBegin('meter', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.meter)) + for kiter451,viter452 in self.meter.items(): + oprot.writeString(kiter451.encode('utf-8')) + oprot.writeDouble(viter452) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.timer is not None: + oprot.writeFieldBegin('timer', TType.MAP, 6) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.timer)) + for kiter453,viter454 in self.timer.items(): + oprot.writeString(kiter453.encode('utf-8')) + oprot.writeDouble(viter454) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.histogram is not None: + oprot.writeFieldBegin('histogram', TType.MAP, 7) + oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.histogram)) + for kiter455,viter456 in self.histogram.items(): + oprot.writeString(kiter455.encode('utf-8')) + oprot.writeDouble(viter456) + oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.hostname is None: + raise TProtocol.TProtocolException(message='Required field hostname is unset!') + if self.port is None: + raise TProtocol.TProtocolException(message='Required field port is unset!') + if self.gauge is None: + raise 
TProtocol.TProtocolException(message='Required field gauge is unset!') + if self.counter is None: + raise TProtocol.TProtocolException(message='Required field counter is unset!') + if self.meter is None: + raise TProtocol.TProtocolException(message='Required field meter is unset!') + if self.timer is None: + raise TProtocol.TProtocolException(message='Required field timer is unset!') + if self.histogram is None: + raise TProtocol.TProtocolException(message='Required field histogram is unset!') return @@ -2615,19 +3987,28 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class RebalanceOptions: +class TopologyMetricInfo: """ Attributes: - - wait_secs + - topology_id + - task_metric_list + - worker_metric_list """ thrift_spec = ( None, # 0 - (1, TType.I32, 'wait_secs', None, None, ), # 1 + (1, TType.STRING, 'topology_id', None, None, ), # 1 + (2, TType.LIST, 'task_metric_list', (TType.STRUCT,(TaskMetricData, TaskMetricData.thrift_spec)), None, ), # 2 + (3, TType.LIST, 'worker_metric_list', (TType.STRUCT,(WorkerMetricData, WorkerMetricData.thrift_spec)), None, ), # 3 ) - def __init__(self, wait_secs=None,): - self.wait_secs = wait_secs + def __hash__(self): + return 0 + hash(self.topology_id) + hash(self.task_metric_list) + hash(self.worker_metric_list) + + def __init__(self, topology_id=None, task_metric_list=None, worker_metric_list=None,): + self.topology_id = topology_id + self.task_metric_list = task_metric_list + self.worker_metric_list = worker_metric_list def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2639,8 +4020,30 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.wait_secs = iprot.readI32(); + if ftype == TType.STRING: + self.topology_id = iprot.readString().decode('utf-8') + else: + iprot.skip(ftype) + elif fid == 
2: + if ftype == TType.LIST: + self.task_metric_list = [] + (_etype460, _size457) = iprot.readListBegin() + for _i461 in xrange(_size457): + _elem462 = TaskMetricData() + _elem462.read(iprot) + self.task_metric_list.append(_elem462) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.worker_metric_list = [] + (_etype466, _size463) = iprot.readListBegin() + for _i467 in xrange(_size463): + _elem468 = WorkerMetricData() + _elem468.read(iprot) + self.worker_metric_list.append(_elem468) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -2652,15 +4055,31 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('RebalanceOptions') - if self.wait_secs is not None: - oprot.writeFieldBegin('wait_secs', TType.I32, 1) - oprot.writeI32(self.wait_secs) + oprot.writeStructBegin('TopologyMetricInfo') + if self.topology_id is not None: + oprot.writeFieldBegin('topology_id', TType.STRING, 1) + oprot.writeString(self.topology_id.encode('utf-8')) + oprot.writeFieldEnd() + if self.task_metric_list is not None: + oprot.writeFieldBegin('task_metric_list', TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.task_metric_list)) + for iter469 in self.task_metric_list: + iter469.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.worker_metric_list is not None: + oprot.writeFieldBegin('worker_metric_list', TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.worker_metric_list)) + for iter470 in self.worker_metric_list: + iter470.write(oprot) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.topology_id is None: + raise TProtocol.TProtocolException(message='Required field topology_id is unset!') return @@ -2688,6 +4107,9 @@ 
class DRPCRequest: (2, TType.STRING, 'request_id', None, None, ), # 2 ) + def __hash__(self): + return 0 + hash(self.func_args) + hash(self.request_id) + def __init__(self, func_args=None, request_id=None,): self.func_args = func_args self.request_id = request_id @@ -2762,6 +4184,9 @@ class DRPCExecutionException(Exception): (1, TType.STRING, 'msg', None, None, ), # 1 ) + def __hash__(self): + return 0 + hash(self.msg) + def __init__(self, msg=None,): self.msg = msg diff --git a/jstorm-server/bin/jstorm.py b/jstorm-server/bin/jstorm.py index 1bdbb72af..b01404e76 100644 --- a/jstorm-server/bin/jstorm.py +++ b/jstorm-server/bin/jstorm.py @@ -297,10 +297,36 @@ def unknown_command(*args): print "Unknown command: [jstorm %s]" % ' '.join(sys.argv[1:]) print_usage() +def metrics_Monitor(*args): + """Syntax: [jstorm metricsMonitor topologyname bool] + Enable or disable the metrics monitor of one topology. + """ + childopts = (" -Dstorm.root.logger=INFO,stdout -Dlog4j.configuration=File:%s/conf/aloha_log4j.properties" %JSTORM_DIR) + exec_storm_class( + "backtype.storm.command.metrics_monitor", + args=args, + jvmtype="-client -Xms256m -Xmx256m", + extrajars=[CONF_DIR, JSTORM_DIR + "/bin", LOG4J_CONF], + childopts=childopts) + +def list(*args): + """Syntax: [jstorm list] + + List cluster information + """ + childopts = (" -Dstorm.root.logger=INFO,stdout -Dlog4j.configuration=File:%s/conf/aloha_log4j.properties" %JSTORM_DIR) + exec_storm_class( + "backtype.storm.command.list", + args=args, + jvmtype="-client -Xms256m -Xmx256m", + extrajars=[CONF_DIR, JSTORM_DIR + "/bin", LOG4J_CONF], + childopts=childopts) + COMMANDS = {"jar": jar, "kill": kill, "nimbus": nimbus, "zktool": zktool, "drpc": drpc, "supervisor": supervisor, "localconfvalue": print_localconfvalue, "remoteconfvalue": print_remoteconfvalue, "classpath": print_classpath, - "activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage} + "activate": activate, "deactivate": deactivate, 
"rebalance": rebalance, "help": print_usage, + "metricsMonitor": metrics_Monitor, "list": list} def parse_config(config_list): global CONFIG_OPTS diff --git a/jstorm-server/conf/jstorm.log4j.properties b/jstorm-server/conf/jstorm.log4j.properties index 475886dab..e297710af 100644 --- a/jstorm-server/conf/jstorm.log4j.properties +++ b/jstorm-server/conf/jstorm.log4j.properties @@ -18,14 +18,14 @@ log4j.appender.stdout.layout.ConversionPattern = [%p %d{yyyy-MM-dd HH:mm:ss} % log4j.appender.D = org.apache.log4j.RollingFileAppender log4j.appender.D.File = ${jstorm.home}/logs/${logfile.name} log4j.appender.D.Append = true -log4j.appender.D.Threshold = INFO +log4j.appender.D.Threshold = INFO log4j.appender.D.MaxFileSize=1GB log4j.appender.D.MaxBackupIndex=5 log4j.appender.D.layout = org.apache.log4j.PatternLayout log4j.appender.D.layout.ConversionPattern = [%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:%L %t] %m%n - - + + ### jstorm metrics ### log4j.logger.com.alibaba.jstorm.daemon.worker.metrics= INFO, M log4j.additivity.com.alibaba.jstorm.daemon.worker.metrics=false @@ -33,6 +33,8 @@ log4j.logger.com.alibaba.jstorm.task.heartbeat= INFO, M log4j.additivity.com.alibaba.jstorm.task.heartbeat=false log4j.logger.com.alibaba.jstorm.daemon.worker.hearbeat= INFO, M log4j.additivity.com.alibaba.jstorm.daemon.worker.hearbeat=false +log4j.logger.com.alibaba.jstorm.metric= INFO, M +log4j.additivity.com.alibaba.jstorm.metric=false log4j.appender.M = org.apache.log4j.RollingFileAppender log4j.appender.M.File = ${jstorm.home}/logs/${logfile.name}.metrics @@ -45,4 +47,4 @@ log4j.appender.M.layout.ConversionPattern = [%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:% ##################jmonitor appender ########################## -#log4j.appender.jmonitor=com.alibaba.alimonitor.jmonitor.plugin.log4j.JMonitorLog4jAppender \ No newline at end of file +#log4j.appender.jmonitor=com.alibaba.alimonitor.jmonitor.plugin.log4j.JMonitorLog4jAppender diff --git a/jstorm-server/conf/storm.yaml b/jstorm-server/conf/storm.yaml 
index f8390d092..8f1097e07 100644 --- a/jstorm-server/conf/storm.yaml +++ b/jstorm-server/conf/storm.yaml @@ -37,3 +37,13 @@ ### default worker memory size, unit is byte worker.memory.size: 2147483648 + +# Metrics Monitor +# topology.performance.metrics: it is the switch flag for performance +# purpose. When it is disabled, the data of timer and histogram metrics +# will not be collected. +# topology.alimonitor.metrics.post: If it is disable, metrics data +# will only be printed to log. If it is enabled, the metrics data will be +# posted to alimonitor besides printing to log. + topology.performance.metrics: true + topology.alimonitor.metrics.post: true diff --git a/jstorm-server/pom.xml b/jstorm-server/pom.xml index e3a942170..31b357ef7 100644 --- a/jstorm-server/pom.xml +++ b/jstorm-server/pom.xml @@ -5,22 +5,34 @@ com.alibaba.jstorm jstorm-all - 0.9.5.1 + 0.9.6 .. - + + --> 4.0.0 com.alibaba.jstorm jstorm-server - 0.9.5.1 + 0.9.6 jar ${project.artifactId}-${project.version} jstorm server modules + + + + org.apache.maven.plugins + maven-surefire-plugin + + pertest + -Xms1024m -Xmx2048m + + + + @@ -43,17 +55,6 @@ test - - com.codahale.metrics - metrics-core - 3.0.1 - - - - com.codahale.metrics - metrics-jvm - 3.0.1 - diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java new file mode 100644 index 000000000..c2c49e898 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java @@ -0,0 +1,96 @@ +package com.alibaba.jstorm.callback.impl; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.log4j.Logger; + +import backtype.storm.Config; +import clojure.lang.IFn.OLD; + +import com.alibaba.jstorm.callback.BaseCallback; +import com.alibaba.jstorm.cluster.StormConfig; +import 
com.alibaba.jstorm.cluster.StormStatus; +import com.alibaba.jstorm.daemon.nimbus.NimbusData; +import com.alibaba.jstorm.daemon.nimbus.StatusType; +import com.alibaba.jstorm.schedule.DelayEventRunnable; +import com.alibaba.jstorm.utils.JStormUtils; + +/** + * + * + * The action when nimbus receive kill command 1. set the topology status as + * target 2. wait 2 * Timeout seconds later, do removing topology from ZK + * + * @author Longda + */ +public class DelayStatusTransitionCallback extends BaseCallback { + + private static Logger LOG = Logger.getLogger(DelayStatusTransitionCallback.class); + + public static final int DEFAULT_DELAY_SECONDS = 30; + + protected NimbusData data; + protected String topologyid; + protected StormStatus oldStatus; + protected StatusType newType; + protected StatusType nextAction; + + + public DelayStatusTransitionCallback(NimbusData data, + String topologyid, + StormStatus oldStatus, + StatusType newType, + StatusType nextAction) { + this.data = data; + this.topologyid = topologyid; + this.oldStatus = oldStatus; + this.newType = newType; + this.nextAction = nextAction; + } + + public int getDelaySeconds(Object[] args) { + if (oldStatus != null && oldStatus.getDelaySecs() > 0) { + return oldStatus.getDelaySecs(); + } + + Integer delaySecs = DelayStatusTransitionCallback.DEFAULT_DELAY_SECONDS; + if (args == null || args.length == 0 || args[0] == null) { + Map map = null; + try { + + map = StormConfig.read_nimbus_topology_conf(data.getConf(), + topologyid); + delaySecs = JStormUtils.parseInt( + map.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), + DEFAULT_DELAY_SECONDS); + } catch (Exception e) { + LOG.info("Failed to get topology configuration " + topologyid); + } + + } else { + delaySecs = JStormUtils.parseInt(args[0]); + } + + if (delaySecs == null || delaySecs <= 0) { + delaySecs = DelayStatusTransitionCallback.DEFAULT_DELAY_SECONDS; + } + + return delaySecs; + } + + @Override + public Object execute(T... 
args) { + int delaySecs = getDelaySeconds(args); + LOG.info("Delaying event " + newType + " for " + + delaySecs + " secs for " + topologyid); + + data.getScheduExec().schedule( + new DelayEventRunnable(data, topologyid, nextAction), + delaySecs, TimeUnit.SECONDS); + + return new StormStatus(delaySecs, newType); + } + +} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java index b5ae7edf1..40a98d1a2 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java @@ -1,79 +1,21 @@ package com.alibaba.jstorm.callback.impl; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import org.apache.log4j.Logger; - -import backtype.storm.Config; - -import com.alibaba.jstorm.callback.BaseCallback; -import com.alibaba.jstorm.cluster.StormConfig; -import com.alibaba.jstorm.cluster.StormStatus; import com.alibaba.jstorm.daemon.nimbus.NimbusData; import com.alibaba.jstorm.daemon.nimbus.StatusType; -import com.alibaba.jstorm.schedule.DelayEventRunnable; -import com.alibaba.jstorm.utils.JStormUtils; /** + * The action when nimbus receive killed command. * + * 1. change current topology status as killed 2. one TIMEOUT seconds later, do + * remove action, which remove topology from ZK * - * The action when nimbus receive kill command 1. set the topology status as - * killed 2. 
wait 2 * Timeout seconds later, do removing topology from ZK + * @author Longda * - * @author Li xin/Longda */ -public class KillTransitionCallback extends BaseCallback { - - private static Logger LOG = Logger.getLogger(KillTransitionCallback.class); - - public static final int DEFAULT_DELAY_SECONDS = 60; - - private NimbusData data; - private String topologyid; - private StormStatus oldStatus; +public class KillTransitionCallback extends DelayStatusTransitionCallback { public KillTransitionCallback(NimbusData data, String topologyid) { - this.data = data; - this.topologyid = topologyid; - } - - @Override - public Object execute(T... args) { - Integer delaySecs = KillTransitionCallback.DEFAULT_DELAY_SECONDS; - if (args == null || args.length == 0 || args[0] == null) { - Map map = null; - try { - - map = StormConfig.read_nimbus_topology_conf(data.getConf(), - topologyid); - delaySecs = JStormUtils.parseInt(map - .get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)); - if (delaySecs != null) { - delaySecs = delaySecs * 2; - } else { - LOG.info("Fail to get TOPOLOGY_MESSAGE_TIMEOUT_SECS " + map); - } - } catch (Exception e) { - LOG.info("Failed to get topology configuration " + topologyid); - } - - } else { - delaySecs = Integer.valueOf(String.valueOf(args[0])); - } - - if (delaySecs == null || delaySecs <= 0) { - delaySecs = KillTransitionCallback.DEFAULT_DELAY_SECONDS; - } - LOG.info("Delaying event " + StatusType.remove.getStatus() + " for " - + delaySecs + " secs for " + topologyid); - - data.getScheduExec().schedule( - new DelayEventRunnable(data, topologyid, StatusType.remove), - delaySecs, TimeUnit.SECONDS); - - return new StormStatus(delaySecs, StatusType.killed); + super(data, topologyid, null, StatusType.killed, StatusType.remove); } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java index e89057f58..d5d3303ac 100644 
--- a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java @@ -1,20 +1,8 @@ package com.alibaba.jstorm.callback.impl; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import org.apache.log4j.Logger; - -import backtype.storm.Config; - -import com.alibaba.jstorm.callback.BaseCallback; -import com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.cluster.StormStatus; import com.alibaba.jstorm.daemon.nimbus.NimbusData; import com.alibaba.jstorm.daemon.nimbus.StatusType; -import com.alibaba.jstorm.schedule.DelayEventRunnable; -import com.alibaba.jstorm.utils.JStormUtils; /** * The action when nimbus receive rebalance command. Rebalance command is only @@ -26,57 +14,14 @@ * @author Lixin/Longda * */ -public class RebalanceTransitionCallback extends BaseCallback { +public class RebalanceTransitionCallback extends DelayStatusTransitionCallback { - private static Logger LOG = Logger - .getLogger(RebalanceTransitionCallback.class); - - private NimbusData data; - private String topologyid; - private StormStatus oldStatus; public RebalanceTransitionCallback(NimbusData data, String topologyid, StormStatus status) { - this.data = data; - this.topologyid = topologyid; - this.oldStatus = status; + super(data, topologyid, status, StatusType.rebalancing, StatusType.do_rebalance); } - @Override - public Object execute(T... 
args) { - Integer delaySecs = KillTransitionCallback.DEFAULT_DELAY_SECONDS; - if (args == null || args.length == 0 || args[0] == null) { - Map map = null; - try { - map = StormConfig.read_nimbus_topology_conf(data.getConf(), - topologyid); - - delaySecs = JStormUtils.parseInt(map - .get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)); - if (delaySecs != null) { - delaySecs = delaySecs * 2; - } else { - LOG.info("Fail to get TOPOLOGY_MESSAGE_TIMEOUT_SECS " + map); - } - } catch (Exception e) { - LOG.info("Failed to get topology configuration " + topologyid); - } - - } else { - delaySecs = Integer.valueOf(String.valueOf(args[0])); - } - if (delaySecs == null || delaySecs <= 0) { - delaySecs = KillTransitionCallback.DEFAULT_DELAY_SECONDS; - } - - LOG.info("Delaying event " + StatusType.do_rebalance.getStatus() - + " for " + delaySecs + " secs for " + topologyid); - - data.getScheduExec().schedule( - new DelayEventRunnable(data, topologyid, - StatusType.do_rebalance), delaySecs, TimeUnit.SECONDS); - - return new StormStatus(delaySecs, StatusType.rebalancing, oldStatus); - } + } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java index 1d162b42b..3b64fc92d 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java @@ -5,7 +5,6 @@ import com.alibaba.jstorm.callback.BaseCallback; import com.alibaba.jstorm.cluster.StormBase; import com.alibaba.jstorm.daemon.nimbus.NimbusData; -import com.alibaba.jstorm.daemon.nimbus.NimbusUtils; /** * Remove topology /ZK-DIR/topology data @@ -39,9 +38,6 @@ public Object execute(T... 
args) { LOG.info("Topology " + topologyid + " has been removed "); return null; } - String topologyName = stormBase.getStormName(); - String group = stormBase.getGroup(); - data.getStormClusterState().remove_storm(topologyid); LOG.info("Successfully removed ZK items topology: " + topologyid); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/Cluster.java b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/Cluster.java index c30983d11..0467cbea6 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/Cluster.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/Cluster.java @@ -38,6 +38,11 @@ public class Cluster { public static final String TASKBEATS_ROOT = "taskbeats"; public static final String TASKERRORS_ROOT = "taskerrors"; public static final String MASTER_ROOT = "nimbus_master"; + public static final String MONITOR_ROOT = "monitor"; + + public static final String TASK_DIR = "task"; + public static final String WORKER_DIR = "worker"; + public static final String USER_DIR = "user"; public static final String ASSIGNMENTS_SUBTREE; public static final String TASKS_SUBTREE; @@ -46,6 +51,7 @@ public class Cluster { public static final String TASKBEATS_SUBTREE; public static final String TASKERRORS_SUBTREE; public static final String MASTER_SUBTREE; + public static final String MONITOR_SUBTREE; static { ASSIGNMENTS_SUBTREE = ZK_SEPERATOR + ASSIGNMENTS_ROOT; @@ -55,6 +61,7 @@ public class Cluster { TASKBEATS_SUBTREE = ZK_SEPERATOR + TASKBEATS_ROOT; TASKERRORS_SUBTREE = ZK_SEPERATOR + TASKERRORS_ROOT; MASTER_SUBTREE = ZK_SEPERATOR + MASTER_ROOT; + MONITOR_SUBTREE = ZK_SEPERATOR + MONITOR_ROOT; } public static String supervisor_path(String id) { @@ -93,6 +100,34 @@ public static String taskerror_path(String topology_id, int task_id) { return taskerror_storm_root(topology_id) + ZK_SEPERATOR + task_id; } + public static String monitor_path(String topology_id) { + return MONITOR_SUBTREE + ZK_SEPERATOR + topology_id; + } + + 
public static String monitor_taskdir_path(String topology_id) { + return monitor_path(topology_id) + ZK_SEPERATOR + TASK_DIR; + } + + public static String monitor_workerdir_path(String topology_id) { + return monitor_path(topology_id) + ZK_SEPERATOR + WORKER_DIR; + } + + public static String monitor_userdir_path(String topology_id) { + return monitor_path(topology_id) + ZK_SEPERATOR + USER_DIR; + } + + public static String monitor_task_path(String topology_id, String task_id) { + return monitor_taskdir_path(topology_id) + ZK_SEPERATOR + task_id; + } + + public static String monitor_worker_path(String topology_id, String worker_id) { + return monitor_workerdir_path(topology_id) + ZK_SEPERATOR + worker_id; + } + + public static String monitor_user_path(String topology_id, String worker_id) { + return monitor_userdir_path(topology_id) + ZK_SEPERATOR + worker_id; + } + public static Object maybe_deserialize(byte[] data) { if (data == null) { return null; @@ -138,6 +173,34 @@ public static HashMap topology_task_info( return rtn; } + + /** + * return Map + * + * @param zkCluster + * @param topology_id + * @return + * @throws Exception + */ + public static HashMap topology_task_compType( + StormClusterState zkCluster, String topology_id) throws Exception { + HashMap rtn = new HashMap(); + + List taks_ids = zkCluster.task_ids(topology_id); + + for (Integer task : taks_ids) { + TaskInfo info = zkCluster.task_info(topology_id, task); + if (info == null) { + LOG.error("Failed to get TaskInfo of " + topology_id + + ",taskid:" + task); + continue; + } + String componentType = info.getComponentType(); + rtn.put(task, componentType); + } + + return rtn; + } /** * if one topology's name equal the input storm_name, then return the diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java index 65298ed15..ffa6da22b 100644 --- 
a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java @@ -1,12 +1,19 @@ package com.alibaba.jstorm.cluster; import java.util.List; +import java.util.Map; + +import backtype.storm.utils.Utils; import com.alibaba.jstorm.callback.RunnableCallback; import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; +import com.alibaba.jstorm.metric.UserDefMetric; +import com.alibaba.jstorm.metric.UserDefMetricData; import com.alibaba.jstorm.task.Assignment; import com.alibaba.jstorm.task.AssignmentBak; import com.alibaba.jstorm.task.TaskInfo; +import com.alibaba.jstorm.task.TaskMetricInfo; import com.alibaba.jstorm.task.error.TaskError; import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat; @@ -61,6 +68,9 @@ public void set_task(String topology_id, int task_id, TaskInfo info) public TaskHeartbeat task_heartbeat(String topology_id, int task_id) throws Exception; + + public Map task_heartbeat(String topologyId) + throws Exception; public void task_heartbeat(String topology_id, int task_id, TaskHeartbeat info) throws Exception; @@ -82,7 +92,12 @@ public void supervisor_heartbeat(String supervisor_id, SupervisorInfo info) public void report_task_error(String topology_id, int task_id, Throwable error) throws Exception; + + public void report_task_error(String topology_id, int task_id, + String error) throws Exception; + public String topo_lastErr_time(String topologyId) throws Exception; + public List task_errors(String topology_id, int task_id) throws Exception; @@ -99,4 +114,40 @@ public List task_errors(String topology_id, int task_id) public void unregister_nimbus_host(String host) throws Exception; public void disconnect() throws Exception; + + public void set_storm_monitor(String topologyId, StormMonitor metricsMonitor) throws Exception; + + public StormMonitor get_storm_monitor(String topologyId) throws 
Exception; + + public UserDefMetricData get_userDef_metric(String topologyId,String workerId) throws Exception; + + public Map task_info_list(String topologyId) throws Exception; + + public void update_task_metric(String topologyId, String taskId, TaskMetricInfo metricInfo) throws Exception; + + public void update_worker_metric(String topologyId, String workerId, WorkerMetricInfo metricInfo) throws Exception; + + public List get_task_metric_list(String topologyId) throws Exception; + + public List get_metric_taskIds(String topologyId) throws Exception; + + public void remove_metric_task(String topologyId, String taskId) throws Exception; + + public List get_worker_metric_list(String topologyId) throws Exception; + + public List get_metric_workerIds(String topologyId) throws Exception; + + public void remove_metric_worker(String topologyId, String workerId) throws Exception; + + public List get_metric_users(String topologyId) throws Exception; + + public void remove_metric_user(String topologyId, String workerId) throws Exception; + + public void update_userDef_metric(String topologyId, String workerId, UserDefMetricData metricInfo) throws Exception; + + public List monitor_user_workers(String topologyId) throws Exception; + + public TaskMetricInfo get_task_metric(String topologyId, int taskId) throws Exception; + + public WorkerMetricInfo get_worker_metric(String topologyId, String workerId) throws Exception; } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java new file mode 100644 index 000000000..f73f37a73 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java @@ -0,0 +1,33 @@ +package com.alibaba.jstorm.cluster; + +import java.io.Serializable; + +import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang.builder.ToStringStyle; + +/** + * Topology metrics monitor in ZK + */ + +public class 
StormMonitor implements Serializable { + private static final long serialVersionUID = -6023196346496305314L; + private boolean metricsMonitor; + + public StormMonitor(boolean metricsMonitor) { + this.metricsMonitor = metricsMonitor; + } + + public void setMetrics(boolean metrics) { + this.metricsMonitor = metrics; + } + + public boolean getMetrics() { + return this.metricsMonitor; + } + + @Override + public String toString() { + return ToStringBuilder.reflectionToString(this, + ToStringStyle.SHORT_PREFIX_STYLE); + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java index 7b980c018..f33e176dd 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java @@ -8,7 +8,7 @@ import com.alibaba.jstorm.daemon.nimbus.StatusType; /** - * author: lixin + * author: lixin/longda * * Dedicate Topology status * @@ -21,33 +21,26 @@ public class StormStatus implements Serializable { private static final long serialVersionUID = -2276901070967900100L; private StatusType type; + @Deprecated private int killTimeSecs; private int delaySecs; private StormStatus oldStatus = null; - public StormStatus(int killTimeSecs, StatusType type, StormStatus oldStatus) { - this.type = type; - this.killTimeSecs = killTimeSecs; - this.oldStatus = oldStatus; + public StormStatus(StatusType type) { + this(0, type); } - - public StormStatus(int killTimeSecs, StatusType type) { - this.type = type; - this.killTimeSecs = killTimeSecs; + + public StormStatus(int delaySecs, StatusType type) { + this(type, delaySecs, null); } public StormStatus(StatusType type, int delaySecs, StormStatus oldStatus) { this.type = type; this.delaySecs = delaySecs; + this.killTimeSecs = delaySecs; this.oldStatus = oldStatus; } - public StormStatus(StatusType type) { - this.type = type; - 
this.killTimeSecs = -1; - this.delaySecs = -1; - } - public StatusType getStatusType() { return type; } @@ -56,10 +49,12 @@ public void setStatusType(StatusType type) { this.type = type; } + @Deprecated public Integer getKillTimeSecs() { return killTimeSecs; } + @Deprecated public void setKillTimeSecs(int killTimeSecs) { this.killTimeSecs = killTimeSecs; } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java index 1278fd693..f7d2da1a0 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java @@ -5,6 +5,8 @@ import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; +import java.util.HashMap; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; @@ -21,6 +23,7 @@ import com.alibaba.jstorm.task.Assignment; import com.alibaba.jstorm.task.AssignmentBak; import com.alibaba.jstorm.task.TaskInfo; +import com.alibaba.jstorm.task.TaskMetricInfo; import com.alibaba.jstorm.task.error.TaskError; import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat; import com.alibaba.jstorm.utils.JStormUtils; @@ -28,6 +31,9 @@ import com.alibaba.jstorm.utils.TimeUtils; import com.alibaba.jstorm.zk.ZkConstant; import com.alibaba.jstorm.zk.ZkTool; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; +import com.alibaba.jstorm.metric.UserDefMetric; +import com.alibaba.jstorm.metric.UserDefMetricData; public class StormZkClusterState implements StormClusterState { private static Logger LOG = Logger.getLogger(StormZkClusterState.class); @@ -118,7 +124,7 @@ public Object execute(T... 
args) { String[] pathlist = JStormUtils.mk_arr(Cluster.ASSIGNMENTS_SUBTREE, Cluster.TASKS_SUBTREE, Cluster.STORMS_SUBTREE, Cluster.SUPERVISORS_SUBTREE, Cluster.TASKBEATS_SUBTREE, - Cluster.TASKERRORS_SUBTREE); + Cluster.TASKERRORS_SUBTREE, Cluster.MONITOR_SUBTREE); for (String path : pathlist) { cluster_state.mkdirs(path); } @@ -196,6 +202,11 @@ public void activate_storm(String topologyId, StormBase stormBase) public List active_storms() throws Exception { return cluster_state.get_children(Cluster.STORMS_SUBTREE, false); } + + @Override + public List monitor_user_workers(String topologyId) throws Exception { + return cluster_state.get_children(Cluster.monitor_userdir_path(topologyId), false); + } @Override public List heartbeat_storms() throws Exception { @@ -215,6 +226,7 @@ public void remove_storm(String topologyId) throws Exception { // wait 10 seconds, so supervisor will kill worker smoothly JStormUtils.sleepMs(10000); cluster_state.delete_node(Cluster.storm_task_root(topologyId)); + cluster_state.delete_node(Cluster.monitor_path(topologyId)); this.remove_storm_base(topologyId); } @@ -234,31 +246,53 @@ public void remove_task_heartbeat(String topologyId, int taskId) @Override public void report_task_error(String topologyId, int taskId, Throwable error) throws Exception { + report_task_error(topologyId, taskId, new String(JStormUtils.getErrorInfo(error))); + } + + public void report_task_error(String topologyId, int taskId, String error) + throws Exception { + boolean found = false; String path = Cluster.taskerror_path(topologyId, taskId); cluster_state.mkdirs(path); List children = new ArrayList(); + String timeStamp = String.valueOf(TimeUtils.current_time_secs()); + String timestampPath = path + Cluster.ZK_SEPERATOR + timeStamp; + for (String str : cluster_state.get_children(path, false)) { children.add(Integer.parseInt(str)); + + String errorPath = path + "/" + str; + byte[] data = cluster_state.get_data(errorPath, false); + if (data == null) continue; + 
String errorInfo = new String(data); + if (errorInfo.equals(error)) { + cluster_state.delete_node(errorPath); + cluster_state.set_data(timestampPath, error.getBytes()); + found = true; + break; + } } - Collections.sort(children); - - while (children.size() >= 10) { - cluster_state.delete_node(path + Cluster.ZK_SEPERATOR - + children.remove(0)); - } - - String timestampPath = path + Cluster.ZK_SEPERATOR - + TimeUtils.current_time_secs(); - byte[] errorData = new String(JStormUtils.getErrorInfo(error)) - .getBytes(); + if (found == false) { + Collections.sort(children); - cluster_state.set_data(timestampPath, errorData); + while (children.size() >= 10) { + cluster_state.delete_node(path + Cluster.ZK_SEPERATOR + + children.remove(0)); + } + cluster_state.set_data(timestampPath, error.getBytes()); + } + + //Set error information in task error topology patch + String taskErrTopoPath = Cluster.taskerror_storm_root(topologyId); + cluster_state.set_data(taskErrTopoPath + "/" + "last_error", timeStamp.getBytes()); } + + @Override public void set_task(String topologyId, int taskId, TaskInfo info) throws Exception { @@ -324,6 +358,15 @@ public List supervisors(RunnableCallback callback) throws Exception { callback != null); } + @Override + public String topo_lastErr_time(String topologyId) throws Exception { + String path = Cluster.taskerror_storm_root(topologyId); + String lastErrTime = null; + byte[] data = cluster_state.get_data(path + "/" + "last_error", false); + if (data != null) lastErrTime = new String(data); + return lastErrTime; + } + @Override public List task_error_storms() throws Exception { return cluster_state.get_children(Cluster.TASKERRORS_SUBTREE, false); @@ -378,6 +421,29 @@ public TaskHeartbeat task_heartbeat(String topologyId, int taskId) } return (TaskHeartbeat) data; } + + @Override + public Map task_heartbeat(String topologyId) + throws Exception { + Map ret = new HashMap(); + + String topoTbPath = Cluster.taskbeat_storm_root(topologyId); + List 
taskList = cluster_state.get_children(topoTbPath, false); + + for (String taskId : taskList) { + String taskbeatPath = Cluster.taskbeat_path(topologyId, Integer.parseInt(taskId)); + + byte[] znodeData = cluster_state.get_data(taskbeatPath, false); + + Object data = Cluster.maybe_deserialize(znodeData); + if (data == null) { + continue; + } + ret.put(taskId, (TaskHeartbeat)data); + } + + return ret; + } @Override public void task_heartbeat(String topologyId, int taskId, TaskHeartbeat info) @@ -416,6 +482,20 @@ public TaskInfo task_info(String topologyId, int taskId) throws Exception { } return (TaskInfo) data; } + + @Override + public Map task_info_list(String topologyId) throws Exception { + Map taskInfoList = new HashMap(); + + List taskIds = task_ids(topologyId); + + for (Integer taskId : taskIds) { + TaskInfo taskInfo = task_info(topologyId, taskId); + taskInfoList.put(taskId, taskInfo); + } + + return taskInfoList; + } @Override public List task_storms() throws Exception { @@ -518,5 +598,154 @@ public boolean try_to_be_leader(String path, String host, } return true; } - -} + + @Override + public void set_storm_monitor(String topologyId, StormMonitor metricsMonitor) throws Exception { + String monitorPath = Cluster.monitor_path(topologyId); + cluster_state.set_data(monitorPath, Utils.serialize(metricsMonitor)); + cluster_state.mkdirs(Cluster.monitor_taskdir_path(topologyId)); + cluster_state.mkdirs(Cluster.monitor_workerdir_path(topologyId)); + cluster_state.mkdirs(Cluster.monitor_userdir_path(topologyId)); + // Update the task list under /zk_root/monitor/task + //Map taskInfoList = task_info_list(topologyId); + //for(Entry taskEntry : taskInfoList.entrySet()) { + // TaskMetricInfo taskMetricInfo = new TaskMetricInfo(taskEntry.getValue().getComponentId()); + // String taskMetricsPath = monitorPath + taskEntry.getKey(); + // cluster_state.set_data(taskMetricsPath, Utils.serialize(taskMetricInfo)); + //} + } + + @Override + public StormMonitor 
get_storm_monitor(String topologyId) throws Exception { + String monitorPath = Cluster.monitor_path(topologyId); + + byte[] metricsMonitorData = cluster_state.get_data(monitorPath, false); + Object metricsMonitor = Cluster.maybe_deserialize(metricsMonitorData); + + return (StormMonitor)metricsMonitor; + } + public UserDefMetricData get_userDef_metric(String topologyId,String workerId) throws Exception{ + String workerMetricPath = Cluster.monitor_user_path(topologyId, workerId); + byte[] userMetricsData=cluster_state.get_data(workerMetricPath, false); + Object userMetrics = Cluster.maybe_deserialize(userMetricsData); + return (UserDefMetricData)userMetrics; + } + + @Override + public void update_task_metric(String topologyId, String taskId, TaskMetricInfo metricInfo) throws Exception { + String taskMetricPath = Cluster.monitor_task_path(topologyId, taskId); + cluster_state.set_data(taskMetricPath, Utils.serialize(metricInfo)); + } + + @Override + public void update_worker_metric(String topologyId, String workerId, WorkerMetricInfo metricInfo) throws Exception { + String workerMetricPath = Cluster.monitor_worker_path(topologyId, workerId); + cluster_state.set_data(workerMetricPath, Utils.serialize(metricInfo)); + } + + @Override + public void update_userDef_metric(String topologyId, String workerId, UserDefMetricData metricInfo) throws Exception { + String userMetricPath = Cluster.monitor_user_path(topologyId, workerId); + cluster_state.set_data(userMetricPath, Utils.serialize(metricInfo)); + } + + @Override + public List get_task_metric_list(String topologyId) throws Exception { + List taskMetricList = new ArrayList(); + + String monitorTaskDirPath = Cluster.monitor_taskdir_path(topologyId); + List taskList = cluster_state.get_children(monitorTaskDirPath, false); + + for(String taskId : taskList) { + Object taskMetric = Cluster.maybe_deserialize( + cluster_state.get_data(Cluster.monitor_task_path(topologyId, taskId), false)); + if(taskMetric != null) { + 
taskMetricList.add((TaskMetricInfo)taskMetric); + } else { + LOG.warn("get_task_metric_list failed, topoId: " + topologyId + " taskId:" + taskId); + } + } + + return taskMetricList; + } + + @Override + public List get_metric_taskIds(String topologyId) throws Exception { + String monitorTaskDirPath = Cluster.monitor_taskdir_path(topologyId); + return cluster_state.get_children(monitorTaskDirPath, false); + } + + @Override + public void remove_metric_task(String topologyId, String taskId) throws Exception { + String monitorTaskPath = Cluster.monitor_task_path(topologyId, taskId); + cluster_state.delete_node(monitorTaskPath); + } + + @Override + public List get_worker_metric_list(String topologyId) throws Exception { + List workerMetricList = new ArrayList(); + + String monitorWorkerDirPath = Cluster.monitor_workerdir_path(topologyId); + List workerList = cluster_state.get_children(monitorWorkerDirPath, false); + + for(String workerId : workerList) { + byte[] byteArray = cluster_state.get_data(Cluster.monitor_worker_path(topologyId, workerId), false); + if(byteArray != null) { + WorkerMetricInfo workerMetric = (WorkerMetricInfo)Cluster.maybe_deserialize(byteArray); + if(workerMetric != null) { + workerMetricList.add(workerMetric); + } + } else { + LOG.warn("get_worker_metric_list failed, workerMetric is null, topoId: " + topologyId + " workerId:" + workerId); + } + } + + return workerMetricList; + } + + @Override + public List get_metric_workerIds(String topologyId) throws Exception { + String monitorWorkerDirPath = Cluster.monitor_workerdir_path(topologyId); + return cluster_state.get_children(monitorWorkerDirPath, false); + } + + @Override + public void remove_metric_worker(String topologyId, String workerId) throws Exception { + String monitorWorkerPath = Cluster.monitor_worker_path(topologyId, workerId); + cluster_state.delete_node(monitorWorkerPath); + } + + @Override + public List get_metric_users(String topologyId) throws Exception { + String monitorUserDirPath 
= Cluster.monitor_userdir_path(topologyId); + return cluster_state.get_children(monitorUserDirPath, false); + } + + @Override + public void remove_metric_user(String topologyId, String workerId) throws Exception { + String monitorUserPath = Cluster.monitor_user_path(topologyId, workerId); + cluster_state.delete_node(monitorUserPath); + } + + @Override + public TaskMetricInfo get_task_metric(String topologyId, int taskId) throws Exception { + TaskMetricInfo taskMetric = null; + + String monitorTaskPath = Cluster.monitor_task_path(topologyId, String.valueOf(taskId)); + taskMetric = (TaskMetricInfo)(Cluster.maybe_deserialize( + cluster_state.get_data(monitorTaskPath, false))); + + return taskMetric; + } + + @Override + public WorkerMetricInfo get_worker_metric(String topologyId, String workerId) throws Exception { + WorkerMetricInfo workerMetric = null; + + String monitorWorkerPath = Cluster.monitor_worker_path(topologyId, workerId); + workerMetric = (WorkerMetricInfo)(Cluster.maybe_deserialize( + cluster_state.get_data(monitorWorkerPath, false))); + + return workerMetric; + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/container/SystemOperation.java b/jstorm-server/src/main/java/com/alibaba/jstorm/container/SystemOperation.java index b032bff8e..358040c68 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/container/SystemOperation.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/container/SystemOperation.java @@ -31,14 +31,14 @@ public static void umount(String name) throws IOException { } public static String exec(String cmd) throws IOException { - LOG.info("Shell cmd: " + cmd); + LOG.debug("Shell cmd: " + cmd); Process process = new ProcessBuilder(new String[] { "/bin/bash", "-c", cmd }).start(); try { process.waitFor(); String output = IOUtils.toString(process.getInputStream()); String errorOutput = IOUtils.toString(process.getErrorStream()); - LOG.info("Shell Output: " + output); + LOG.debug("Shell 
Output: " + output); if (errorOutput.length() != 0) { LOG.error("Shell Error Output: " + errorOutput); throw new IOException(errorOutput); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java index 401569f39..e36f7fa9a 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java @@ -5,6 +5,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,6 +29,7 @@ import com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.daemon.supervisor.Httpserver; import com.alibaba.jstorm.daemon.worker.hearbeat.SyncContainerHb; +import com.alibaba.jstorm.daemon.worker.metrics.UploadMetricFromZK; import com.alibaba.jstorm.schedule.CleanRunnable; import com.alibaba.jstorm.schedule.FollowerRunnable; import com.alibaba.jstorm.schedule.MonitorRunnable; @@ -66,6 +68,8 @@ public class NimbusServer { private FollowerRunnable follower; private Httpserver hs; + + private UploadMetricFromZK uploadMetric; private List smartThreads = new ArrayList(); @@ -116,6 +120,9 @@ private void launchServer(final Map conf, INimbus inimbus) { initContainerHBThread(conf); + if (ConfigExtension.isAlimonitorMetricsPost(conf)) + initUploadMetricThread(data); + while (!data.isLeader()) Utils.sleep(5000); @@ -317,6 +324,16 @@ public void run() { }); } + + private void initUploadMetricThread(NimbusData data) { + ScheduledExecutorService scheduleService = data.getScheduExec(); + + uploadMetric = new UploadMetricFromZK(data); + + scheduleService.scheduleWithFixedDelay(uploadMetric, 120, 60, TimeUnit.SECONDS); + + LOG.info("Successfully init metrics uploading thread"); 
+ } public void cleanup() { if (isShutdown.compareAndSet(false, true) == false) { @@ -350,6 +367,11 @@ public void cleanup() { follower.clean(); LOG.info("Successfully shutdown follower thread"); } + + if (uploadMetric != null) { + uploadMetric.clean(); + LOG.info("Successfully shutdown UploadMetric thread"); + } if (data != null) { data.cleanup(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java index acd1157d8..76d00d74e 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java @@ -23,17 +23,23 @@ import backtype.storm.generated.StateSpoutSpec; import backtype.storm.generated.StormTopology; import backtype.storm.generated.SupervisorSummary; +import backtype.storm.generated.TaskMetricData; import backtype.storm.generated.TaskSummary; import backtype.storm.generated.TopologySummary; +import backtype.storm.generated.WorkerMetricData; import backtype.storm.generated.WorkerSummary; import backtype.storm.utils.ThriftTopologyUtils; import com.alibaba.jstorm.cluster.Cluster; import com.alibaba.jstorm.cluster.StormClusterState; import com.alibaba.jstorm.cluster.StormConfig; +import com.alibaba.jstorm.cluster.StormMonitor; import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot; +import com.alibaba.jstorm.metric.MetricDef; import com.alibaba.jstorm.task.Assignment; +import com.alibaba.jstorm.task.TaskMetricInfo; import com.alibaba.jstorm.task.TkHbCacheTime; import com.alibaba.jstorm.task.error.TaskError; import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat; @@ -471,7 +477,7 @@ public static void transition(NimbusData data, String topologyid, public static TopologySummary mkTopologySummary(Assignment 
assignment, String topologyId, String topologyName, String status, - int uptime_secs) { + int uptime_secs, String lastErrTimeStamp) { int num_workers = assignment.getWorkers().size(); int num_tasks = 0; @@ -479,9 +485,20 @@ public static TopologySummary mkTopologySummary(Assignment assignment, for (ResourceWorkerSlot worker : assignment.getWorkers()) { num_tasks = num_tasks + worker.getTasks().size(); } + + long currentTimeSecs = System.currentTimeMillis() / 1000; + String errorInfo; + if (lastErrTimeStamp == null) + errorInfo = "N"; + else { + if ((currentTimeSecs - Long.valueOf(lastErrTimeStamp)) < 1800) + errorInfo = "Y"; + else + errorInfo = "N"; + } TopologySummary ret = new TopologySummary(topologyId, topologyName, - status, uptime_secs, num_tasks, num_workers); + status, uptime_secs, num_tasks, num_workers, errorInfo); return ret; } @@ -548,11 +565,12 @@ public int compare(SupervisorSummary o1, SupervisorSummary o2) { } public static TaskSummary mkSimpleTaskSummary(ResourceWorkerSlot resource, - int taskId, String component, String host, int uptime) { + int taskId, String component, String componentType, String host, int uptime) { TaskSummary ret = new TaskSummary(); ret.set_task_id(taskId); ret.set_component_id(component); + ret.set_component_type(componentType); ret.set_host(host); ret.set_port(resource.getPort()); ret.set_uptime_secs(uptime); @@ -632,4 +650,79 @@ public static List mkWorkerSummary(String topology, } return result; } + + public static void updateMetricMonitorStatus(StormClusterState clusterState, + String topologyId, boolean isEnable) throws Exception { + StormMonitor stormMonitor = new StormMonitor(isEnable); + clusterState.set_storm_monitor(topologyId, stormMonitor); + } + + public static void updateMetricsInfo(NimbusData data, String topologyId, + Assignment assignment) { + List taskList = new ArrayList(); + List workerList = new ArrayList(); + + StormClusterState clusterState = data.getStormClusterState(); + + Set workerSlotSet = 
assignment.getWorkers(); + + for (ResourceWorkerSlot workerSlot : workerSlotSet) { + String workerId = workerSlot.getHostname() + ":" + workerSlot.getPort(); + workerList.add(workerId); + + taskList.addAll(workerSlot.getTasks()); + } + + try { + //Remove the obsolete tasks of metrics monitor in ZK + List metricTaskList = clusterState.get_metric_taskIds(topologyId); + for (String task : metricTaskList) { + Integer taskId = Integer.valueOf(task); + if(taskList.contains(taskId) == false) + clusterState.remove_metric_task(topologyId, String.valueOf(taskId)); + } + + //Remove the obsolete workers of metrics monitor in ZK + List metricWorkerList = clusterState.get_metric_workerIds(topologyId); + for (String workerId : metricWorkerList) { + if (workerList.contains(workerId) == false) + clusterState.remove_metric_worker(topologyId, workerId); + } + + //Remove the obsolete user workers of metrics monitor in ZK + List metricUserList = clusterState.get_metric_users(topologyId); + for (String workerId : metricUserList) { + if (workerList.contains(workerId) == false) + clusterState.remove_metric_user(topologyId, workerId); + } + } catch (Exception e) { + LOG.error("Failed to update metrics info when rebalance or reassignment, topologyId=" + + topologyId, e); + } + } + + public static void updateTaskMetricData(TaskMetricData metricData, TaskMetricInfo metricInfo) { + metricData.set_task_id(Integer.valueOf(metricInfo.getTaskId())); + metricData.set_component_id(metricInfo.getComponent()); + metricData.set_gauge(metricInfo.getGaugeData()); + metricData.set_counter(metricInfo.getCounterData()); + metricData.set_meter(metricInfo.getMeterData()); + metricData.set_timer(metricInfo.getTimerData()); + metricData.set_histogram(metricInfo.getHistogramData()); + } + + public static void updateWorkerMetricData(WorkerMetricData metricData, WorkerMetricInfo metricInfo) { + metricData.set_hostname(metricInfo.getHostName()); + metricData.set_port(metricInfo.getPort()); + 
metricData.set_gauge(metricInfo.getGaugeData()); + metricData.set_counter(metricInfo.getCounterData()); + metricData.set_meter(metricInfo.getMeterData()); + metricData.set_timer(metricInfo.getTimerData()); + metricData.set_histogram(metricInfo.getHistogramData()); + + //Add cpu and Mem into gauge map + Map gaugeMap = metricData.get_gauge(); + gaugeMap.put(MetricDef.CPU_USED_RATIO, metricInfo.getUsedCpu()); + gaugeMap.put(MetricDef.MEMORY_USED,((Long) metricInfo.getUsedMem()).doubleValue()); + } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java index d5e56bcad..e54f6739e 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java @@ -31,6 +31,7 @@ import backtype.storm.generated.InvalidTopologyException; import backtype.storm.generated.KillOptions; import backtype.storm.generated.Nimbus.Iface; +import backtype.storm.generated.MonitorOptions; import backtype.storm.generated.NotAliveException; import backtype.storm.generated.RebalanceOptions; import backtype.storm.generated.SpoutSpec; @@ -45,10 +46,14 @@ import backtype.storm.generated.TopologyInitialStatus; import backtype.storm.generated.TopologySummary; import backtype.storm.generated.WorkerSummary; +import backtype.storm.generated.TopologyMetricInfo; +import backtype.storm.generated.TaskMetricData; +import backtype.storm.generated.WorkerMetricData; import backtype.storm.utils.BufferFileInputStream; import backtype.storm.utils.TimeCacheMap; import backtype.storm.utils.Utils; +import com.alibaba.jstorm.client.ConfigExtension; import com.alibaba.jstorm.cluster.Cluster; import com.alibaba.jstorm.cluster.Common; import com.alibaba.jstorm.cluster.DaemonCommon; @@ -56,9 +61,12 @@ import com.alibaba.jstorm.cluster.StormClusterState; import 
com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; +import com.alibaba.jstorm.utils.JStromServerConfigExtension; import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot; import com.alibaba.jstorm.task.Assignment; import com.alibaba.jstorm.task.TaskInfo; +import com.alibaba.jstorm.task.TaskMetricInfo; import com.alibaba.jstorm.utils.FailedAssignTopologyException; import com.alibaba.jstorm.utils.JStormUtils; import com.alibaba.jstorm.utils.JStromServerConfigExtension; @@ -545,10 +553,12 @@ public ClusterSummary getClusterInfo() throws TException { } assignments.put(topologyId, assignment); + String lastErrTimeStamp = stormClusterState.topo_lastErr_time(topologyId); + TopologySummary topology = NimbusUtils.mkTopologySummary( assignment, topologyId, base.getStormName(), base.getStatusString(), - TimeUtils.time_delta(base.getLanchTimeSecs())); + TimeUtils.time_delta(base.getLanchTimeSecs()), lastErrTimeStamp); topologySummaries.add(topology); @@ -634,44 +644,47 @@ public SupervisorWorkers getSupervisorWorkers(String host) Map taskToComponent = Cluster .topology_task_info(stormClusterState, topologyId); + Map taskToComponentType = Cluster + .topology_task_compType(stormClusterState, topologyId); Set workers = assignment.getWorkers(); for (ResourceWorkerSlot worker : workers) { + if (supervisorId.equals(worker.getNodeId()) == false) { + continue; + } + Integer slotNum = supervisorToUsedSlotNum + .get(supervisorId); + if (slotNum == null) { + slotNum = 0; + supervisorToUsedSlotNum.put(supervisorId, slotNum); + } + supervisorToUsedSlotNum.put(supervisorId, ++slotNum); + + Integer port = worker.getPort(); + WorkerSummary workerSummary = portWorkerSummarys + .get(port); + if (workerSummary == null) { + workerSummary = new WorkerSummary(); + workerSummary.set_port(port); + workerSummary.set_topology(topologyId); + workerSummary + .set_tasks(new ArrayList()); 
+ + portWorkerSummarys.put(port, workerSummary); + } + for (Integer taskId : worker.getTasks()) { - if (supervisorId.equals(worker.getNodeId()) == false) { - continue; - } - Integer slotNum = supervisorToUsedSlotNum - .get(supervisorId); - if (slotNum == null) { - slotNum = 0; - supervisorToUsedSlotNum.put(supervisorId, slotNum); - } - supervisorToUsedSlotNum.put(supervisorId, ++slotNum); - - Integer port = worker.getPort(); - WorkerSummary workerSummary = portWorkerSummarys - .get(port); - if (workerSummary == null) { - workerSummary = new WorkerSummary(); - workerSummary.set_port(port); - workerSummary.set_topology(topologyId); - workerSummary - .set_tasks(new ArrayList()); - - portWorkerSummarys.put(port, workerSummary); - } - String componentName = taskToComponent.get(taskId); + String componentType = taskToComponentType.get(taskId); int uptime = TimeUtils.time_delta(assignment .getTaskStartTimeSecs().get(taskId)); List tasks = workerSummary.get_tasks(); TaskSummary taskSummary = NimbusUtils .mkSimpleTaskSummary(worker, taskId, - componentName, host, uptime); + componentName, componentType, host, uptime); tasks.add(taskSummary); } @@ -947,18 +960,15 @@ public void setupZkTaskInfo(Map conf, String topologyId, // mkdir /ZK/taskbeats/topoologyId stormClusterState.setup_heartbeats(topologyId); - Map taskToComponetId = mkTaskComponentAssignments( + Map taskToComponetId = mkTaskComponentAssignments( conf, topologyId); if (taskToComponetId == null) { throw new InvalidTopologyException("Failed to generate TaskIDs map"); } - for (Entry entry : taskToComponetId.entrySet()) { + for (Entry entry : taskToComponetId.entrySet()) { // key is taskid, value is taskinfo - - TaskInfo taskinfo = new TaskInfo(entry.getValue()); - - stormClusterState.set_task(topologyId, entry.getKey(), taskinfo); + stormClusterState.set_task(topologyId, entry.getKey(), entry.getValue()); } } @@ -971,7 +981,7 @@ public void setupZkTaskInfo(Map conf, String topologyId, * @throws IOException * @throws 
InvalidTopologyException */ - public Map mkTaskComponentAssignments( + public Map mkTaskComponentAssignments( Map conf, String topologyid) throws IOException, InvalidTopologyException { @@ -984,7 +994,7 @@ public Map mkTaskComponentAssignments( topologyid); // use TreeMap to make task as sequence - Map rtn = new TreeMap(); + Map rtn = new TreeMap(); StormTopology topology = Common.system_topology(stormConf, stopology); @@ -998,7 +1008,7 @@ public Map mkTaskComponentAssignments( @SuppressWarnings({ "rawtypes", "unchecked" }) public Integer mkTaskMaker(Map stormConf, - Map cidSpec, Map rtn, Integer cnt) { + Map cidSpec, Map rtn, Integer cnt) { if (cidSpec == null) { LOG.warn("Component map is empty"); return cnt; @@ -1010,15 +1020,16 @@ public Integer mkTaskMaker(Map stormConf, Object obj = entry.getValue(); ComponentCommon common = null; + String componentType = "bolt"; if (obj instanceof Bolt) { common = ((Bolt) obj).get_common(); - + componentType = "bolt"; } else if (obj instanceof SpoutSpec) { common = ((SpoutSpec) obj).get_common(); - + componentType = "spout"; } else if (obj instanceof StateSpoutSpec) { common = ((StateSpoutSpec) obj).get_common(); - + componentType = "spout"; } if (common == null) { @@ -1040,7 +1051,8 @@ public Integer mkTaskMaker(Map stormConf, for (int i = 0; i < parallelism; i++) { cnt++; - rtn.put(cnt, (String) entry.getKey()); + TaskInfo taskInfo = new TaskInfo((String) entry.getKey(), componentType); + rtn.put(cnt, taskInfo); } } return cnt; @@ -1056,5 +1068,67 @@ public StormTopology getUserTopology(String id) throws NotAliveException, TException { return null; } - + + @Override + public void metricMonitor(String topologyName, MonitorOptions options) throws NotAliveException, + TException { + boolean isEnable = options.is_isEnable(); + StormClusterState clusterState = data.getStormClusterState(); + + try { + String topologyId = Cluster.get_topology_id(clusterState, topologyName); + if (null != topologyId) { + 
NimbusUtils.updateMetricMonitorStatus(clusterState, topologyId, isEnable); + } else { + throw new NotAliveException("Failed to update metricsMonitor status as " + topologyName + " is not alive"); + } + } catch(Exception e) { + String errMsg = "Failed to update metricsMonitor " + topologyName; + LOG.error(errMsg, e); + throw new TException(e); + } + + } + + @Override + public TopologyMetricInfo getTopologyMetric(String topologyId) throws NotAliveException, TException{ + LOG.debug("Nimbus service handler, getTopologyMetric, topology ID: " + topologyId); + + TopologyMetricInfo topologyMetricInfo = new TopologyMetricInfo(); + + StormClusterState clusterState = data.getStormClusterState(); + + topologyMetricInfo.set_topology_id(topologyId); + try { + //update task metrics list + Map taskInfoList = clusterState.task_info_list(topologyId); + List taskMetricList = clusterState.get_task_metric_list(topologyId); + for(TaskMetricInfo taskMetricInfo : taskMetricList) { + TaskMetricData taskMetricData = new TaskMetricData(); + NimbusUtils.updateTaskMetricData(taskMetricData, taskMetricInfo); + TaskInfo taskInfo = taskInfoList.get(Integer.parseInt(taskMetricInfo.getTaskId())); + String componentId = taskInfo.getComponentId(); + taskMetricData.set_component_id(componentId); + + topologyMetricInfo.add_to_task_metric_list(taskMetricData); + } + + //update worker metrics list + List workerMetricList = clusterState.get_worker_metric_list(topologyId); + for(WorkerMetricInfo workerMetricInfo : workerMetricList) { + WorkerMetricData workerMetricData = new WorkerMetricData(); + NimbusUtils.updateWorkerMetricData(workerMetricData, workerMetricInfo); + + topologyMetricInfo.add_to_worker_metric_list(workerMetricData); + } + + } catch(Exception e) { + String errMsg = "Failed to get topology Metric Data " + topologyId; + LOG.error(errMsg, e); + throw new TException(e); + } + + return topologyMetricInfo; + } + } diff --git 
a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java index f8f95a297..461913882 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java @@ -99,7 +99,7 @@ public void transitionLock(String topologyid, if (changingCallbacks == null || changingCallbacks.containsKey(changeStatus) == false || changingCallbacks.get(changeStatus) == null) { - String msg = "No transition for event: changing status" + String msg = "No transition for event: changing status:" + changeStatus.getStatus() + ", current status: " + currentStatus.getStatusType() + " topology-id: " + topologyid; diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java index 04f3e1910..f8fd36285 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java @@ -1,5 +1,6 @@ package com.alibaba.jstorm.daemon.nimbus; + /** * topology status: * @@ -22,12 +23,18 @@ * * */ + public enum StatusType { - kill("kill"), killed("killed"), monitor("monitor"), inactive("inactive"), inactivate( - "inactivate"), active("active"), activate("activate"), startup( - "startup"), remove("remove"), rebalance("rebalance"), rebalancing( - "rebalancing"), do_rebalance("do-rebalance"); + // status + active("active"), inactive("inactive"), rebalancing("rebalancing"), + killed("killed"), + + // actions + activate("activate"), inactivate("inactivate"), + monitor("monitor"), startup("startup"), + kill("kill"), remove("remove"), + rebalance("rebalance"), do_rebalance("do-rebalance"); private String status; diff --git 
a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java index b14e0a2d0..8c4ab3b84 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java @@ -25,10 +25,12 @@ import com.alibaba.jstorm.cluster.StormClusterState; import com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.cluster.StormStatus; +import com.alibaba.jstorm.cluster.StormMonitor; import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo; import com.alibaba.jstorm.schedule.IToplogyScheduler; import com.alibaba.jstorm.schedule.TopologyAssignContext; import com.alibaba.jstorm.schedule.default_assign.DefaultTopologyScheduler; +import com.alibaba.jstorm.utils.JStromServerConfigExtension; import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot; import com.alibaba.jstorm.task.Assignment; import com.alibaba.jstorm.task.AssignmentBak; @@ -136,7 +138,7 @@ protected boolean doTopologyAssignment(TopologyAssignEvent event) { Assignment assignment = null; try { assignment = mkAssignment(event); - + setTopologyStatus(event); } catch (Throwable e) { LOG.error("Failed to assign topology " + event.getTopologyId(), e); @@ -415,6 +417,14 @@ public Assignment mkAssignment(TopologyAssignEvent event) throws Exception { // update task heartbeat's start time NimbusUtils.updateTaskHbStartTime(nimbusData, assignment, topologyId); + // Update metrics information in ZK when rebalance or reassignment + // Only update metrics monitor status when creating topology + if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE + || context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_MONITOR) + NimbusUtils.updateMetricsInfo(nimbusData, topologyId, assignment); + else + metricsMonitor(event); + LOG.info("Successfully make assignment for topology id " + 
topologyId + ": " + assignment); @@ -764,6 +774,19 @@ public void backupAssignment(Assignment assignment, + assignment, e); } } + + public void metricsMonitor(TopologyAssignEvent event) { + String topologyId = event.getTopologyId(); + try { + Map conf = nimbusData.getConf(); + boolean isEnable = ConfigExtension.isEnablePerformanceMetrics(conf); + StormClusterState zkClusterState = nimbusData.getStormClusterState(); + StormMonitor monitor = new StormMonitor(isEnable); + zkClusterState.set_storm_monitor(topologyId, monitor); + } catch (Exception e) { + LOG.warn("Failed to update metrics monitor status of " + topologyId, e); + } + } /** * @param args diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java index f41c4718a..2a8b12ce8 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java @@ -69,18 +69,21 @@ public String startNewWorker(int cpuNum, String workerId) return sb.toString(); } - public void shutDownWorker(String workerId) throws IOException { + public void shutDownWorker(String workerId, boolean isKilled) { CgroupCommon workerGroup = new CgroupCommon(workerId, h, this.rootCgroup); try { - for (Integer pid : workerGroup.getTasks()) { - JStormUtils.kill(pid); + if (isKilled == false) { + for (Integer pid : workerGroup.getTasks()) { + JStormUtils.kill(pid); + } + JStormUtils.sleepMs(1500); } + center.delete(workerGroup); }catch(Exception e) { LOG.info("No task of " + workerId); } - JStormUtils.sleepMs(1500); - center.delete(workerGroup); + } public void close() throws IOException { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java index 4822bf2b0..161484d90 100644 --- 
a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java @@ -17,6 +17,7 @@ import com.alibaba.jstorm.utils.JStormUtils; import com.alibaba.jstorm.utils.NetWorkUtils; import com.alibaba.jstorm.utils.TimeUtils; +import com.alibaba.jstorm.utils.JStormServerUtils; /** * supervisor Heartbeat, just write SupervisorInfo to ZK @@ -52,14 +53,7 @@ class Heartbeat extends RunnableCallback { public Heartbeat(Map conf, StormClusterState stormClusterState, String supervisorId, AtomicBoolean active) { - String myHostName = ConfigExtension.getSupervisorHost(conf); - if (myHostName == null) { - myHostName = NetWorkUtils.hostname(); - } - - if (ConfigExtension.isSupervisorUseIp(conf)) { - myHostName = NetWorkUtils.ip(); - } + String myHostName = JStormServerUtils.getHostName(conf); this.stormClusterState = stormClusterState; this.supervisorId = supervisorId; diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java index 0b252b329..c750b5049 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java @@ -1,13 +1,16 @@ package com.alibaba.jstorm.daemon.supervisor; import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import org.apache.log4j.Logger; import com.alibaba.jstorm.callback.RunnableCallback; +import com.alibaba.jstorm.client.ConfigExtension; import com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.daemon.worker.ProcessSimulator; import com.alibaba.jstorm.utils.JStormUtils; @@ -16,45 +19,87 @@ public class ShutdownWork extends RunnableCallback { private static Logger LOG = 
Logger.getLogger(ShutdownWork.class); - + /** - * shutdown the spec worker of the supervisor. and clean the local dir of - * workers - * + * shutdown all workers * * @param conf * @param supervisorId - * @param workerId - * @param workerThreadPidsAtom - * @param workerThreadPidsAtomReadLock + * @param removed + * @param workerThreadPids + * @param cgroupManager */ - public void shutWorker(Map conf, String supervisorId, String workerId, - ConcurrentHashMap workerThreadPids) - throws IOException { - - LOG.info("Begin to shut down " + supervisorId + ":" + workerId); - - // STORM-LOCAL-DIR/workers/workerId/pids - String workerPidPath = StormConfig.worker_pids_root(conf, workerId); - - List pids = PathUtils.read_dir_contents(workerPidPath); - - String threadPid = workerThreadPids.get(workerId); - - if (threadPid != null) { - ProcessSimulator.killProcess(threadPid); + public void shutWorker(Map conf, String supervisorId, + Map removed, + ConcurrentHashMap workerThreadPids, + CgroupManager cgroupManager) { + + Map> workerId2Pids = new HashMap>(); + + boolean localMode = false; + + int maxWaitTime = 0; + + for (Entry entry : removed.entrySet()) { + String workerId = entry.getKey(); + String topologyId = entry.getValue(); + + LOG.info("Begin to shut down " + topologyId + ":" + workerId); + try { + + // STORM-LOCAL-DIR/workers/workerId/pids + String workerPidPath = StormConfig.worker_pids_root(conf, + workerId); + + List pids = PathUtils.read_dir_contents(workerPidPath); + workerId2Pids.put(workerId, pids); + + String threadPid = workerThreadPids.get(workerId); + + // local mode + if (threadPid != null) { + ProcessSimulator.killProcess(threadPid); + localMode = true; + continue; + } + + for (String pid : pids) { + JStormUtils.process_killed(Integer.parseInt(pid)); + } + + maxWaitTime = ConfigExtension + .getTaskCleanupTimeoutSec(conf); + // The best design is get getTaskCleanupTimeoutSec from + // topology configuration, but topology configuration is likely + // to be 
deleted before kill worker, so in order to simplify + // the logical, just get task.cleanup.timeout.sec from + // supervisor configuration + + } catch (Exception e) { + LOG.info("Failed to shutdown ", e); + } + } - - for (String pid : pids) { - - JStormUtils.kill(Integer.parseInt(pid)); - PathUtils.rmpath(StormConfig.worker_pid_path(conf, workerId, pid)); - + + JStormUtils.sleepMs(maxWaitTime * 1000); + + for (Entry> entry : workerId2Pids.entrySet()) { + String workerId = entry.getKey(); + List pids = entry.getValue(); + + if (localMode == false) { + for (String pid : pids) { + + JStormUtils.ensure_process_killed(Integer.parseInt(pid)); + if (cgroupManager != null) { + cgroupManager.shutDownWorker(workerId, true); + } + } + } + + tryCleanupWorkerDir(conf, workerId); + LOG.info("Successfully shut down " + workerId); } - - tryCleanupWorkerDir(conf, workerId); - - LOG.info("Successfully shut down " + supervisorId + ":" + workerId); } /** diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java index 2e7051794..a65b4e8f4 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java @@ -1,5 +1,8 @@ package com.alibaba.jstorm.daemon.supervisor; +import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang.builder.ToStringStyle; + import com.alibaba.jstorm.daemon.worker.State; import com.alibaba.jstorm.daemon.worker.WorkerHeartbeat; @@ -25,4 +28,9 @@ public WorkerHeartbeat getHeartbeat() { return this.hb; } + @Override + public String toString() { + return ToStringBuilder.reflectionToString(this, + ToStringStyle.SHORT_PREFIX_STYLE); + } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java 
b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java index 105c7280c..2eaafc92e 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java @@ -6,6 +6,9 @@ import java.util.UUID; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.FileUtils; @@ -23,6 +26,7 @@ import com.alibaba.jstorm.cluster.StormClusterState; import com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.daemon.worker.hearbeat.SyncContainerHb; +import com.alibaba.jstorm.daemon.worker.metrics.UploadSupervMetric; import com.alibaba.jstorm.event.EventManager; import com.alibaba.jstorm.event.EventManagerImp; import com.alibaba.jstorm.event.EventManagerPusher; @@ -155,7 +159,12 @@ public SupervisorManger mkSupervisor(Map conf, IContext sharedContext) Httpserver httpserver = new Httpserver(port); httpserver.start(); - + //Step 8 start uploading every 60 secs + if (ConfigExtension.isAlimonitorMetricsPost(conf)) { + UploadSupervMetric uploadMetric = new UploadSupervMetric(stormClusterState, supervisorId, active, 60); + AsyncLoopThread uploadMetricThread = new AsyncLoopThread(uploadMetric); + threads.add(uploadMetricThread); + } // SupervisorManger which can shutdown all supervisor and workers return new SupervisorManger(conf, supervisorId, active, threads, diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java index cd973a7f7..65619067c 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java +++ 
b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java @@ -1,6 +1,7 @@ package com.alibaba.jstorm.daemon.supervisor; import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Vector; @@ -120,18 +121,13 @@ public void ShutdownAllWorkers() { return; } List myWorkerIds = PathUtils.read_dir_contents(path); + HashMap workerId2topologyIds = new HashMap(); for (String workerId : myWorkerIds) { - try { - shutWorker(conf, supervisorId, workerId, workerThreadPidsAtom); - } catch (Exception e) { - String errMsg = "Failed to shutdown supervisorId:" - + supervisorId + ",workerId:" + workerId - + "workerThreadPidsAtom:" + workerThreadPidsAtom + "\n"; - LOG.error(errMsg, e); - - } + workerId2topologyIds.put(workerId, null); } + + shutWorker(conf, supervisorId, workerId2topologyIds, workerThreadPidsAtom, null); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java index dc10fbba6..fbb115e8c 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java @@ -565,9 +565,8 @@ public void launchWorker(Map conf, IContext sharedcontext, environment.put("REDIRECT", "false"); } - // String logFileName = assignment.getTopologyName() + "-worker-" + port - // + ".log"; - String logFileName = topologyId + "-worker-" + port + ".log"; + String logFileName = JStormUtils.genLogName(assignment.getTopologyName(), port); + //String logFileName = topologyId + "-worker-" + port + ".log"; environment.put("LD_LIBRARY_PATH", (String) totalConf.get(Config.JAVA_LIBRARY_PATH)); @@ -667,7 +666,7 @@ public void launchWorker(Map conf, IContext sharedcontext, private Set killUselessWorkers( Map localWorkerStats) { - Set removed = new HashSet(); + 
Map removed = new HashMap(); Set keepPorts = new HashSet(); for (Entry entry : localWorkerStats.entrySet()) { @@ -679,33 +678,22 @@ private Set killUselessWorkers( // hbstate.getHeartbeat() won't be null keepPorts.add(hbstate.getHeartbeat().getPort()); } else { - removed.add(workerid); + removed.put(workerid, hbstate.getHeartbeat().getTopologyId()); StringBuilder sb = new StringBuilder(); sb.append("Shutting down and clearing state for id "); sb.append(workerid); sb.append(";State:"); - sb.append(hbstate.getState()); - sb.append(";Heartbeat"); - sb.append(hbstate.getHeartbeat()); - LOG.info(sb); - - try { - shutWorker(conf, supervisorId, workerid, workerThreadPids); - if (cgroupManager != null) { - cgroupManager.shutDownWorker(workerid); - } - } catch (IOException e) { - String errMsg = "Failed to shutdown worker workId:" - + workerid + ",supervisorId: " + supervisorId - + ",workerThreadPids:" + workerThreadPids; - LOG.error(errMsg, e); - } + sb.append(hbstate); + LOG.info(sb); } } + + shutWorker(conf, supervisorId, removed, workerThreadPids, cgroupManager); + - for (String removedWorkerId : removed) { + for (String removedWorkerId : removed.keySet()) { localWorkerStats.remove(removedWorkerId); } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/BatchTupleRunable.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/BatchTupleRunable.java index f1acd44b8..da2db625e 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/BatchTupleRunable.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/BatchTupleRunable.java @@ -16,6 +16,9 @@ import backtype.storm.utils.DisruptorQueue; import com.alibaba.jstorm.callback.RunnableCallback; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.utils.DisruptorRunable; import com.alibaba.jstorm.utils.Pair; import com.lmax.disruptor.EventHandler; @@ 
-41,17 +44,22 @@ public class BatchTupleRunable extends DisruptorRunable { private DisruptorQueue sendingQueue; private final boolean isDirectSend = true; + + private static JStormTimer timer = Metrics.registerTimer(null, + MetricDef.BATCH_TUPLE_TIME, null, Metrics.MetricType.WORKER); + private DisruptorQueue queue; public BatchTupleRunable(WorkerData workerData) { - super(workerData.getTransferQueue(), - BatchTupleRunable.class.getSimpleName(), workerData.getActive()); - + super(workerData.getTransferQueue(), timer, BatchTupleRunable.class.getSimpleName(), workerData.getActive()); this.sendingQueue = workerData.getSendingQueue(); this.nodeportSocket = workerData.getNodeportSocket(); this.taskNodeport = workerData.getTaskNodeport(); this.dispatchMap = new HashMap>(); - workerData.getTransferQueue().consumerStarted(); + this.queue = workerData.getTransferQueue(); + Metrics.registerQueue(null, MetricDef.BATCH_TUPLE_QUEUE, this.queue, null, Metrics.MetricType.WORKER); + + this.queue.consumerStarted(); } public void handleOneEvent(TaskMessage felem) { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java index d4226fe7d..bca2dd6cf 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java @@ -12,6 +12,9 @@ import backtype.storm.utils.DisruptorQueue; import com.alibaba.jstorm.callback.RunnableCallback; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.utils.DisruptorRunable; import com.alibaba.jstorm.utils.Pair; import com.alibaba.jstorm.utils.RunCounter; @@ -29,11 +32,14 @@ public class DrainerRunable extends DisruptorRunable{ private final static Logger LOG = Logger.getLogger(DrainerRunable.class); - + private static 
JStormTimer timer = Metrics.registerTimer(null, + MetricDef.DRAINER_TIME, null, Metrics.MetricType.WORKER); public DrainerRunable(WorkerData workerData) { - super(workerData.getSendingQueue(), + super(workerData.getSendingQueue(), timer, DrainerRunable.class.getSimpleName(), workerData.getActive()); + + Metrics.registerQueue(null, MetricDef.DRAINER_QUEUE, queue, null, Metrics.MetricType.WORKER); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java index fbf9407c5..60925a25b 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java @@ -11,12 +11,15 @@ import com.alibaba.jstorm.callback.RunnableCallback; import com.alibaba.jstorm.cluster.StormBase; import com.alibaba.jstorm.cluster.StormClusterState; +import com.alibaba.jstorm.cluster.StormMonitor; import com.alibaba.jstorm.daemon.nimbus.StatusType; +import com.alibaba.jstorm.daemon.worker.metrics.MetricReporter; import com.alibaba.jstorm.task.TaskShutdownDameon; import com.alibaba.jstorm.utils.JStormUtils; /** - * Timely check whether topology is active or not from ZK + * Timely check whether topology is active or not and whether + * the metrics monitor is enable or disable from ZK * * @author yannian/Longda * @@ -66,6 +69,30 @@ public void run() { newTopologyStatus = base.getStatus().getStatusType(); } + // Start metrics report if metrics monitor is enabled. + // Stop metrics report if metrics monitor is disabled. 
+ try { + StormMonitor monitor = zkCluster.get_storm_monitor(topologyId); + if (null != monitor) { + boolean newMetricsMonitor = monitor.getMetrics(); + MetricReporter metricReporter = workerData.getMetricsReporter(); + boolean oldMetricsMonitor = metricReporter.isEnable(); + + if (oldMetricsMonitor != newMetricsMonitor) { + metricReporter.setEnable(newMetricsMonitor); + if (true == newMetricsMonitor) { + LOG.info("Start metrics reporter"); + } else { + LOG.info("Stop metrics reporter"); + } + } + } + } catch (Exception e) { + LOG.warn("Failed to get monitor status of topology " + topologyId); + LOG.debug(e); + } + + // Process the topology status change StatusType oldTopologyStatus = workerData.getTopologyStatus(); if (newTopologyStatus.equals(oldTopologyStatus)) { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java index d74d81b0d..c61e3dd2f 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java @@ -19,6 +19,9 @@ import backtype.storm.utils.Utils; import com.alibaba.jstorm.callback.RunnableCallback; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.task.TaskStatus; import com.alibaba.jstorm.utils.DisruptorRunable; import com.alibaba.jstorm.utils.JStormUtils; @@ -41,14 +44,18 @@ public class VirtualPortDispatch extends DisruptorRunable { private ConcurrentHashMap deserializeQueues; private IConnection recvConnection; + private static JStormTimer timer = Metrics.registerTimer(null, + MetricDef.DISPATCH_TIME, null, Metrics.MetricType.WORKER); + public VirtualPortDispatch(WorkerData workerData, IConnection recvConnection, DisruptorQueue recvQueue) { - super(recvQueue, 
VirtualPortDispatch.class.getSimpleName(), workerData - .getActive()); + super(recvQueue, timer, VirtualPortDispatch.class.getSimpleName(), + workerData.getActive()); this.recvConnection = recvConnection; this.deserializeQueues = workerData.getDeserializeQueues(); + Metrics.registerQueue(null, MetricDef.DISPATCH_QUEUE, queue, null, Metrics.MetricType.WORKER); } public void cleanup() { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java index 79d99cef0..9613e0c2c 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java @@ -1,6 +1,7 @@ package com.alibaba.jstorm.daemon.worker; import java.io.BufferedReader; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -35,6 +36,7 @@ import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable; import com.alibaba.jstorm.utils.JStormServerUtils; import com.alibaba.jstorm.utils.JStormUtils; +import com.alibaba.jstorm.utils.PathUtils; import com.lmax.disruptor.MultiThreadedClaimStrategy; import com.lmax.disruptor.WaitStrategy; @@ -195,7 +197,7 @@ public WorkerShutdown execute() throws Exception { threads.add(syncContainerHbThread); } - MetricReporter metricReporter = new MetricReporter(); + MetricReporter metricReporter = workerData.getMetricsReporter(); boolean isMetricsEnable = ConfigExtension .isEnablePerformanceMetrics(workerData.getStormConf()); metricReporter.setEnable(isMetricsEnable); @@ -252,25 +254,55 @@ public static WorkerShutdown mk_worker(Map conf, IContext context, Worker w = new Worker(conf, context, topology_id, supervisor_id, port, worker_id, jar_path); + + w.redirectOutput(); return w.execute(); } - public static void redirectOutput(String port) throws Exception { + public void redirectOutput(){ if (System.getenv("REDIRECT") == null || 
!System.getenv("REDIRECT").equals("true")) { return; } - - String OUT_TARGET_FILE = JStormUtils.getLogFileName(); - if (OUT_TARGET_FILE == null) { - OUT_TARGET_FILE = "/dev/null"; + + String DEFAULT_OUT_TARGET_FILE = JStormUtils.getLogFileName(); + if (DEFAULT_OUT_TARGET_FILE == null) { + DEFAULT_OUT_TARGET_FILE = "/dev/null"; } else { - OUT_TARGET_FILE += ".out"; + DEFAULT_OUT_TARGET_FILE += ".out"; + } + + String outputFile = ConfigExtension.getWorkerRedirectOutputFile(workerData.getStormConf()); + if (outputFile == null) { + outputFile = DEFAULT_OUT_TARGET_FILE; + }else { + try { + File file = new File(outputFile); + if (file.exists() == false) { + PathUtils.touch(outputFile); + }else { + if (file.isDirectory() == true) { + LOG.warn("Failed to write " + outputFile); + outputFile = DEFAULT_OUT_TARGET_FILE; + }else if (file.canWrite() == false) { + LOG.warn("Failed to write " + outputFile); + outputFile = DEFAULT_OUT_TARGET_FILE; + } + } + + }catch(Exception e) { + LOG.warn("Failed to touch " + outputFile, e); + outputFile = DEFAULT_OUT_TARGET_FILE; + } } - JStormUtils.redirectOutput(OUT_TARGET_FILE); + try { + JStormUtils.redirectOutput(outputFile); + }catch(Exception e) { + LOG.warn("Failed to redirect to " + outputFile, e); + } } @@ -432,8 +464,7 @@ public static void main(String[] args) { sb.append("jar_path:" + jar_path + "\n"); try { - redirectOutput(port_str); - + WorkerShutdown sd = mk_worker(conf, null, topology_id, supervisor_id, Integer.parseInt(port_str), worker_id, jar_path); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java index c3952689e..906d247f2 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java @@ -32,6 +32,7 @@ import com.alibaba.jstorm.cluster.StormClusterState; import 
com.alibaba.jstorm.cluster.StormConfig; import com.alibaba.jstorm.daemon.nimbus.StatusType; +import com.alibaba.jstorm.daemon.worker.metrics.MetricReporter; import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot; import com.alibaba.jstorm.task.Assignment; import com.alibaba.jstorm.task.TaskShutdownDameon; @@ -113,6 +114,7 @@ public class WorkerData { private DisruptorQueue sendingQueue; private List shutdownTasks; + private MetricReporter metricReporter; @SuppressWarnings({ "rawtypes", "unchecked" }) public WorkerData(Map conf, IContext context, String topology_id, @@ -223,6 +225,8 @@ public WorkerData(Map conf, IContext context, String topology_id, generateMaps(); contextMaker = new ContextMaker(this); + + metricReporter = new MetricReporter(this); LOG.info("Successfully create WorkerData"); @@ -400,4 +404,12 @@ public Set getLocalNodeTasks() { public void setLocalNodeTasks(Set localNodeTasks) { this.localNodeTasks = localNodeTasks; } + + public void setMetricsReporter(MetricReporter reporter) { + this.metricReporter = reporter; + } + + public MetricReporter getMetricsReporter() { + return this.metricReporter; + } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java index b8a79123b..282782811 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java @@ -4,6 +4,9 @@ import java.util.HashSet; import java.util.Set; +import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang.builder.ToStringStyle; + /** * Worker's Heartbeat data woker will update the object to * /LOCAL-DIR/workers/${woker-id}/heartbeats @@ -61,8 +64,9 @@ public void setPort(Integer port) { this.port = port; } + @Override public String toString() { - return "topologyId:" + topologyId + ", timeSecs:" + timeSecs - 
+ ", port:" + port + ", taskIds:" + taskIds.toString(); + return ToStringBuilder.reflectionToString(this, + ToStringStyle.SHORT_PREFIX_STYLE); } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerMetricInfo.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerMetricInfo.java new file mode 100644 index 000000000..60b3d5bcc --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerMetricInfo.java @@ -0,0 +1,149 @@ +package com.alibaba.jstorm.daemon.worker; + +import java.util.Map; +import java.util.HashMap; +import java.io.Serializable; + +import org.apache.log4j.Logger; + +import com.codahale.metrics.Metric; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Timer; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Snapshot; +import com.esotericsoftware.minlog.Log; +import com.alibaba.jstorm.metric.MetricInfo; +import com.alibaba.jstorm.metric.Metrics.QueueGauge; +import com.alibaba.jstorm.utils.JStormUtils; + + +/** + * /storm-zk-root/Monitor/{topologyid}/{workerid} data + */ +public class WorkerMetricInfo implements Serializable { + private static Logger LOG = Logger.getLogger(WorkerMetricInfo.class); + + private static final long serialVersionUID = 7745327094257659471L; + + private String hostName; + private Integer port; + + private long usedMem; + private double usedCpu; + + private Map gaugeData; + private Map counterData; + private Map meterData; + private Map timerData; + private Map histogramData; + + private static final String METRIC_SEPERATOR = "-"; + + public WorkerMetricInfo(String hostName, Integer port) { + this.hostName = hostName; + this.port = port; + + this.gaugeData = new HashMap(); + this.counterData = new HashMap(); + this.meterData = new HashMap(); + this.timerData = new HashMap(); + this.histogramData = new HashMap(); + } + + public void setHostName(String 
hostName) { + this.hostName = hostName; + } + + public String getHostName() { + return this.hostName; + } + + public void setPort(Integer port) { + this.port = port; + } + + public Integer getPort() { + return this.port; + } + + public void setUsedMem(long usedMem) { + this.usedMem = usedMem; + } + + public long getUsedMem() { + return this.usedMem; + } + + public void setUsedCpu(double usedCpu) { + this.usedCpu = usedCpu; + } + + public double getUsedCpu() { + return this.usedCpu; + } + + public Map getGaugeData() { + return gaugeData; + } + + public Map getCounterData() { + return counterData; + } + + public Map getMeterData() { + return meterData; + } + + public Map getTimerData() { + return timerData; + } + + public Map getHistogramData() { + return histogramData; + } + + // There are some metrics that have same metric name, but just different prefix. + // e.g for netty_send_time, full metric name is dest_ip:port-name + // The metrics with same metric name will be sum here. + public void updateMetricData(MetricInfo metricInfo) { + String name = metricInfo.getName(); + Metric metric = metricInfo.getMetric(); + LOG.debug("Metric name=" + name); + if (metric instanceof QueueGauge) { + //covert to % + float queueRatio = (((QueueGauge) metric).getValue())*100; + sum(gaugeData, name, (double)queueRatio); + } else if (metric instanceof Gauge) { + Double value = JStormUtils.convertToDouble(((Gauge) metric).getValue()); + if (value == null) { + LOG.warn("gauge value is null or unknow type."); + } else { + sum(gaugeData, name, value); + } + } else if (metric instanceof Timer) { + Snapshot snapshot = ((Timer) metric).getSnapshot(); + //covert from ns to ms + sum(timerData, name, (snapshot.getMean())/1000000); + } else if (metric instanceof Counter) { + Double value = ((Long) ((Counter) metric).getCount()).doubleValue(); + sum(counterData, name, value); + } else if (metric instanceof Meter) { + sum(meterData, name, ((Meter) metric).getMeanRate()); + } else if (metric 
instanceof Histogram) { + Snapshot snapshot = ((Histogram)metric).getSnapshot(); + sum(histogramData, name, snapshot.getMean()); + } else { + LOG.warn("Unknown metric type, name:" + name); + } + } + + private void sum(Map dataMap, String name, Double value) { + Double currentValue = dataMap.get(name); + if (currentValue != null) + value = value + currentValue; + value = JStormUtils.formatDoubleDecPoint4(value); + dataMap.put(name, value); + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java index 556e2dac0..84179a20d 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java @@ -6,7 +6,6 @@ import org.apache.log4j.Logger; -import backtype.storm.daemon.Shutdownable; import backtype.storm.messaging.IConnection; import backtype.storm.messaging.IContext; import backtype.storm.scheduler.WorkerSlot; @@ -28,6 +27,8 @@ public class WorkerShutdown implements ShutdownableDameon { private static Logger LOG = Logger.getLogger(WorkerShutdown.class); + public static final String HOOK_SIGNAL = "USR2"; + private List shutdowntasks; private AtomicBoolean active; private ConcurrentHashMap nodeportSocket; @@ -40,8 +41,7 @@ public class WorkerShutdown implements ShutdownableDameon { // active nodeportSocket context zkCluster zkClusterstate public WorkerShutdown(WorkerData workerData, List _shutdowntasks, - List _threads, - MetricReporter metricReporter) { + List _threads, MetricReporter metricReporter) { this.shutdowntasks = _shutdowntasks; this.threads = _threads; @@ -54,22 +54,19 @@ public WorkerShutdown(WorkerData workerData, this.metricReporter = metricReporter; Runtime.getRuntime().addShutdownHook(new Thread(this)); + + // PreCleanupTasks preCleanupTasks = new PreCleanupTasks(); + // // install signals + 
// Signal sig = new Signal(HOOK_SIGNAL); + // Signal.handle(sig, preCleanupTasks); } @Override public void shutdown() { active.set(false); - - metricReporter.shutdown(); - - // send data to close connection - for (WorkerSlot k : nodeportSocket.keySet()) { - IConnection value = nodeportSocket.get(k); - value.close(); - } - context.term(); + metricReporter.shutdown(); // shutdown tasks for (ShutdownableDameon task : shutdowntasks) { @@ -79,16 +76,26 @@ public void shutdown() { // shutdown worker's demon thread // refreshconn, refreshzk, hb, drainer for (AsyncLoopThread t : threads) { + LOG.info("Begin to shutdown " + t.getThread().getName()); t.cleanup(); JStormUtils.sleepMs(100); t.interrupt(); -// try { -// t.join(); -// } catch (InterruptedException e) { -// LOG.error("join thread", e); -// } + // try { + // t.join(); + // } catch (InterruptedException e) { + // LOG.error("join thread", e); + // } + LOG.info("Successfully " + t.getThread().getName()); + } + + // send data to close connection + for (WorkerSlot k : nodeportSocket.keySet()) { + IConnection value = nodeportSocket.get(k); + value.close(); } + context.term(); + // close ZK client try { zkCluster.disconnect(); @@ -134,4 +141,19 @@ public void run() { shutdown(); } + // class PreCleanupTasks implements SignalHandler { + // + // @Override + // public void handle(Signal arg0) { + // LOG.info("Receive " + arg0.getName() + ", begin to do pre_cleanup job"); + // + // for (ShutdownableDameon task : shutdowntasks) { + // task.shutdown(); + // } + // + // LOG.info("Successfully do pre_cleanup job"); + // } + // + // } + } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/AlimonitorClient.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/AlimonitorClient.java new file mode 100644 index 000000000..591c3d222 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/AlimonitorClient.java @@ -0,0 +1,277 @@ +package 
com.alibaba.jstorm.daemon.worker.metrics; + +import java.io.IOException; +import java.net.URLEncoder; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.util.EntityUtils; +import org.apache.http.NameValuePair; +import org.apache.http.message.BasicNameValuePair; +import org.apache.log4j.Logger; + +public class AlimonitorClient { + + public static Logger LOG = Logger.getLogger(AlimonitorClient.class); + + public static final String ERROR_MSG = ""; + + private final String COLLECTION_FLAG = "collection_flag"; + + private final String ERROR_INFO = "error_info"; + + private final String MSG = "MSG"; + + private String port; + + private String requestIP; + + private String monitorName; + + private boolean post = true; + + public AlimonitorClient(String requestIP, String port) { + this.requestIP = requestIP; + this.port = port; + } + + // Send to localhost:15776 by default + public AlimonitorClient() { + this.requestIP = "127.0.0.1"; + this.port = "15776"; + } + + public void setMonitorName(String monitorName) { + this.monitorName = monitorName; + } + + public String buildURL() { + return "http://" + requestIP + ":" + port + "/passive"; + } + + public String buildRqstAddr() { + return "http://" + requestIP + ":" + port + "/passive?name=" + monitorName + "&msg="; + } + + public boolean sendRequest(K key,V value) throws Exception { + return 
sendRequest(0," ",key,value); + } + public boolean sendRequest(int collection_flag, String error_message, + K key,V value) throws Exception { + Map msg=new HashMap(); + msg.put(key, value); + return sendRequest(collection_flag,error_message,msg); + } + /** + * + * @param collection_flag + * 0ʶɼ0ʾɼ + * @param error_message + * ɼʱϢ (default: "") + * @param msg + * {} + * @return Ƿɹ + * @throws Exception + */ + public boolean sendRequest(int collection_flag, String error_message, + Map msg) throws Exception { + boolean ret = false; + + if(msg.size() == 0) + return false; + StringBuilder sb = new StringBuilder(); + //{K,V} + sb.append("{"); + sb.append("\'").append(MSG).append("\'"); + sb.append(':'); + sb.append("{"); + for (Entry entry: msg.entrySet()) { + sb.append("\'").append(entry.getKey()).append("\'"); + sb.append(":"); + if(entry.getValue() instanceof String) + sb.append("\"").append(entry.getValue()).append("\""); + else + sb.append(entry.getValue()); + sb.append(","); + } + //remove the last "," + if(msg.size()>0) + sb.deleteCharAt(sb.length()-1); + sb.append("}"); + sb.append(','); + sb.append("\'").append(COLLECTION_FLAG).append("\'"); + sb.append(':'); + sb.append(collection_flag); + sb.append(','); + sb.append("\'").append(ERROR_INFO).append("\'"); + sb.append(':').append("\'").append(error_message).append("\'"); + sb.append("}"); + String kvMsg = sb.toString(); + LOG.debug(kvMsg); + + if (post == true) { + String url = buildURL(); + ret = httpPost(url, kvMsg); + } else { + String request = buildRqstAddr(); + StringBuilder postAddr= new StringBuilder(); + postAddr.append(request); + postAddr.append(URLEncoder.encode(kvMsg)); + + ret = httpGet(postAddr); + } + + return ret; + } + /** + * + * @param collection_flag + * 0ʶɼ0ʾɼ + * @param error_message + * ɼʱϢ (default: "") + * @param msg + * [] + * @return Ƿɹ + * @throws Exception + */ + public boolean sendRequest(int collection_flag, String error_message, + List> msgList) throws Exception { + boolean ret = 
false; + + //[{K:V},{K:V}] + StringBuilder sb = new StringBuilder(); + sb.append("{"); + sb.append("\'").append(MSG).append("\'"); + sb.append(':'); + sb.append("["); + for(Map msg:msgList){ + + sb.append("{"); + + for (Entry entry: msg.entrySet()) { + sb.append("\'").append(entry.getKey()).append("\'"); + sb.append(":"); + if(entry.getValue() instanceof String) + sb.append("\"").append(entry.getValue()).append("\""); + else + sb.append(entry.getValue()); + sb.append(","); + } + //remove the last "," + if(msg.size()>0) + sb.deleteCharAt(sb.length()-1); + sb.append("}"); + sb.append(","); + } + //remove the last "," + if(msgList.size()>0) + sb.deleteCharAt(sb.length()-1); + sb.append("]"); + sb.append(','); + sb.append("\'").append(COLLECTION_FLAG).append("\'"); + sb.append(':'); + sb.append(collection_flag); + sb.append(','); + sb.append("\'").append(ERROR_INFO).append("\'"); + sb.append(':').append("\'").append(error_message).append("\'"); + sb.append("}"); + String msg = sb.toString(); + LOG.debug(msg); + + if (post == true) { + String url = buildURL(); + ret = httpPost(url, msg); + } else { + String request = buildRqstAddr(); + StringBuilder postAddr= new StringBuilder(); + postAddr.append(request); + postAddr.append(URLEncoder.encode(msg)); + + ret = httpGet(postAddr); + } + + return ret; + } + + private boolean httpGet(StringBuilder postAddr) { + boolean ret = false; + + CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + CloseableHttpResponse response = null; + + try { + HttpGet request = new HttpGet(postAddr.toString()); + response = httpClient.execute(request); + HttpEntity entity = response.getEntity(); + if (entity != null) { + LOG.debug(EntityUtils.toString(entity)); + } + EntityUtils.consume(entity); + ret = true; + } catch (Exception e) { + LOG.error("Exception when sending http request to alimonitor", e); + } finally { + try { + if (response != null) + response.close(); + httpClient.close(); + } catch (Exception e) { + 
LOG.error("Exception when closing httpclient", e); + } + } + + return ret; + } + + private boolean httpPost(String url, String msg) { + boolean ret = false; + + CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + CloseableHttpResponse response = null; + + try { + HttpPost request = new HttpPost(url); + List nvps = new ArrayList (); + nvps.add(new BasicNameValuePair("name", monitorName)); + nvps.add(new BasicNameValuePair("msg", msg)); + request.setEntity(new UrlEncodedFormEntity(nvps)); + response = httpClient.execute(request); + HttpEntity entity = response.getEntity(); + if (entity != null) { + LOG.debug(EntityUtils.toString(entity)); + } + EntityUtils.consume(entity); + ret = true; + } catch (Exception e) { + LOG.error("Exception when sending http request to alimonitor", e); + } finally { + try { + if (response != null) + response.close(); + httpClient.close(); + } catch (Exception e) { + LOG.error("Exception when closing httpclient", e); + } + } + + return ret; + } + + public void close() { + } +} + diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricKVMsg.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricKVMsg.java new file mode 100644 index 000000000..283492522 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricKVMsg.java @@ -0,0 +1,201 @@ +package com.alibaba.jstorm.daemon.worker.metrics; + +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import org.apache.log4j.Logger; + +import com.alibaba.jstorm.metric.metrdata.*; +import com.esotericsoftware.minlog.Log; +//count metric data,and transform metric_data to Alimonitor message +public class MetricKVMsg { + private static final Logger LOG = Logger.getLogger(MetricKVMsg.class); + + public enum MetricType{ + count, min, max, mean, median, stddev, p75, p95, p98, p99, p999, mean_rate, m1, m5, m15 + } + + private Map 
gaugeMapKV = new HashMap(); //count value of Gauge + private Map counterMapKV = new HashMap(); //count value of Counter + private Map> histogramMapKV = new HashMap>(); // count data of Histogram + private Map> timerMapKV = new HashMap>(); // count data of Timer + private Map> meterMapKV = new HashMap>(); // count data of Meter + private Map countMap = new HashMap(); + + public Map convertToKVMap() { + Map ret = new HashMap(); + + addGaugeToKVMap(ret); + addCounterToKVMap(ret); + addHistogramToKVMap(ret); + addTimerToKVMap(ret); + addMeterToKVMap(ret); + + return ret; + } + + public void addGaugeToKVMap(Map kVMap) { + for (Entry entry : gaugeMapKV.entrySet()) { + kVMap.put(entry.getKey(), entry.getValue()); + } + } + + public void addCounterToKVMap(Map kVMap) { + for (Entry entry : counterMapKV.entrySet()) { + kVMap.put(entry.getKey(), entry.getValue()); + } + } + + public void addHistogramToKVMap(Map kVMap) { + for (Entry> entry : histogramMapKV.entrySet()) { + String name = entry.getKey(); + Map typeMap = entry.getValue(); + + for (Entry typeEntry : typeMap.entrySet()) { + kVMap.put(name+ "_" + typeEntry.getKey().toString(), typeEntry.getValue()); + } + } + } + + public void addTimerToKVMap(Map kVMap) { + for (Entry> entry : timerMapKV.entrySet()) { + String name = entry.getKey(); + Map typeMap = entry.getValue(); + + for (Entry typeEntry : typeMap.entrySet()) { + kVMap.put(name+ "_" + typeEntry.getKey().toString(), typeEntry.getValue()); + } + } + } + + public void addMeterToKVMap(Map kVMap) { + for (Entry> entry : meterMapKV.entrySet()) { + String name = entry.getKey(); + Map typeMap = entry.getValue(); + + for (Entry typeEntry : typeMap.entrySet()) { + kVMap.put(name+ "_" + typeEntry.getKey().toString(), typeEntry.getValue()); + } + } + } + + public void countGangeMetric(Map gaugeMap ){ + //count value of Gauge + for(Entry entry : gaugeMap.entrySet()){ + String taskMetricName = entry.getKey(); + String userDefName = 
taskMetricName.substring(taskMetricName.indexOf(":") + 1); + Double value = entry.getValue().getValue(); + if(gaugeMapKV.containsKey(userDefName)){ + value = value + gaugeMapKV.get(userDefName); + } + gaugeMapKV.put(userDefName, value); + } + } + + public void countCounterMetric(Map counterMap){ + for (Entry entry : counterMap.entrySet()) { + String taskMetricName = entry.getKey(); + String userDefName = taskMetricName.substring(taskMetricName.indexOf(":") + 1); + Long value = entry.getValue().getValue(); + if(counterMapKV.containsKey(userDefName)){ + value = value + counterMapKV.get(userDefName); + } + counterMapKV.put(userDefName, value); + } + } + + public void countHistogramMetric(Map histogramMap){ + //only count: minValue, maxValue ,aveValue + for (Entry entry : histogramMap.entrySet()) { + String taskMetricName = entry.getKey(); + String userDefName = taskMetricName.substring(taskMetricName.indexOf(":") + 1); + + long maxValue = entry.getValue().getMax(); + long minValue = entry.getValue().getMin(); + long meanValue = (long)entry.getValue().getMean(); + + Map temMap = histogramMapKV.get(userDefName); + if(temMap == null){ + temMap = new HashMap(); + histogramMapKV.put(userDefName, temMap); + } + + maxValue += (temMap.get(MetricType.max) == null ? 0l : temMap.get(MetricType.max)); + minValue += (temMap.get(MetricType.min) == null ? 0l : temMap.get(MetricType.max)); + meanValue += (temMap.get(MetricType.mean) == null ? 
0l : temMap.get(MetricType.mean)); + + temMap.put(MetricType.max, maxValue); + temMap.put(MetricType.min, minValue); + temMap.put(MetricType.mean, meanValue); + } + } + + public void countTimerMetric(Map timerMap){ + //only count: mean time + for(Entry entry:timerMap.entrySet()){ + String taskMetricName = entry.getKey(); + String userDefName = taskMetricName.substring(taskMetricName.indexOf(":") + 1); + double meanValue = (double)entry.getValue().getMean(); + + Map temMap = timerMapKV.get(userDefName); + if (temMap == null) { + temMap = new HashMap(); + timerMapKV.put(userDefName, temMap); + } + + meanValue += (temMap.get(MetricType.mean) == null ? 0.0 : temMap.get(MetricType.mean)); + temMap.put(MetricType.mean, meanValue); + + Integer count = (countMap.get(userDefName) == null ? 0 : countMap.get(userDefName)); + count++; + countMap.put(userDefName, count); + } + } + + public void calcAvgTimer() { + for (Entry> entry: timerMapKV.entrySet()) { + String userDefName = entry.getKey(); + Map valueMap = entry.getValue(); + Integer count = countMap.get(userDefName); + if (count == null) { + Log.warn("Name=" + userDefName + " is not found in countMap for timer."); + continue; + } + double meanValue = (valueMap.get(MetricType.mean))/count; + valueMap.put(MetricType.mean, convertDurationFromNsToMs(meanValue)); + } + } + + public void countMeterMetric(Map meterMap){ + //only count: meanRate + for(Entry entry:meterMap.entrySet()){ + String taskMetricName = entry.getKey(); + String userDefName = taskMetricName.substring(taskMetricName.indexOf(":")+1); + + Double meanRate = entry.getValue().getMeanRate(); + Map temMap = meterMapKV.get(userDefName); + if (temMap == null) { + temMap = new HashMap(); + meterMapKV.put(userDefName, temMap); + } + + meanRate += (temMap.get(MetricType.mean) == null ? 
0.0 : temMap.get(MetricType.mean)); + temMap.put(MetricType.mean, meanRate); + meterMapKV.put(userDefName, temMap); + } + } + + public Map> getTimerKVMap() { + return this.timerMapKV; + } + + public void emptyCountMap() { + countMap.clear(); + } + + private double convertDurationFromNsToMs(double duration) { + return duration / TimeUnit.MILLISECONDS.toNanos(1); + } +} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricReporter.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricReporter.java index a94088ec5..d6e37a869 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricReporter.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/MetricReporter.java @@ -11,40 +11,52 @@ import com.codahale.metrics.Slf4jReporter; import com.codahale.metrics.Snapshot; import com.codahale.metrics.Timer; +import com.alibaba.jstorm.daemon.worker.WorkerData; +import com.alibaba.jstorm.metric.JStormHistogram; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; -public class MetricReporter { +public class MetricReporter { - final Slf4jReporter reporter1Minute; + final StormMetricReporter reporter1Minute; final Slf4jReporter reporter10Minute; - + + WorkerData workerData; + private boolean isEnable; - public MetricReporter() { - reporter1Minute = Slf4jReporter.forRegistry(Metrics.getMetrics()) + public MetricReporter(WorkerData workerData) { + this.workerData = workerData; + + reporter1Minute = StormMetricReporter.forRegistry(Metrics.getMetrics()) .outputTo(LoggerFactory.getLogger(MetricReporter.class)) .convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS).build(); - - + .convertDurationsTo(TimeUnit.MILLISECONDS) + .setWorkerData(workerData).build(); + reporter10Minute = Slf4jReporter.forRegistry(Metrics.getJstack()) .outputTo(LoggerFactory.getLogger(MetricReporter.class)) 
.convertRatesTo(TimeUnit.SECONDS) .convertDurationsTo(TimeUnit.MILLISECONDS).build(); - + } - + public void start() { reporter1Minute.start(1, TimeUnit.MINUTES); reporter10Minute.start(10, TimeUnit.MINUTES); - + + } + + public void stop() { + reporter1Minute.stop(); + reporter10Minute.stop(); + } - + public void shutdown() { reporter10Minute.close(); reporter1Minute.close(); } - - public boolean isEnable() { return isEnable; @@ -56,8 +68,6 @@ public void setEnable(boolean isEnable) { JStormHistogram.setEnable(isEnable); } - - private static class LatencyRatio implements Gauge { Timer timer; @@ -114,7 +124,8 @@ public void run() { }); Metrics.getMetrics().registerAll(Metrics.getJstack()); - final ConsoleReporter reporter = ConsoleReporter.forRegistry(Metrics.getMetrics()) + final ConsoleReporter reporter = ConsoleReporter + .forRegistry(Metrics.getMetrics()) .convertRatesTo(TimeUnit.SECONDS) .convertDurationsTo(TimeUnit.MILLISECONDS).build(); reporter.start(1, TimeUnit.MINUTES); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/Metrics.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/Metrics.java deleted file mode 100644 index 98066e92b..000000000 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/Metrics.java +++ /dev/null @@ -1,128 +0,0 @@ -package com.alibaba.jstorm.daemon.worker.metrics; - -import java.util.Map; - -import org.apache.log4j.Logger; - -import backtype.storm.utils.DisruptorQueue; - -import com.alibaba.jstorm.daemon.worker.Worker; -import com.codahale.metrics.Counter; -import com.codahale.metrics.Gauge; -import com.codahale.metrics.Histogram; -import com.codahale.metrics.Meter; -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.MetricSet; -import com.codahale.metrics.Timer; -import com.codahale.metrics.jvm.GarbageCollectorMetricSet; -import com.codahale.metrics.jvm.MemoryUsageGaugeSet; -import 
com.codahale.metrics.jvm.ThreadStatesGaugeSet; - -public class Metrics { - - private static final Logger LOG = Logger.getLogger(Metrics.class); - private static final Logger DEFAULT_LOG = Logger.getLogger(Worker.class); - - private static final MetricRegistry metrics = new MetricRegistry(); - - private static final MetricRegistry jstack = new MetricRegistry(); - - static { - try { - registerAll("jvm-thread-state", new ThreadStatesGaugeSet()); - registerAll("jvm-mem", new MemoryUsageGaugeSet()); - registerAll("jvm-gc", new GarbageCollectorMetricSet()); - - jstack.register("jstack", new MetricJstack()); - } catch (Exception e) { - LOG.warn("Failed to regist jvm metrics"); - } - } - - - public static MetricRegistry getMetrics() { - return metrics; - } - - public static MetricRegistry getJstack() { - return jstack; - } - - public static void unregister(String name) { - metrics.remove(name); - } - - public static T register(String name, T metric) - throws IllegalArgumentException { - LOG.info("Register Metric " + name); - return metrics.register(name, metric); - } - - // copy from MetricRegistry - public static void registerAll(String prefix, MetricSet metrics) - throws IllegalArgumentException { - for (Map.Entry entry : metrics.getMetrics().entrySet()) { - if (entry.getValue() instanceof MetricSet) { - registerAll(MetricRegistry.name(prefix, entry.getKey()), - (MetricSet) entry.getValue()); - } else { - register(MetricRegistry.name(prefix, entry.getKey()), - entry.getValue()); - } - } - } - - public static class QueueGauge implements Gauge { - DisruptorQueue queue; - String name; - - public QueueGauge(String name, DisruptorQueue queue) { - this.queue = queue; - this.name = name; - } - - @Override - public Float getValue() { - Float ret = queue.pctFull(); - if (ret > 0.8) { - DEFAULT_LOG.info("Queue " + name + "is full " + ret); - } - - return ret; - } - - } - - public static Gauge registerQueue(String name, DisruptorQueue queue) { - LOG.info("Register Metric " + name); 
- return metrics.register(name, new QueueGauge(name, queue)); - } - - public static Counter registerCounter(String name) { - LOG.info("Register Metric " + name); - return metrics.counter(name); - } - - public static Meter registerMeter(String name) { - LOG.info("Register Metric " + name); - Meter ret = metrics.meter(name); - - return ret; - } - - public static JStormHistogram registerHistograms(String name) { - LOG.info("Register Metric " + name); - Histogram instance = metrics.histogram(name); - - return new JStormHistogram(name, instance); - } - - public static JStormTimer registerTimer(String name) { - LOG.info("Register Metric " + name); - - Timer instance = metrics.timer(name); - return new JStormTimer(name, instance); - } - -} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/StormMetricReporter.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/StormMetricReporter.java new file mode 100644 index 000000000..472f4c184 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/StormMetricReporter.java @@ -0,0 +1,424 @@ +package com.alibaba.jstorm.daemon.worker.metrics; + +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.SortedMap; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; + +import com.codahale.metrics.Counter; +import com.codahale.metrics.CsvReporter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricFilter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Snapshot; +import com.codahale.metrics.Timer; +import com.codahale.metrics.ScheduledReporter; +import com.esotericsoftware.minlog.Log; +import 
com.alibaba.jstorm.daemon.worker.WorkerData; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; +import com.alibaba.jstorm.client.metric.MetricCallback; +import com.alibaba.jstorm.cluster.StormClusterState; +import com.alibaba.jstorm.container.SystemOperation; +import com.alibaba.jstorm.metric.MetricInfo; +import com.alibaba.jstorm.metric.Metrics; +import com.alibaba.jstorm.metric.UserDefMetric; +import com.alibaba.jstorm.metric.UserDefMetricData; +import com.alibaba.jstorm.task.TaskMetricInfo; +import com.alibaba.jstorm.utils.JStormServerUtils; +import com.alibaba.jstorm.utils.JStormUtils; + +public class StormMetricReporter extends ScheduledReporter { + /** + * Returns a new {@link Builder} for {@link StormMetricReporter}. + * + * @param registry the registry to report + * @return a {@link Builder} instance for a {@link StormMetricReporter} + */ + public static Builder forRegistry(MetricRegistry registry) { + return new Builder(registry); + } + + /** + * A builder for {@link CsvReporter} instances. Defaults to logging to {@code metrics}, not + * using a marker, converting rates to events/second, converting durations to milliseconds, and + * not filtering metrics. + */ + public static class Builder { + private final MetricRegistry registry; + private Logger logger; + private Marker marker; + private TimeUnit rateUnit; + private TimeUnit durationUnit; + private MetricFilter filter; + private WorkerData workerData; + + private Builder(MetricRegistry registry) { + this.registry = registry; + this.logger = LoggerFactory.getLogger("metrics"); + this.marker = null; + this.rateUnit = TimeUnit.SECONDS; + this.durationUnit = TimeUnit.MILLISECONDS; + this.filter = MetricFilter.ALL; + this.workerData = null; + } + + /** + * Log metrics to the given logger. 
+ * + * @param logger an SLF4J {@link Logger} + * @return {@code this} + */ + public Builder outputTo(Logger logger) { + this.logger = logger; + return this; + } + + /** + * Mark all logged metrics with the given marker. + * + * @param marker an SLF4J {@link Marker} + * @return {@code this} + */ + public Builder markWith(Marker marker) { + this.marker = marker; + return this; + } + + /** + * Convert rates to the given time unit. + * + * @param rateUnit a unit of time + * @return {@code this} + */ + public Builder convertRatesTo(TimeUnit rateUnit) { + this.rateUnit = rateUnit; + return this; + } + + /** + * Convert durations to the given time unit. + * + * @param durationUnit a unit of time + * @return {@code this} + */ + public Builder convertDurationsTo(TimeUnit durationUnit) { + this.durationUnit = durationUnit; + return this; + } + + /** + * Only report metrics which match the given filter. + * + * @param filter a {@link MetricFilter} + * @return {@code this} + */ + public Builder filter(MetricFilter filter) { + this.filter = filter; + return this; + } + + public Builder setWorkerData(WorkerData Data) { + this.workerData = Data; + return this; + } + /** + * Builds a {@link StormMetricReporter} with the given properties. 
+ * + * @return a {@link StormMetricReporter} + */ + public StormMetricReporter build() { + return new StormMetricReporter(registry, logger, marker, rateUnit, + durationUnit, filter, workerData); + } + } + + private final Logger logger; + private final Marker marker; + private WorkerData workerData; + + private StormMetricReporter(MetricRegistry registry, + Logger logger, + Marker marker, + TimeUnit rateUnit, + TimeUnit durationUnit, + MetricFilter filter, + WorkerData workerData) { + super(registry, "logger-reporter", filter, rateUnit, durationUnit); + this.logger = logger; + this.marker = marker; + this.workerData = workerData; + } + + @Override + public void report(SortedMap gauges, + SortedMap counters, + SortedMap histograms, + SortedMap meters, + SortedMap timers) { + boolean metricPerf = workerData.getMetricsReporter().isEnable(); + + processMetricData(gauges, counters, histograms, meters, timers, metricPerf); + + // update internal metrics data of jstorm task and worker, + // and user define metrics data to ZK + updateMetricsDataToZK(metricPerf); + } + + private void doCallback(Map metrics) { + Map callbacks = Metrics.getUserDefMetric().getCallbacks(); + String name = ""; + try{ + for (Entry entry : metrics.entrySet()) { + name = entry.getKey(); + MetricCallback callback = callbacks.get(entry.getValue()); + if (callback != null) + callback.callback(entry.getValue()); + } + } catch (Exception e) { + logger.error("Error when excuting the callbacks defined by user. 
CallBack Name=" + name, e); + } + } + + private void processMetricData(SortedMap gauges, + SortedMap counters, + SortedMap histograms, + SortedMap meters, + SortedMap timers, + boolean metricPerf) { + UserDefMetric userDefMetric = Metrics.getUserDefMetric(); + + for (Entry entry : gauges.entrySet()) { + logGauge(entry.getKey(), entry.getValue()); + } + + for (Entry entry : counters.entrySet()) { + logCounter(entry.getKey(), entry.getValue()); + } + + for (Entry entry : meters.entrySet()) { + logMeter(entry.getKey(), entry.getValue()); + } + + if (metricPerf == true) { + for (Entry entry : histograms.entrySet()) { + logHistogram(entry.getKey(), entry.getValue()); + } + + for (Entry entry : timers.entrySet()) { + Map timerMap = userDefMetric.getTimer(); + logTimer(entry.getKey(), entry.getValue()); + } + } + + } + + private void logTimer(String name, Timer timer) { + final Snapshot snapshot = timer.getSnapshot(); + + logger.info(marker, + "type=TIMER, name={}, count={}, min={}, max={}, mean={}, stddev={}, median={}, " + + "p75={}, p95={}, p98={}, p99={}, p999={}, mean_rate={}, m1={}, m5={}, " + + "m15={}, rate_unit={}, duration_unit={}", + name, + timer.getCount(), + convertDuration(snapshot.getMin()), + convertDuration(snapshot.getMax()), + convertDuration(snapshot.getMean()), + convertDuration(snapshot.getStdDev()), + convertDuration(snapshot.getMedian()), + convertDuration(snapshot.get75thPercentile()), + convertDuration(snapshot.get95thPercentile()), + convertDuration(snapshot.get98thPercentile()), + convertDuration(snapshot.get99thPercentile()), + convertDuration(snapshot.get999thPercentile()), + convertRate(timer.getMeanRate()), + convertRate(timer.getOneMinuteRate()), + convertRate(timer.getFiveMinuteRate()), + convertRate(timer.getFifteenMinuteRate()), + getRateUnit(), + getDurationUnit()); + + } + + private void logMeter(String name, Meter meter) { + logger.info(marker, + "type=METER, name={}, count={}, mean_rate={}, m1={}, m5={}, m15={}, rate_unit={}", + 
name, + meter.getCount(), + convertRate(meter.getMeanRate()), + convertRate(meter.getOneMinuteRate()), + convertRate(meter.getFiveMinuteRate()), + convertRate(meter.getFifteenMinuteRate()), + getRateUnit()); + } + + private void logHistogram(String name, Histogram histogram) { + final Snapshot snapshot = histogram.getSnapshot(); + logger.info(marker, + "type=HISTOGRAM, name={}, count={}, min={}, max={}, mean={}, stddev={}, " + + "median={}, p75={}, p95={}, p98={}, p99={}, p999={}", + name, + histogram.getCount(), + snapshot.getMin(), + snapshot.getMax(), + snapshot.getMean(), + snapshot.getStdDev(), + snapshot.getMedian(), + snapshot.get75thPercentile(), + snapshot.get95thPercentile(), + snapshot.get98thPercentile(), + snapshot.get99thPercentile(), + snapshot.get999thPercentile()); + } + + private void logCounter(String name, Counter counter) { + logger.info(marker, "type=COUNTER, name={}, count={}", name, counter.getCount()); + } + + private void logGauge(String name, Gauge gauge) { + logger.info(marker, "type=GAUGE, name={}, value={}", name, gauge.getValue()); + } + + @Override + protected String getRateUnit() { + return "events/" + super.getRateUnit(); + } + + private void updateMetricsDataToZK(boolean metricPerf) { + Map> taskMetricMap = Metrics.getTaskMetricMap(); + List workerMetricList = Metrics.getWorkerMetricList(); + + updateTaskMetricsToZK(taskMetricMap, metricPerf); + updateWorkerMetricsToZK(workerMetricList, metricPerf); + updateUserDefMetricsToZK(metricPerf); + } + + private void updateTaskMetricsToZK(Map> metricMap, boolean metricPerf) { + StormClusterState clusterState = workerData.getZkCluster(); + String topologyId = workerData.getTopologyId(); + + for(Entry> entry : metricMap.entrySet()) { + String taskId = entry.getKey(); + List MetricList = entry.getValue(); + + try { + String component = clusterState.task_info(topologyId, Integer.valueOf(taskId)).getComponentId(); + TaskMetricInfo taskMetricInfo = new TaskMetricInfo(taskId, component); + + 
for(MetricInfo metricInfo : MetricList) { + taskMetricInfo.updateMetricData(metricInfo); + } + + String error = taskMetricInfo.anyQueueFull(); + if (error != null) + clusterState.report_task_error(topologyId, Integer.valueOf(taskId), error); + + clusterState.update_task_metric(topologyId, taskId, taskMetricInfo); + } catch(Exception e) { + logger.error(marker, "Failed to update metrics data in ZK for topo-{} task-{}.", + topologyId, taskId, e); + } + } + } + + public Double getCpuUsage() { + Double value = 0.0; + String output = null; + try { + String pid = JStormUtils.process_pid(); + output = SystemOperation.exec("top -b -n 1 | grep " + pid); + String subStr = output.substring(output.indexOf("S") + 1); + for(int i = 0; i < subStr.length(); i++) { + char ch = subStr.charAt(i); + if (ch != ' ') { + subStr = subStr.substring(i); + break; + } + } + String usedCpu = subStr.substring(0, subStr.indexOf(" ")); + value = Double.valueOf(usedCpu); + } catch (Exception e) { + logger.warn("Failed to get cpu usage ratio."); + if (output != null) + logger.warn("Output string is \"" + output + "\""); + value = 0.0; + } + + return value; + } + + private void updateWorkerMetricsToZK(List metricList, boolean metricPerf) { + StormClusterState clusterState = workerData.getZkCluster(); + String topologyId = workerData.getTopologyId(); + String hostName; + + hostName = JStormServerUtils.getHostName(workerData.getConf()); + String workerId = hostName + ":" + workerData.getPort(); + + WorkerMetricInfo workerMetricInfo = new WorkerMetricInfo(hostName, workerData.getPort()); + try { + //Set metrics data + for(MetricInfo metricInfo : metricList) { + workerMetricInfo.updateMetricData(metricInfo); + } + + //Set cpu & memory usage + Runtime rt=Runtime.getRuntime(); + long usedMem = rt.totalMemory() - rt.freeMemory(); + workerMetricInfo.setUsedMem(usedMem); + + workerMetricInfo.setUsedCpu(getCpuUsage()); + + clusterState.update_worker_metric(topologyId, workerId, workerMetricInfo); + } 
catch(Exception e) { + logger.error(marker, "Failed to update metrics data in ZK for topo-{} idStr-{}.", + topologyId, workerId, e); + } + } + + private void updateUserDefMetricsToZK(boolean metricPerf) { + StormClusterState clusterState = workerData.getZkCluster(); + String topologyId = workerData.getTopologyId(); + String hostName =JStormServerUtils.getHostName(workerData.getConf()); + String workerId = hostName + ":" + workerData.getPort(); + + UserDefMetric userDefMetric = Metrics.getUserDefMetric(); + UserDefMetricData userDefMetricData = new UserDefMetricData(); + userDefMetricData.updateFromGauge(userDefMetric.getGauge()); + userDefMetricData.updateFromCounter(userDefMetric.getCounter()); + userDefMetricData.updateFromMeterData(userDefMetric.getMeter()); + // If metrics performance is disable, Timer & Histogram metrics will not be monitored, + // and the corresponding metrics data will not be sent to ZK either. + if (metricPerf == false) { + userDefMetricData.updateFromHistogramData(userDefMetric.getHistogram()); + userDefMetricData.updateFromTimerData(userDefMetric.getTimer()); + } + + try { + clusterState.update_userDef_metric(topologyId, workerId, userDefMetricData); + } catch(Exception e) { + logger.error(marker, "Failed to update user define metrics data in ZK for topo-{} idStr-{}.", + topologyId, workerId, e); + } + + //Do callbacks defined by user + doCallback(userDefMetric.getGauge()); + doCallback(userDefMetric.getCounter()); + doCallback(userDefMetric.getMeter()); + if (metricPerf == false) { + doCallback(userDefMetric.getHistogram()); + doCallback(userDefMetric.getTimer()); + } + } + +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/TopoCommStatsInfo.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/TopoCommStatsInfo.java new file mode 100644 index 000000000..165f02f9f --- /dev/null +++ 
b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/TopoCommStatsInfo.java @@ -0,0 +1,229 @@ +package com.alibaba.jstorm.daemon.worker.metrics; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.log4j.Logger; + +import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat; +import com.alibaba.jstorm.stats.CommonStatsData; + +public class TopoCommStatsInfo { + private static final Logger LOG = Logger.getLogger(TopoCommStatsInfo.class); + + private String topologyId; + private String topologyName; + // Map> + private Map> spoutHbMap; + private Map> boltHbMap; + + //*********** Statistic data ****************// + // Topology Data + private CommStatsData topoStatsData; + // Spout Data + private Map spoutStatsDataMap; + // Bolt Data + private Map boltStatsDataMap; + + public TopoCommStatsInfo(String topologyId, String topologyName) { + this.topologyId = topologyId; + this.topologyName = topologyName; + topoStatsData = new CommStatsData(); + spoutHbMap = new HashMap>(); + boltHbMap = new HashMap>(); + spoutStatsDataMap = new HashMap(); + boltStatsDataMap = new HashMap(); + } + + public String getTopoId() { + return topologyId; + } + + public String getTopoName() { + return topologyName; + } + + public Map> getSpoutList() { + return spoutHbMap; + } + + public Map> getBoltList() { + return boltHbMap; + } + + public CommStatsData getTopoStatsData() { + return topoStatsData; + } + + public Map getSpoutStatsData() { + return spoutStatsDataMap; + } + + public Map getBoltStatsData() { + return boltStatsDataMap; + } + + public void addToSpoutList(String componentId, String taskId, TaskHeartbeat taskHb) { + Map taskMap = spoutHbMap.get(componentId); + if (taskMap == null) { + taskMap = new HashMap(); + spoutHbMap.put(componentId, taskMap); + } + taskMap.put(taskId, taskHb); + } + + public void addToBoltList(String componentId, String taskId, TaskHeartbeat taskHb) { + Map taskMap = 
boltHbMap.get(componentId); + if (taskMap == null) { + taskMap = new HashMap(); + boltHbMap.put(componentId, taskMap); + } + taskMap.put(taskId, taskHb); + } + + public void buildTopoStatsData() { + topoStatsData.resetData(); + Double latency = 0.0; + for (Entry spoutEntry : spoutStatsDataMap.entrySet()) { + CommStatsData statsData = spoutEntry.getValue(); + topoStatsData.updateSendTps(statsData.getSendTps()); + topoStatsData.updateRecvTps(statsData.getRecvTps()); + topoStatsData.updateFailed(statsData.getFailed()); + latency += statsData.getLatency(); + } + latency = latency/(spoutStatsDataMap.size()); + topoStatsData.updateLatency(latency); + + for (Entry boltEntry : boltStatsDataMap.entrySet()) { + CommStatsData statsData = boltEntry.getValue(); + topoStatsData.updateSendTps(statsData.getSendTps()); + topoStatsData.updateRecvTps(statsData.getRecvTps()); + topoStatsData.updateFailed(statsData.getFailed()); + } + } + + public void buildSpoutStatsData() { + updateStatsData(spoutHbMap, spoutStatsDataMap); + } + + public void buildBoltStatsData() { + updateStatsData(boltHbMap, boltStatsDataMap); + } + + public void updateStatsData(Map> HbMap, Map statsDataMap) { + for (Entry> Entry : HbMap.entrySet()) { + String componentId = Entry.getKey(); + Map compList = Entry.getValue(); + + CommStatsData comStatsData = statsDataMap.get(componentId); + if (comStatsData == null) { + comStatsData = new CommStatsData(); + statsDataMap.put(componentId, comStatsData); + } + comStatsData.resetData(); + + for (Entry compEntry : compList.entrySet()) { + TaskHeartbeat taskHb = compEntry.getValue(); + CommonStatsData statsData = taskHb.getStats(); + comStatsData.updateStatsData(statsData); + } + double avgLatency = (comStatsData.getLatency())/(compList.size()); + comStatsData.updateLatency(avgLatency); + } + } + + + public class CommStatsData { + private static final String TOPOLOGYNAME = "TopologyName"; + private static final String COMPONTENT= "Component"; + private static final String 
SEND_TPS = "send_tps"; + private static final String RECV_TPS = "recv_tps"; + private static final String FAILED = "failed"; + private static final String LATENCY = "process_latency"; + + private Double sendTps; + private Double recvTps; + private Long failed; + private Double latency; + + public CommStatsData() { + resetData(); + } + + public Double getSendTps() { + return sendTps; + } + + public Double getRecvTps() { + return recvTps; + } + + public Long getFailed() { + return failed; + } + + public Double getLatency() { + return latency; + } + + public void updateSendTps(Double tps) { + sendTps += tps; + } + + public void updateRecvTps(Double tps) { + recvTps += tps; + } + + public void updateFailed(Long fail) { + failed += fail; + } + + public void updateLatency(Double latency) { + this.latency = latency; + } + + public void updateStatsData(CommonStatsData commStatsData) { + sendTps += commStatsData.get_total_send_tps(); + recvTps += commStatsData.get_total_recv_tps(); + failed += commStatsData.get_total_failed(); + latency += commStatsData.get_avg_latency(); + } + + public void updateStatsData(CommStatsData commStatsData) { + sendTps += commStatsData.getSendTps(); + recvTps += commStatsData.getRecvTps(); + failed += commStatsData.getFailed(); + latency += commStatsData.getLatency(); + } + + public void resetData() { + sendTps = 0.0; + recvTps = 0.0; + failed = 0l; + latency = 0.0; + } + + public Map convertToKVMap(String topoloygName,String componentId) { + Map ret = new HashMap(); + ret.put(TOPOLOGYNAME, topoloygName); + ret.put( COMPONTENT, componentId); + ret.put(SEND_TPS, sendTps); + ret.put( RECV_TPS, recvTps); + ret.put(FAILED, failed); + ret.put(LATENCY, latency); + + return ret; + } + + public void printValue() { + LOG.info("send_tps: " + sendTps); + LOG.info("recv_tps: " + recvTps); + LOG.info("failed: " + failed); + LOG.info("latency: " + latency); + } + } +} + + \ No newline at end of file diff --git 
a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadMetricFromZK.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadMetricFromZK.java new file mode 100644 index 000000000..bb77d9f65 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadMetricFromZK.java @@ -0,0 +1,248 @@ +package com.alibaba.jstorm.daemon.worker.metrics; + +import java.util.List; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.Map; +import java.util.Map.Entry; +import java.util.HashMap; + +import org.apache.log4j.Logger; + +import com.alibaba.jstorm.daemon.nimbus.NimbusData; +import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat; +import com.alibaba.jstorm.cluster.StormBase; +import com.alibaba.jstorm.cluster.StormClusterState; +import com.alibaba.jstorm.stats.CommonStatsData; +import com.alibaba.jstorm.task.TaskInfo; +import com.alibaba.jstorm.metric.UserDefMetricData; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; + +public class UploadMetricFromZK implements Runnable { + + private static final Logger LOG = Logger.getLogger(UploadMetricFromZK.class); + + private NimbusData data; + private StormClusterState clusterState; + + private AlimonitorClient client = new AlimonitorClient(); + + private Map topologyMap; + + public UploadMetricFromZK(NimbusData data) { + this.data = data; + clusterState = data.getStormClusterState(); + topologyMap = new HashMap(); + } + + @Override + public void run() { + uploadCommStats(); + uploadUseDefMetric(clusterState); + } + + // remove obsolete topology + private boolean rmObsoleteTopo() { + boolean ret = true; + List obsoleteTopos = new ArrayList(); + try { + List topologys = clusterState.active_storms(); + + for (Entry entry : topologyMap.entrySet()) { + if (topologys.contains(entry.getKey()) == false) { + obsoleteTopos.add(entry.getKey()); + } + } + + for (String topo : obsoleteTopos) { + topologyMap.remove(topo); 
+ } + } catch (Exception e) { + LOG.warn("Faild to update topology list.", e); + ret = false; + } + + return ret; + } + + private void uploadCommStats() { + // Remove obsolete topology firstly. new topology will be + // added when uploading the common statistic data + rmObsoleteTopo(); + + List> listMapMsg=new ArrayList>(); + + try { + TopoCommStatsInfo ret; + List topologys = clusterState.heartbeat_storms(); + + for (String topologyId : topologys) { + if (topologyMap.containsKey(topologyId) == false) { + StormBase base = clusterState.storm_base(topologyId, null); + if (base == null) { + topologyMap.remove(topologyId); + continue; + } else { + topologyMap.put(topologyId, new TopoCommStatsInfo(topologyId, base.getStormName())); + } + } + // Get common statistic data from taskbeats in ZK for a topology + ret = getCommStatsData(topologyId); + + if (ret != null) { + // Build topology, spout and bolt statis data from the + // statis data of all tasks + buildCommStatsData(ret); + // Build statistic data message of remote monitor server + buildComonSendMsg(ret,listMapMsg); + + } + } + + if(listMapMsg.size() > 0) { + // Send statistic data to remote monitor server + sendCommStatsData(listMapMsg); + } + } catch (Exception e) { + LOG.warn("Failed to upload comm statistic data to Alimonitor.", e); + } + } + + public void uploadUseDefMetric(StormClusterState clusterState) { + try { + List active_topologys = clusterState.active_storms(); + if (active_topologys == null) { + return; + } + + Map totalMsg = new HashMap(); + + for (String topologyId : active_topologys) { + Map> compont_metrics = new HashMap>(); + List workerIds = clusterState.monitor_user_workers(topologyId); + if(workerIds == null) + continue; + MetricKVMsg topologyMetricMsg = new MetricKVMsg(); + for(String workerId : workerIds) { + UserDefMetricData useWorkDefMetric = clusterState.get_userDef_metric(topologyId, workerId); + //add metric based on worker to useWorkDefMetric + 
topologyMetricMsg.countGangeMetric(useWorkDefMetric.getGaugeDataMap()); + topologyMetricMsg.countCounterMetric(useWorkDefMetric.getCounterDataMap()); + topologyMetricMsg.countHistogramMetric(useWorkDefMetric.getHistogramDataMap()); + topologyMetricMsg.countTimerMetric(useWorkDefMetric.getTimerDataMap()); + topologyMetricMsg.countMeterMetric(useWorkDefMetric.getMeterDataMap()); + } + topologyMetricMsg.calcAvgTimer(); + topologyMetricMsg.emptyCountMap(); + Map ret = topologyMetricMsg.convertToKVMap(); + if(ret.size() >0) totalMsg.putAll(ret); + /* + if(ret.size() > 0){ + client.setMonitorName("jstorm_user_metric"); + client.sendRequest(0, " ",ret); + } */ + } + + if(totalMsg.size() > 0) { + client.setMonitorName("jstorm_user_metric"); + client.sendRequest(0, " ", totalMsg); + } + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + + public void clean() { + + } + + private TopoCommStatsInfo getCommStatsData(String topologyId) { + try + { + String taskId; + String componentId; + TaskHeartbeat taskHb; + + TopoCommStatsInfo commStatsInfo = topologyMap.get(topologyId); + if (commStatsInfo == null) {LOG.warn("commStatsInfo is null, topoId=" + topologyId);} + + Map heartbeats = clusterState.task_heartbeat(topologyId); + if (heartbeats == null || heartbeats.size() == 0) return null; + + for (Entry entry : heartbeats.entrySet()) { + taskId = entry.getKey(); + taskHb = entry.getValue(); + + TaskInfo taskInfo = clusterState.task_info(topologyId, Integer.parseInt(taskId)); + if (taskInfo == null ) { + LOG.warn("Task information can not be found in ZK for task-" + taskId); + continue; + } + componentId = taskInfo.getComponentId(); + + //update taskHb into the corresponding component map + if (taskHb.getComponentType().equals("spout")) { + commStatsInfo.addToSpoutList(componentId, taskId, taskHb); + } else { + commStatsInfo.addToBoltList(componentId, taskId, taskHb); + } + } + + return commStatsInfo; + + } catch (Exception e) { + 
LOG.warn("getCommStatsData, failed to read data from ZK.", e); + return null; + } + } + + private void buildCommStatsData(TopoCommStatsInfo commStatsInfo) { + commStatsInfo.buildBoltStatsData(); + commStatsInfo.buildSpoutStatsData(); + commStatsInfo.buildTopoStatsData(); + } + + private void sendCommStatsData(List> listMapMsg) { + + try { + client.setMonitorName("jstorm_metric"); + client.sendRequest(0, AlimonitorClient.ERROR_MSG, listMapMsg); + } catch (Exception e) { + LOG.warn("Error when sending common statistic data.", e); + } + } + + private void buildComonSendMsg(TopoCommStatsInfo commStatsInfo,List> listMapMsg) { + String topoloygName = commStatsInfo.getTopoName(); + + Map jsonMsg; + + try { + //build topology statistic data + TopoCommStatsInfo.CommStatsData topoStatsData = commStatsInfo.getTopoStatsData(); + jsonMsg = topoStatsData.convertToKVMap(topoloygName,topoloygName); + listMapMsg.add(jsonMsg); + //build spout statistic data + Map spoutStatsData = commStatsInfo.getSpoutStatsData(); + for (Entry entry : spoutStatsData.entrySet()) { + String componentId = entry.getKey(); + jsonMsg = entry.getValue().convertToKVMap(topoloygName,componentId); + listMapMsg.add(jsonMsg); + } + + //build bolt statistic data + Map boltStatsData = commStatsInfo.getBoltStatsData(); + for (Entry entry : boltStatsData.entrySet()) { + String componentId = entry.getKey(); + jsonMsg = entry.getValue().convertToKVMap(topoloygName,componentId); + listMapMsg.add(jsonMsg); + } + } catch (Exception e) { + LOG.warn("Error when bulding common statistic data message.", e); + } + } + + +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadSupervMetric.java b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadSupervMetric.java new file mode 100644 index 000000000..466c12736 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/daemon/worker/metrics/UploadSupervMetric.java @@ -0,0 +1,183 @@ 
+package com.alibaba.jstorm.daemon.worker.metrics; + +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.HashMap; +import java.util.Set; +import java.util.HashSet; + +import org.apache.log4j.Logger; + +import com.alibaba.jstorm.callback.RunnableCallback; +import com.alibaba.jstorm.cluster.StormBase; +import com.alibaba.jstorm.cluster.StormClusterState; +import com.alibaba.jstorm.cluster.StormMonitor; +import com.alibaba.jstorm.daemon.supervisor.Supervisor; +import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo; +import com.alibaba.jstorm.daemon.worker.WorkerMetricInfo; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.task.TaskMetricInfo; +import com.alibaba.jstorm.task.Assignment; + +public class UploadSupervMetric extends RunnableCallback { + private static Logger LOG = Logger.getLogger(UploadSupervMetric.class); + + private static final String TASK_MONITOR_NAME = "jstorm_task_metrics"; + private static final String WORKER_MONITOR_NAME = "jstorm_worker_metrics"; + + private AtomicBoolean active; + private Integer result; + private int frequence; + + private String supervisorId; + private String hostName; + private StormClusterState cluster; + private AlimonitorClient client = new AlimonitorClient(); + + List> jsonMsgTasks = new ArrayList>(); + List> jsonMsgWorkers = new ArrayList>(); + + public UploadSupervMetric(StormClusterState cluster, String supervisorId, AtomicBoolean active, int frequence) { + this.active = active; + this.frequence = frequence; + this.result = null; + this.cluster = cluster; + this.supervisorId = supervisorId; + try { + SupervisorInfo supervisorInfo = cluster.supervisor_info(supervisorId); + this.hostName = supervisorInfo.getHostName(); + } catch (Exception e) { + LOG.error("Failed to get hostname for supervisorID=" + supervisorId); + } + } + + @Override + public Object getResult() { + return 
result; + } + + @Override + public void run() { + sendMetricsData();; + if (active.get()) { + this.result = frequence; + } else { + this.result = -1; + + } + } + + + public void sendMetricsData() { + + try { + List topologys = cluster.active_storms(); + + for (String topologyId : topologys) { + StormMonitor monitor = cluster.get_storm_monitor(topologyId); + if (monitor == null) continue; + boolean metricPerf = monitor.getMetrics(); + + Assignment assignment = cluster.assignment_info(topologyId, null); + + if (assignment != null) { + Set taskSet = new HashSet(); + Set workerSet = new HashSet(); + //Retrieve task set + Set tempTaskSet = assignment.getCurrentSuperviosrTasks(supervisorId); + taskSet.addAll(tempTaskSet); + + //Retrieve worker set + Set tempWorkerSet = assignment.getCurrentSuperviosrWorkers(supervisorId); + workerSet.addAll(tempWorkerSet); + + //Build KV Map for AliMonitor + buildTaskJsonMsg(topologyId, taskSet, metricPerf); + buildWorkerJsonMsg(topologyId, workerSet, metricPerf); + } + } + + if (jsonMsgTasks.size() != 0) { + client.setMonitorName(TASK_MONITOR_NAME); + client.sendRequest(0, "", jsonMsgTasks); + } + + if (jsonMsgWorkers.size() != 0) { + client.setMonitorName(WORKER_MONITOR_NAME); + client.sendRequest(0, "", jsonMsgWorkers); + } + + jsonMsgTasks.clear(); + jsonMsgWorkers.clear(); + + } catch (Exception e) { + LOG.error("Failed to upload worker&task metrics data", e); + jsonMsgTasks.clear(); + jsonMsgWorkers.clear(); + } + } + + public void buildTaskJsonMsg(String topologyId, Set taskSet, boolean metricPerf) { + for (Integer taskId : taskSet) { + try { + TaskMetricInfo taskMetric = cluster.get_task_metric(topologyId, taskId); + if (taskMetric == null) continue; + + // Task KV structure + Map taskKV = new HashMap(); + taskKV.put("Topology_Name", topologyId); + taskKV.put("Task_Id", String.valueOf(taskId)); + taskKV.put("Component", taskMetric.getComponent()); + taskKV.putAll(taskMetric.getGaugeData()); + 
taskKV.putAll(taskMetric.getCounterData()); + taskKV.putAll(taskMetric.getMeterData()); + if (metricPerf == true) { + taskKV.putAll(taskMetric.getTimerData()); + taskKV.putAll(taskMetric.getHistogramData()); + } + + jsonMsgTasks.add(taskKV); + } catch (Exception e) { + LOG.error("Failed to buildTaskJsonMsg, taskID=" + taskId + ", e=" + e); + } + } + } + + public void buildWorkerJsonMsg(String topologyId, Set workerSet, boolean metricPerf) { + String workerId = null; + for (Integer port: workerSet) { + try { + workerId = hostName + ":" + port; + WorkerMetricInfo workerMetric = cluster.get_worker_metric(topologyId, workerId); + if (workerMetric == null) continue; + + Map workerKV = new HashMap(); + + workerKV.put("Topology_Name", topologyId); + workerKV.put("Port", String.valueOf(port)); + workerKV.put(MetricDef.MEMORY_USED, workerMetric.getUsedMem()); + workerKV.put(MetricDef.CPU_USED_RATIO, workerMetric.getUsedCpu()); + + workerKV.putAll(workerMetric.getGaugeData()); + workerKV.putAll(workerMetric.getCounterData()); + workerKV.putAll(workerMetric.getMeterData()); + + if (metricPerf == true) + { + workerKV.putAll(workerMetric.getTimerData()); + workerKV.putAll(workerMetric.getHistogramData()); + } + + jsonMsgWorkers.add(workerKV); + } catch (Exception e) { + LOG.error("Failed to buildWorkerJsonMsg, workerId=" + workerId + ", e=" + e); + } + } + } + + public void clean() { + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java b/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java index e1ad9f4aa..7d2de10e7 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java @@ -35,6 +35,11 @@ public RunnableCallback poll() throws InterruptedException { RunnableCallback event = queue.poll(); return event; } + + public RunnableCallback take() throws InterruptedException { + 
RunnableCallback event = queue.take(); + return event; + } public void proccessinc() { processed.incrementAndGet(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImpExecute.java b/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImpExecute.java index cd66582da..69a83e6a9 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImpExecute.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/event/EventManagerImpExecute.java @@ -21,7 +21,7 @@ public void run() { while (manager.isRunning()) { RunnableCallback r = null; try { - r = manager.poll(); + r = manager.take(); } catch (InterruptedException e) { // LOG.info("Failed to get ArgsRunable from EventManager queue"); } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/local/LocalClusterMap.java b/jstorm-server/src/main/java/com/alibaba/jstorm/local/LocalClusterMap.java index ea4833fc2..97fcf135a 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/local/LocalClusterMap.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/local/LocalClusterMap.java @@ -80,12 +80,15 @@ public void setTmpDir(List tmpDir) { } public void clean() { - if (nimbusServer != null) - nimbusServer.cleanup(); + if (supervisor != null) { supervisor.ShutdownAllWorkers(); supervisor.shutdown(); } + + if (nimbusServer != null) { + nimbusServer.cleanup(); + } if (zookeeper != null) zookeeper.shutdown(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java index 23cbcf729..ce2e7ee0e 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java @@ -9,6 +9,7 @@ enum ControlMessage { EOB_MESSAGE((short) -201), OK_RESPONSE((short) -200); private short code; + private long timeStamp; // private constructor private 
ControlMessage(short code) { @@ -30,7 +31,7 @@ static ControlMessage mkMessage(short encoded) { } int encodeLength() { - return 2; // short + return 10; // short + long } /** @@ -48,5 +49,14 @@ ChannelBuffer buffer() throws Exception { void write(ChannelBufferOutputStream bout) throws Exception { bout.writeShort(code); + bout.writeLong(System.currentTimeMillis()); + } + + long getTimeStamp() { + return timeStamp; + } + + void setTimeStamp(long timeStamp) { + this.timeStamp = timeStamp; } } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java index ec9a659fd..ef7400c2f 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java @@ -1,5 +1,12 @@ package com.alibaba.jstorm.message.netty; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.HashMap; +import java.net.SocketAddress; +import java.net.InetSocketAddress; + import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; @@ -9,15 +16,19 @@ import backtype.storm.messaging.TaskMessage; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormHistogram; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; public class MessageDecoder extends FrameDecoder { private static final Logger LOG = LoggerFactory .getLogger(MessageDecoder.class); - private static JStormTimer timer = Metrics.registerTimer("netty-server-decode-timer"); + private static JStormTimer timer = Metrics.registerTimer(null, MetricDef.NETTY_SERV_DECODE_TIME, + null, Metrics.MetricType.WORKER); + private static Map 
networkTransmitTimeMap = new HashMap(); /* * Each ControlMessage is encoded as: code (<0) ... short(2) Each @@ -25,10 +36,12 @@ public class MessageDecoder extends FrameDecoder { * ... byte[] * */ protected Object decode(ChannelHandlerContext ctx, Channel channel, - ChannelBuffer buf) throws Exception { + ChannelBuffer buf) throws Exception { // Make sure that we have received at least a short long available = buf.readableBytes(); - if (available < 2) { + // Length of control message is 10. + // Minimum length of a task message is 6(short taskId, int length). + if (available < 6) { // need more data return null; } @@ -42,26 +55,50 @@ protected Object decode(ChannelHandlerContext ctx, Channel channel, // there's not enough bytes in the buffer. buf.markReaderIndex(); - // read the short field - short code = buf.readShort(); - - // case 1: Control message - ControlMessage ctrl_msg = ControlMessage.mkMessage(code); - if (ctrl_msg != null) { - return ctrl_msg; - } - - // case 2: task Message - short task = code; - - // Make sure that we have received at least an integer (length) - available -= 2; - if (available < 4) { - // need more data - buf.resetReaderIndex(); - - return null; - } + // read the short field + short code = buf.readShort(); + available -= 2; + + // case 1: Control message + ControlMessage ctrl_msg = ControlMessage.mkMessage(code); + if (ctrl_msg != null) { + if (available < 8) { + // The time stamp bytes were not received yet - return null. 
+ buf.resetReaderIndex(); + return null; + } + long timeStamp = buf.readLong(); + available -= 8; + if (ctrl_msg == ControlMessage.EOB_MESSAGE) { + InetSocketAddress sockAddr = (InetSocketAddress)(channel.getRemoteAddress()); + String remoteAddr = sockAddr.getHostName() + ":" + sockAddr.getPort(); + + long interval = System.currentTimeMillis() - timeStamp; + if (interval < 0) interval = 0; + + JStormHistogram netTransTime = networkTransmitTimeMap.get(remoteAddr); + if (netTransTime == null) { + netTransTime = Metrics.registerHistograms(remoteAddr, MetricDef.NETWORK_MSG_TRANS_TIME, + null, Metrics.MetricType.WORKER); + networkTransmitTimeMap.put(remoteAddr, netTransTime); + } + + netTransTime.update(interval); + } + + return ctrl_msg; + } + + // case 2: task Message + short task = code; + + // Make sure that we have received at least an integer (length) + if (available < 4) { + // need more data + buf.resetReaderIndex(); + + return null; + } // Read the length field. int length = buf.readInt(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java index c524b906b..db76af788 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java @@ -2,24 +2,21 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.concurrent.Executors; +import java.util.Set; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.lang.builder.ToStringBuilder; -import 
org.apache.commons.lang.builder.ToStringStyle; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelFactory; import org.jboss.netty.channel.ChannelFuture; import org.jboss.netty.channel.ChannelFutureListener; -import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,9 +27,10 @@ import backtype.storm.utils.Utils; import com.alibaba.jstorm.client.ConfigExtension; -import com.alibaba.jstorm.daemon.worker.metrics.JStormHistogram; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormHistogram; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.utils.JStormServerUtils; import com.alibaba.jstorm.utils.JStormUtils; import com.codahale.metrics.Gauge; @@ -65,14 +63,16 @@ class NettyClient implements IConnection { protected ScheduledExecutorService scheduler; - protected String sendTimerName; + protected String address; protected JStormTimer sendTimer; - protected String histogramName; protected JStormHistogram histogram; - protected String pendingGaugeName; protected ReconnectRunnable reconnector; protected ChannelFactory clientChannelFactory; + + protected Set closingChannel; + + protected AtomicBoolean isConnecting = new AtomicBoolean(false); @SuppressWarnings("rawtypes") NettyClient(Map storm_conf, ChannelFactory factory, @@ -110,23 +110,20 @@ class NettyClient implements IConnection { remote_addr = new InetSocketAddress(host, port); name = remote_addr.toString(); - sendTimerName = JStormServerUtils.getName(host, port) - + "-netty-send-timer"; - sendTimer = Metrics.registerTimer(sendTimerName); - histogramName = JStormServerUtils.getName(host, port) - + "-netty-send-histogram"; - histogram = 
Metrics.registerHistograms(histogramName); - - pendingGaugeName = JStormServerUtils.getName(host, port) - + "-netty-send-pending-gauge"; - Metrics.register(pendingGaugeName, new Gauge() { + address = JStormServerUtils.getName(host, port); + sendTimer = Metrics.registerTimer(address, MetricDef.NETTY_CLI_SEND_TIME, + null, Metrics.MetricType.WORKER); + histogram = Metrics.registerHistograms(address, MetricDef.NETTY_CLI_BATCH_SIZE, + null, Metrics.MetricType.WORKER); + Metrics.register(address, MetricDef.NETTY_CLI_SEND_PENDING, new Gauge() { @Override public Long getValue() { return pendings.get(); } - }); + }, null, Metrics.MetricType.WORKER); + closingChannel = new HashSet(); } public void start() { @@ -137,7 +134,7 @@ public void start() { // Set up the pipeline factory. bootstrap.setPipelineFactory(new StormClientPipelineFactory(this)); - doReconnect(); + reconnect(); } /** @@ -145,13 +142,23 @@ public void start() { * */ public void doReconnect() { - if (channelRef.get() != null) { + if (channelRef.get() != null ) { + +// if (channelRef.get().isWritable()) { +// LOG.info("already exist a writable channel, give up reconnect, {}", +// channelRef.get()); +// return; +// } return; } if (isClosed() == true) { return; } + + if (isConnecting.getAndSet(true)) { + return ; + } long sleepMs = getSleepTimeMs(); LOG.info("Reconnect ... [{}], {}, sleep {}ms", retries.get(), name, @@ -161,11 +168,14 @@ public void doReconnect() { public void operationComplete(ChannelFuture future) throws Exception { + isConnecting.set(false); if (future.isSuccess()) { // do something else } else { - LOG.info("Failed to reconnect ... [{}], {}", retries.get(), - name); + LOG.info( + "Failed to reconnect ... 
[{}], {}, channel = {}, cause = {}", + retries.get(), name, future.getChannel(), + future.getCause()); reconnect(); } } @@ -219,12 +229,14 @@ public void operationComplete(ChannelFuture future) pendings.decrementAndGet(); if (!future.isSuccess()) { + Channel channel = future.getChannel(); if (isClosed() == false) { - LOG.info("Failed to send requests to " + name + ": ", - future.getCause()); + LOG.info("Failed to send requests to " + name + ": " + + channel.toString() + ":", + future.getCause() ); } - Channel channel = future.getChannel(); + if (null != channel) { @@ -250,9 +262,9 @@ public synchronized void close() { } being_closed.set(true); - Metrics.unregister(pendingGaugeName); - Metrics.unregister(histogramName); - Metrics.unregister(sendTimerName); + Metrics.unregister(address, MetricDef.NETTY_CLI_SEND_TIME, null, Metrics.MetricType.WORKER); + Metrics.unregister(address, MetricDef.NETTY_CLI_BATCH_SIZE, null, Metrics.MetricType.WORKER); + Metrics.unregister(address, MetricDef.NETTY_CLI_SEND_PENDING, null, Metrics.MetricType.WORKER); Channel channel = channelRef.get(); if (channel == null) { @@ -301,23 +313,67 @@ void close_n_release() { } } + + /** + * Avoid channel double close + * + * @param channel + */ + void closeChannel(final Channel channel) { + synchronized (this) { + if (closingChannel.contains(channel)) { + LOG.info(channel.toString() + " is already closed"); + return ; + } + + closingChannel.add(channel); + } + + LOG.debug(channel.toString() + " begin to closed"); + ChannelFuture closeFuture = channel.close(); + closeFuture.addListener(new ChannelFutureListener() { + public void operationComplete(ChannelFuture future) + throws Exception { + + synchronized (this) { + closingChannel.remove(channel); + } + LOG.debug(channel.toString() + " finish closed"); + } + }); + } + + + void disconnectChannel(Channel channel) { + if (isClosed()) { + return ; + } + + if (channel == channelRef.get()) { + setChannel(null); + reconnect(); + }else { + 
closeChannel(channel); + } + + } void exceptionChannel(Channel channel) { if (channel == channelRef.get()) { setChannel(null); } else { - channel.close(); + closeChannel(channel); } } void setChannel(Channel newChannel) { - Channel oldChannel = channelRef.getAndSet(newChannel); + final Channel oldChannel = channelRef.getAndSet(newChannel); if (newChannel != null) { retries.set(0); } - String oldLocalAddres = (oldChannel == null) ? "null" : oldChannel + final String oldLocalAddres = (oldChannel == null) ? "null" : oldChannel .getLocalAddress().toString(); String newLocalAddress = (newChannel == null) ? "null" : newChannel .getLocalAddress().toString(); @@ -326,8 +382,17 @@ void setChannel(Channel newChannel) { // avoid one netty client use too much connection, close old one if (oldChannel != newChannel && oldChannel != null) { - oldChannel.close(); - LOG.info("Close old channel " + oldLocalAddres); + + closeChannel(oldChannel); + LOG.info("Successfully close old channel " + oldLocalAddres); +// scheduler.schedule(new Runnable() { +// +// @Override +// public void run() { +// +// } +// }, 10, TimeUnit.SECONDS); + // @@@ todo // pendings.set(0); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java index f72da3c04..a1efc2029 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java @@ -170,6 +170,17 @@ void waitChannelReady(long cachedSize, long sleepMs) { } } } + + long getDelaySec(long cachedSize) { + long count = cachedSize / BATCH_THREASHOLD_WARN; + long sleepMs = (long)(Math.pow(2, count) * 10); + + if (sleepMs > 1000) { + sleepMs = 1000; + } + + return sleepMs; + } void handleFailedChannel(MessageBatch messageBatch) { @@ -178,10 +189,9 @@ void handleFailedChannel(MessageBatch messageBatch) { long cachedSize = 
messageBatch.getEncoded_length(); if (cachedSize > BATCH_THREASHOLD_WARN) { - long count = (cachedSize + BATCH_THREASHOLD_WARN - 1) - / BATCH_THREASHOLD_WARN; - long sleepMs = count * 10; - + + long sleepMs = getDelaySec(cachedSize); + if (blockSend == false) { LOG.warn( "Target server {} is unavailable, pending {}, bufferSize {}, block sending {}ms", diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java index efc6bedd3..8facf6d74 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java @@ -22,7 +22,8 @@ import backtype.storm.utils.DisruptorQueue; import backtype.storm.utils.Utils; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.utils.JStormServerUtils; import com.alibaba.jstorm.utils.JStormUtils; import com.codahale.metrics.Gauge; @@ -34,9 +35,7 @@ class NettyClientSync extends NettyClient implements EventHandler { private static final Logger LOG = LoggerFactory .getLogger(NettyClientSync.class); - private String batchQueueName; private ConcurrentLinkedQueue batchQueue; - private String disruptorQueueName; private DisruptorQueue disruptorQueue; private AtomicLong emitTs = new AtomicLong(0); @@ -46,19 +45,15 @@ class NettyClientSync extends NettyClient implements EventHandler { ScheduledExecutorService scheduler, String host, int port, ReconnectRunnable reconnector) { super(storm_conf, factory, scheduler, host, port, reconnector); - + batchQueue = new ConcurrentLinkedQueue(); - batchQueueName = JStormServerUtils.getName(host, port) - + "-batchQueue-counter"; - - Metrics.register(batchQueueName, new Gauge() { - - @Override - public Integer getValue() { - return batchQueue.size(); - } - - }); + 
Metrics.register(address, MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE, + new Gauge() { + @Override + public Integer getValue() { + return batchQueue.size(); + } + }, null, Metrics.MetricType.WORKER); WaitStrategy waitStrategy = (WaitStrategy) Utils .newInstance((String) storm_conf @@ -67,9 +62,8 @@ public Integer getValue() { disruptorQueue = new DisruptorQueue(new SingleThreadedClaimStrategy( MAX_SEND_PENDING * 8), waitStrategy); - disruptorQueueName = JStormServerUtils.getName(host, port) - + "-disruptorQueue"; - Metrics.registerQueue(disruptorQueueName, disruptorQueue); + Metrics.registerQueue(address, MetricDef.NETTY_CLI_SYNC_DISR_QUEUE, disruptorQueue, + null, Metrics.MetricType.WORKER); Runnable trigger = new Runnable() { @Override @@ -249,8 +243,8 @@ public void close() { "Begin to close connection to {} and flush all data, batchQueue {}, disruptor {}", name, batchQueue.size(), disruptorQueue.population()); sendAllData(); - Metrics.unregister(batchQueueName); - Metrics.unregister(disruptorQueueName); + Metrics.unregister(address, MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE, null, Metrics.MetricType.WORKER); + Metrics.unregister(address, MetricDef.NETTY_CLI_SYNC_DISR_QUEUE, null, Metrics.MetricType.WORKER); super.close(); clientChannelFactory.releaseExternalResources(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java index f5fc2fa6c..b81a6d9b4 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java @@ -75,6 +75,7 @@ public IConnection bind(String topology_id, int port) { retConnection = new NettyServer(storm_conf, port, isSyncMode); } catch (Throwable e) { + LOG.error("Failed to instance NettyServer", e.getCause()); JStormUtils.halt_process(-1, "Failed to bind " + port); } diff --git 
a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java index 62873cce4..fc966e982 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java @@ -66,21 +66,19 @@ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent event) { @Override public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception { - LOG.info("Receive channelDisconnected to {}", client.getRemoteAddr()); + LOG.info("Receive channelDisconnected to {}, channel = {}", + client.getRemoteAddr(), e.getChannel()); // ctx.sendUpstream(e); super.channelDisconnected(ctx, e); - if (!being_closed.get()) { - - client.setChannel(null); - client.reconnect(); - } + client.disconnectChannel(e.getChannel()); } @Override public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception { - LOG.info("Connection to {} has been closed", client.getRemoteAddr()); + LOG.info("Connection to {} has been closed, channel = {}", + client.getRemoteAddr(), e.getChannel()); super.channelClosed(ctx, e); } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/message/zeroMq/ZMQSendConnection.java b/jstorm-server/src/main/java/com/alibaba/jstorm/message/zeroMq/ZMQSendConnection.java index 90146236b..0f99a6532 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/message/zeroMq/ZMQSendConnection.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/message/zeroMq/ZMQSendConnection.java @@ -8,9 +8,10 @@ import backtype.storm.messaging.TaskMessage; import backtype.storm.utils.DisruptorQueue; -import com.alibaba.jstorm.daemon.worker.metrics.JStormHistogram; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import 
com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormHistogram; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.utils.JStormServerUtils; /** @@ -23,13 +24,15 @@ public class ZMQSendConnection implements IConnection { private boolean closed = false; private JStormTimer timer; private JStormHistogram histogram; + private String prefix; public ZMQSendConnection(Socket _socket, String host, int port) { socket = _socket; - timer = Metrics.registerTimer(JStormServerUtils.getName(host, port) - + "-zmq-send-timer"); - histogram = Metrics.registerHistograms(JStormServerUtils.getName(host, - port) + "-zmq-send-histogram"); + prefix = JStormServerUtils.getName(host, port); + timer = Metrics.registerTimer(prefix, MetricDef.ZMQ_SEND_TIME, + null, Metrics.MetricType.WORKER); + histogram = Metrics.registerHistograms(prefix, MetricDef.ZMQ_SEND_MSG_SIZE, + null, Metrics.MetricType.WORKER); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/stats/CommonStatsData.java b/jstorm-server/src/main/java/com/alibaba/jstorm/stats/CommonStatsData.java index 3fe641db9..eee9df2d2 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/stats/CommonStatsData.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/stats/CommonStatsData.java @@ -28,6 +28,8 @@ public class CommonStatsData implements Serializable { protected int rate = StatFunction.NUM_STAT_BUCKETS; public static final long LATENCY_MS_RATIO = 1000; + + public static final Integer ALL_TIME_KEY = new Integer(0); public CommonStatsData() { staticsMap = new HashMap>(); @@ -200,6 +202,65 @@ public Map> get_process_latencie() { return ret; } + + /* + * Get total statics of all-time + */ + public Long get_total_emitted() { + Long ret = new Long(0); + + Map allTimeEmitted = get_emitted().get(StatBuckets.ALL_WINDOW_STR); + for (Entry entry : allTimeEmitted.entrySet()) { + ret += entry.getValue(); + } + + return ret; + } + + 
public Double get_total_send_tps() { + Double ret = new Double(0); + + Map allTimeSendTps = get_send_tps().get(StatBuckets.ALL_WINDOW_STR); + for (Entry entry : allTimeSendTps.entrySet()) { + ret += entry.getValue(); + } + + return ret; + } + + public Double get_total_recv_tps() { + Double ret = new Double(0); + + Map allTimeRecvTps = get_recv_tps().get(StatBuckets.ALL_WINDOW_STR); + for (Entry entry : allTimeRecvTps.entrySet()) { + ret += entry.getValue(); + } + + return ret; + } + + public Long get_total_failed() { + Long ret = new Long(0); + + Map allTimeFailed = get_failed().get(StatBuckets.ALL_WINDOW_STR); + for (Entry entry : allTimeFailed.entrySet()) { + ret += entry.getValue(); + } + + return ret; + } + + public Double get_avg_latency() { + Double ret = new Double(0); + int i = 0; + + Map allAvglatency = get_process_latencie().get(StatBuckets.ALL_WINDOW_STR); + for (Entry entry : allAvglatency.entrySet()) { + ret += entry.getValue(); + i++; + } + return ret; + } public TaskStats getTaskStats() { TaskStats taskStats = new TaskStats(); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/Assignment.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/Assignment.java index f3892c24a..29d23a816 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/Assignment.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/Assignment.java @@ -73,6 +73,28 @@ public Map getTaskToNodePortbyNode( } return result; } + + public Set getCurrentSuperviosrTasks(String supervisorId) { + Set Tasks = new HashSet(); + + for (ResourceWorkerSlot worker : workers) { + if (worker.getNodeId().equals(supervisorId)) + Tasks.addAll(worker.getTasks()); + } + + return Tasks; + } + + public Set getCurrentSuperviosrWorkers(String supervisorId) { + Set workerSet = new HashSet(); + + for (ResourceWorkerSlot worker : workers) { + if (worker.getNodeId().equals(supervisorId)) + workerSet.add(worker.getPort()); + } + + return workerSet; + } public Set 
getCurrentWorkerTasks(String supervisorId, int port) { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/Task.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/Task.java index 6f37b24a3..a0cb5762b 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/Task.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/Task.java @@ -7,6 +7,7 @@ import org.apache.log4j.Logger; import backtype.storm.Config; +import backtype.storm.messaging.IConnection; import backtype.storm.messaging.IContext; import backtype.storm.serialization.KryoTupleSerializer; import backtype.storm.spout.ISpout; @@ -32,13 +33,14 @@ import com.alibaba.jstorm.task.error.ITaskReportErr; import com.alibaba.jstorm.task.error.TaskReportError; import com.alibaba.jstorm.task.error.TaskReportErrorAndDie; -import com.alibaba.jstorm.task.execute.BaseExecutors; import com.alibaba.jstorm.task.execute.BoltExecutors; +import com.alibaba.jstorm.task.execute.BaseExecutors; import com.alibaba.jstorm.task.execute.spout.MultipleThreadSpoutExecutors; import com.alibaba.jstorm.task.execute.spout.SingleThreadSpoutExecutors; import com.alibaba.jstorm.task.execute.spout.SpoutExecutors; import com.alibaba.jstorm.task.group.MkGrouper; import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable; +import com.alibaba.jstorm.task.heartbeat.TaskStats; import com.alibaba.jstorm.utils.JStormServerUtils; import com.alibaba.jstorm.utils.JStormUtils; import com.lmax.disruptor.SingleThreadedClaimStrategy; @@ -77,18 +79,18 @@ public class Task { private Object taskObj; private CommonStatsRolling taskStats; private WorkerData workerData; + private String componentType; //"spout" or "bolt" @SuppressWarnings("rawtypes") public Task(WorkerData workerData, int taskId) throws Exception { - this.workerData = workerData; openOrPrepareWasCalled = new Atom(Boolean.valueOf(false)); + this.workerData = workerData; this.topologyContext = workerData.getContextMaker() 
.makeTopologyContext(workerData.getSysTopology(), taskId, openOrPrepareWasCalled); this.userContext = workerData.getContextMaker().makeTopologyContext( workerData.getRawTopology(), taskId, openOrPrepareWasCalled); - this.taskid = taskId; this.componentid = topologyContext.getThisComponentId(); @@ -101,17 +103,24 @@ public Task(WorkerData workerData, int taskId) throws Exception { this.workHalt = workerData.getWorkHalt(); this.zkCluster = new StormZkClusterState(workerData.getZkClusterstate()); - this.stormConf = Common.component_conf(workerData.getStormConf(), topologyContext, componentid); - // get real task object -- spout/bolt/spoutspec this.taskObj = Common.get_task_object(topologyContext.getRawTopology(), componentid, WorkerClassLoader.getInstance()); int samplerate = StormConfig.sampling_rate(stormConf); this.taskStats = new CommonStatsRolling(samplerate); + LOG.info("Loading task " + componentid + ":" + taskid); + } + + private void setComponentType() { + if (taskObj instanceof IBolt) { + componentType = "bolt"; + } else if (taskObj instanceof ISpout) { + componentType = "spout"; + } } private TaskSendTargets makeSendTargets() { @@ -161,10 +170,10 @@ public boolean isSingleThread(Map conf) { if (isOnePending == true) { return true; } - - return ConfigExtension.isSpoutSingleThread(conf) ; + + return ConfigExtension.isSpoutSingleThread(conf); } - + public RunnableCallback mk_executors(DisruptorQueue deserializeQueue, TaskSendTargets sendTargets, ITaskReportErr report_error) { @@ -185,9 +194,6 @@ public RunnableCallback mk_executors(DisruptorQueue deserializeQueue, taskStatus, topologyContext, userContext, taskStats, report_error); } - - - } return null; @@ -213,7 +219,7 @@ private RunnableCallback mkExecutor(DisruptorQueue deserializeQueue, return mk_executors(deserializeQueue, sendTargets, reportErrorDie); } - + public DisruptorQueue registerDisruptorQueue() { int queueSize = JStormUtils.parseInt( stormConf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE), 
256); @@ -223,14 +229,15 @@ public DisruptorQueue registerDisruptorQueue() { .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY)); DisruptorQueue queue = new DisruptorQueue( new SingleThreadedClaimStrategy(queueSize), waitStrategy); - + deserializeQueues.put(taskid, queue); - + return queue; } public TaskShutdownDameon execute() throws Exception { - + setComponentType(); + DisruptorQueue deserializeQueue = registerDisruptorQueue(); TaskSendTargets sendTargets = echoToSystemBolt(); @@ -240,11 +247,11 @@ public TaskShutdownDameon execute() throws Exception { RunnableCallback baseExecutor = mkExecutor(deserializeQueue, sendTargets); AsyncLoopThread executor_threads = new AsyncLoopThread(baseExecutor, false, Thread.MAX_PRIORITY, true); - + List allThreads = new ArrayList(); allThreads.add(executor_threads); - TaskHeartbeatRunable.registerTaskStats(taskid, taskStats); + TaskHeartbeatRunable.registerTaskStats(taskid, new TaskStats(componentType, taskStats)); LOG.info("Finished loading task " + componentid + ":" + taskid); return getShutdown(allThreads, deserializeQueue, baseExecutor); @@ -263,11 +270,10 @@ public TaskShutdownDameon getShutdown(List allThreads, } AsyncLoopThread recvThread = ((BaseExecutors) baseExecutor).getDeserlizeThread(); allThreads.add(recvThread); - - + AsyncLoopThread serializeThread = taskTransfer.getSerializeThread(); allThreads.add(serializeThread); - + TaskShutdownDameon shutdown = new TaskShutdownDameon(taskStatus, topologyid, taskid, allThreads, zkCluster, taskObj); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskInfo.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskInfo.java index 0cda42bf7..f410b3a7b 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskInfo.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskInfo.java @@ -12,9 +12,11 @@ public class TaskInfo implements Serializable { private static final long serialVersionUID = 5625165079055837777L; private String componentId; + 
private String componentType; - public TaskInfo(String componentId) { + public TaskInfo(String componentId, String componentType) { this.componentId = componentId; + this.componentType = componentType; } public String getComponentId() { @@ -25,11 +27,19 @@ public void setComponentId(String componentId) { this.componentId = componentId; } + public String getComponentType() { + return componentType; + } + + public void setComponentType(String componentType) { + this.componentType = componentType; + } + @Override public boolean equals(Object assignment) { if (assignment instanceof TaskInfo - && ((TaskInfo) assignment).getComponentId().equals( - getComponentId())) { + && ((TaskInfo) assignment).getComponentId().equals(getComponentId()) + && ((TaskInfo) assignment).getComponentType().equals(componentType)) { return true; } return false; @@ -37,7 +47,7 @@ public boolean equals(Object assignment) { @Override public int hashCode() { - return this.getComponentId().hashCode(); + return this.getComponentId().hashCode() + this.getComponentType().hashCode(); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskMetricInfo.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskMetricInfo.java new file mode 100644 index 000000000..797165b4f --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskMetricInfo.java @@ -0,0 +1,136 @@ +package com.alibaba.jstorm.task; + +import java.io.Serializable; +import java.util.Map; +import java.util.HashMap; + +import org.apache.log4j.Logger; + +import com.codahale.metrics.Metric; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Timer; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Snapshot; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.MetricInfo; +import com.alibaba.jstorm.metric.Metrics.QueueGauge; +import com.alibaba.jstorm.utils.JStormUtils; 
+import com.alibaba.jstorm.task.execute.spout.TimerRatio; + +/** + * /storm-zk-root/Monitor/{topologyid}/{taskid} data + */ +public class TaskMetricInfo implements Serializable { + public static Logger LOG = Logger.getLogger(TaskMetricInfo.class); + + private static final long serialVersionUID = 7645367099257857979L; + private String taskId; + private String component; + + private Map gaugeData; + private Map counterData; + private Map meterData; + private Map timerData; + private Map histogramData; + + private static final double FULL_RATIO = 100.0; + + public TaskMetricInfo(String taskId, String component) { + this.taskId = taskId; + this.component = component; + + this.gaugeData = new HashMap(); + this.counterData = new HashMap(); + this.meterData = new HashMap(); + this.timerData = new HashMap(); + this.histogramData = new HashMap(); + } + + public String getTaskId() { + return taskId; + } + + public void setTaskId(String taskId) { + this.taskId = taskId; + } + + public String getComponent() { + return this.component; + } + + public void setComponent(String component) { + this.component = component; + } + + public Map getGaugeData() { + return gaugeData; + } + + public Map getCounterData() { + return counterData; + } + + public Map getMeterData() { + return meterData; + } + + public Map getTimerData() { + return timerData; + } + + public Map getHistogramData() { + return histogramData; + } + + public void updateMetricData(MetricInfo metricInfo) { + String name = metricInfo.getName(); + Metric metric = metricInfo.getMetric(); + if (metric instanceof QueueGauge) { + //covert to % + float queueRatio = (((QueueGauge) metric).getValue())*100; + double value = JStormUtils.formatDoubleDecPoint2((double)queueRatio); + gaugeData.put(name, value); + } else if ( (metric instanceof Gauge) || + (metric instanceof TimerRatio)) { + Double value = JStormUtils.convertToDouble(((Gauge) metric).getValue()); + if (value == null) { + LOG.warn("gauge value is null or unknow type."); 
+ } else { + value = JStormUtils.formatDoubleDecPoint4(value); + gaugeData.put(name, value); + } + } else if (metric instanceof Timer) { + Snapshot snapshot = ((Timer) metric).getSnapshot(); + //Covert from ns to ms + Double value = JStormUtils.formatDoubleDecPoint4( + (snapshot.getMean())/1000000); + timerData.put(name, value); + } else if (metric instanceof Counter) { + Long value = ((Counter) metric).getCount(); + counterData.put(name, value.doubleValue()); + } else if (metric instanceof Meter) { + Double value = JStormUtils.formatDoubleDecPoint4( + ((Meter) metric).getMeanRate()); + meterData.put(name, value); + } else if (metric instanceof Histogram) { + Snapshot snapshot = ((Histogram) metric).getSnapshot(); + Double value = JStormUtils.formatDoubleDecPoint4( + snapshot.getMean()); + histogramData.put(name, value); + } else { + LOG.warn("Unknown metric type, name:" + name); + } + } + + public String anyQueueFull() { + String ret = null; + if (gaugeData.get(MetricDef.DESERIALIZE_QUEUE) == FULL_RATIO || + gaugeData.get(MetricDef.SERIALIZE_QUEUE) == FULL_RATIO || + gaugeData.get(MetricDef.EXECUTE_QUEUE) == FULL_RATIO) { + ret = component + "-" + taskId + ": queue is full"; + } + return ret; + } +} diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java index 6743c42ec..fa8bd49a0 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java @@ -12,6 +12,7 @@ import com.alibaba.jstorm.callback.AsyncLoopThread; import com.alibaba.jstorm.cluster.StormClusterState; +import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable; import com.alibaba.jstorm.utils.JStormUtils; /** @@ -31,6 +32,7 @@ public class TaskShutdownDameon implements ShutdownableDameon { private List all_threads; private StormClusterState zkCluster; private Object task_obj; + 
private boolean isClosed = false; public TaskShutdownDameon(TaskStatus taskStatus, String topology_id, Integer task_id, List all_threads, @@ -46,14 +48,19 @@ public TaskShutdownDameon(TaskStatus taskStatus, String topology_id, @Override public void shutdown() { + synchronized (this) { + if (isClosed == true) { + return ; + } + isClosed = true; + } + LOG.info("Begin to shut down task " + topology_id + ":" + task_id); // all thread will check the taskStatus // once it has been set SHUTDOWN, it will quit taskStatus.setStatus(TaskStatus.SHUTDOWN); - closeComponent(task_obj); - // waiting 100ms for executor thread shutting it's own try { Thread.sleep(100); @@ -65,15 +72,18 @@ public void shutdown() { thr.cleanup(); JStormUtils.sleepMs(10); thr.interrupt(); - try { - //thr.join(); - thr.getThread().stop(); - } catch (Exception e) { - } +// try { +// //thr.join(); +// thr.getThread().stop(new RuntimeException()); +// } catch (Throwable e) { +// } LOG.info("Successfully shutdown " + thr.getThread().getName()); } + + closeComponent(task_obj); try { + TaskHeartbeatRunable.unregisterTaskStats(task_id); zkCluster.remove_task_heartbeat(topology_id, task_id); zkCluster.disconnect(); } catch (Exception e) { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java index 2fd985329..cf9c21ca3 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java @@ -15,8 +15,9 @@ import com.alibaba.jstorm.callback.AsyncLoopThread; import com.alibaba.jstorm.callback.RunnableCallback; import com.alibaba.jstorm.daemon.worker.WorkerData; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import 
com.codahale.metrics.Timer; import com.lmax.disruptor.EventHandler; import com.lmax.disruptor.SingleThreadedClaimStrategy; @@ -64,8 +65,9 @@ public TaskTransfer(String taskName, this.serializeQueue = new DisruptorQueue( new SingleThreadedClaimStrategy(queue_size), waitStrategy); - Metrics.registerQueue(taskName + "-serialize-queue", serializeQueue); - timer = Metrics.registerTimer(taskName + "-serialize-timer"); + String taskId = taskName.substring(taskName.indexOf(":") + 1); + Metrics.registerQueue(taskName, MetricDef.SERIALIZE_QUEUE, serializeQueue, taskId, Metrics.MetricType.TASK); + timer = Metrics.registerTimer(taskName, MetricDef.SERIALIZE_TIME, taskId, Metrics.MetricType.TASK); serializeThread = new AsyncLoopThread(new TransferRunnable()); LOG.info("Successfully start TaskTransfer thread"); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java index 1678d202c..2975501fe 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java @@ -15,8 +15,9 @@ import com.alibaba.jstorm.callback.AsyncLoopThread; import com.alibaba.jstorm.callback.RunnableCallback; import com.alibaba.jstorm.client.ConfigExtension; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; +import com.alibaba.jstorm.metric.MetricDef; //import com.alibaba.jstorm.message.zeroMq.IRecvConnection; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskStatus; @@ -114,9 +115,9 @@ public BaseExecutors(TaskTransfer _transfer_fn, Map _storm_conf, deserializeThread = new AsyncLoopThread(new DeserializeRunnable( deserializeQueue, exeQueue)); - deserializeTimer = Metrics.registerTimer(idStr + 
"-deserialize-timer"); - Metrics.registerQueue(idStr + "-deserialize-queue" , deserializeQueue); - Metrics.registerQueue(idStr + "-exe-queue" , exeQueue); + deserializeTimer = Metrics.registerTimer(idStr, MetricDef.DESERIALIZE_TIME, String.valueOf(taskId), Metrics.MetricType.TASK); + Metrics.registerQueue(idStr, MetricDef.DESERIALIZE_QUEUE, deserializeQueue, String.valueOf(taskId), Metrics.MetricType.TASK); + Metrics.registerQueue(idStr, MetricDef.EXECUTE_QUEUE, exeQueue, String.valueOf(taskId), Metrics.MetricType.TASK); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java index 79ed6b8fe..356ee1b98 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java @@ -17,8 +17,9 @@ import backtype.storm.tuple.Tuple; import backtype.storm.tuple.TupleImplExt; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskTransfer; import com.alibaba.jstorm.task.acker.Acker; @@ -84,11 +85,10 @@ public BoltCollector(int message_timeout_secs, ITaskReportErr report_error, .get(Config.TOPOLOGY_ACKER_EXECUTORS)); String componentId = topologyContext.getThisComponentId(); - timer = Metrics.registerTimer(JStormServerUtils.getName(componentId, task_id) + "-emit-timer"); - + timer = Metrics.registerTimer(JStormServerUtils.getName(componentId, task_id), + MetricDef.EMIT_TIME, String.valueOf(task_id), Metrics.MetricType.TASK); random = new Random(); random.setSeed(System.currentTimeMillis()); - } @Override diff --git 
a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java index 427d5e7e8..1e7285496 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java @@ -15,8 +15,9 @@ import backtype.storm.utils.WorkerClassLoader; import com.alibaba.jstorm.daemon.worker.TimeTick; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskStatus; import com.alibaba.jstorm.task.TaskTransfer; @@ -87,7 +88,8 @@ public BoltExecutors(IBolt _bolt, TaskTransfer _transfer_fn, outputCollector = new OutputCollector(output_collector); - boltExeTimer = Metrics.registerTimer(idStr + "-exe-timer"); + boltExeTimer = Metrics.registerTimer(idStr, MetricDef.EXECUTE_TIME, + String.valueOf(taskId), Metrics.MetricType.TASK); TimeTick.registerTimer(idStr + "-sampling-tick", exeQueue); try { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java index ad448d05f..2298da689 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java @@ -15,8 +15,9 @@ import com.alibaba.jstorm.callback.AsyncLoopThread; import com.alibaba.jstorm.callback.RunnableCallback; import com.alibaba.jstorm.client.ConfigExtension; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import 
com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskStatus; import com.alibaba.jstorm.task.TaskTransfer; @@ -54,14 +55,14 @@ public MultipleThreadSpoutExecutors(backtype.storm.spout.ISpout _spout, ackerRunnableThread = new AsyncLoopThread(new AckerRunnable()); pending = new RotatingMap(Acker.TIMEOUT_BUCKET_NUM, null, false); - Metrics.register(idStr + "-pending-map-gauge", new Gauge() { + Metrics.register(idStr, MetricDef.PENDING_MAP, new Gauge() { @Override public Integer getValue() { return pending.size(); } - }); + }, String.valueOf(taskId), Metrics.MetricType.TASK); super.prepare(sendTargets, _transfer_fn, topology_context); } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java index 27bdf7b70..5248fa3de 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java @@ -8,7 +8,8 @@ import backtype.storm.utils.DisruptorQueue; import backtype.storm.utils.WorkerClassLoader; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskStatus; import com.alibaba.jstorm.task.TaskTransfer; @@ -44,14 +45,14 @@ public SingleThreadSpoutExecutors(backtype.storm.spout.ISpout _spout, // sending Tuple's TimeCacheMap pending = new RotatingMap(Acker.TIMEOUT_BUCKET_NUM, null, true); - Metrics.register(idStr + "-pending-map-gauge", new Gauge() { + Metrics.register(idStr, 
MetricDef.PENDING_MAP, new Gauge() { @Override public Integer getValue() { return pending.size(); } - }); + }, String.valueOf(taskId), Metrics.MetricType.TASK); super.prepare(sendTargets, _transfer_fn, topology_context); } diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java index 65730d335..f70d0eea9 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java @@ -14,8 +14,9 @@ import backtype.storm.tuple.TupleImplExt; import backtype.storm.utils.DisruptorQueue; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskTransfer; import com.alibaba.jstorm.task.acker.Acker; @@ -84,8 +85,8 @@ public SpoutCollector(Integer task_id, backtype.storm.spout.ISpout spout, random.setSeed(System.currentTimeMillis()); String componentId = topology_context.getThisComponentId(); - emitTotalTimer = Metrics.registerTimer(JStormServerUtils.getName( - componentId, task_id) + "-emit-timer"); + emitTotalTimer = Metrics.registerTimer(JStormServerUtils.getName(componentId, task_id), + MetricDef.EMIT_TIME, String.valueOf(task_id), Metrics.MetricType.TASK); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java index a53384cf1..f03d79a67 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java +++ 
b/jstorm-server/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java @@ -15,8 +15,9 @@ import com.alibaba.jstorm.callback.AsyncLoopThread; import com.alibaba.jstorm.client.ConfigExtension; import com.alibaba.jstorm.daemon.worker.TimeTick; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.alibaba.jstorm.stats.CommonStatsRolling; import com.alibaba.jstorm.task.TaskStatus; import com.alibaba.jstorm.task.TaskTransfer; @@ -71,10 +72,13 @@ public SpoutExecutors(backtype.storm.spout.ISpout _spout, this.max_spout_pending = JStormUtils.parseInt(storm_conf .get(Config.TOPOLOGY_MAX_SPOUT_PENDING)); - this.nextTupleTimer = Metrics.registerTimer(idStr + "-nextTuple-timer"); - this.ackerTimer = Metrics.registerTimer(idStr + "-acker-timer"); + this.nextTupleTimer = Metrics.registerTimer(idStr, MetricDef.EXECUTE_TIME, + String.valueOf(taskId), Metrics.MetricType.TASK); + this.ackerTimer = Metrics.registerTimer(idStr, MetricDef.ACKER_TIME, + String.valueOf(taskId), Metrics.MetricType.TASK); this.emptyCpuCounter = new TimerRatio(); - Metrics.register(idStr + "-empty-cputime-ratio", emptyCpuCounter); + Metrics.register(idStr, MetricDef.EMPTY_CPU_RATIO, emptyCpuCounter, + String.valueOf(taskId), Metrics.MetricType.TASK); TimeTick.registerTimer(idStr+ "-acker-tick", exeQueue); diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java index 90adae1e6..e9189e110 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java @@ -20,11 +20,13 @@ public class TaskHeartbeat implements Serializable { private Integer uptimeSecs; 
private CommonStatsData stats; // BoltTaskStats or // SpoutTaskStats + private String componentType; - public TaskHeartbeat(int timeSecs, int uptimeSecs, CommonStatsData stats) { + public TaskHeartbeat(int timeSecs, int uptimeSecs, CommonStatsData stats, String componentType) { this.timeSecs = timeSecs; this.uptimeSecs = uptimeSecs; this.stats = stats; + this.componentType = componentType; } public int getTimeSecs() { @@ -56,6 +58,14 @@ public CommonStatsData getStats() { public void setStats(CommonStatsData stats) { this.stats = stats; } + + public void setComponentType(String componentType) { + this.componentType = componentType; + } + + public String getComponentType() { + return componentType; + } @Override public boolean equals(Object hb) { diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java index 5a3309fb2..f6cc4bb78 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java @@ -36,12 +36,16 @@ public class TaskHeartbeatRunable extends RunnableCallback { private AtomicBoolean active; - private static Map taskStatsMap = - new HashMap(); + private static Map taskStatsMap = + new HashMap(); - public static void registerTaskStats(int taskId, CommonStatsRolling taskStats) { + public static void registerTaskStats(int taskId, TaskStats taskStats) { taskStatsMap.put(taskId, taskStats); } + + public static void unregisterTaskStats(int taskId) { + taskStatsMap.remove(taskId); + } public TaskHeartbeatRunable(WorkerData workerData) { // StormClusterState zkCluster, String _topology_id, @@ -66,15 +70,15 @@ public TaskHeartbeatRunable(WorkerData workerData) { public void run() { Integer currtime = TimeUtils.current_time_secs(); - for (Entry entry : taskStatsMap.entrySet()) { + for (Entry entry : 
taskStatsMap.entrySet()) { Integer taskId = entry.getKey(); - CommonStatsRolling taskStats = entry.getValue(); + CommonStatsRolling taskStats = entry.getValue().getTaskStat(); String idStr = " " + topology_id + ":" + taskId + " "; try { TaskHeartbeat hb = new TaskHeartbeat(currtime, uptime.uptime(), - taskStats.render_stats()); + taskStats.render_stats(), entry.getValue().getComponentType()); zkCluster.task_heartbeat(topology_id, taskId, hb); } catch (Exception e) { // TODO Auto-generated catch block @@ -85,7 +89,7 @@ public void run() { } } - LOG.info("update all task hearbeat ts " + currtime); + LOG.info("update all task hearbeat ts " + currtime + "," + taskStatsMap.keySet()); } @Override diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskStats.java b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskStats.java new file mode 100644 index 000000000..c8efb7462 --- /dev/null +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskStats.java @@ -0,0 +1,21 @@ +package com.alibaba.jstorm.task.heartbeat; + +import com.alibaba.jstorm.stats.CommonStatsRolling; + +public class TaskStats { + private CommonStatsRolling taskStats; + private String componentType; + + public TaskStats(String componentType, CommonStatsRolling taskStats) { + this.componentType = componentType; + this.taskStats = taskStats; + } + + public CommonStatsRolling getTaskStat() { + return taskStats; + } + + public String getComponentType() { + return componentType; + } +} \ No newline at end of file diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java b/jstorm-server/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java index 719a8216a..185b1ff90 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java @@ -7,8 +7,8 @@ import backtype.storm.utils.DisruptorQueue; import 
com.alibaba.jstorm.callback.RunnableCallback; -import com.alibaba.jstorm.daemon.worker.metrics.JStormTimer; -import com.alibaba.jstorm.daemon.worker.metrics.Metrics; +import com.alibaba.jstorm.metric.JStormTimer; +import com.alibaba.jstorm.metric.Metrics; import com.codahale.metrics.Timer; import com.lmax.disruptor.EventHandler; @@ -30,13 +30,12 @@ public abstract class DisruptorRunable extends RunnableCallback implements Event protected JStormTimer timer; - public DisruptorRunable(DisruptorQueue queue,String idStr, - AtomicBoolean active) { - this.queue = queue; - this.idStr = idStr; + public DisruptorRunable(DisruptorQueue queue, JStormTimer timer, String idStr, + AtomicBoolean active) { + this.queue = queue; + this.timer = timer; + this.idStr = idStr; this.active = active; - this.timer = Metrics.registerTimer(idStr + "-timer"); - Metrics.registerQueue(idStr + "-queue", queue); } public abstract void handleEvent(Object event, boolean endOfBatch) throws Exception; diff --git a/jstorm-server/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java b/jstorm-server/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java index f4b24fffe..6e30ac8a3 100644 --- a/jstorm-server/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java +++ b/jstorm-server/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java @@ -13,6 +13,7 @@ import backtype.storm.GenericOptionsParser; import backtype.storm.utils.Utils; +import com.alibaba.jstorm.client.ConfigExtension; import com.alibaba.jstorm.cluster.StormConfig; /** @@ -114,5 +115,18 @@ public static boolean isOnePending(Map conf) { public static String getName(String componentId, int taskId) { return componentId + ":" + taskId; } + + public static String getHostName(Map conf) { + String hostName = ConfigExtension.getSupervisorHost(conf); + if (hostName == null) { + hostName = NetWorkUtils.hostname(); + } + + if (ConfigExtension.isSupervisorUseIp(conf)) { + hostName = NetWorkUtils.ip(); + } + + return hostName; + 
} }; diff --git a/jstorm-server/src/main/resources/defaults.yaml b/jstorm-server/src/main/resources/defaults.yaml index 439db3da9..bc6827e17 100644 --- a/jstorm-server/src/main/resources/defaults.yaml +++ b/jstorm-server/src/main/resources/defaults.yaml @@ -94,12 +94,17 @@ worker.gc.childopts: " -XX:SurvivorRatio=4 -XX:MaxTenuringThreshold=20 -XX:+UseC worker.heartbeat.frequency.secs: 2 worker.classpath: "" worker.redirect.output: true +# if worker.redirect.output.file is null, then it will be $LOG.out +# please use absolute path +worker.redirect.output.file: null # when supervisor is shutdown, automatically shutdown worker worker.stop.without.supervisor: false worker.memory.size: 2147483648 task.heartbeat.frequency.secs: 10 task.refresh.poll.secs: 10 +# how long task do cleanup +task.cleanup.timeout.sec: 10 zmq.threads: 1 zmq.linger.millis: 5000 @@ -161,7 +166,8 @@ topology.max.error.report.per.interval: 5 topology.kryo.factory: "backtype.storm.serialization.DefaultKryoFactory" topology.tuple.serializer: "backtype.storm.serialization.types.ListDelegateSerializer" topology.trident.batch.emit.interval.millis: 500 -topology.performance.metrics: false +topology.performance.metrics: true +topology.alimonitor.metrics.post: true # enable topology use user-define classloader to avoid class conflict diff --git a/jstorm-ui/pom.xml b/jstorm-ui/pom.xml index d9e3bce6d..6f4ad66eb 100644 --- a/jstorm-ui/pom.xml +++ b/jstorm-ui/pom.xml @@ -4,7 +4,7 @@ com.alibaba.jstorm jstorm-all - 0.9.5.1 + 0.9.6 .. 
diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/UIUtils.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/UIUtils.java index 9a61c3b17..f11b96dd8 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/UIUtils.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/UIUtils.java @@ -306,7 +306,13 @@ public static String mostRecentError(List summarys) { for (int j = 0; j < einfoSize; j++) { ErrorInfo einfo = einfos.get(j); long current = System.currentTimeMillis() / 1000; - if (current - einfo.get_error_time_secs() < maxErrortime) { + + //shorten the most recent time for "queue is full" error + int maxTime = maxErrortime; + if (einfo.get_error().indexOf("queue is full") != -1) + maxTime = maxErrortime / 10; + + if (current - einfo.get_error_time_secs() < maxTime) { map.put(new Integer(einfo.get_error_time_secs()), einfo.get_error()); } @@ -338,7 +344,13 @@ public static String getTaskError(List errList) { for (ErrorInfo einfo : errList) { long current = System.currentTimeMillis() / 1000; - if (current - einfo.get_error_time_secs() < maxErrortime) { + + //shorten the most recent time for "queue is full" error + int maxTime = maxErrortime; + if (einfo.get_error().indexOf("queue is full") != -1) + maxTime = maxErrortime / 10; + + if (current - einfo.get_error_time_secs() < maxTime) { map.put(new Integer(einfo.get_error_time_secs()), einfo.get_error()); } @@ -379,6 +391,8 @@ public static List topologySummary(List ts) { topologySumm.setNumWorkers(String.valueOf(t.get_num_workers())); topologySumm.setNumTasks(String.valueOf(t.get_num_tasks())); + + topologySumm.setErrorInfo(t.get_error_info()); tsumm.add(topologySumm); } } @@ -546,6 +560,11 @@ public static Map readUiConfig() { } return ret; } + + public static double getDoubleValue(Double value) { + double ret = (value != null ? 
value.doubleValue() : 0.0); + return ret; + } public static void main(String[] args) { } diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TaskMetrics.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TaskMetrics.java new file mode 100644 index 000000000..be8d018aa --- /dev/null +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TaskMetrics.java @@ -0,0 +1,146 @@ +package com.alibaba.jstorm.ui.model; + +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.ui.UIUtils; + +import backtype.storm.generated.*; + +public class TaskMetrics { + int taskId; + String componentId; + double deserializeQueue; + double deserializeTime; + double executorQueue; + double executorTime; + double serializeQueue; + double serializeTime; + double ackerTime; + double emptyCpuRatio; + double pendingNum; + double emitTime; + + public TaskMetrics() { + } + + public int getTaskId() { + return taskId; + } + + public void setTaskId(int taskId) { + this.taskId = taskId; + } + + public String getComponentId() { + return componentId; + } + + public void setComponentId(String componentId) { + this.componentId = componentId; + } + + public double getDeserializeQueue() { + return deserializeQueue; + } + + public void setDeserializeQueue(double value) { + this.deserializeQueue = value; + } + + public double getDeserializeTime() { + return deserializeTime; + } + + public void setDeserializeTime(double value) { + this.deserializeTime = value; + } + + public double getExecutorQueue() { + return executorQueue; + } + + public void setExecutorQueue(double value) { + this.executorQueue = value; + } + + public double getExecutorTime() { + return executorTime; + } + + public void setExecutorTime(double value) { + this.executorTime = value; + } + + public double getSerializeQueue() { + return serializeQueue; + } + + public void setSerializeQueue(double value) { + this.serializeQueue = value; + } + + public double getSerializeTime() { + return serializeTime; + } + 
+ public void setSerializeTime(double value) { + this.serializeTime = value; + } + + public double getAckerTime() { + return ackerTime; + } + + public void setAckerTime(double value) { + this.ackerTime = value; + } + + public double getEmptyCpuRatio() { + return emptyCpuRatio; + } + + public void setEmptyCpuRatio(double value) { + this.emptyCpuRatio = value; + } + + public double getPendingNum() { + return pendingNum; + } + + public void setPendingNum(double value) { + this.pendingNum = value; + } + + public double getEmitTime() { + return emitTime; + } + + public void setEmitTime(double value) { + this.emitTime = value; + } + + public void updateTaskMetricData(TaskMetricData metricData) { + taskId = metricData.get_task_id(); + componentId = metricData.get_component_id(); + deserializeQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.DESERIALIZE_QUEUE)); + deserializeTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.DESERIALIZE_TIME)); + executorQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.EXECUTE_QUEUE)); + executorTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.EXECUTE_TIME)); + serializeQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.SERIALIZE_QUEUE)); + serializeTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.SERIALIZE_TIME)); + ackerTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.ACKER_TIME)); + emitTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.EMIT_TIME)); + emptyCpuRatio = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.EMPTY_CPU_RATIO)); + pendingNum = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.PENDING_MAP)); + } + +} \ No newline at end of file diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TopologySumm.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TopologySumm.java index e9c52cba6..6b57c18d0 100644 --- 
a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TopologySumm.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/TopologySumm.java @@ -17,6 +17,7 @@ public class TopologySumm implements Serializable { private String uptime; private String numWorkers; private String numTasks; + private String errorInfo; public String getTopologyName() { return topologyName; @@ -65,5 +66,13 @@ public String getNumTasks() { public void setNumTasks(String numTasks) { this.numTasks = numTasks; } + + public String getErrorInfo() { + return this.errorInfo; + } + + public void setErrorInfo(String errorInfo) { + this.errorInfo = errorInfo; + } } diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerMetrics.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerMetrics.java new file mode 100644 index 000000000..80a5267dd --- /dev/null +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerMetrics.java @@ -0,0 +1,179 @@ +package com.alibaba.jstorm.ui.model; + +import org.apache.log4j.Logger; + +import com.alibaba.jstorm.metric.MetricDef; +import com.alibaba.jstorm.ui.UIUtils; +import com.alibaba.jstorm.ui.model.data.SpoutPage; +import com.alibaba.jstorm.utils.JStormUtils; + +import backtype.storm.generated.*; + +public class WorkerMetrics { + private static final Logger LOG = Logger.getLogger(WorkerMetrics.class); + + String hostName; + int port; + double usedCpu; + double usedMem; + double nettyservDecodeTime; + double nettyservMsgTransTime; + double dispatchTime; + double dispatchQueue; + double batchTupleTime; + double batchTupleQueue; + double nettycliSendTime; + double nettycliBatchSize; + double nettycliSendPending; + double nettycliSyncBatchQueue; + double nettycliSyncDisrpQueue; + + public WorkerMetrics() { + } + + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + 
this.port = port; + } + + public double getNettyservDecodeTime() { + return nettyservDecodeTime; + } + + public void setNettyservDecodeTime(double value) { + this.nettyservDecodeTime = value; + } + + public double getNettyservMsgTransTime() { + return nettyservMsgTransTime; + } + + public void setNettyservMsgTransTime(double value) { + this.nettyservMsgTransTime = value; + } + + public double getDispatchTime() { + return dispatchTime; + } + + public void setDispatchTime(double value) { + this.dispatchTime = value; + } + + public double getDispatchQueue() { + return dispatchQueue; + } + + public void setDispatchQueue(double value) { + this.dispatchQueue = value; + } + + public double getBatchTupleTime() { + return batchTupleTime; + } + + public void setBatchTupleTime(double value) { + this.batchTupleTime = value; + } + + public double getBatchTupleQueue() { + return batchTupleQueue; + } + + public void setBatchTupleQueue(double value) { + this.batchTupleQueue = value; + } + + public double getNettycliSendTime() { + return nettycliSendTime; + } + + public void setNettycliSendTime(double value) { + this.nettycliSendTime = value; + } + + public double getNettycliBatchSize() { + return nettycliBatchSize; + } + + public void setNettycliBatchSize(double value) { + this.nettycliBatchSize = value; + } + + public double getNettycliSendPending() { + return nettycliSendPending; + } + + public void setNettycliSendPending(double value) { + this.nettycliSendPending = value; + } + + public double getNettycliSyncBatchQueue() { + return nettycliSyncBatchQueue; + } + + public void setNettycliSyncBatchQueue(double value) { + this.nettycliSyncBatchQueue = value; + } + + public double getNettycliSyncDisrpQueue() { + return nettycliSyncDisrpQueue; + } + + public void setNettycliSyncDisrpQueue(double value) { + this.nettycliSyncDisrpQueue = value; + } + + public double getUsedCpu() { + return usedCpu; + } + + public double getusedMem() { + return usedMem; + } + + public void 
updateWorkerMetricData(WorkerMetricData metricData) { + hostName = metricData.get_hostname(); + port = metricData.get_port(); + + usedCpu = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.CPU_USED_RATIO)); + usedMem = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.MEMORY_USED)); + usedMem = JStormUtils.formatDoubleDecPoint2(usedMem/(1024*1204)); + + batchTupleQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.BATCH_TUPLE_QUEUE)); + batchTupleTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.BATCH_TUPLE_TIME)); + dispatchQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.DISPATCH_QUEUE)); + dispatchTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.DISPATCH_TIME)); + nettycliBatchSize = UIUtils.getDoubleValue( + metricData.get_histogram().get(MetricDef.NETTY_CLI_BATCH_SIZE)); + nettycliSendTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.NETTY_CLI_SEND_TIME)); + nettyservDecodeTime = UIUtils.getDoubleValue( + metricData.get_timer().get(MetricDef.NETTY_SERV_DECODE_TIME)); + nettyservMsgTransTime = UIUtils.getDoubleValue( + metricData.get_histogram().get(MetricDef.NETWORK_MSG_TRANS_TIME)); + nettycliSendPending = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.NETTY_CLI_SEND_PENDING)); + nettycliSyncBatchQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE)); + nettycliSyncDisrpQueue = UIUtils.getDoubleValue( + metricData.get_gauge().get(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE)); + } +} \ No newline at end of file diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerSumm.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerSumm.java index 7e743c754..c8b2f750c 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerSumm.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/WorkerSumm.java @@ -1,6 +1,7 @@ package 
com.alibaba.jstorm.ui.model; import java.io.Serializable; +import java.util.List; import backtype.storm.generated.TaskSummary; import backtype.storm.generated.WorkerSummary; @@ -21,6 +22,10 @@ public class WorkerSumm implements Serializable { private String uptime; private String tasks; private String components; + private String cpuNum; + private String memNum; + private String disks; + private List taskSummList; public WorkerSumm() { } @@ -34,7 +39,8 @@ public WorkerSumm(WorkerSummary workerSummary) { boolean isFirst = true; int minUptime = 0; - for (TaskSummary taskSummary : workerSummary.get_tasks()) { + taskSummList = workerSummary.get_tasks(); + for (TaskSummary taskSummary : taskSummList) { if (isFirst == false) { taskSB.append(','); componentSB.append(','); @@ -98,4 +104,11 @@ public void setComponents(String components) { this.components = components; } + public List gettaskSummList() { + return taskSummList; + } + + public void settaskSummList(List taskSummList) { + this.taskSummList = taskSummList; + } } diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/BoltPage.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/BoltPage.java index 6b87b35bb..904bcfe6e 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/BoltPage.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/BoltPage.java @@ -21,9 +21,11 @@ import backtype.storm.generated.GlobalStreamId; import backtype.storm.generated.NotAliveException; import backtype.storm.generated.StormTopology; +import backtype.storm.generated.TaskMetricData; import backtype.storm.generated.TaskStats; import backtype.storm.generated.TaskSummary; import backtype.storm.generated.TopologyInfo; +import backtype.storm.generated.TopologyMetricInfo; import backtype.storm.utils.NimbusClient; import com.alibaba.jstorm.common.stats.StatBuckets; @@ -34,6 +36,7 @@ import com.alibaba.jstorm.ui.model.ComponentSummary; import com.alibaba.jstorm.ui.model.ComponentTask; import 
com.alibaba.jstorm.ui.model.WinComponentStats; +import com.alibaba.jstorm.ui.model.TaskMetrics; import com.alibaba.jstorm.utils.JStormUtils; /** @@ -51,11 +54,13 @@ public class BoltPage implements Serializable { private String topologyid = null; private String window = null; private String componentid = null; + private String topologyName = null; private List coms = null; private List comstats = null; private List coos = null; private List cois = null; private List cts = null; + private List taskmetrics = null; public BoltPage() throws TException, NotAliveException { FacesContext ctx = FacesContext.getCurrentInstance(); @@ -235,6 +240,19 @@ private void getInputOutputSummary(List taskSummaries, return; } + public List getTaskMetricsList(List totalTskMetrList) { + if (totalTskMetrList == null) return null; + List ret = new ArrayList(); + LOG.debug("get task metrics list: component ID: " + this.componentid); + for (TaskMetricData taskMetricData : totalTskMetrList) { + if ((taskMetricData.get_component_id()).equals(this.componentid)) { + TaskMetrics taskMetircs = new TaskMetrics(); + taskMetircs.updateTaskMetricData(taskMetricData); + ret.add(taskMetircs); + } + } + return ret; + } @SuppressWarnings("rawtypes") private void init() throws TException, NotAliveException { @@ -247,11 +265,14 @@ private void init() throws TException, NotAliveException { TopologyInfo summ = client.getClient().getTopologyInfo(topologyid); StormTopology topology = client.getClient().getTopology(topologyid); + TopologyMetricInfo topologyMetricInfo = client.getClient().getTopologyMetric(topologyid); String type = UIUtils.componentType(topology, componentid); List ts = UIUtils.getTaskList(summ.get_tasks(), componentid); + + topologyName = summ.get_name(); coms = getComponentSummaries(summ, ts); @@ -260,6 +281,8 @@ private void init() throws TException, NotAliveException { comstats = getWinComponentStats(ts, window); getInputOutputSummary(ts, window); + List totoaltaskmetrics = 
topologyMetricInfo.get_task_metric_list(); + taskmetrics = getTaskMetricsList(totoaltaskmetrics); } catch (TException e) { LOG.error(e.getCause(), e); @@ -314,6 +337,22 @@ public List getComs() { public void setComs(List coms) { this.coms = coms; } + + public List gettaskmetrics() { + return this.taskmetrics; + } + + public void settaskmetrics(List taskmetrs) { + this.taskmetrics = taskmetrs; + } + + public String getTopologyName() { + return topologyName; + } + + public void setTopologyName(String topologyName) { + this.topologyName = topologyName; + } public static void main(String[] args) { try { diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/LogPage.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/LogPage.java index 4ece34d35..a07756681 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/LogPage.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/LogPage.java @@ -193,8 +193,10 @@ private void generateLogFileName() throws Exception { host = componentTask.getHost(); - logFileName = componentTask.getTopologyid() + "-worker-" - + componentTask.getPort() + ".log"; +// logFileName = componentTask.getTopologyid() + "-worker-" +// + componentTask.getPort() + ".log"; + logFileName = JStormUtils.genLogName(summ.get_name(), + Integer.valueOf(componentTask.getPort())); } catch (TException e) { LOG.error(e.getCause(), e); diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SpoutPage.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SpoutPage.java index 14d1dc109..018a777a9 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SpoutPage.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SpoutPage.java @@ -24,6 +24,8 @@ import backtype.storm.generated.TaskStats; import backtype.storm.generated.TaskSummary; import backtype.storm.generated.TopologyInfo; +import backtype.storm.generated.TopologyMetricInfo; +import 
backtype.storm.generated.TaskMetricData; import backtype.storm.utils.NimbusClient; import com.alibaba.jstorm.common.stats.StatBuckets; @@ -33,6 +35,7 @@ import com.alibaba.jstorm.ui.model.ComponentTask; import com.alibaba.jstorm.ui.model.SpoutOutput; import com.alibaba.jstorm.ui.model.WinComponentStats; +import com.alibaba.jstorm.ui.model.TaskMetrics; import com.alibaba.jstorm.utils.JStormUtils; /** @@ -54,6 +57,7 @@ public class SpoutPage implements Serializable { private List comstats = null; private List coos = null; private List cts = null; + private List taskmetrics = null; public SpoutPage() throws TException, NotAliveException { FacesContext ctx = FacesContext.getCurrentInstance(); @@ -238,6 +242,19 @@ private void getOutputSummary(List taskSummaries, String window) { return; } + public List getTaskMetricsList(List totalTskMetrList) { + if (totalTskMetrList == null) return null; + List ret = new ArrayList(); + LOG.debug("get task metrics list: component ID: " + this.componentid); + for (TaskMetricData taskMetricData : totalTskMetrList) { + if ((taskMetricData.get_component_id()).equals(this.componentid)) { + TaskMetrics taskMetircs = new TaskMetrics(); + taskMetircs.updateTaskMetricData(taskMetricData); + ret.add(taskMetircs); + } + } + return ret; + } @SuppressWarnings("rawtypes") private void init() throws TException, NotAliveException { @@ -250,6 +267,7 @@ private void init() throws TException, NotAliveException { TopologyInfo summ = client.getClient().getTopologyInfo(topologyid); StormTopology topology = client.getClient().getTopology(topologyid); + TopologyMetricInfo topologyMetricInfo = client.getClient().getTopologyMetric(topologyid); String type = UIUtils.componentType(topology, componentid); @@ -263,6 +281,8 @@ private void init() throws TException, NotAliveException { comstats = getWinComponentStats(ts, window); getOutputSummary(ts, window); + List totoaltaskmetrics = topologyMetricInfo.get_task_metric_list(); + taskmetrics = 
getTaskMetricsList(totoaltaskmetrics); } catch (TException e) { LOG.error(e.getCause(), e); @@ -333,6 +353,14 @@ public List getCoos() { public void setCoos(List coos) { this.coos = coos; } + + public List gettaskmetrics() { + return this.taskmetrics; + } + + public void settaskmetrics(List taskmetrs) { + this.taskmetrics = taskmetrs; + } public static void main(String[] args) { try { diff --git a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SupervisorPage.java b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SupervisorPage.java index 9ce34a704..ba14ec65b 100644 --- a/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SupervisorPage.java +++ b/jstorm-ui/src/main/java/com/alibaba/jstorm/ui/model/data/SupervisorPage.java @@ -12,12 +12,15 @@ import org.apache.log4j.Logger; import backtype.storm.generated.SupervisorWorkers; +import backtype.storm.generated.TopologyMetricInfo; import backtype.storm.generated.WorkerSummary; +import backtype.storm.generated.WorkerMetricData; import backtype.storm.utils.NimbusClient; import com.alibaba.jstorm.ui.UIUtils; import com.alibaba.jstorm.ui.model.SupervisorSumm; import com.alibaba.jstorm.ui.model.WorkerSumm; +import com.alibaba.jstorm.ui.model.WorkerMetrics; /** * @@ -32,9 +35,13 @@ public class SupervisorPage implements Serializable { private static final Logger LOG = Logger.getLogger(SupervisorPage.class); private String host = "localhost"; + private String ip = null; private List ssumm = null; private List wsumm = null; + private List topologyList = null; + private List topologyMetricsList = null; + private List workermetrics = null; public SupervisorPage() throws Exception { FacesContext ctx = FacesContext.getCurrentInstance(); @@ -57,8 +64,13 @@ public void init(String host) throws Exception { SupervisorWorkers supervisorWorkers = client.getClient() .getSupervisorWorkers(host); ssumm = new ArrayList(); - ssumm.add(new SupervisorSumm(supervisorWorkers.get_supervisor())); + SupervisorSumm supervSumm 
= new SupervisorSumm(supervisorWorkers.get_supervisor()); + ssumm.add(supervSumm); + ip = supervSumm.getIp(); generateWorkerSum(supervisorWorkers.get_workers()); + getTopoList(); + getTopoMetrList(client); + getWorkerMetrData(); } catch (Exception e) { LOG.error("Failed to get cluster information:", e); @@ -93,6 +105,57 @@ public List getWsumm() { public void setWsumm(List wsumm) { this.wsumm = wsumm; } + public void setworkermetrics(List wrkMetrList) { + this.workermetrics = wrkMetrList; + } + public List getworkermetrics(){ + return this.workermetrics; + } + public void getTopoList() { + if (topologyList == null) { + topologyList = new ArrayList(); + } + if (wsumm == null) return; + for(WorkerSumm workerSumm : wsumm) { + String topologyId = workerSumm.getTopology(); + if (!(topologyList.contains(topologyId))) { + topologyList.add(topologyId); + } + } + } + public void getTopoMetrList(NimbusClient client) throws Exception { + if (topologyList == null) return; + if (topologyMetricsList == null) { + topologyMetricsList = new ArrayList(); + } + for (String topologyId : topologyList) { + try { + TopologyMetricInfo topoMetrInfo = client.getClient().getTopologyMetric(topologyId); + topologyMetricsList.add(topoMetrInfo); + } catch (Exception e) { + LOG.error("Failed to get topology metrics information:", e); + throw e; + } + } + } + public void getWorkerMetrData() { + if (topologyMetricsList == null) return; + if (workermetrics == null) { + workermetrics = new ArrayList(); + } + for (TopologyMetricInfo topoMetr : topologyMetricsList) { + List wrkMetrLstFromTopo = topoMetr.get_worker_metric_list(); + if (wrkMetrLstFromTopo == null) return; + for (WorkerMetricData wrkMetrData : wrkMetrLstFromTopo) { + if (wrkMetrData.get_hostname().equals(host) || + wrkMetrData.get_hostname().equals(ip)) { + WorkerMetrics workerMetrics = new WorkerMetrics(); + workerMetrics.updateWorkerMetricData(wrkMetrData); + workermetrics.add(workerMetrics); + } + } + } + } public static void 
main(String[] args) { try { diff --git a/jstorm-ui/src/main/webapp/bolt.xhtml b/jstorm-ui/src/main/webapp/bolt.xhtml index fffe23955..511e2ce8d 100644 --- a/jstorm-ui/src/main/webapp/bolt.xhtml +++ b/jstorm-ui/src/main/webapp/bolt.xhtml @@ -234,7 +234,11 @@ + + + + @@ -305,6 +309,100 @@ + + +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/jstorm-ui/src/main/webapp/cluster.xhtml b/jstorm-ui/src/main/webapp/cluster.xhtml index 4183a4a90..72dc891de 100644 --- a/jstorm-ui/src/main/webapp/cluster.xhtml +++ b/jstorm-ui/src/main/webapp/cluster.xhtml @@ -41,11 +41,29 @@ + + + + + + + + + + + + + + + + + + - + @@ -95,41 +113,6 @@ -

Resource Summary

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Topology Summary

+ + + + + + +

Supervisor Summary

@@ -254,4 +244,4 @@ - \ No newline at end of file + diff --git a/jstorm-ui/src/main/webapp/spout.xhtml b/jstorm-ui/src/main/webapp/spout.xhtml index c0cddc72e..acccaf0ff 100644 --- a/jstorm-ui/src/main/webapp/spout.xhtml +++ b/jstorm-ui/src/main/webapp/spout.xhtml @@ -203,7 +203,11 @@ + + + + @@ -274,6 +278,100 @@ + + +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/jstorm-ui/src/main/webapp/supervisor.xhtml b/jstorm-ui/src/main/webapp/supervisor.xhtml index 0d736b700..2994136c9 100644 --- a/jstorm-ui/src/main/webapp/supervisor.xhtml +++ b/jstorm-ui/src/main/webapp/supervisor.xhtml @@ -26,6 +26,13 @@ + + + + + + + @@ -62,8 +69,8 @@

Used Worker Summary

- + @@ -86,20 +93,133 @@ + + + + + + + + + + + + + + + + + + +

Worker Metrics

+ + + + + + + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - \ No newline at end of file + diff --git a/other/storm.thrift b/other/storm.thrift index ef52fbc88..d5c24fc61 100644 --- a/other/storm.thrift +++ b/other/storm.thrift @@ -120,6 +120,7 @@ struct TopologySummary { 4: required i32 uptime_secs; 5: required i32 num_tasks; 6: required i32 num_workers; + 7: required string error_info; } struct SupervisorSummary { @@ -183,7 +184,7 @@ struct TaskSummary { 5: required i32 uptime_secs; 6: required list errors; 7: optional TaskStats stats; - + 8: optional string component_type; } struct WorkerSummary { @@ -223,6 +224,33 @@ struct SubmitOptions { 1: required TopologyInitialStatus initial_status; } +struct MonitorOptions { + 1: optional bool isEnable; +} +struct TaskMetricData { + 1: required i32 task_id; + 2: required string component_id; + 3: required map gauge; + 4: required map counter; + 5: required map meter; + 6: required map timer; + 7: required map histogram; +} +struct WorkerMetricData { + 1: required string hostname; + 2: required i32 port; + 3: required map gauge; + 4: required map counter; + 5: required map meter; + 6: required map timer; + 7: required map histogram; +} +struct TopologyMetricInfo { + 1: required string topology_id; + 2: optional list task_metric_list; + 3: optional list worker_metric_list; +} + service Nimbus { void submitTopology(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology) throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite, 3: TopologyAssignException tae); void submitTopologyWithOpts(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology, 5: SubmitOptions options) throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite, 3:TopologyAssignException tae); @@ -231,6 +259,7 @@ service Nimbus { 
void activate(1: string name) throws (1: NotAliveException e); void deactivate(1: string name) throws (1: NotAliveException e); void rebalance(1: string name, 2: RebalanceOptions options) throws (1: NotAliveException e, 2: InvalidTopologyException ite); + void metricMonitor(1: string name, 2: MonitorOptions options) throws (1: NotAliveException e); // need to add functions for asking about status of storms, what nodes they're running on, looking at task logs @@ -253,6 +282,7 @@ service Nimbus { string getTopologyConf(1: string id) throws (1: NotAliveException e); StormTopology getTopology(1: string id) throws (1: NotAliveException e); StormTopology getUserTopology(1: string id) throws (1: NotAliveException e); + TopologyMetricInfo getTopologyMetric(1: string id) throws (1: NotAliveException e); } struct DRPCRequest { diff --git a/pom.xml b/pom.xml index ffc92d26a..cdb8271a4 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.alibaba.jstorm jstorm-all - 0.9.5.1 + 0.9.6 pom java storm java storm diff --git a/version b/version index f76f91317..9cf038687 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.9.2 \ No newline at end of file +0.9.6 \ No newline at end of file