Elasticsearch: error after server restart

Hello,

we are getting an Elasticsearch error after a server power failure.

Here is the output of the command:

curl -XGET 172.19.3.40:9200/_cat/indices

{"error":{"root_cause":[{"type":"null_pointer_exception","reason":null}],"type":"null_pointer_exception","reason":null},"status":500}(cchq)

Here is the associated log on the node (from `tail -f /opt/data/elasticsearch-2.4.6/logs/echis-es.log`):

[2021-03-12 14:20:07,123][WARN ][rest.suppressed ] path: /_cat/indices, params: {}
java.lang.NullPointerException
at org.elasticsearch.rest.action.cat.RestIndicesAction.buildTable(RestIndicesAction.java:345)
at org.elasticsearch.rest.action.cat.RestIndicesAction.access$100(RestIndicesAction.java:52)
at org.elasticsearch.rest.action.cat.RestIndicesAction$1$1$1.buildResponse(RestIndicesAction.java:111)
at org.elasticsearch.rest.action.cat.RestIndicesAction$1$1$1.buildResponse(RestIndicesAction.java:108)
at org.elasticsearch.rest.action.support.RestResponseListener.processResponse(RestResponseListener.java:43)
at org.elasticsearch.rest.action.support.RestActionListener.onResponse(RestActionListener.java:49)
at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:89)
at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:85)
at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction.onCompletion(TransportBroadcastByNodeAction.java:394)
at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction.onNodeResponse(TransportBroadcastByNodeAction.java:363)
at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction$1.handleResponse(TransportBroadcastByNodeAction.java:335)
at org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction$AsyncAction$1.handleResponse(TransportBroadcastByNodeAction.java:327)
at org.elasticsearch.transport.netty.MessageChannelHandler.handleResponse(MessageChannelHandler.java:158)
at org.elasticsearch.transport.netty.MessageChannelHandler.messageReceived(MessageChannelHandler.java:124)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:462)
at org.jboss.netty.handler.codec.frame.FrameDecoder.callDecode(FrameDecoder.java:443)
at org.jboss.netty.handler.codec.frame.FrameDecoder.messageReceived(FrameDecoder.java:310)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

The following output is also displayed frequently in the log:

UnavailableShardsException[[case_search_2018-05-29][2] primary shard is not active Timeout: [1m], request: [index {[case_search][case][ae609cf7-fea8-42a9-81a2-3c7452da90ce], source[{"closed": false, "closed_by": null, "closed_on": null, "doc_type": "CommCareCase", "domain": "fmoh-echis", "external_id": null, "indices": [{"case_id": "ae609cf7-fea8-42a9-81a2-3c7452da90ce", "identifier": "host", "referenced_id": "fba17309-f6f5-49d7-8ed4-b2bf26765b1b", "referenced_type": "family", "relationship": "extension"}], "location_id": null, "modified_on": "2021-03-04T12:28:41.249000Z", "name": "Bultumee Tullu Jimaa", "opened_by": "296e062de387454b9c78717c049f5ef0", "opened_on": "2021-03-04T12:28:41.249000Z", "owner_id": "b1c3094c31cd4fe7b6704cadaa36d704", "server_modified_on": "2021-03-12T13:57:29.512088Z", "type": "delegation", "user_id": "296e062de387454b9c78717c049f5ef0", "@indexed_on": "2021-03-12T19:45:49.977837Z", "case_properties": [{"key": "@case_id", "value": "ae609cf7-fea8-42a9-81a2-3c7452da90ce"}, {"key": "@case_type", "value": "delegation"}, {"key": "@owner_id", "value": "b1c3094c31cd4fe7b6704cadaa36d704"}, {"key": "@status", "value": "open"}, {"key": "name", "value": "Bultumee Tullu Jimaa"}, {"key": "case_name", "value": "Bultumee Tullu Jimaa"}, {"key": "external_id", "value": null}, {"key": "date_opened", "value": "2021-03-04T12:28:41.249000Z"}, {"key": "closed_on", "value": null}, {"key": "last_modified", "value": "2021-03-04T12:28:41.249000Z"}]}]}]]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.retryBecauseUnavailable(TransportReplicationAction.java:614)
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:474)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase$2.onTimeout(TransportReplicationAction.java:576)
at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:236)
at org.elasticsearch.cluster.service.InternalClusterService$NotifyTimeout.run(InternalClusterService.java:816)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)