https://coder.tw/?p=7235
https://coder.tw/?p=7238
https://fwit.win/?p=1392
Kibana Dev Tools
GET _cat
GET _cat/indices?v
GET _cat/indices?v&s=index
GET _cat/segments?v
GET /_settings
GET /_stats
GET /_template
GET _cluster/health
GET filebeat-6.5.1-2019.01.01
POST filebeat-6.5.1-2019.01.01
PUT filebeat-6.5.1-2019.01.01
DELETE filebeat-6.5.1-2019.01.01
GET filebeat-6.5.1-2019.01.*
POST filebeat-6.5.1-2019.01.*
PUT filebeat-6.5.1-2019.01.*
DELETE filebeat-6.5.1-2019.01.*
GET filebeat-6.5.1-2019.01.01/_stats
GET filebeat-6.5.1-2019.01.01/_mapping
POST /_refresh
POST /_cache/clear
POST /_flush/synced

Append ?v to a _cat request to show the column names.
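The same requests can be issued from the shell with curl when Kibana is not available; a minimal sketch, assuming Elasticsearch is reachable on localhost:9200 (adjust host/port for your cluster):

# List indices with column headers, sorted by index name (same as the Dev Tools call above)
curl -s 'http://localhost:9200/_cat/indices?v&s=index'

# Cluster health summary as JSON
curl -s 'http://localhost:9200/_cluster/health?pretty'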
Segments Merge
https://my.oschina.net/fufangchun/blog/1541156
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html#forcemerge-multi-index
GET _cat/segments?v
POST /filebeat-6.5.1-2019.01.01/_forcemerge?max_num_segments=1&flush=true
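Force merge is typically run on indices that are no longer being written to (e.g. the previous day's filebeat daily index). A minimal curl sketch of the same calls, assuming Elasticsearch on localhost:9200; quote the URL so the shell does not treat & as a background operator:

# Segment count per shard before the merge
curl -s 'http://localhost:9200/_cat/segments/filebeat-6.5.1-2019.01.01?v'

# Merge the index down to a single segment
curl -s -X POST 'http://localhost:9200/filebeat-6.5.1-2019.01.01/_forcemerge?max_num_segments=1&flush=true'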
https://my.oschina.net/weiweiblog/blog/2989931
filter {
  if [etltype] == "blocks" {            #[fields][srctype]
    csv {
      columns => [ "number", "hash", "parent_hash", "nonce", "sha3_uncles", "logs_bloom", "transactions_root", "state_root", "receipts_root", "miner", "difficulty", "total_difficulty", "size", "extra_data", "gas_limit", "gas_used", "timestamp", "transaction_count" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "contracts" {  #[fields][srctype]
    csv {
      columns => [ "address", "bytecode", "function_sighashes", "is_erc20", "is_erc721" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "logs" {       #[fields][srctype]
    csv {
      columns => [ "log_index", "transaction_hash", "transaction_index", "block_hash", "block_number", "address", "data", "topics" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "receipts" {   #[fields][srctype]
    csv {
      columns => [ "transaction_hash", "transaction_index", "block_hash", "block_number", "cumulative_gas_used", "gas_used", "contract_address", "root", "status" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "token_transfers" {  #[fields][srctype]
    csv {
      columns => [ "" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "tokens" {     #[fields][srctype]
    csv {
      columns => [ "" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  } else if [etltype] == "transactions" {  #[fields][srctype]
    csv {
      columns => [ "hash", "nonce", "block_hash", "block_number", "transaction_index", "from_address", "to_address", "value", "gas", "gas_price", "inputcontext" ]
      separator => ","
      remove_field => ["message"]
      skip_empty_columns => true
      skip_empty_rows => true
    }
  }
}

output {
  if [etltype] == "blocks" {
    elasticsearch {
      hosts => "xxx.
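The output section above is cut off in the source. Before restarting logstash with a pipeline like this, the syntax can be checked from the shell; a minimal sketch, assuming the pipeline file is saved as /etc/logstash/conf.d/ethereum-etl.conf and logstash is installed under /usr/share/logstash (both paths are assumptions):

# Check the pipeline for syntax errors without starting it
/usr/share/logstash/bin/logstash --path.settings /etc/logstash \
  -f /etc/logstash/conf.d/ethereum-etl.conf --config.test_and_exit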
Using the filebeat nginx module to send nginx logs, either through logstash or directly to Elasticsearch, every record fails with an error.
However, some nginx log records do get through successfully. Copying those successful records to another VM that also runs filebeat and logstash and resending them there fails with the same errors.
https://stackoverflow.com/questions/41703689/how-do-i-force-rebuild-logs-data-in-filebeat-5
sudo service filebeat stop
sudo mv /var/lib/filebeat/registry /var/lib/filebeat/registry.old
sudo service filebeat start
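Moving the registry aside makes filebeat re-read and re-ship all configured logs from the beginning. A quick sanity check before and after the restart; a sketch assuming filebeat 6.x with the default config path:

# Verify the filebeat config parses and the configured output is reachable
sudo filebeat test config -c /etc/filebeat/filebeat.yml
sudo filebeat test output -c /etc/filebeat/filebeat.yml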