#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import requests
import argparse

SCRIPT_NAME = os.path.basename(__file__)

# Variables which will be populated by arg parser
LOGIN_USER = ""
LOGIN_PASS = ""
HAPROXY_STATS_URL = ""

# Default uptime thresholds in seconds; overridable via -c / -w below.
BACKEND_UPTIME_CRITICAL = 3600
BACKEND_UPTIME_WARNING = 7200

# Backends to restrict the check to; an empty list means "check all".
BACKENDS = []

# Nagios return codes
UNKNOWN = -1
OK = 0
WARNING = 1
CRITICAL = 2

# Setup argument parser
argparser = argparse.ArgumentParser(prog=SCRIPT_NAME, description="Nagios check for HAProxy backend server uptime")
argparser.add_argument("user", type=str, help="Username for stats access")
argparser.add_argument("password", type=str, help="Password for stats access")
argparser.add_argument("statsurl", type=str, help="Stats URL (;csv appended automatically) e.g. http(s)://foo.bar:port/url")
argparser.add_argument("-c", "--crit-time", type=int, help="Critical backend uptime in seconds (default: {})".format(BACKEND_UPTIME_CRITICAL))
argparser.add_argument("-w", "--warn-time", type=int, help="Warning backend uptime in seconds (default: {})".format(BACKEND_UPTIME_WARNING))
argparser.add_argument("-b", "--backends", type=str, help="Comma separated list of backends to check (default: all)")

# Parse arguments
args = argparser.parse_args()

# Set optional arguments.  Compare against None rather than truthiness so
# that an explicit zero threshold ("-c 0" / "-w 0") is honoured instead of
# being silently ignored.
if args.crit_time is not None:
	BACKEND_UPTIME_CRITICAL = args.crit_time

if args.warn_time is not None:
	BACKEND_UPTIME_WARNING = args.warn_time

if args.backends:
	BACKENDS = args.backends.split(",")

# Set positional arguments
LOGIN_USER = args.user
LOGIN_PASS = args.password
HAPROXY_STATS_URL = "{}/;csv".format(args.statsurl.rstrip("/"))

# Session object reused for all requests; carries the HTTP basic auth
# credentials for the stats page.
SESSION = requests.Session()
SESSION.auth = (LOGIN_USER, LOGIN_PASS)


'''
Generic return code commands for Nagios
'''
def ret_check(retcode=UNKNOWN, status="UNKNOWN: An error occurred.", perfdata=""):
	print("{} | {}".format(status, perfdata))
	sys.exit(retcode)

def ret_ok(status="Backend uptimes are good.", perf=""):
	"""Exit with Nagios OK, prefixing the status message with "OK: "."""
	ret_check(OK, "OK: " + status, perf)

def ret_warning(status="Backend uptime too low!", perf=""):
	"""Exit with Nagios WARNING, prefixing the status with "WARNING: "."""
	ret_check(WARNING, "WARNING: " + status, perf)

def ret_critical(status="Backend uptime too low!", perf=""):
	"""Exit with Nagios CRITICAL, prefixing the status with "CRITICAL: "."""
	ret_check(CRITICAL, "CRITICAL: " + status, perf)


def get_stats_page():
	"""Fetch the HAProxy stats CSV page and return the response body as text.

	On any request failure, exits the plugin with UNKNOWN via ret_check().
	Note: the exception order matters -- requests.ConnectTimeout subclasses
	both ConnectionError and Timeout, so it is reported as a connection error
	here, matching the original behaviour.
	"""
	# No "global" statements needed: SESSION and HAPROXY_STATS_URL are only
	# read, never rebound.
	try:
		response = SESSION.get(HAPROXY_STATS_URL, timeout=10)
		response.raise_for_status()
		return response.text
	except requests.exceptions.HTTPError as err:
		ret_check(UNKNOWN, "UNKNOWN: HTTP Error: {}".format(err), "")
	except requests.exceptions.ConnectionError as err:
		ret_check(UNKNOWN, "UNKNOWN: Connection error: {}".format(err), "")
	except requests.exceptions.Timeout as err:
		ret_check(UNKNOWN, "UNKNOWN: Connection timed out: {}".format(err), "")
	except requests.exceptions.RequestException as err:
		ret_check(UNKNOWN, "UNKNOWN: Request error: {}".format(err), "")
	except Exception:
		# Was a bare "except:" -- that also swallowed SystemExit and
		# KeyboardInterrupt, so Ctrl-C during the fetch was misreported as
		# an unknown error instead of interrupting the plugin.
		ret_check(UNKNOWN, "UNKNOWN: An unknown error occurred.", "")

def get_lastchg_field_index(data):
	"""Return the column index of the "lastchg" field in the stats CSV.

	data -- full CSV text; the first line is the header, prefixed with "# ".

	Exits UNKNOWN when the field is missing.  (Previously this implicitly
	returned None in that case, which later crashed do_check() with a
	TypeError on backend[None].)
	"""
	csv_header = data.split("\n")[0].lstrip("#").strip()
	csv_header_fields = csv_header.split(",")
	for idx, fieldname in enumerate(csv_header_fields):
		if fieldname == "lastchg":
			return idx
	# Fail fast: without this column the uptime check cannot work.
	ret_check(UNKNOWN, "UNKNOWN: 'lastchg' field not found in stats CSV.", "")

def filter_backends(data):
	"""Filter the stats CSV and return only the backend server rows we want.

	Skips the header line, rows with too few fields, and the aggregate
	FRONTEND/BACKEND summary rows.  When the module-level BACKENDS list is
	non-empty, only rows whose proxy name (first field) appears in it are
	kept.  Each returned entry is the raw list of CSV fields for one server.
	"""
	selected = []
	# Skip the first line: it is the CSV header.
	for row in data.split("\n")[1:]:
		fields = row.split(",")
		# Guard: ignore rows too short to be real stats entries.
		if len(fields) < 20:
			continue
		# Guard: aggregate FRONTEND/BACKEND rows are not individual servers.
		if fields[1] in ("FRONTEND", "BACKEND"):
			continue
		# Guard: honour an explicit backend selection when one was given.
		if BACKENDS and fields[0] not in BACKENDS:
			continue
		selected.append(fields)
	return selected

def do_check():
	"""Run the uptime check against HAProxy and exit with a Nagios status.

	Fetches the stats page, filters the wanted backend servers, and compares
	each server's "lastchg" (seconds since last state change) against the
	warning/critical thresholds.  Never returns normally -- always exits via
	one of the ret_* helpers.
	"""
	# Fetch data and filter results
	statspage = get_stats_page()
	lastchg_idx = get_lastchg_field_index(statspage)
	backends = filter_backends(statspage)

	# Highest severity seen so far; stays UNKNOWN/OK-equivalent until a
	# backend trips a threshold.  (The old unused "status" string is gone.)
	status_code = UNKNOWN

	# Contains backends that failed our check
	bad_backends = []

	# Stores performance data entries, one per backend server
	perfdata = []

	# Actual check routine.  Use == / != for the status comparisons: "is"
	# on ints only works by accident of CPython small-int caching.
	for backend in backends:
		label = "{}-{}".format(backend[0], backend[1])
		uptime = int(backend[lastchg_idx])
		perfdata.append("{}={}".format(label, uptime))
		if uptime < BACKEND_UPTIME_CRITICAL:
			status_code = CRITICAL
			bad_backends.append(label)
		elif uptime < BACKEND_UPTIME_WARNING:
			# Still report warning-level backends even after a critical one
			# was already found (the old code silently dropped them then),
			# but never downgrade the overall severity.
			if status_code != CRITICAL:
				status_code = WARNING
			bad_backends.append(label)

	# Nagios perfdata entries are space separated; comma separation is not
	# valid perfdata syntax and breaks perfdata parsers.
	perfdata = " ".join(perfdata)

	# Return check result
	if status_code == CRITICAL:
		ret_critical("The following backends have low uptimes: {}".format(", ".join(bad_backends)), perfdata)
	elif status_code == WARNING:
		ret_warning("The following backends have low uptimes: {}".format(", ".join(bad_backends)), perfdata)
	else:
		ret_ok(perf=perfdata)

if __name__ == "__main__": do_check()
