# By: Riasat Ullah
# This module defines the TaskPayload class.

from data_syncers import syncer_policies
from dbqueries import db_business_services, db_task_instances
from exceptions.user_exceptions import ServiceUnavailable, UnauthorizedRequest
from modules.instance_comparator import InstanceComparator
from objects.assignee import Assignee
from objects.conditional_route import ConditionalRoute
from objects.instance_state import InstanceState
from objects.routing_rule import RoutingRule
from objects.task import Task
from utils import constants, errors, helpers, key_manager, logging, permissions, times, var_names
from validations import string_validator
import ast
import configuration as configs
import datetime


class TaskPayload(object):

    def __init__(self, timestamp, organization_id, start_date, title, timezone, task_time,
                 created_by=None, repeat_on=None, text_msg=None, urgency_level=configs.default_urgency_level,
                 trigger_method=constants.email, trigger_info=None, instantiate=True, alert=True, integration_id=None,
                 routing_id=None, related_task_id=None, task_status=None, notes=None, tags=None, dedup_key=None,
                 integration_key=None, api_key_id=None, service_id=None, service_ref_id=None, service_policy_id=None,
                 assignees=None, assignees_ref=None, snapshots=None, voice_messages=None, vendor_incident_url=None):
        '''
        TaskPayload object. It should only represent a task's information before an entry is made in the database.
        :param timestamp: (datetime.datetime) timestamp when the task was created
        :param organization_id: ID of the organization this task is for
        :param start_date: date the task should start from
        :param title: title of the task
        :param timezone: timezone the task is being created for
        :param task_time: time the task should trigger at in the format HH:MM
        :param created_by: (can be None) username of the user who created the task
        :param repeat_on: (list) of days the task should repeat on; defaults to an empty list
        :param text_msg: text message of the task
        :param urgency_level: urgency level of the task
        :param trigger_method: the method used to trigger the task creation [APP, EMAIL, API, etc]
        :param trigger_info: additional information that may have been provided with the trigger
        :param instantiate: (boolean) True if instances should be created for this task
        :param alert: (boolean) True if alerts should be generated from the instances created for this task
        :param integration_id: ID of the integration that created the task
        :param routing_id: (list) of conditional routing IDs
        :param related_task_id: ID of any other task this task is related to; will be useful for grouping/suppressing
        :param task_status: the status of the task, e.g. GROUPED, SUPPRESSED
        :param notes: any notes that were added to the task
        :param tags: list of tags to help classify a task
        :param dedup_key: a de-duplication key; if this key is present in an active task instance, then instances
                        will not be created for subsequent tasks containing the same key
        :param integration_key: the integration key
        :param api_key_id: ID of the API key that triggered this task
        :param service_id: ID of the service this task is for
        :param service_ref_id: reference ID of the service this task is for (concealed)
        :param service_policy_id: policy ID of the service
        :param assignees: (list of int) policy IDs of assignees (both users and groups)
        :param assignees_ref: (list of str) concealed policy IDs of assignees
        :param snapshots: (list) of urls of images associated with the incident
        :param voice_messages: (list) of urls from where incident voice messages can be retrieved
        :param vendor_incident_url: (str) url with which the incident can be addressed on the vendor's platform
                        (only for incidents created through an integration)
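
        Example (illustrative only; the values below are placeholders, not real data):
            payload = TaskPayload(
                timestamp=datetime.datetime.utcnow(), organization_id=1,
                start_date=datetime.date.today(), title='Disk usage above 90%',
                timezone='UTC', task_time='09:00'
            )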
        '''
        self.timestamp = timestamp
        self.organization_id = organization_id
        self.start_date = start_date
        self.title = title
        self.timezone = timezone
        self.task_time = task_time
        self.created_by = created_by
        self.repeat = repeat_on if repeat_on is not None else []  # avoid sharing a mutable default
        self.description = text_msg
        self.urgency_level = urgency_level
        self.trigger_method = trigger_method
        self.source_payload = trigger_info
        self.to_instantiate = instantiate
        self.to_alert = alert
        self.integration_id = integration_id
        self.routing_id = routing_id
        self.related_task_id = related_task_id
        self.status = task_status
        self.notes = notes
        self.tags = tags
        self.dedup_key = dedup_key
        self.integration_key = integration_key
        self.api_key_id = api_key_id
        self.service_id = service_id
        self.service_ref_id = service_ref_id
        self.service_policy_id = service_policy_id
        self.assignees = assignees
        self.assignees_ref = assignees_ref
        self.snapshots = snapshots
        self.voice_messages = voice_messages
        self.vendor_incident_url = vendor_incident_url
        self.amended_urgency = None
        self.next_alert_timestamp = None
        self.status_update = None
        self.subscribers = None
        self.conference_bridge = None
        self.impacted_business_services = None

        self.resolve_incident_hours = None
        self.unmask_concealed_payload_keys()

    def unmask_concealed_payload_keys(self):
        if self.service_ref_id is not None:
            self.service_ref_id = key_manager.unmask_reference_key(self.service_ref_id)
        if self.assignees_ref is not None:
            self.assignees_ref = [key_manager.unmask_reference_key(x) for x in self.assignees_ref]

    def resolve_alert(self):
        '''
        Resolve the alert. Make sure this is handled in the db query with the proper timestamp.
        The task and instances should be ended at the same time they are created.
        Do NOT put the created instance in cache.
        '''
        self.status = constants.resolved_state
        self.to_instantiate = True
        self.to_alert = False

    def suppress_alert_directly(self):
        '''
        Suppress the alert directly without checking for any related incidents.
        '''
        self.status = constants.suppressed_state
        self.to_instantiate = False
        self.to_alert = False

    def re_route_alert(self, policy_id):
        if self.service_id is None:
            self.assignees = [policy_id]
        else:
            self.service_policy_id = policy_id

    def suppress_alert_on_similarity(self, conn, max_count, for_minutes, routing_id, for_before=False):
        min_timestamp = self.timestamp - datetime.timedelta(minutes=for_minutes)
        related_tasks = db_task_instances.get_related_task_ids(
            conn, self.timestamp, routing_id=routing_id, min_timestamp=min_timestamp, active=False,
            organization_id=self.organization_id
        )
        to_suppress = False
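        # With for_before set, suppress while fewer than max_count related tasks exist in the
        # window; otherwise suppress once the count has reached max_count.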
        if for_before:
            if len(related_tasks) < max_count:
                to_suppress = True
        else:
            if len(related_tasks) >= max_count:
                to_suppress = True

        if to_suppress:
            self.status = constants.suppressed_state
            self.to_instantiate = False
            self.to_alert = False

    def group_alert(self, conn, for_minutes, routing_id):
        min_timestamp = self.timestamp - datetime.timedelta(minutes=for_minutes)
        related_tasks = db_task_instances.get_related_task_ids(
            conn, self.timestamp, routing_id=routing_id, min_timestamp=min_timestamp, active=True,
            organization_id=self.organization_id
        )
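        # If an active related task exists in the window, group this payload under the first
        # one returned instead of creating new instances or alerts.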
        if len(related_tasks) > 0:
            self.status = constants.grouped_state
            self.related_task_id = related_tasks[0]
            self.to_instantiate = False
            self.to_alert = False

    def intelligent_grouping(self, conn, for_minutes, additional_policy_ids):
        min_start = self.timestamp - datetime.timedelta(minutes=for_minutes)
        curr_instances = db_task_instances.get_instances(conn, self.timestamp, min_timestamp=min_start,
                                                         active=True, created_by=self.created_by,
                                                         integration_id=self.integration_id,
                                                         service_id=self.service_id)

        if len(curr_instances) > 0:
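            # Build a dummy task/instance from this payload so the InstanceComparator can
            # measure its similarity against the currently active instances.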
            details = {var_names.start_date: self.start_date,
                       var_names.created_by: self.created_by,
                       var_names.task_title: self.title,
                       var_names.task_timezone: self.timezone,
                       var_names.task_time: self.task_time,
                       var_names.repeat: self.repeat,
                       var_names.text_msg: self.description,
                       var_names.urgency_level: self.urgency_level,
                       var_names.service_id: self.service_id,
                       var_names.integration_id: self.integration_id}

            assignees = [Assignee(x, x, 1) for x in additional_policy_ids]
            labels = {var_names.tags: self.tags, var_names.dedup_key: self.dedup_key}
            dummy_task = Task(-1, details, assignees, labels)
            dummy_instance = InstanceState(-1, -1, self.organization_id, self.timestamp, self.timestamp, 1,
                                           last_alert_timestamp=None, next_alert_timestamp=None,
                                           status=constants.open_state, state_assignees=None,
                                           task=dummy_task, events=[])

            all_instances = [dummy_instance] + list(curr_instances.values())
            inst_comparator = InstanceComparator(all_instances, -1)
            similar_instance = inst_comparator.get_most_similar_instance(min_relevance=0.85)

            if similar_instance is not None:
                self.status = constants.grouped_state
                self.related_task_id = similar_instance.task.taskid
                self.to_instantiate = False
                self.to_alert = False

    def dedup_alert(self, parent_task_id):
        self.status = constants.grouped_state
        self.related_task_id = parent_task_id
        self.to_instantiate = False
        self.to_alert = False

    def extract_tags(self, tag_keys):
        new_tags = []
        try:
            disp_load = self.get_displayable_payload()
            for key in tag_keys:
                ext_tag = helpers.extract_field_values_from_dict(key, disp_load)
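                # Extracted values that look like list literals (e.g. "['a', 'b']") are
                # expanded into individual tags; anything else is appended as a single tag.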
                if string_validator.is_literal_list(ext_tag):
                    try:
                        new_tags += ast.literal_eval(ext_tag)
                    except ValueError:
                        new_tags.append(ext_tag)
                else:
                    new_tags.append(ext_tag)
            return new_tags
        except KeyError:
            logging.error('Skipping tags extraction - ' + str(tag_keys))
            return new_tags

    def extract_incident_title(self, field_msg):
        try:
            self.title = helpers.extract_field_values_from_dict(field_msg, self.get_displayable_payload())
        except KeyError:
            logging.error('Could not update incident title - ' + field_msg)

    def extract_incident_description(self, field_msg):
        try:
            self.description = helpers.extract_field_values_from_dict(field_msg, self.get_displayable_payload())
        except KeyError:
            logging.error('Could not update incident description - ' + field_msg)

    def extract_dedup_key(self, field_msg):
        try:
            self.dedup_key = helpers.extract_field_values_from_dict(field_msg, self.get_displayable_payload())
        except KeyError:
            logging.error('Could not set dedup key - ' + field_msg)

    def get_displayable_payload(self):
        '''
        Get a dict of the information that can be shown to the users.
        :return: (dict) with information that is displayable to users
        '''
        body = {
            var_names.utc_timestamp: self.timestamp,
            var_names.title: self.title,
            var_names.description: self.description,
            var_names.service_ref_id: key_manager.conceal_reference_key(self.service_ref_id)
            if self.service_ref_id is not None else None,
            var_names.urgency_level: self.urgency_level,
            var_names.trigger_method: self.trigger_method,
            var_names.source_payload: self.source_payload,
            var_names.notes: self.notes,
            var_names.tags: self.tags,
            var_names.dedup_key: self.dedup_key,
            var_names.integration_key: self.integration_key,
            var_names.snapshots: self.snapshots,
            var_names.voice_messages: self.voice_messages,
            var_names.vendor_url: self.vendor_incident_url
        }
        return body

    def qualifies_by_time(self, route: ConditionalRoute):
        '''
        Checks if a conditional route will apply solely based on the time of this payload.
        :param route: a ConditionalRoute object
        :return: True if it applies; False otherwise
        '''
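        # Evaluate the route's window in its own timezone; convert the timestamp only when
        # the route's timezone differs from the standard one.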
        region_timestamp = self.timestamp
        if route.routing_timezone != configs.standard_timezone:
            region_timestamp = times.utc_to_region_time(self.timestamp, route.routing_timezone)

        return (route.valid_start <= region_timestamp.date() < route.valid_end and
                route.rule_start_time <= region_timestamp.time() <= route.rule_end_time and
                region_timestamp.weekday() in route.repeat_on)

    def qualifies_by_conditions(self, route: ConditionalRoute):
        '''
        Checks whether the conditions laid out in a conditional routing apply,
        taking into account the number of conditions that must match.
        :param route: a ConditionalRoute object
        :return: True if this payload qualifies for routing; False otherwise
        '''
        qualifications = 0
        for rule in route.routing_rules:
            if self.qualifies_by_rule(rule):
                qualifications += 1
        return qualifications >= route.rule_application_count

    def qualifies_by_rule(self, rule: RoutingRule):
        '''
        Checks if this payload qualifies a given rule/condition of a routing directive.
        :param rule: a RoutingRule object describing the condition
        :return: True if it qualifies; False otherwise
        '''
        try:
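            # Service rules are matched directly against the payload's service ID; every
            # other rule is evaluated against a field of the displayable payload.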
            if rule.rule_type == var_names.service and rule.field_name == var_names.service_id:
                return self.service_id == rule.expected_value
            else:
                given_value = helpers.get_dict_value_from_dotted_key(self.get_displayable_payload(), rule.field_name)

                # Service ref id in the displayable_payload is concealed, and is stored as such in the database as well.
                # The given value above is also concealed. Hence, no unmasking is required.

                # "exists" comparator is automatically validated if given value is found
                if rule.comparator == constants.rule_exists:
                    return True
                elif rule.comparator == constants.rule_equals:
                    return rule.equals(given_value)
                elif rule.comparator == constants.rule_contains:
                    return rule.contains(given_value)
                elif rule.comparator == constants.rule_matches:
                    return rule.matches(given_value)
                elif rule.comparator == constants.rule_not_equals:
                    return not rule.equals(given_value)
                elif rule.comparator == constants.rule_not_contains:
                    return not rule.contains(given_value)
                elif rule.comparator == constants.rule_not_matches:
                    return not rule.matches(given_value)
                else:
                    logging.error('Unknown comparator provided. Automatically disqualifying rule - ' + rule.comparator)
                    return False
        except (KeyError, TypeError):
            # Only validate "not_exists" when there is a KeyError
            if rule.comparator == constants.rule_not_exists:
                return True
            else:
                return False

    def process_actions(self, conn, route: ConditionalRoute, re_route_service=None, org_perm=None):
        '''
        Process conditional routing actions and update the payload.
        :param conn: db connection
        :param route: a ConditionalRoute object
        :param re_route_service: Service object to re-route to
        :param org_perm: permission the organization has
        :return: None; the payload is updated in place
        '''
        if self.routing_id is None:
            self.routing_id = []
        self.routing_id.append(route.routing_id)

        # Alert handling
        handler = route.actions[var_names.alert_handling]
        if handler == constants.resolve_alert:
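            # Delayed resolution (resolve_incidents with resolve_hours) suppresses the alert
            # now and records the delay in hours; otherwise the alert is resolved immediately.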
            if var_names.resolve_incidents in route.actions and route.actions[var_names.resolve_incidents] and \
                    var_names.resolve_hours in route.actions:
                self.suppress_alert_directly()
                self.resolve_incident_hours = route.actions[var_names.resolve_hours]
            else:
                self.resolve_alert()
                self.resolve_incident_hours = None
        elif handler == constants.cancel_alert:
            self.suppress_alert_directly()
        elif handler == constants.re_route_alert:
            self.re_route_alert(route.actions[var_names.route_to])
        elif handler == constants.re_route_service and re_route_service is not None:
            self.handle_service_level_processes(conn, re_route_service)
        else:
            # If the alert is to be handled normally, check for similarity-based suppression and grouping.
            # Suppression takes priority over grouping. (Suppression and grouping are redundant when the alert
            # is set to be resolved, cancelled or re-routed. Grouping is redundant for alerts that should be
            # re-routed because the grouping algorithm uses assignees as a factor.)
            if var_names.suppress_count in route.actions and var_names.suppress_minutes in route.actions:
                to_supp_before = False
                if var_names.suppress_before in route.actions and route.actions[var_names.suppress_before]:
                    to_supp_before = True
                self.suppress_alert_on_similarity(conn, route.actions[var_names.suppress_count],
                                                  route.actions[var_names.suppress_minutes], route.routing_id,
                                                  for_before=to_supp_before)
            if var_names.group_minutes in route.actions and self.status != constants.suppressed_state:
                self.group_alert(conn, route.actions[var_names.group_minutes], route.routing_id)

        # Field extraction
        if var_names.incident_title_field in route.actions:
            self.extract_incident_title(route.actions[var_names.incident_title_field])
        if var_names.incident_description_field in route.actions:
            self.extract_incident_description(route.actions[var_names.incident_description_field])
        if var_names.dedup_key_field in route.actions:
            self.extract_dedup_key(route.actions[var_names.dedup_key_field])

        # Attribute updates
        if var_names.urgency_level in route.actions:
            self.urgency_level = route.actions[var_names.urgency_level]
        if var_names.tags in route.actions and \
                (org_perm is None or permissions.has_org_permission(
                    org_perm, permissions.ORG_CONTEXTUAL_SEARCH_PERMISSION)):
            self.tags = self.extract_tags(route.actions[var_names.tags])
        if var_names.notes in route.actions:
            self.notes = helpers.extract_field_values_from_dict(
                route.actions[var_names.notes], self.get_displayable_payload())

    def get_additional_policies(self, conn, cache_client):
        '''
        Get the policy IDs of all additional assignees. It excludes the ID
        of the policy this task's service is associated with.
        :param conn: db connection
        :param cache_client: cache client
        :return: (list) -> of policy IDs
        '''
        pol = []
        if self.assignees is not None:
            pol += self.assignees
        if self.assignees_ref is not None:
            pol += syncer_policies.get_policy_ids_from_ref_ids(
                conn, cache_client, self.timestamp, self.organization_id, self.assignees_ref
            )
        return pol

    def check_for_impacted_business_services(self, conn, business_services):
        '''
        Checks whether any business services have been impacted. If they have,
        adds them to the list of impacted business services.
        :param conn: db connection
        :param business_services: (list of dict) of business service details
        '''
        impacted_bus_ids = []
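        # First pass: collect business services whose minimum urgency is met by this payload.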
        for bus_serv in business_services:
            bus_serv_urgency = bus_serv[var_names.min_urgency]
            if bus_serv_urgency is None or self.urgency_level >= bus_serv_urgency:
                impacted_bus_ids.append(bus_serv[var_names.business_service_id])

        if len(impacted_bus_ids) > 0:
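            # Second pass: pull the business services related to the impacted ones and keep
            # those whose own minimum urgency is also met.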
            related_business_services = db_business_services.get_related_business_services(
                conn, self.timestamp, impacted_bus_ids)

            self.impacted_business_services = []
            for id_ in related_business_services:
                for item in related_business_services[id_]:
                    rel_min_urg = item[var_names.min_urgency]

                    if rel_min_urg is None or self.urgency_level >= rel_min_urg:
                        rel_bus_id = item[var_names.business_service_id]
                        rel_bus_name = item[var_names.business_service_name]
                        rel_sub_count = item[var_names.subscribers]
                        sts_pg_id = item[var_names.page_id]

                        self.impacted_business_services.append((rel_bus_id, rel_bus_name, rel_sub_count, sts_pg_id))

    def get_impacted_business_service_tuples(self):
        '''
        Gets (ID, name) tuples of the business services that have been impacted.
        :return: (list of tuple) of (business service ID, business service name) pairs
        '''
        if self.impacted_business_services is None:
            return None
        else:
            return [(x[0], x[1]) for x in self.impacted_business_services]

    def get_impacted_business_service_names(self):
        '''
        Gets the names of the business services that have been impacted.
        :return: (list of str) of business service names
        '''
        if self.impacted_business_services is None:
            return None
        else:
            return [x[1] for x in self.impacted_business_services]

    def handle_service_level_processes(self, conn, task_service):
        '''
        Handles service specific actions (not the actions of the conditional routing).
        :param conn: db connection
        :param task_service: the service that the actions should be processed for
        '''
        # Ensure that the service belongs to the organization. This will not be checked later on,
        # unlike policies which get cross-checked in the syncer.
        if task_service.organization_id != self.organization_id:
            raise UnauthorizedRequest(errors.err_unknown_resource)

        if task_service.is_available(self.timestamp):
            self.service_id = task_service.service_id
            self.service_ref_id = task_service.service_ref_id
            self.service_policy_id = task_service.for_policy_id

            if task_service.off_hours_deprioritize and not task_service.in_support_mode(self.timestamp):
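                # Outside support hours, downgrade the urgency and defer the next alert
                # until the service's support window opens again.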
                self.urgency_level = constants.low_urgency
                self.amended_urgency = constants.low_urgency
                self.next_alert_timestamp = task_service.next_utc_support_start(self.timestamp)
        else:
            raise ServiceUnavailable(errors.err_service_unavailable)
