From c78def3919525718cddaf01d31f68194272c322b Mon Sep 17 00:00:00 2001
From: pmarasse
Date: Fri, 27 Jun 2025 15:10:57 +0200
Subject: [PATCH] Fix loglevels (#255)

* Modified some loglevels to make output lighter at INFO level

Co-authored-by: Philippe MARASSE
---
 .changelogs/1.1.4/255_fix_loglevels.yml | 2 ++
 proxlb/models/balancing.py              | 4 ++--
 proxlb/models/calculations.py           | 6 +++---
 proxlb/models/nodes.py                  | 6 +++---
 4 files changed, 10 insertions(+), 8 deletions(-)
 create mode 100644 .changelogs/1.1.4/255_fix_loglevels.yml

diff --git a/.changelogs/1.1.4/255_fix_loglevels.yml b/.changelogs/1.1.4/255_fix_loglevels.yml
new file mode 100644
index 0000000..603f252
--- /dev/null
+++ b/.changelogs/1.1.4/255_fix_loglevels.yml
@@ -0,0 +1,2 @@
+fixed:
+  - Modified log levels to make output lighter at INFO level (@pmarasse) [#255]
diff --git a/proxlb/models/balancing.py b/proxlb/models/balancing.py
index 754d243..d6d45fa 100644
--- a/proxlb/models/balancing.py
+++ b/proxlb/models/balancing.py
@@ -149,7 +149,7 @@ class Balancing:
             }

         try:
-            logger.debug(f"Balancing: Starting to migrate guest {guest_name} of type VM.")
+            logger.info(f"Balancing: Starting to migrate VM guest {guest_name} from {guest_node_current} to {guest_node_target}.")
             job_id = proxmox_api.nodes(guest_node_current).qemu(guest_id).migrate().post(**migration_options)
         except proxmoxer.core.ResourceException as proxmox_api_error:
             logger.critical(f"Balancing: Failed to migrate guest {guest_name} of type VM due to some Proxmox errors. Please check if resource is locked or similar.")
@@ -178,7 +178,7 @@ class Balancing:
         guest_node_target = proxlb_data["guests"][guest_name]["node_target"]

         try:
-            logger.debug(f"Balancing: Starting to migrate guest {guest_name} of type CT.")
+            logger.info(f"Balancing: Starting to migrate CT guest {guest_name} from {guest_node_current} to {guest_node_target}.")
             job_id = proxmox_api.nodes(guest_node_current).lxc(guest_id).migrate().post(target=guest_node_target, restart=1)
         except proxmoxer.core.ResourceException as proxmox_api_error:
             logger.critical(f"Balancing: Failed to migrate guest {guest_name} of type CT due to some Proxmox errors. Please check if resource is locked or similar.")
diff --git a/proxlb/models/calculations.py b/proxlb/models/calculations.py
index 7199ca9..94baaf4 100644
--- a/proxlb/models/calculations.py
+++ b/proxlb/models/calculations.py
@@ -305,7 +305,7 @@ class Calculations:
                 proxlb_data["guests"][guest_name]["processed"] = True

             if len(proxlb_data["guests"][guest_name]["node_relationships"]) > 0:
-                logger.info(f"Guest '{guest_name}' has relationships defined to node(s): {','.join(proxlb_data['guests'][guest_name]['node_relationships'])}. Pinning to node.")
+                logger.debug(f"Guest '{guest_name}' has relationships defined to node(s): {','.join(proxlb_data['guests'][guest_name]['node_relationships'])}. Pinning to node.")
                 # Get the node with the most free resources of the group
                 guest_node_relation_list = proxlb_data["guests"][guest_name]["node_relationships"]
@@ -313,12 +313,12 @@ class Calculations:

                 # Validate if the specified node name is really part of the cluster
                 if proxlb_data["meta"]["balancing"]["balance_next_node"] in proxlb_data["nodes"].keys():
-                    logger.info(f"Guest '{guest_name}' has a specific relationship defined to node: {proxlb_data['meta']['balancing']['balance_next_node']} is a known hypervisor node in the cluster.")
+                    logger.debug(f"Guest '{guest_name}' has a specific relationship defined to node: {proxlb_data['meta']['balancing']['balance_next_node']} is a known hypervisor node in the cluster.")
                 else:
                     logger.warning(f"Guest '{guest_name}' has a specific relationship defined to node: {proxlb_data['meta']['balancing']['balance_next_node']} but this node name is not known in the cluster!")
             else:
-                logger.info(f"Guest '{guest_name}' does not have any specific node relationships.")
+                logger.debug(f"Guest '{guest_name}' does not have any specific node relationships.")

         logger.debug("Finished: val_node_relationships.")
diff --git a/proxlb/models/nodes.py b/proxlb/models/nodes.py
index 8cbb0a0..8f566b3 100644
--- a/proxlb/models/nodes.py
+++ b/proxlb/models/nodes.py
@@ -112,7 +112,7 @@ class Nodes:
         if proxlb_config.get("proxmox_cluster", None).get("maintenance_nodes", None) is not None:
             if len(proxlb_config.get("proxmox_cluster", {}).get("maintenance_nodes", [])) > 0:
                 if node_name in proxlb_config.get("proxmox_cluster", {}).get("maintenance_nodes", []):
-                    logger.warning(f"Node: {node_name} has been set to maintenance mode (by ProxLB config).")
+                    logger.info(f"Node: {node_name} has been set to maintenance mode (by ProxLB config).")
                     return True
                 else:
                     logger.debug(f"Node: {node_name} is not in maintenance mode by ProxLB config.")
@@ -122,7 +122,7 @@ class Nodes:
             if ha_element.get("status"):
                 if "maintenance mode" in ha_element.get("status"):
                     if ha_element.get("node") == node_name:
-                        logger.warning(f"Node: {node_name} has been set to maintenance mode (by Proxmox HA API).")
+                        logger.info(f"Node: {node_name} has been set to maintenance mode (by Proxmox HA API).")
                         return True
                     else:
                         logger.debug(f"Node: {node_name} is not in maintenance mode by Proxmox HA API.")
@@ -149,7 +149,7 @@ class Nodes:
         if proxlb_config.get("proxmox_cluster", None).get("ignore_nodes", None) is not None:
             if len(proxlb_config.get("proxmox_cluster", {}).get("ignore_nodes", [])) > 0:
                 if node_name in proxlb_config.get("proxmox_cluster", {}).get("ignore_nodes", []):
-                    logger.warning(f"Node: {node_name} has been set to be ignored. Not adding node!")
+                    logger.info(f"Node: {node_name} has been set to be ignored. Not adding node!")
                     return True

         logger.debug("Finished: set_node_ignore.")