mirror of
https://github.com/gyptazy/ProxLB.git
synced 2026-04-05 20:31:57 +02:00
feature: Add new option to enforce node/guest pinning even when cluster is balanced from a resource perspective.
Fixes: #414
This commit is contained in:
committed by
gyptazy
parent
da193f9d27
commit
34e340c25c
2
.changelogs/1.1.11/414_add_pinning_enforcement.yml
Normal file
2
.changelogs/1.1.11/414_add_pinning_enforcement.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
added:
|
||||
- Add new option to enforce node/guest pinning even when cluster is balanced from a resource perspective (@gyptazy). [#414]
|
||||
@@ -289,7 +289,8 @@ The following options can be set in the configuration file `proxlb.yaml`:
|
||||
| | overprovisioning | | False | `Bool` | Avoids balancing when nodes would become overprovisioned. |
|
||||
| `balancing` | | | | | |
|
||||
| | enable | | True | `Bool` | Enables the guest balancing.|
|
||||
| | enforce_affinity | | True | `Bool` | Enforcing affinity/anti-affinity rules but balancing might become worse. |
|
||||
| | enforce_affinity | | False | `Bool` | Enforcing affinity/anti-affinity rules but balancing might become worse. |
|
||||
| | enforce_pinning | | False | `Bool` | Enforcing pinning rules but balancing might become worse. |
|
||||
| | parallel | | False | `Bool` | If guests should be moved in parallel or sequentially.|
|
||||
| | parallel_jobs | | 5 | `Int` | The amount of parallel jobs when migrating guests. (default: `5`)|
|
||||
| | live | | True | `Bool` | If guests should be moved live or shutdown.|
|
||||
@@ -340,6 +341,7 @@ proxmox_cluster:
|
||||
balancing:
|
||||
enable: True
|
||||
enforce_affinity: False
|
||||
enforce_pinning: False
|
||||
parallel: False
|
||||
live: True
|
||||
with_local_disks: True
|
||||
|
||||
@@ -19,6 +19,7 @@ proxmox_cluster:
|
||||
balancing:
|
||||
enable: True
|
||||
enforce_affinity: False
|
||||
enforce_pinning: False
|
||||
parallel: False
|
||||
# If running parallel jobs, you can define
|
||||
# the amount of parallel jobs (default: 5)
|
||||
|
||||
@@ -369,7 +369,12 @@ class Calculations:
|
||||
None
|
||||
"""
|
||||
logger.debug("Starting: relocate_guests.")
|
||||
if proxlb_data["meta"]["balancing"]["balance"] or proxlb_data["meta"]["balancing"].get("enforce_affinity", False):
|
||||
|
||||
# Balance only if it is required by:
|
||||
# - balanciness
|
||||
# - Affinity/Anti-Affinity rules
|
||||
# - Pinning rules
|
||||
if proxlb_data["meta"]["balancing"]["balance"] or proxlb_data["meta"]["balancing"].get("enforce_affinity", False) or proxlb_data["meta"]["balancing"].get("enforce_pinning", False):
|
||||
|
||||
if proxlb_data["meta"]["balancing"].get("balance", False):
|
||||
logger.debug("Balancing of guests will be performed. Reason: balanciness")
|
||||
@@ -377,6 +382,9 @@ class Calculations:
|
||||
if proxlb_data["meta"]["balancing"].get("enforce_affinity", False):
|
||||
logger.debug("Balancing of guests will be performed. Reason: enforce affinity balancing")
|
||||
|
||||
if proxlb_data["meta"]["balancing"].get("enforce_pinning", False):
|
||||
logger.debug("Balancing of guests will be performed. Reason: enforce pinning balancing")
|
||||
|
||||
# Sort guests by used memory
|
||||
# Allows processing larger guests first or smaller guests first
|
||||
larger_first = proxlb_data.get("meta", {}).get("balancing", {}).get("balance_larger_guests_first", False)
|
||||
@@ -404,7 +412,8 @@ class Calculations:
|
||||
# Validate balanciness again before processing each group
|
||||
Calculations.get_balanciness(proxlb_data)
|
||||
logger.debug(proxlb_data["meta"]["balancing"]["balance"])
|
||||
if (not proxlb_data["meta"]["balancing"]["balance"]) and (not proxlb_data["meta"]["balancing"].get("enforce_affinity", False)):
|
||||
|
||||
if (not proxlb_data["meta"]["balancing"]["balance"]) and (not proxlb_data["meta"]["balancing"].get("enforce_affinity", False)) and (not proxlb_data["meta"]["balancing"].get("enforce_pinning", False)):
|
||||
logger.debug("Skipping further guest relocations as balanciness is now ok.")
|
||||
break
|
||||
|
||||
|
||||
Reference in New Issue
Block a user