| 20 Jun 2025 |
hexa | oh … https://pkg.go.dev/github.com/lxc/incus/shared/api/scriptlet#InstancePlacement | 15:19:13 |
hexa | Redacted or Malformed Event | 16:53:10 |
hexa | * def instance_placement(request, candidate_members):
config = request.config
group = config.get("user.ha_group")
if group == None:
log_info("Using default placement strategy.")
return
log_info(
"Determining placement of instance '{}' for high-availability group '{}'".format(
request.name, group
)
)
# Get all instances in the same project
instances = get_instances(location="", project=request.project)
# Track which cluster members already host a group instance
unavailable_members = []
for instance in instances:
if instance.status != "Running":
continue
if instance.config.get("user.ha_group") == group:
unavailable_members.add(instance.location)
log_info(
"Cluster members hosting resources in high-availability group '{}': {}".format(
group, unavailable_members
)
)
available_members = candidate_members - unavailable_members
if not available_members:
log_warn(
"No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
group
)
)
return
# Choose the eligible member with the fewest instances
def instance_count(name):
return get_instances_count(location=name, project=request.project, pending=True)
target_member = min(available_members, key=instance_count)
set_target(target_member)
log_info(
"Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
instance=request.name, member=target_member
)
)
| 16:53:15 |
hexa | * def instance_placement(request, candidate_members):
config = request.config
group = config.get("user.ha_group")
if group == None:
log_info("Using default placement strategy.")
return
log_info(
"Determining placement of instance '{}' for high-availability group '{}'".format(
request.name, group
)
)
# Get all instances in the same project
instances = get_instances(location="", project=request.project)
# Track which cluster members already host a group instance
unavailable_members = []
for instance in instances:
if instance.status != "Running":
continue
if instance.config.get("user.ha_group") == group:
unavailable_members.add(instance.location)
log_info(
"Cluster members hosting resources in high-availability group '{}': {}".format(
group, unavailable_members
)
)
available_members = candidate_members - unavailable_members
if not available_members:
log_warn(
"No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
group
)
)
return
# Choose the eligible member with the fewest instances
def instance_count(name):
return get_instances_count(location=name, project=request.project, pending=True)
target_member = min(available_members, key=instance_count)
set_target(target_member)
log_info(
"Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
instance=request.name, member=target_member
)
)
| 16:53:18 |
hexa | * def instance_placement(request, candidate_members):
config = request.config
group = config.get("user.ha_group")
if group == None:
log_info("Using default placement strategy.")
return
log_info(
"Determining placement of instance '{}' for high-availability group '{}'".format(
request.name, group
)
)
# Get all instances in the same project
instances = get_instances(location="", project=request.project)
# Track which cluster members already host a group instance
unavailable_members = {}
for instance in instances:
if instance.status != "Running":
continue
if instance.config.get("user.ha_group") == group:
unavailable_members.add(instance.location)
log_info(
"Cluster members hosting resources in high-availability group '{}': {}".format(
group, unavailable_members
)
)
available_members = candidate_members - unavailable_members
if not available_members:
log_warn(
"No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
group
)
)
return
# Choose the eligible member with the fewest instances
def instance_count(name):
return get_instances_count(location=name, project=request.project, pending=True)
target_member = min(available_members, key=instance_count)
set_target(target_member)
log_info(
"Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
instance=request.name, member=target_member
)
)
| 16:53:48 |
hexa | * def instance_placement(request, candidate_members):
config = request.config
group = config.get("user.ha_group")
if group == None:
log_info("Using default placement strategy.")
return
log_info(
"Determining placement of instance '{}' for high-availability group '{}'".format(
request.name, group
)
)
# Get all instances in the same project
instances = get_instances(location="", project=request.project)
# Track which cluster members already host a group instance
unavailable_members = set()
for instance in instances:
if instance.status != "Running":
continue
if instance.config.get("user.ha_group") == group:
unavailable_members.add(instance.location)
log_info(
"Cluster members hosting resources in high-availability group '{}': {}".format(
group, unavailable_members
)
)
available_members = candidate_members - unavailable_members
if not available_members:
log_warn(
"No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
group
)
)
return
# Choose the eligible member with the fewest instances
def instance_count(name):
return get_instances_count(location=name, project=request.project, pending=True)
target_member = min(available_members, key=instance_count)
set_target(target_member)
log_info(
"Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
instance=request.name, member=target_member
)
)
| 16:54:13 |
hexa |
this Starlark dialect does not support sets
| 16:55:39 |
hexa | oh fucking hell | 16:55:41 |
hexa | not being able to work with sets makes this slightly more annoying | 16:55:53 |
adamcstephens | there's some api functions in the docs above. appears the necessary inputs are there | 16:55:53 |
hexa | I already have something written | 16:56:08 |
hexa | but since I don't have a proper linter beyond incus itself | 16:56:23 |
hexa | lol, where t f do log messages go | 17:06:53 |
hexa | loglevel is info and I use log_info and log_warn | 17:07:40 |
adamcstephens | turn on debug logging? | 17:08:00 |
hexa | def instance_placement(request, candidate_members):
config = request.config
group = config.get("user.ha_group")
if group == None:
log_info("Using default placement strategy.")
return
log_info(
"Determining placement of instance '{}' for high-availability group '{}'".format(
request.name, group
)
)
# Get all instances in the same project
instances = get_instances(location="", project=request.project)
# Track which cluster members already host a group instance
unavailable_member_names = []
for instance in instances:
if instance.status != "Running":
continue
if instance.config.get("user.ha_group") == group:
unavailable_member_names.append(instance.location)
log_info(
"Cluster members hosting resources in high-availability group '{}': {}".format(
group, ", ".join(unavailable_member_names)
)
)
available_members = [member.server_name for member in candidate_members if member.server_name not in unavailable_member_names]
if not available_members:
log_warn(
"No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
group
)
)
return
log_info(
"Available cluster members for high-availability group '{}': {}".format(group, ", ".join(available_members))
)
# Choose the eligible member with the fewest instances
def instance_count(name):
return get_instances_count(location=name, project=request.project, pending=True)
target_member = min(available_members, key=instance_count)
set_target(target_member)
log_info(
"Placing instance '{instance}' on '{member}, because it has the lowest number of running resources.".format(
instance=request.name, member=target_member
)
)
| 17:08:11 |
hexa | fwiw | 17:08:12 |
hexa | feels very much like sad python | 17:08:41 |
adamcstephens | nice | 17:08:52 |
hexa | also very telling that I didn't find any placement script example out there | 17:09:30 |
hexa | only examples in the repo for testing purposes | 17:09:38 |
hexa | https://github.com/lxc/incus/blob/cdc0adbbb2c9202289190ace1825b105abf8c61b/test/suites/clustering_move.sh#L109
https://github.com/lxc/incus/blob/cdc0adbbb2c9202289190ace1825b105abf8c61b/test/suites/clustering_instance_placement_scriptlet.sh#L5
https://github.com/tomponline/starlark-examples/blob/826762c080ab1f8d15c541e8a9021f07b8cb0434/instance_placement.star#L6 | 17:09:58 |
adamcstephens | yeah, i'm not surprised. | 17:12:32 |
hexa | so, adam | 17:21:37 |
hexa | since you already maintain incus, and incus uses skylark | 17:21:44 |
hexa | why didn't you package skylint yet? | 17:21:57 |
hexa | $ git clone https://github.com/bazelbuild/bazel.git
$ cd bazel
$ bazel build //src/tools/skylark/java/com/google/devtools/skylark/skylint:Skylint
| 17:22:43 |
adamcstephens | well, i've never even seen it before :) | 17:22:47 |
hexa |  Download image.png | 17:22:55 |
hexa | the correct answer is: I hate bazel builds | 17:23:17 |