!VhzbGHamdfMiGxpXyg:robins.wtf

NixOS Incus and LXC

46 Members
lxc, lxd, incus discussions related to NixOS16 Servers

Load older messages


SenderMessageTime
20 Jun 2025
@hexa:lossy.networkhexaoh … https://pkg.go.dev/github.com/lxc/incus/shared/api/scriptlet#InstancePlacement15:19:13
@hexa:lossy.networkhexaRedacted or Malformed Event16:53:10
@hexa:lossy.networkhexa *
def instance_placement(request, candidate_members):
    config = request.config
    group = config.get("user.ha_group")
    if group == None:
        log_info("Using default placement strategy.")
        return

    log_info(
        "Determining placement of instance '{}' for high-availability group '{}'".format(
            request.name, group
        )
    )

    # Get all instances in the same project
    instances = get_instances(location="", project=request.project)

    # Track which cluster members already host a group instance
    unavailable_members = []
    for instance in instances:
        if instance.status != "Running":
            continue
        if instance.config.get("user.ha_group") == group:
            unavailable_members.add(instance.location)

    log_info(
        "Cluster members hosting resources in high-availability group '{}': {}".format(
            group, unavailable_members
        )
    )

    available_members = candidate_members - unavailable_members

    if not available_members:
        log_warn(
            "No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
                group
            )
        )
        return

    # Choose the eligible member with the fewest instances
    def instance_count(name):
        return get_instances_count(location=name, project=request.project, pending=True)

    target_member = min(available_members, key=instance_count)
    set_target(target_member)
    log_info(
        "Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
            instance=request.name, member=target_member
        )
    )
16:53:15
@hexa:lossy.networkhexa *
def instance_placement(request, candidate_members):
    config = request.config
    group = config.get("user.ha_group")
    if group == None:
        log_info("Using default placement strategy.")
        return

    log_info(
        "Determining placement of instance '{}' for high-availability group '{}'".format(
            request.name, group
        )
    )

    # Get all instances in the same project
    instances = get_instances(location="", project=request.project)

    # Track which cluster members already host a group instance
    unavailable_members = []
    for instance in instances:
        if instance.status != "Running":
            continue
        if instance.config.get("user.ha_group") == group:
            unavailable_members.add(instance.location)

    log_info(
        "Cluster members hosting resources in high-availability group '{}': {}".format(
            group, unavailable_members
        )
    )

    available_members = candidate_members - unavailable_members

    if not available_members:
        log_warn(
            "No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
                group
            )
        )
        return

    # Choose the eligible member with the fewest instances
    def instance_count(name):
        return get_instances_count(location=name, project=request.project, pending=True)

    target_member = min(available_members, key=instance_count)
    set_target(target_member)
    log_info(
        "Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
            instance=request.name, member=target_member
        )
    )
16:53:18
@hexa:lossy.networkhexa *
def instance_placement(request, candidate_members):
    config = request.config
    group = config.get("user.ha_group")
    if group == None:
        log_info("Using default placement strategy.")
        return

    log_info(
        "Determining placement of instance '{}' for high-availability group '{}'".format(
            request.name, group
        )
    )

    # Get all instances in the same project
    instances = get_instances(location="", project=request.project)

    # Track which cluster members already host a group instance
    unavailable_members = {}
    for instance in instances:
        if instance.status != "Running":
            continue
        if instance.config.get("user.ha_group") == group:
            unavailable_members.add(instance.location)

    log_info(
        "Cluster members hosting resources in high-availability group '{}': {}".format(
            group, unavailable_members
        )
    )

    available_members = candidate_members - unavailable_members

    if not available_members:
        log_warn(
            "No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
                group
            )
        )
        return

    # Choose the eligible member with the fewest instances
    def instance_count(name):
        return get_instances_count(location=name, project=request.project, pending=True)

    target_member = min(available_members, key=instance_count)
    set_target(target_member)
    log_info(
        "Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
            instance=request.name, member=target_member
        )
    )
16:53:48
@hexa:lossy.networkhexa *
def instance_placement(request, candidate_members):
    config = request.config
    group = config.get("user.ha_group")
    if group == None:
        log_info("Using default placement strategy.")
        return

    log_info(
        "Determining placement of instance '{}' for high-availability group '{}'".format(
            request.name, group
        )
    )

    # Get all instances in the same project
    instances = get_instances(location="", project=request.project)

    # Track which cluster members already host a group instance
    unavailable_members = set()
    for instance in instances:
        if instance.status != "Running":
            continue
        if instance.config.get("user.ha_group") == group:
            unavailable_members.add(instance.location)

    log_info(
        "Cluster members hosting resources in high-availability group '{}': {}".format(
            group, unavailable_members
        )
    )

    available_members = candidate_members - unavailable_members

    if not available_members:
        log_warn(
            "No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
                group
            )
        )
        return

    # Choose the eligible member with the fewest instances
    def instance_count(name):
        return get_instances_count(location=name, project=request.project, pending=True)

    target_member = min(available_members, key=instance_count)
    set_target(target_member)
    log_info(
        "Placing instance '{instance}' on '{member}, because it fits the high-availability criteria and has the lowest running resources".format(
            instance=request.name, member=target_member
        )
    )
16:54:13
@hexa:lossy.networkhexa

this Starlark dialect does not support sets

16:55:39
@hexa:lossy.networkhexaoh fucking hell16:55:41
@hexa:lossy.networkhexanot being able to work with sets makes this slightly more annyoing16:55:53
@adam:robins.wtfadamcstephensthere's some api functions in the docs above. appears the necessary inputs are there16:55:53
@hexa:lossy.networkhexaI already have something written16:56:08
@hexa:lossy.networkhexabut since I don't have a proper linter beyond incus itself16:56:23
@hexa:lossy.networkhexalol, where t f do log messages go17:06:53
@hexa:lossy.networkhexaloglevel is info and I use log_info and log_warn17:07:40
@adam:robins.wtfadamcstephensturn on debug logging?17:08:00
@hexa:lossy.networkhexa
def instance_placement(request, candidate_members):
    config = request.config
    group = config.get("user.ha_group")
    if group == None:
        log_info("Using default placement strategy.")
        return

    log_info(
        "Determining placement of instance '{}' for high-availability group '{}'".format(
            request.name, group
        )
    )

    # Get all instances in the same project
    instances = get_instances(location="", project=request.project)

    # Track which cluster members already host a group instance
    unavailable_member_names = []
    for instance in instances:
        if instance.status != "Running":
            continue
        if instance.config.get("user.ha_group") == group:
            unavailable_member_names.append(instance.location)

    log_info(
        "Cluster members hosting resources in high-availability group '{}': {}".format(
            group, ", ".join(unavailable_member_names)
        )
    )

    available_members = [member.server_name for member in candidate_members if member.server_name not in unavailable_member_names]

    if not available_members:
        log_warn(
            "No available cluster members for high-availability group '{}' criteria. Falling back to default placement strategy.".format(
                group
            )
        )
        return

    log_info(
        "Available cluster members for high-availability group '{}': {}".format(group, ", ".join(available_members))
    )

    # Choose the eligible member with the fewest instances
    def instance_count(name):
        return get_instances_count(location=name, project=request.project, pending=True)

    target_member = min(available_members, key=instance_count)
    set_target(target_member)
    log_info(
        "Placing instance '{instance}' on '{member}, because it has the lowest number of running resources.".format(
            instance=request.name, member=target_member
        )
    )
17:08:11
@hexa:lossy.networkhexafwiw17:08:12
@hexa:lossy.networkhexafeels very much like sad python17:08:41
@adam:robins.wtfadamcstephensnice17:08:52
@hexa:lossy.networkhexaalso very telling that I didn't find any placement script example out there17:09:30
@hexa:lossy.networkhexaonly examples in the repo for testing purposes17:09:38
@hexa:lossy.networkhexahttps://github.com/lxc/incus/blob/cdc0adbbb2c9202289190ace1825b105abf8c61b/test/suites/clustering_move.sh#L109 https://github.com/lxc/incus/blob/cdc0adbbb2c9202289190ace1825b105abf8c61b/test/suites/clustering_instance_placement_scriptlet.sh#L5 https://github.com/tomponline/starlark-examples/blob/826762c080ab1f8d15c541e8a9021f07b8cb0434/instance_placement.star#L617:09:58
@adam:robins.wtfadamcstephensyeah, i'm not surprised. 17:12:32
@hexa:lossy.networkhexaso, adam17:21:37
@hexa:lossy.networkhexasince you already maintain incus, and incus uses skylark17:21:44
@hexa:lossy.networkhexa why didn't you package skylint yet? 17:21:57
@hexa:lossy.networkhexa
$ git clone https://github.com/bazelbuild/bazel.git
$ cd bazel
$ bazel build //src/tools/skylark/java/com/google/devtools/skylark/skylint:Skylint
17:22:43
@adam:robins.wtfadamcstephenswell, i've never even seen it before :)17:22:47
@hexa:lossy.networkhexaimage.png
Download image.png
17:22:55
@hexa:lossy.networkhexathe correct answer is: I hate bazel builds17:23:17

Show newer messages


Back to Room ListRoom Version: 10