| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def down_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""
Stops old apps, doesn't start any new apps.
Used for the graceful_app_drain script.
"""
return {"create_app": False, "tasks_to_drain": set(old_non_draining_tasks)}
|
Stops old apps, doesn't start any new apps.
Used for the graceful_app_drain script.
|
down_bounce
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|
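A quick illustration of the bounce-method contract above: the returned dict tells the orchestrator whether to create a new app and which tasks to drain. A minimal sketch with toy task names, not paasta's real call site:

```python
# Hypothetical caller of down_bounce (toy data for illustration only)
result = down_bounce(
    new_config={},               # unused by down_bounce
    new_app_running=False,
    happy_new_tasks=[],
    old_non_draining_tasks=["task-a", "task-b"],
)
assert result == {"create_app": False, "tasks_to_drain": {"task-a", "task-b"}}
```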
def broadcast_log_all_services_running_here(line: str, soa_dir=DEFAULT_SOA_DIR) -> None:
"""Log a line of text to paasta logs of all services running on this host.
:param line: text to log
"""
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
services = get_all_services_running_here(cluster, soa_dir)
for service, instance, _ in services:
_log(
line=line,
service=service,
instance=instance,
component="monitoring",
cluster=cluster,
)
|
Log a line of text to paasta logs of all services running on this host.
:param line: text to log
|
broadcast_log_all_services_running_here
|
python
|
Yelp/paasta
|
paasta_tools/broadcast_log_to_services.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/broadcast_log_to_services.py
|
Apache-2.0
|
def get_registrations(self) -> List[str]:
"""
To support apollo we always register in
cassandra_<cluster>.main
"""
registrations = self.config_dict.get("registrations", [])
for registration in registrations:
try:
decompose_job_id(registration)
except InvalidJobNameError:
log.error(
"Provided registration {} for service "
"{} is invalid".format(registration, self.service)
)
return registrations or [
compose_job_id(self.get_service_name_smartstack(), "main")
]
|
To support apollo we always register in
cassandra_<cluster>.main
|
get_registrations
|
python
|
Yelp/paasta
|
paasta_tools/cassandracluster_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cassandracluster_tools.py
|
Apache-2.0
|
def load_cassandracluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> CassandraClusterDeploymentConfig:
"""Read a service instance's configuration for CassandraCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A CassandraClusterDeploymentConfig built from whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "cassandracluster", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for CassandraCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A CassandraClusterDeploymentConfig built from whatever was in the config for the service instance
|
load_cassandracluster_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/cassandracluster_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cassandracluster_tools.py
|
Apache-2.0
|
def container_lifetime(
pod: V1Pod,
) -> datetime.timedelta:
"""Return a time duration for how long the pod is alive"""
st = pod.status.start_time
return datetime.datetime.now(st.tzinfo) - st
|
Return a time duration for how long the pod is alive
|
container_lifetime
|
python
|
Yelp/paasta
|
paasta_tools/check_flink_services_health.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py
|
Apache-2.0
|
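The subtraction in container_lifetime works because it asks for "now" in the pod start time's own timezone; mixing naive and aware datetimes would raise a TypeError. A self-contained sketch of the same arithmetic:

```python
import datetime

# tz-aware start time, as k8s reports pod.status.start_time
start_time = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)

# asking for "now" in the same tzinfo keeps both operands aware
lifetime = datetime.datetime.now(start_time.tzinfo) - start_time
print(lifetime.total_seconds() > 60)
```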
def healthy_flink_containers_cnt(si_pods: Sequence[V1Pod], container_type: str) -> int:
"""Return count of healthy Flink containers with given type"""
return len(
[
pod
for pod in si_pods
if pod.metadata.labels["flink.yelp.com/container-type"] == container_type
and is_pod_ready(pod)
and container_lifetime(pod).total_seconds() > 60
]
)
|
Return count of healthy Flink containers with given type
|
healthy_flink_containers_cnt
|
python
|
Yelp/paasta
|
paasta_tools/check_flink_services_health.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py
|
Apache-2.0
|
def check_under_registered_taskmanagers(
instance_config: FlinkDeploymentConfig,
expected_count: int,
cr_name: str,
is_eks: bool,
) -> Tuple[bool, str, str]:
"""Check if not enough taskmanagers have been registered to the jobmanager and
returns both the result of the check in the form of a boolean and a human-readable
text to be used in logging or monitoring events.
"""
unhealthy = True
if cr_name != "":
try:
overview = flink_tools.get_flink_jobmanager_overview(
cr_name, instance_config.cluster, is_eks
)
num_reported = overview.get("taskmanagers", 0)
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
f"{instance_config.job_id} has {num_reported}/{expected_count} "
f"taskmanagers reported by dashboard (threshold: {crit_threshold}%)"
)
unhealthy, _ = is_under_replicated(
num_reported, expected_count, crit_threshold
)
except ValueError as e:
output = (
f"Dashboard of service {instance_config.job_id} is not available ({e})"
)
else:
output = f"Dashboard of service {instance_config.job_id} is not available"
if unhealthy:
description = f"""
This alert means that the Flink dashboard is not reporting the expected
number of taskmanagers.
Reasons this might be happening:
The service may simply be unhealthy. There also may not be enough resources
in the cluster to support the requested instance count.
Things you can do:
* Fix the cause of the unhealthy service. Try running:
paasta status -s {instance_config.service} -i {instance_config.instance} -c {instance_config.cluster} -vv
"""
else:
description = f"{instance_config.job_id} taskmanager is available"
return unhealthy, output, description
|
Check if not enough taskmanagers have been registered to the jobmanager and
returns both the result of the check in the form of a boolean and a human-readable
text to be used in logging or monitoring events.
|
check_under_registered_taskmanagers
|
python
|
Yelp/paasta
|
paasta_tools/check_flink_services_health.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py
|
Apache-2.0
|
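`is_under_replicated` comes from paasta's utils and is not shown in this row; the following is a hedged stand-in for the threshold math it is assumed to perform (flag when the reported count falls below crit_threshold percent of the expected count):

```python
from typing import Tuple

def is_under_replicated_sketch(
    num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
    # assumed semantics: under-replicated when availability drops below
    # crit_threshold percent of the expected count
    if expected_count == 0:
        return (False, 100.0)
    ratio = num_available / expected_count * 100
    return (ratio < crit_threshold, ratio)

print(is_under_replicated_sketch(7, 10, 80))  # (True, 70.0): 70% < 80% threshold
```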
def get_cr_name(si_pods: Sequence[V1Pod]) -> str:
"""Returns the flink custom resource name based on the pod name. We arbitrarily choose a jobmanager pod here.
This change is related to FLINK-3129
"""
jobmanager_pod = [
pod
for pod in si_pods
if pod.metadata.labels["flink.yelp.com/container-type"] == "jobmanager"
and is_pod_ready(pod)
and container_lifetime(pod).total_seconds() > 60
]
if len(jobmanager_pod) == 1:
return jobmanager_pod[0].metadata.name.split("-jobmanager-")[0]
else:
return ""
|
Returns the flink custom resource name based on the pod name. We arbitrarily choose a jobmanager pod here.
This change is related to FLINK-3129
|
get_cr_name
|
python
|
Yelp/paasta
|
paasta_tools/check_flink_services_health.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py
|
Apache-2.0
|
def check_kubernetes_pod_replication(
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig],
pods_by_service_instance: Dict[str, Dict[str, List[V1Pod]]],
replication_checker: KubeSmartstackEnvoyReplicationChecker,
dry_run: bool = False,
) -> Optional[bool]:
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or k8s)
:param instance_config: an instance of KubernetesDeploymentConfig or EksDeploymentConfig
:param replication_checker: an instance of KubeSmartstackEnvoyReplicationChecker
"""
default_alert_after = DEFAULT_ALERT_AFTER
expected_count = instance_config.get_instances()
log.info(
"Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)
)
proxy_port = get_proxy_port_for_instance(instance_config)
registrations = instance_config.get_registrations()
# If this instance does not autoscale and only has 1 instance, set alert after to 20m.
# Otherwise, set it to 10 min.
if (
not instance_config.is_autoscaling_enabled()
and instance_config.get_instances() == 1
):
default_alert_after = "20m"
if "monitoring" not in instance_config.config_dict:
instance_config.config_dict["monitoring"] = {}
instance_config.config_dict["monitoring"][
"alert_after"
] = instance_config.config_dict["monitoring"].get(
"alert_after", default_alert_after
)
# if the primary registration does not match the service_instance name then
# the best we can do is check k8s for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
is_well_replicated = monitoring_tools.check_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
replication_checker=replication_checker,
dry_run=dry_run,
)
return is_well_replicated
else:
check_healthy_kubernetes_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
pods_by_service_instance=pods_by_service_instance,
dry_run=dry_run,
)
return None
|
Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or k8s)
:param instance_config: an instance of KubernetesDeploymentConfig or EksDeploymentConfig
:param replication_checker: an instance of KubeSmartstackEnvoyReplicationChecker
|
check_kubernetes_pod_replication
|
python
|
Yelp/paasta
|
paasta_tools/check_kubernetes_services_replication.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_kubernetes_services_replication.py
|
Apache-2.0
|
def read_oom_events_from_scribe(cluster, superregion, num_lines=1000):
"""Read the latest 'num_lines' lines from OOM_EVENTS_STREAM and iterate over them."""
# paasta configs include a map of cluster -> env that is expected by scribe
log_reader_config = load_system_paasta_config().get_log_reader()
cluster_map = log_reader_config["options"]["cluster_map"]
scribe_env = cluster_map[cluster]
# `scribe_env_to_locations` slightly mutates the scribe env based on whether
# or not it is in dev or prod
host, port = scribereader.get_tail_host_and_port(
**scribe_env_to_locations(scribe_env),
)
stream = scribereader.get_stream_tailer(
stream_name=OOM_EVENTS_STREAM,
tailing_host=host,
tailing_port=port,
lines=num_lines,
superregion=superregion,
)
try:
for line in stream:
try:
j = json.loads(line)
if j.get("cluster", "") == cluster:
yield j
except json.decoder.JSONDecodeError:
pass
except StreamTailerSetupError as e:
if "No data in stream" in str(e):
pass
else:
raise e
|
Read the latest 'num_lines' lines from OOM_EVENTS_STREAM and iterate over them.
|
read_oom_events_from_scribe
|
python
|
Yelp/paasta
|
paasta_tools/check_oom_events.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py
|
Apache-2.0
|
def latest_oom_events(cluster, superregion, interval=60):
"""
:returns: {(service, instance): {container_id, ...}}
if the number of events > 0
"""
start_timestamp = int(time.time()) - interval
res = {}
for e in read_oom_events_from_scribe(cluster, superregion):
if e["timestamp"] > start_timestamp:
key = (e["service"], e["instance"])
res.setdefault(key, set()).add(e.get("container_id", ""))
return res
|
:returns: {(service, instance): {container_id, ...}}
if the number of events > 0
|
latest_oom_events
|
python
|
Yelp/paasta
|
paasta_tools/check_oom_events.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py
|
Apache-2.0
|
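The setdefault/add pattern in latest_oom_events deduplicates container ids per (service, instance); a standalone run with toy events (note the function returns sets of container ids, not OOMEvent objects):

```python
import time

events = [
    {"timestamp": int(time.time()), "service": "svc", "instance": "main", "container_id": "c1"},
    {"timestamp": int(time.time()), "service": "svc", "instance": "main", "container_id": "c1"},
    {"timestamp": 0, "service": "svc", "instance": "main", "container_id": "stale"},
]
start_timestamp = int(time.time()) - 60
res = {}
for e in events:
    if e["timestamp"] > start_timestamp:
        key = (e["service"], e["instance"])
        res.setdefault(key, set()).add(e.get("container_id", ""))
print(res)  # {('svc', 'main'): {'c1'}} -- duplicates collapse, stale events drop
```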
def compose_sensu_status(
instance, oom_events, is_check_enabled, alert_threshold, check_interval
):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
:param is_check_enabled: boolean to indicate whether the check is enabled for the instance
"""
interval_string = f"{check_interval} minute(s)"
instance_name = f"{instance.service}.{instance.instance}"
if not is_check_enabled:
return (Status.OK, f"This check is disabled for {instance_name}.")
if not oom_events:
return (
Status.OK,
f"No oom events for {instance_name} in the last {interval_string}.",
)
elif len(oom_events) >= alert_threshold:
return (
Status.CRITICAL,
f"The Out Of Memory killer killed processes for {instance_name} "
f"in the last {interval_string}.",
)
else:
# If the number of OOM kills isn't above the alert threshold,
# don't send anything. This will keep an alert open if it's already open,
# but won't start a new alert if there wasn't one yet
return None
|
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
:param is_check_enabled: boolean to indicate whether the check is enabled for the instance
|
compose_sensu_status
|
python
|
Yelp/paasta
|
paasta_tools/check_oom_events.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py
|
Apache-2.0
|
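The three-way outcome of compose_sensu_status (OK, CRITICAL, or None to leave any open alert untouched) in isolation, with a toy Status enum standing in for paasta's:

```python
from enum import Enum
from typing import Optional, Tuple

class Status(Enum):  # toy stand-in for paasta's Status
    OK = 0
    CRITICAL = 2

def status_for(n_events: int, alert_threshold: int) -> Optional[Tuple[Status, str]]:
    if n_events == 0:
        return (Status.OK, "no OOM events")
    if n_events >= alert_threshold:
        return (Status.CRITICAL, "OOM kills at or above threshold")
    return None  # keeps an already-open alert open without starting a new one

print(status_for(0, 3), status_for(2, 3), status_for(3, 3))
```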
def send_sensu_event(instance, oom_events, args):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
"""
check_name = compose_check_name_for_service_instance(
"oom-killer", instance.service, instance.instance
)
monitoring_overrides = instance.get_monitoring()
status = compose_sensu_status(
instance=instance,
oom_events=oom_events,
is_check_enabled=monitoring_overrides.get("check_oom_events", True),
alert_threshold=args.alert_threshold,
check_interval=args.check_interval,
)
if not status:
return
memory_limit = instance.get_mem()
try:
memory_limit_str = f"{int(memory_limit)}MB"
except ValueError:
memory_limit_str = memory_limit
monitoring_overrides.update(
{
"page": False,
"alert_after": "0m",
"realert_every": args.realert_every,
"runbook": "y/check-oom-events",
"tip": (
"Follow the runbook to investigate and rightsize memory usage "
f"(curr: {memory_limit_str})"
),
}
)
return monitoring_tools.send_event(
service=instance.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status[0],
output=status[1],
soa_dir=instance.soa_dir,
dry_run=args.dry_run,
)
|
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
|
send_sensu_event
|
python
|
Yelp/paasta
|
paasta_tools/check_oom_events.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py
|
Apache-2.0
|
def set_local_vars_configuration_to_none(obj: Any, visited: Optional[Set[int]] = None) -> None:
"""
Recursive function to ensure that k8s clientlib objects are pickleable.
Without this, k8s clientlib objects can't be used by multiprocessing functions
as those pickle data to shuttle between processes.
"""
if visited is None:
visited = set()
# Avoid infinite recursion for objects that have already been visited
obj_id = id(obj)
if obj_id in visited:
return
visited.add(obj_id)
# if the object has the attribute, set it to None to essentially delete it
if hasattr(obj, "local_vars_configuration"):
setattr(obj, "local_vars_configuration", None)
# recursively check attributes of the object
if hasattr(obj, "__dict__"):
for attr_name, attr_value in obj.__dict__.items():
set_local_vars_configuration_to_none(attr_value, visited)
# if the object is iterable/a collection, iterate over its elements
elif isinstance(obj, (list, tuple, set)):
for item in obj:
set_local_vars_configuration_to_none(item, visited)
elif isinstance(obj, dict):
for value in obj.values():
set_local_vars_configuration_to_none(value, visited)
|
Recursive function to ensure that k8s clientlib objects are pickleable.
Without this, k8s clientlib objects can't be used by multiprocessing functions
as those pickle data to shuttle between processes.
|
set_local_vars_configuration_to_none
|
python
|
Yelp/paasta
|
paasta_tools/check_services_replication_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/check_services_replication_tools.py
|
Apache-2.0
|
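A minimal demonstration of why the scrub matters, using the function above: an object graph holding an unpicklable attribute (a lambda stands in for the k8s client machinery) pickles cleanly once set_local_vars_configuration_to_none has cleared it.

```python
import pickle

class FakeK8sObject:  # toy stand-in for a k8s clientlib model
    def __init__(self):
        self.local_vars_configuration = lambda: None  # lambdas cannot be pickled
        self.child = None

obj = FakeK8sObject()
obj.child = FakeK8sObject()

set_local_vars_configuration_to_none(obj)
pickle.dumps(obj)  # succeeds now that both offending attributes are None
```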
def instance_is_not_bouncing(
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig],
applications: List[Application],
) -> bool:
"""
:param instance_config: a KubernetesDeploymentConfig or an EksDeploymentConfig with the configuration of the instance
:param applications: a list of all deployments or stateful sets on the cluster that match the service
and instance of provided instance_config
"""
for application in applications:
if isinstance(application, DeploymentWrapper):
existing_app = application.item
if (
(
existing_app.metadata.namespace != instance_config.get_namespace()
and (instance_config.get_bounce_method() == "downthenup")
)
or (
existing_app.metadata.namespace == instance_config.get_namespace()
and (
instance_config.get_instances()
<= (existing_app.status.ready_replicas or 0)
)
)
) or instance_config.get_desired_state() == "stop":
return True
elif (
isinstance(application, StatefulSetWrapper)
and application.item.metadata.namespace != instance_config.get_namespace()
):
log.critical(
"Paasta detected a StatefulSet that was migrated to a new namespace"
"StatefulSet bouncing across namespaces is not supported"
)
raise StatefulSetsAreNotSupportedError
return False
|
:param instance_config: a KubernetesDeploymentConfig or an EksDeploymentConfig with the configuration of the instance
:param applications: a list of all deployments or stateful sets on the cluster that match the service
and instance of provided instance_config
|
instance_is_not_bouncing
|
python
|
Yelp/paasta
|
paasta_tools/cleanup_kubernetes_jobs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py
|
Apache-2.0
|
def get_applications_to_kill(
applications_dict: Dict[Tuple[str, str], List[Application]],
cluster: str,
valid_services: Set[Tuple[str, str]],
soa_dir: str,
eks: bool = False,
) -> List[Application]:
"""
:param applications_dict: A dictionary with (service, instance) as keys and a list of applications for each tuple
:param cluster: paasta cluster
:param valid_services: a set with the valid (service, instance) tuples for this cluster
:param soa_dir: The SOA config directory to read from
:return: list of applications to kill
"""
log.info("Determining apps to be killed")
applications_to_kill: List[Application] = []
for (service, instance), applications in applications_dict.items():
if len(applications) >= 1:
if (service, instance) not in valid_services:
applications_to_kill.extend(applications)
else:
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig]
if eks:
instance_config = load_eks_service_config(
cluster=cluster,
service=service,
instance=instance,
soa_dir=soa_dir,
)
else:
instance_config = load_kubernetes_service_config(
cluster=cluster,
service=service,
instance=instance,
soa_dir=soa_dir,
)
try:
not_bouncing = instance_is_not_bouncing(
instance_config, applications
)
except StatefulSetsAreNotSupportedError:
overrides = {
"page": True,
"alert_after": 0,
"tip": f"Revert {service}.{instance} in soa-configs to not include the namespace key.",
"runbook": "y/rb-paasta-namespace",
"ticket": True,
}
send_event(
service=service,
check_name=f"statefulset_bounce_{service}.{instance}",
overrides=overrides,
status=Status.CRITICAL, # type: ignore
output=f"Unsupported bounce: {service}.{instance}. PaaSTA managed StatefulSets do not support custom namespace",
soa_dir=soa_dir,
)
else:
for application in applications:
if (
application.kube_deployment.namespace
!= instance_config.get_namespace()
and not_bouncing
):
applications_to_kill.append(application)
return applications_to_kill
|
:param applications_dict: A dictionary with (service, instance) as keys and a list of applications for each tuple
:param cluster: paasta cluster
:param valid_services: a set with the valid (service, instance) tuples for this cluster
:param soa_dir: The SOA config directory to read from
:return: list of applications to kill
|
get_applications_to_kill
|
python
|
Yelp/paasta
|
paasta_tools/cleanup_kubernetes_jobs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py
|
Apache-2.0
|
def cleanup_unused_apps(
soa_dir: str,
cluster: str,
kill_threshold: float = 0.5,
force: bool = False,
eks: bool = False,
) -> None:
"""Clean up old or invalid jobs/apps from kubernetes. Retrieves
both a list of apps currently in kubernetes and a list of valid
app ids in order to determine what to kill.
:param soa_dir: The SOA config directory to read from
:param cluster: paasta cluster to clean
:param kill_threshold: The decimal fraction of apps we think is
sane to kill when this job runs.
:param force: Force the cleanup if we are above the kill_threshold"""
log.info("Creating KubeClient")
kube_client = KubeClient()
log.info("Loading running Kubernetes apps")
applications_dict = list_all_applications(kube_client, APPLICATION_TYPES)
log.info("Retrieving valid apps from yelpsoa_configs")
valid_services = set(
get_services_for_cluster(
instance_type="eks" if eks else "kubernetes", soa_dir=soa_dir
)
)
applications_to_kill: List[Application] = get_applications_to_kill(
applications_dict, cluster, valid_services, soa_dir, eks
)
log.debug("Running apps: %s" % list(applications_dict))
log.debug("Valid apps: %s" % valid_services)
log.debug("Terminating: %s" % applications_to_kill)
if applications_to_kill:
above_kill_threshold = float(len(applications_to_kill)) / float(
len(applications_dict)
) > float(kill_threshold)
if above_kill_threshold and not force:
log.critical(
"Paasta was about to kill more than %s of the running services, this "
"is probably a BAD mistake!, run again with --force if you "
"really need to destroy everything" % kill_threshold
)
raise DontKillEverythingError
for application in applications_to_kill:
with alert_state_change(application, cluster):
application.deep_delete(kube_client)
|
Clean up old or invalid jobs/apps from kubernetes. Retrieves
both a list of apps currently in kubernetes and a list of valid
app ids in order to determine what to kill.
:param soa_dir: The SOA config directory to read from
:param cluster: paasta cluster to clean
:param kill_threshold: The decimal fraction of apps we think is
sane to kill when this job runs.
:param force: Force the cleanup if we are above the kill_threshold
|
cleanup_unused_apps
|
python
|
Yelp/paasta
|
paasta_tools/cleanup_kubernetes_jobs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py
|
Apache-2.0
|
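The safety valve in cleanup_unused_apps compares the fraction of running apps slated for deletion against kill_threshold; a worked instance of that arithmetic:

```python
applications_to_kill, applications_running = 3, 4
kill_threshold = 0.5

above_kill_threshold = applications_to_kill / applications_running > kill_threshold
print(above_kill_threshold)  # True: 75% would be killed, so --force is required
```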
def write_auto_config_data(
service: str,
extra_info: str,
data: Dict[str, Any],
soa_dir: str = DEFAULT_SOA_DIR,
sub_dir: Optional[str] = None,
comment: Optional[str] = None,
) -> Optional[str]:
"""
Replaces the contents of an automated config file for a service, or creates the file if it does not exist.
Returns the filename of the modified file, or None if no file was written.
"""
yaml.YAML().representer.add_representer(type(None), my_represent_none)
service_dir = f"{soa_dir}/{service}"
if not os.path.exists(service_dir):
log.warning(
f"Service {service} does not exist in configs, skipping auto config update"
)
return None
subdir = f"{service_dir}/{sub_dir}" if sub_dir else service_dir
if not os.path.exists(subdir):
os.mkdir(subdir)
filename = f"{subdir}/{extra_info}.yaml"
with open(filename, "w") as f:
# TODO: this can be collapsed into one code block. It is separated into two
# because doing content.update(data) results in losing comments from `data`;
# we should be able to handle adding a header comment and yaml with comments in it
# without this if/else block
if comment:
content = (
yaml.round_trip_load(
comment.format(
# this is a bit of a hack, but we've decided to not rename files back to kubernetes-*
# files. while we still need to update things to reference the eks files directly, there's
# still a couple of places where we still need kubernetes-* files (e.g., unmigrated operators)
# so for now let's just assume that autotuned things will always actually have their human-managed
# config in eks-* files
regular_filename=f"{service}/{extra_info.replace('kubernetes-', 'eks-')}.yaml",
)
)
if comment
else {}
)
content.update(data)
else:
# avoids content.update to preserve comments in `data`
content = data
f.write(yaml.round_trip_dump(content))
return filename
|
Replaces the contents of an automated config file for a service, or creates the file if it does not exist.
Returns the filename of the modified file, or None if no file was written.
|
write_auto_config_data
|
python
|
Yelp/paasta
|
paasta_tools/config_utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/config_utils.py
|
Apache-2.0
|
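The `yaml` module in write_auto_config_data is ruamel.yaml (the round_trip_* calls imply its legacy API). A small sketch of the comment-preserving round trip the if/else above is working around; the document text is made up for illustration:

```python
import ruamel.yaml as yaml

doc = "# autogenerated from svc/eks-main.yaml; do not edit\nmain: {}\n"

# round-trip mode keeps the leading comment attached to the mapping
content = yaml.round_trip_load(doc)
content["main"]["cpus"] = 1
print(yaml.round_trip_dump(content))
# the '# autogenerated ...' comment survives the load/update/dump cycle
```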
def get_currently_deployed_sha(service, deploy_group, soa_dir=DEFAULT_SOA_DIR):
"""Tries to determine the currently deployed sha for a service and deploy_group,
returns None if there isn't one ready yet"""
try:
deployments = load_v2_deployments_json(service=service, soa_dir=soa_dir)
return deployments.get_git_sha_for_deploy_group(deploy_group=deploy_group)
except NoDeploymentsAvailable:
return None
|
Tries to determine the currently deployed sha for a service and deploy_group,
returns None if there isn't one ready yet
|
get_currently_deployed_sha
|
python
|
Yelp/paasta
|
paasta_tools/deployment_utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/deployment_utils.py
|
Apache-2.0
|
def get_currently_deployed_version(
service, deploy_group, soa_dir=DEFAULT_SOA_DIR
) -> Optional[DeploymentVersion]:
"""Tries to determine the currently deployed version for a service and deploy_group,
returns None if there isn't one ready yet"""
try:
deployments = load_v2_deployments_json(service=service, soa_dir=soa_dir)
return deployments.get_deployment_version_for_deploy_group(
deploy_group=deploy_group
)
except NoDeploymentsAvailable:
return None
|
Tries to determine the currently deployed version for a service and deploy_group,
returns None if there isn't one ready yet
|
get_currently_deployed_version
|
python
|
Yelp/paasta
|
paasta_tools/deployment_utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/deployment_utils.py
|
Apache-2.0
|
def register_drain_method(
name: str,
) -> Callable[[_RegisterDrainMethod_T], _RegisterDrainMethod_T]:
"""Returns a decorator that registers a DrainMethod subclass at a given name
so get_drain_method/list_drain_methods can find it."""
def outer(drain_method: _RegisterDrainMethod_T) -> _RegisterDrainMethod_T:
_drain_methods[name] = drain_method
return drain_method
return outer
|
Returns a decorator that registers a DrainMethod subclass at a given name
so get_drain_method/list_drain_methods can find it.
|
register_drain_method
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
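register_drain_method is the classic registry-decorator pattern: the decorator stores the class under a name and returns it unchanged, so lookup-by-name works later. A generic, self-contained version with a toy registry:

```python
from typing import Callable, Dict, Type

_registry: Dict[str, Type] = {}

def register(name: str) -> Callable[[Type], Type]:
    def outer(cls: Type) -> Type:
        _registry[name] = cls   # record the class under the given name
        return cls              # return it unchanged so the class still works
    return outer

@register("noop")
class NoopDrainMethod:
    pass

print(_registry["noop"] is NoopDrainMethod)  # True: lookup-by-name works
```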
async def drain(self, task: DrainTask) -> None:
"""Make a task stop receiving new traffic."""
raise NotImplementedError()
|
Make a task stop receiving new traffic.
|
drain
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
async def stop_draining(self, task: DrainTask) -> None:
"""Make a task that has previously been downed start receiving traffic again."""
raise NotImplementedError()
|
Make a task that has previously been downed start receiving traffic again.
|
stop_draining
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
async def is_draining(self, task: DrainTask) -> bool:
"""Return whether a task is being drained."""
raise NotImplementedError()
|
Return whether a task is being drained.
|
is_draining
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
async def is_safe_to_kill(self, task: DrainTask) -> bool:
"""Return True if a task is drained and ready to be killed, or False if we should wait."""
raise NotImplementedError()
|
Return True if a task is drained and ready to be killed, or False if we should wait.
|
is_safe_to_kill
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
def parse_success_codes(self, success_codes_str: str) -> Set[int]:
"""Expand a string like 200-399,407-409,500 to a set containing all the integers in between."""
acceptable_response_codes: Set[int] = set()
for series_str in str(success_codes_str).split(","):
if "-" in series_str:
start, end = series_str.split("-")
acceptable_response_codes.update(range(int(start), int(end) + 1))
else:
acceptable_response_codes.add(int(series_str))
return acceptable_response_codes
|
Expand a string like 200-399,407-409,500 to a set containing all the integers in between.
|
parse_success_codes
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
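A worked run of the range expansion above, rewritten as a free function for this sketch (the original is a method, so `self` is dropped):

```python
from typing import Set

def parse_success_codes(success_codes_str: str) -> Set[int]:
    acceptable: Set[int] = set()
    for series_str in str(success_codes_str).split(","):
        if "-" in series_str:
            start, end = series_str.split("-")
            acceptable.update(range(int(start), int(end) + 1))
        else:
            acceptable.add(int(series_str))
    return acceptable

print(sorted(parse_success_codes("200-202,204")))  # [200, 201, 202, 204]
```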
async def issue_request(self, url_spec: UrlSpec, task: DrainTask) -> None:
"""Issue a request to the URL specified by url_spec regarding the task given."""
format_params = self.get_format_params(task)
urls = [
self.format_url(url_spec["url_format"], param) for param in format_params
]
method = url_spec.get("method", "GET").upper()
async with aiohttp.ClientSession() as session:
reqs = [
session.request(
method=method,
url=url,
headers={"User-Agent": get_user_agent()},
timeout=15,
)
for url in urls
]
res = await asyncio.gather(*reqs)
for response in res:
if not self.check_response_code(
response.status, url_spec["success_codes"]
):
raise StatusCodeNotAcceptableError(
f"Unacceptable status code {response.status} not in {url_spec['success_codes']} when hitting {response.url}"
)
|
Issue a request to the URL specified by url_spec regarding the task given.
|
issue_request
|
python
|
Yelp/paasta
|
paasta_tools/drain_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py
|
Apache-2.0
|
def load_eks_service_config_no_cache(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> "EksDeploymentConfig":
"""Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: An EksDeploymentConfig built from whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "eks", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = EksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return EksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: An EksDeploymentConfig built from whatever was in the config for the service instance
|
load_eks_service_config_no_cache
|
python
|
Yelp/paasta
|
paasta_tools/eks_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/eks_tools.py
|
Apache-2.0
|
def are_services_up_in_pod(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
registrations: Collection[str],
pod_ip: str,
pod_port: int,
) -> bool:
"""Returns whether a service in a k8s pod is reachable via envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param registrations: The service_name.instance_name of the services
:param pod_ip: IP of the pod itself
:param pod_port: The port to reach the service in the pod
"""
for registration in registrations:
backends_per_registration = get_backends(
registration,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
healthy_backends = [
backend
for backend in backends_per_registration.get(registration, [])
if backend[0]["address"] == pod_ip
and backend[0]["port_value"] == pod_port
and backend[0]["eds_health_status"] == "HEALTHY"
]
if not healthy_backends:
return False
return True
|
Returns whether a service in a k8s pod is reachable via envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param registrations: The service_name.instance_name of the services
:param pod_ip: IP of the pod itself
:param pod_port: The port to reach the service in the pod
|
are_services_up_in_pod
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
def are_namespaces_up_in_eds(
envoy_eds_path: str,
namespaces: Collection[str],
pod_ip: str,
pod_port: int,
) -> bool:
"""Returns whether a Pod is registered on Envoy through the EDS
:param envoy_eds_path: path where EDS yaml files are stored
:param namespaces: list of namespaces to check
:param pod_ip: IP of the pod
:param pod_port: The port to reach the service in the pod
"""
for namespace in namespaces:
backends_from_eds = get_backends_from_eds(namespace, envoy_eds_path)
if (pod_ip, pod_port) not in backends_from_eds:
return False
return True
|
Returns whether a Pod is registered on Envoy through the EDS
:param envoy_eds_path: path where EDS yaml files are stored
:param namespaces: list of namespaces to check
:param pod_ip: IP of the pod
:param pod_port: The port to reach the service in the pod
|
are_namespaces_up_in_eds
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
def get_casper_endpoints(
clusters_info: Mapping[str, Any]
) -> FrozenSet[Tuple[str, int]]:
"""Filters out and returns casper endpoints from Envoy clusters."""
casper_endpoints: Set[Tuple[str, int]] = set()
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].startswith("spectre.") and cluster_status[
"name"
].endswith(".egress_cluster"):
for host_status in cluster_status["host_statuses"]:
casper_endpoints.add(
(
host_status["address"]["socket_address"]["address"],
host_status["address"]["socket_address"]["port_value"],
)
)
return frozenset(casper_endpoints)
|
Filters out and returns casper endpoints from Envoy clusters.
|
get_casper_endpoints
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
def get_backends_from_eds(namespace: str, envoy_eds_path: str) -> List[Tuple[str, int]]:
"""Returns a list of backends for a given namespace. Casper backends are also returned (if present).
:param namespace: return backends for this namespace
:param envoy_eds_path: path where EDS yaml files are stored
:returns backends: a list of tuples representing the backends for
the requested service
"""
backends = []
eds_file_for_namespace = f"{envoy_eds_path}/{namespace}/{namespace}.yaml"
if os.access(eds_file_for_namespace, os.R_OK):
with open(eds_file_for_namespace) as f:
eds_yaml = yaml.safe_load(f)
for resource in eds_yaml.get("resources", []):
endpoints = resource.get("endpoints")
# endpoints could be None if there are no backends listed
if endpoints:
for endpoint in endpoints:
for lb_endpoint in endpoint.get("lb_endpoints", []):
address = lb_endpoint["endpoint"]["address"][
"socket_address"
]["address"]
port_value = lb_endpoint["endpoint"]["address"][
"socket_address"
]["port_value"]
backends.append((address, port_value))
return backends
|
Returns a list of backends for a given namespace. Casper backends are also returned (if present).
:param namespace: return backends for this namespace
:param envoy_eds_path: path where EDS yaml files are stored
:returns backends: a list of tuples representing the backends for
the requested service
|
get_backends_from_eds
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
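The nested extraction in get_backends_from_eds, run against a minimal in-memory EDS-style document (hypothetical values):

```python
import yaml  # PyYAML is enough for this standalone sketch

doc = """
resources:
  - endpoints:
      - lb_endpoints:
          - endpoint:
              address:
                socket_address: {address: 10.0.0.1, port_value: 8888}
"""
backends = []
for resource in yaml.safe_load(doc).get("resources", []):
    for endpoint in resource.get("endpoints") or []:  # endpoints may be None
        for lb_endpoint in endpoint.get("lb_endpoints", []):
            sa = lb_endpoint["endpoint"]["address"]["socket_address"]
            backends.append((sa["address"], sa["port_value"]))
print(backends)  # [('10.0.0.1', 8888)]
```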
def get_backends(
service: str,
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
if service:
services = [service]
else:
services = None
return get_multiple_backends(
services,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
|
Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
|
get_backends
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
def get_multiple_backends(
services: Optional[Sequence[str]],
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
resolve_hostnames: bool = True,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
clusters_info = retrieve_envoy_clusters(
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
casper_endpoints = get_casper_endpoints(clusters_info)
backends: DefaultDict[
str, List[Tuple[EnvoyBackend, bool]]
] = collections.defaultdict(list)
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].endswith(".egress_cluster"):
service_name = cluster_status["name"][: -len(".egress_cluster")]
if services is None or service_name in services:
cluster_backends = []
casper_endpoint_found = False
for host_status in cluster_status["host_statuses"]:
address = host_status["address"]["socket_address"]["address"]
port_value = host_status["address"]["socket_address"][
"port_value"
]
# Check if this endpoint is actually a casper backend
# If so, omit from the service's list of backends
if not service_name.startswith("spectre."):
if (address, port_value) in casper_endpoints:
casper_endpoint_found = True
continue
hostname = address
if resolve_hostnames:
try:
hostname = socket.gethostbyaddr(address)[0].split(".")[
0
]
except socket.herror:
# Default to the raw IP address if we can't lookup the hostname
pass
cluster_backends.append(
(
EnvoyBackend(
address=address,
port_value=port_value,
hostname=hostname,
eds_health_status=host_status["health_status"][
"eds_health_status"
],
weight=host_status["weight"],
),
casper_endpoint_found,
)
)
backends[service_name] += cluster_backends
return backends
|
Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
|
get_multiple_backends
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
def match_backends_and_pods(
backends: Iterable[EnvoyBackend],
pods: Iterable[V1Pod],
) -> List[Tuple[Optional[EnvoyBackend], Optional[V1Pod]]]:
"""Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param pods: A list of pods
"""
# { ip : [backend1, backend2], ... }
backends_by_ip: DefaultDict[str, List[EnvoyBackend]] = collections.defaultdict(list)
backend_pod_pairs = []
for backend in backends:
ip = backend["address"]
backends_by_ip[ip].append(backend)
for pod in pods:
ip = pod.status.pod_ip
for backend in backends_by_ip.pop(ip, [None]):
backend_pod_pairs.append((backend, pod))
# we've been popping in the above loop, so anything left didn't match a k8s pod.
for backends in backends_by_ip.values():
for backend in backends:
backend_pod_pairs.append((backend, None))
return backend_pod_pairs
|
Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param pods: A list of pods
|
match_backends_and_pods
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
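The pop-with-default idiom in match_backends_and_pods guarantees each backend appears exactly once and unmatched pods still surface; here with dict backends and SimpleNamespace standing in for V1Pod:

```python
import collections
from types import SimpleNamespace

backends = [{"address": "10.0.0.1"}, {"address": "10.0.0.9"}]
pods = [
    SimpleNamespace(status=SimpleNamespace(pod_ip="10.0.0.1")),  # matches
    SimpleNamespace(status=SimpleNamespace(pod_ip="10.0.0.2")),  # no backend
]

backends_by_ip = collections.defaultdict(list)
for backend in backends:
    backends_by_ip[backend["address"]].append(backend)

pairs = []
for pod in pods:
    for backend in backends_by_ip.pop(pod.status.pod_ip, [None]):
        pairs.append((backend, pod))
for leftover in backends_by_ip.values():  # backends with no matching pod
    pairs.extend((backend, None) for backend in leftover)
print(len(pairs))  # 3: one matched pair, one pod-only, one backend-only
```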
def get_replication_for_all_services(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, int]:
"""Returns the replication level for all services known to this Envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port number that this check should contact for replication information.
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
"""
backends = get_multiple_backends(
services=None,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
resolve_hostnames=False, # we're not really going to use the hostnames, so skip fetching them to save time
)
return collections.Counter(
[
service_name
for service_name, service_backends in backends.items()
for b in service_backends
if backend_is_up(b[0])
]
)
|
Returns the replication level for all services known to this Envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port number that this check should contact for replication information.
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
|
get_replication_for_all_services
|
python
|
Yelp/paasta
|
paasta_tools/envoy_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py
|
Apache-2.0
|
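The Counter construction in get_replication_for_all_services counts one entry per healthy backend, keyed by service; in isolation with toy data and a trivial backend_is_up stand-in:

```python
import collections

def backend_is_up(backend):  # stand-in: assumed to inspect eds_health_status
    return backend["eds_health_status"] == "HEALTHY"

backends = {
    "svc_a.main": [({"eds_health_status": "HEALTHY"}, False),
                   ({"eds_health_status": "UNHEALTHY"}, False)],
    "svc_b.main": [({"eds_health_status": "HEALTHY"}, False)],
}
counts = collections.Counter(
    service_name
    for service_name, service_backends in backends.items()
    for b in service_backends
    if backend_is_up(b[0])
)
print(counts)  # Counter({'svc_a.main': 1, 'svc_b.main': 1})
```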
def _yocalhost_rule(port, comment, protocol="tcp"):
"""Return an iptables rule allowing access to a yocalhost port."""
return iptables.Rule(
protocol=protocol,
src="0.0.0.0/0.0.0.0",
dst="169.254.255.254/255.255.255.255",
target="ACCEPT",
matches=(
("comment", (("comment", (comment,)),)),
(protocol, (("dport", (str(port),)),)),
),
target_parameters=(),
)
|
Return an iptables rule allowing access to a yocalhost port.
|
_yocalhost_rule
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def services_running_here():
"""Generator helper that yields (service, instance, mac address) of both
mesos tasks.
"""
for container in get_running_mesos_docker_containers():
if container["HostConfig"]["NetworkMode"] != "bridge":
continue
service = container["Labels"].get("paasta_service")
instance = container["Labels"].get("paasta_instance")
if service is None or instance is None:
continue
network_info = container["NetworkSettings"]["Networks"]["bridge"]
mac = network_info["MacAddress"]
ip = network_info["IPAddress"]
yield service, instance, mac, ip
|
Generator helper that yields (service, instance, mac, ip) tuples for
running Mesos tasks.
|
services_running_here
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def _ensure_common_chain():
"""The common chain allows access for all services to certain resources."""
iptables.ensure_chain(
"PAASTA-COMMON",
(
# Allow return traffic for incoming connections
iptables.Rule(
protocol="ip",
src="0.0.0.0/0.0.0.0",
dst="0.0.0.0/0.0.0.0",
target="ACCEPT",
matches=(("conntrack", (("ctstate", ("ESTABLISHED",)),)),),
target_parameters=(),
),
_yocalhost_rule(1463, "scribed"),
_yocalhost_rule(8125, "metrics-relay", protocol="udp"),
_yocalhost_rule(3030, "sensu"),
iptables.Rule(
protocol="ip",
src="0.0.0.0/0.0.0.0",
dst="0.0.0.0/0.0.0.0",
target="PAASTA-DNS",
matches=(),
target_parameters=(),
),
),
)
|
The common chain allows access for all services to certain resources.
|
_ensure_common_chain
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def ensure_service_chains(service_groups, soa_dir, synapse_service_dir):
"""Ensure service chains exist and have the right rules.
service_groups is a dict {ServiceGroup: set([mac_address..])}
Returns dictionary {[service chain] => [list of mac addresses]}.
"""
chains = {}
for service, macs in service_groups.items():
service.update_rules(soa_dir, synapse_service_dir)
chains[service.chain_name] = macs
return chains
|
Ensure service chains exist and have the right rules.
service_groups is a dict {ServiceGroup: set([mac_address..])}
Returns dictionary {[service chain] => [list of mac addresses]}.
|
ensure_service_chains
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def general_update(soa_dir, synapse_service_dir):
"""Update iptables to match the current PaaSTA state."""
ensure_shared_chains()
service_chains = ensure_service_chains(
active_service_groups(), soa_dir, synapse_service_dir
)
ensure_dispatch_chains(service_chains)
garbage_collect_old_service_chains(service_chains)
|
Update iptables to match the current PaaSTA state.
|
general_update
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def prepare_new_container(soa_dir, synapse_service_dir, service, instance, mac):
"""Update iptables to include rules for a new (not yet running) MAC address"""
ensure_shared_chains() # probably already set, but just to be safe
service_group = ServiceGroup(service, instance)
service_group.update_rules(soa_dir, synapse_service_dir)
iptables.insert_rule("PAASTA", dispatch_rule(service_group.chain_name, mac))
|
Update iptables to include rules for a new (not yet running) MAC address
|
prepare_new_container
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def firewall_flock(flock_path=DEFAULT_FIREWALL_FLOCK_PATH):
"""Grab an exclusive flock to avoid concurrent iptables updates"""
with io.FileIO(flock_path, "w") as f:
with timed_flock(f, seconds=DEFAULT_FIREWALL_FLOCK_TIMEOUT_SECS):
yield
|
Grab an exclusive flock to avoid concurrent iptables updates
|
firewall_flock
|
python
|
Yelp/paasta
|
paasta_tools/firewall.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py
|
Apache-2.0
|
def load_flinkeks_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> FlinkEksDeploymentConfig:
"""Read a service instance's configuration for Flink on EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A FlinkEksDeploymentConfig built from whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "flinkeks", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = FlinkEksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return FlinkEksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for Flink on EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A FlinkEksDeploymentConfig built from whatever was in the config for the service instance
|
load_flinkeks_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/flinkeks_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flinkeks_tools.py
|
Apache-2.0
|
def get_pool(self) -> Optional[str]:
"""
Parses the Flink pool from this Flink deployment instance's configuration data, using the 'spot' key.
Returns:
The flink pool string.
"""
spot_config = self.config_dict.get("spot", None)
if spot_config is False:
return "flink"
else:
# if unset or True, the Flink instance defaults to the flink-spot pool
return "flink-spot"
|
Parses the Flink pool from this Flink deployment instance's configuration data, using the 'spot' key.
Returns:
The flink pool string.
|
get_pool
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
|
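The 'spot' semantics of get_pool, enumerated: only an explicit spot: false opts out of the spot pool; unset and true both map to flink-spot.

```python
for spot in (False, True, None):  # None models an unset 'spot' key
    pool = "flink" if spot is False else "flink-spot"
    print(f"spot={spot!r} -> {pool}")
```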
def load_flink_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> FlinkDeploymentConfig:
"""Read a service instance's configuration for Flink.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A FlinkDeploymentConfig built from whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "flink", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = FlinkDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return FlinkDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for Flink.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A FlinkDeploymentConfig built from whatever was in the config for the service instance
|
load_flink_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
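A hedged usage sketch of the loader above; the service, instance, and cluster names are invented, and this only runs in an environment with paasta_tools and yelpsoa-configs available:

from paasta_tools.flink_tools import load_flink_instance_config

# Hypothetical names; load_deployments=False skips reading deployments.json.
flink_config = load_flink_instance_config(
    service="example_service",
    instance="main",
    cluster="example-cluster",
    load_deployments=False,
)
print(flink_config.get_pool())  # "flink" or "flink-spot", per get_pool() above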
|
def _filter_for_endpoint(json_response: Any, endpoint: str) -> Mapping[str, Any]:
"""
Filter json response to include only a subset of fields.
"""
if endpoint == "config":
return {
key: value for (key, value) in json_response.items() if key in CONFIG_KEYS
}
if endpoint == "overview":
return {
key: value for (key, value) in json_response.items() if key in OVERVIEW_KEYS
}
if endpoint == "jobs":
return json_response
if endpoint.startswith("jobs"):
return {
key: value
for (key, value) in json_response.items()
if key in JOB_DETAILS_KEYS
}
return json_response
|
Filter json response to include only a subset of fields.
|
_filter_for_endpoint
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
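A self-contained sketch of the filtering behaviour; the real CONFIG_KEYS/OVERVIEW_KEYS/JOB_DETAILS_KEYS constants live in flink_tools.py, so the key set below is an assumption. Note that the order of checks matters in the original: the exact "jobs" endpoint is returned unfiltered before the startswith("jobs") branch filters job-detail responses.

# Assumed stand-in for the module-level CONFIG_KEYS constant.
CONFIG_KEYS = {"flink-version", "flink-revision"}

def filter_config_response(json_response):
    # Same shape as the "config" branch of _filter_for_endpoint().
    return {k: v for k, v in json_response.items() if k in CONFIG_KEYS}

raw = {"flink-version": "1.15.2", "flink-revision": "deadbeef", "unrelated": 1}
assert filter_config_response(raw) == {
    "flink-version": "1.15.2",
    "flink-revision": "deadbeef",
}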
|
def get_flink_jobs_from_paasta_api_client(
service: str, instance: str, client: PaastaOApiClient
) -> FlinkJobs:
"""Get flink jobs for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster"""
return client.service.list_flink_cluster_jobs(
service=service,
instance=instance,
)
|
Get flink jobs for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster
|
get_flink_jobs_from_paasta_api_client
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
|
async def get_flink_job_details_from_paasta_api_client(
service: str, instance: str, job_id: str, client: PaastaOApiClient
) -> FlinkJobDetails:
"""Get flink job details for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster"""
return client.service.get_flink_cluster_job_details(
service=service,
instance=instance,
job_id=job_id,
)
|
Get flink job details for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster
|
get_flink_job_details_from_paasta_api_client
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
|
def get_flink_config_from_paasta_api_client(
service: str, instance: str, client: PaastaOApiClient
) -> FlinkConfig:
"""Get flink config for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster configurations"""
return client.service.get_flink_cluster_config(
service=service,
instance=instance,
)
|
Get flink config for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster configurations
|
get_flink_config_from_paasta_api_client
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
|
def get_flink_overview_from_paasta_api_client(
service: str, instance: str, client: PaastaOApiClient
) -> FlinkClusterOverview:
"""Get flink cluster overview for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster overview"""
return client.service.get_flink_cluster_overview(
service=service,
instance=instance,
)
|
Get flink cluster overview for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster overview
|
get_flink_overview_from_paasta_api_client
|
python
|
Yelp/paasta
|
paasta_tools/flink_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py
|
Apache-2.0
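The four wrappers above share one shape: delegate a (service, instance) pair to the generated PaaSTA OpenAPI client. A hedged wiring sketch; the client constructor and all names below are assumptions:

from paasta_tools import flink_tools
from paasta_tools.api.client import get_paasta_oapi_client  # assumed helper

# Hypothetical cluster/service/instance names.
client = get_paasta_oapi_client(cluster="example-cluster")
overview = flink_tools.get_flink_overview_from_paasta_api_client(
    service="example_service", instance="main", client=client
)
jobs = flink_tools.get_flink_jobs_from_paasta_api_client(
    service="example_service", instance="main", client=client
)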
|
def get_deploy_group_mappings(
soa_dir: str, service: str
) -> Tuple[Dict[str, V1_Mapping], V2_Mappings]:
"""Gets mappings from service:deploy_group to services-service:paasta-hash-image_version,
where hash is the current SHA at the HEAD of branch_name and image_version
can be used to provide additional version information for the Docker image.
This is done for the given service in soa_dir.
:param soa_dir: The SOA configuration directory to read from
:param service: The service to generate mappings for
:returns: A tuple of (v1 mappings, v2 mappings); the v1 mapping maps service:deploy_group to a dictionary
containing:
- 'docker_image': something like "services-service:paasta-hash". This is
relative to the paasta docker registry.
- 'desired_state': either 'start' or 'stop'. Says whether this branch
should be running.
- 'force_bounce': An arbitrary value, which may be None. A change in this
value should trigger a bounce, even if the other properties of this app
have not changed.
"""
mappings: Dict[str, V1_Mapping] = {}
v2_mappings: V2_Mappings = {"deployments": {}, "controls": {}}
git_url = get_git_url(service=service, soa_dir=soa_dir)
# Some pseudo-services like toolboxes explicitly have no git_url, and therefore no deployments
if git_url is None:
return mappings, v2_mappings
# Most of the time of this function is in two parts:
# 1. getting remote refs from git. (Mostly IO, just waiting for git to get back to us.)
# 2. loading instance configs. (Mostly CPU, copy.deepcopying yaml over and over again)
# Let's do these two things in parallel.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
remote_refs_future = executor.submit(remote_git.list_remote_refs, git_url)
service_configs = get_instance_configs_for_service(soa_dir=soa_dir, service=service)
deploy_group_branch_mappings = {
config.get_branch(): config.get_deploy_group() for config in service_configs
}
if not deploy_group_branch_mappings:
log.info("Service %s has no valid deploy groups. Skipping.", service)
return mappings, v2_mappings
remote_refs = remote_refs_future.result()
tag_by_deploy_group = {
dg: get_latest_deployment_tag(remote_refs, dg)
for dg in set(deploy_group_branch_mappings.values())
}
state_by_branch_and_sha = get_desired_state_by_branch_and_sha(remote_refs)
for control_branch, deploy_group in deploy_group_branch_mappings.items():
(deploy_ref_name, deploy_ref_sha, image_version) = tag_by_deploy_group[
deploy_group
]
if deploy_ref_name in remote_refs:
commit_sha = remote_refs[deploy_ref_name]
control_branch_alias = f"{service}:paasta-{control_branch}"
control_branch_alias_v2 = f"{service}:{control_branch}"
docker_image = build_docker_image_name(service, commit_sha, image_version)
desired_state, force_bounce = state_by_branch_and_sha.get(
(control_branch, deploy_ref_sha), ("start", None)
)
log.info("Mapping %s to docker image %s", control_branch, docker_image)
v2_mappings["deployments"][deploy_group] = {
"docker_image": docker_image,
"git_sha": commit_sha,
"image_version": image_version,
}
mappings[control_branch_alias] = {
"docker_image": docker_image,
"desired_state": desired_state,
"force_bounce": force_bounce,
}
v2_mappings["controls"][control_branch_alias_v2] = {
"desired_state": desired_state,
"force_bounce": force_bounce,
}
return mappings, v2_mappings
|
Gets mappings from service:deploy_group to services-service:paasta-hash-image_version,
where hash is the current SHA at the HEAD of branch_name and image_version
can be used to provide additional version information for the Docker image.
This is done for the given service in soa_dir.
:param soa_dir: The SOA configuration directory to read from
:param service: The service to generate mappings for
:returns: A tuple of (v1 mappings, v2 mappings); the v1 mapping maps service:deploy_group to a dictionary
containing:
- 'docker_image': something like "services-service:paasta-hash". This is
relative to the paasta docker registry.
- 'desired_state': either 'start' or 'stop'. Says whether this branch
should be running.
- 'force_bounce': An arbitrary value, which may be None. A change in this
value should trigger a bounce, even if the other properties of this app
have not changed.
|
get_deploy_group_mappings
|
python
|
Yelp/paasta
|
paasta_tools/generate_deployments_for_service.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/generate_deployments_for_service.py
|
Apache-2.0
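Illustrative shapes of the two return values, with invented service names and SHAs; the fields match the dictionaries built in the loop above:

v1_mappings = {
    "example_service:paasta-prod.everything": {
        "docker_image": "services-example_service:paasta-0123abcd",  # invented SHA
        "desired_state": "start",
        "force_bounce": None,
    },
}
v2_mappings = {
    "deployments": {
        "prod.everything": {
            "docker_image": "services-example_service:paasta-0123abcd",
            "git_sha": "0123abcd",
            "image_version": None,
        },
    },
    "controls": {
        "example_service:prod.everything": {
            "desired_state": "start",
            "force_bounce": None,
        },
    },
}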
|
async def get_spool(spool_url: str) -> Optional[SpoolInfo]:
"""Query hacheck for the state of a task, and parse the result into a dictionary."""
if spool_url is None:
return None
# TODO: aiohttp says not to create a session per request. Fix this.
async with aiohttp.ClientSession(timeout=HACHECK_TIMEOUT) as session:
async with session.get(
spool_url, headers={"User-Agent": get_user_agent()}
) as response:
if response.status == 200:
return {"state": "up"}
regex = "".join(
[
"^",
r"Service (?P<service>.+)",
r" in (?P<state>.+) state",
r"(?: since (?P<since>[0-9.]+))?",
r"(?: until (?P<until>[0-9.]+))?",
r"(?:: (?P<reason>.*))?",
"$",
]
)
response_text = await response.text()
match = re.match(regex, response_text)
groupdict = match.groupdict()
info: SpoolInfo = {}
info["service"] = groupdict["service"]
info["state"] = groupdict["state"]
if "since" in groupdict:
info["since"] = float(groupdict["since"] or 0)
if "until" in groupdict:
info["until"] = float(groupdict["until"] or 0)
if "reason" in groupdict:
info["reason"] = groupdict["reason"]
return info
|
Query hacheck for the state of a task, and parse the result into a dictionary.
|
get_spool
|
python
|
Yelp/paasta
|
paasta_tools/hacheck.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/hacheck.py
|
Apache-2.0
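The hacheck response parsing can be exercised standalone; the pattern below is the same one get_spool() assembles, and the sample response text is invented:

import re

SPOOL_RE = re.compile(
    r"^Service (?P<service>.+)"
    r" in (?P<state>.+) state"
    r"(?: since (?P<since>[0-9.]+))?"
    r"(?: until (?P<until>[0-9.]+))?"
    r"(?:: (?P<reason>.*))?$"
)

match = SPOOL_RE.match(
    "Service example_service in down state since 1588104000.0: draining"
)
assert match is not None
assert match.group("state") == "down"
assert float(match.group("since")) == 1588104000.0
assert match.group("reason") == "draining"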
|
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has an exact set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = set(list_chain(chain))
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
insert_rule(chain, rule)
extra_rules = current_rules - set(rules)
if extra_rules:
delete_rules(chain, extra_rules)
|
Idempotently ensure a chain exists and has an exact set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
|
ensure_chain
|
python
|
Yelp/paasta
|
paasta_tools/iptables.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py
|
Apache-2.0
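The reconcile pattern above (insert missing rules at the front, delete extras, never reorder survivors) is generic; a sketch over an in-memory stand-in for the iptables primitives:

# In-memory stand-in for the real chain storage.
chains = {"PAASTA-EXAMPLE": ["RULE-B", "RULE-C", "RULE-STALE"]}

def ensure(chain, desired_rules):
    current = set(chains.setdefault(chain, []))
    for rule in desired_rules:
        if rule not in current:
            chains[chain].insert(0, rule)  # new rules always go to the front
    for extra in current - set(desired_rules):
        chains[chain].remove(extra)  # drop rules outside the desired set

ensure("PAASTA-EXAMPLE", ["RULE-A", "RULE-B", "RULE-C"])
assert chains["PAASTA-EXAMPLE"] == ["RULE-A", "RULE-B", "RULE-C"]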
|
def reorder_chain(chain_name):
"""Ensure that any REJECT rules are last, and any LOG rules are second-to-last"""
table = iptc.Table(iptc.Table.FILTER)
with iptables_txn(table):
rules = list_chain(chain_name)
chain = iptc.Chain(table, chain_name)
# sort the rules by _rule_sort_key, which uses (RULE_TARGET_SORT_ORDER, idx)
sorted_rules_with_indices = sorted(enumerate(rules), key=_rule_sort_key)
for new_index, (old_index, rule) in enumerate(sorted_rules_with_indices):
if new_index == old_index:
continue
log.debug(f"reordering chain {chain_name} rule {rule} to #{new_index}")
chain.replace_rule(rule.to_iptc(), new_index)
|
Ensure that any REJECT rules are last, and any LOG rules are second-to-last
|
reorder_chain
|
python
|
Yelp/paasta
|
paasta_tools/iptables.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py
|
Apache-2.0
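The reordering relies on a stable sort keyed on (target sort order, original index), so rules with the same target keep their relative order. A sketch with plain strings standing in for rules; the sort-order table is an assumption:

# Assumed sort-order table: LOG second-to-last, REJECT last, others first.
RULE_TARGET_SORT_ORDER = {"LOG": 1, "REJECT": 2}

def _sort_key(enumerated_rule):
    index, target = enumerated_rule
    return (RULE_TARGET_SORT_ORDER.get(target, 0), index)

rules = ["REJECT", "ACCEPT", "LOG", "ACCEPT"]
ordered = [target for _, target in sorted(enumerate(rules), key=_sort_key)]
assert ordered == ["ACCEPT", "ACCEPT", "LOG", "REJECT"]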
|
def list_chain(chain_name):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
table = iptc.Table(iptc.Table.FILTER)
chain = iptc.Chain(table, chain_name)
# TODO: is there any way to do this without listing all chains? (probably slow)
# If the chain doesn't exist, chain.rules will be an empty list, so we need
# to make sure the chain actually _does_ exist.
if chain in table.chains:
return tuple(Rule.from_iptc(rule) for rule in chain.rules)
else:
raise ChainDoesNotExist(chain_name)
|
List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
|
list_chain
|
python
|
Yelp/paasta
|
paasta_tools/iptables.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py
|
Apache-2.0
|
def load_kafkacluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> KafkaClusterDeploymentConfig:
"""Read a service instance's configuration for KafkaCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "kafkacluster", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = KafkaClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return KafkaClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for KafkaCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance
|
load_kafkacluster_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/kafkacluster_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kafkacluster_tools.py
|
Apache-2.0
|
def load_kubernetes_service_config_no_cache(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> "KubernetesDeploymentConfig":
"""Read a service instance's configuration for kubernetes.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "kubernetes", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = KubernetesDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return KubernetesDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
|
Read a service instance's configuration for kubernetes.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance
|
load_kubernetes_service_config_no_cache
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def __new__(
cls,
component: Optional[str] = None,
config_file: Optional[str] = None,
context: Optional[str] = None,
) -> "KubeClient":
"""By @lru_cache'ing this function, repeated instantiations of KubeClient with the same arguments will return the
exact same object. This makes it possible to effectively cache function calls that take a KubeClient as an
argument."""
return super().__new__(cls)
|
By @lru_cache'ing this function, repeated instantiations of KubeClient with the same arguments will return the
exact same object. This makes it possible to effectively cache function calls that take a KubeClient as an
argument.
|
__new__
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
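The snippet above does not show the decorator, but the docstring implies __new__ is wrapped with functools.lru_cache in the source. A standalone sketch of that pattern, making repeated construction with equal (hashable) arguments return the identical object:

import functools

class Client:
    @functools.lru_cache(maxsize=None)
    def __new__(cls, component=None):
        # Cached per (cls, component); arguments must be hashable.
        return object.__new__(cls)

    def __init__(self, component=None):
        # Note: __init__ still re-runs on every call, on the cached object.
        self.component = component

assert Client("api") is Client("api")       # same object back
assert Client("api") is not Client("cron")  # different args, different object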
|
def allowlist_denylist_to_requirements(
allowlist: DeployWhitelist, denylist: DeployBlacklist
) -> List[Tuple[str, str, List[str]]]:
"""Converts deploy_whitelist and deploy_blacklist to a list of
requirements, which can be converted to node affinities.
"""
requirements = []
# convert whitelist into a node selector req
if allowlist:
location_type, alloweds = allowlist
requirements.append((to_node_label(location_type), "In", alloweds))
# convert blacklist into multiple node selector reqs
if denylist:
# not going to prune for duplicates, or group blacklist items for
# same location_type. makes testing easier and k8s can handle it.
for location_type, not_allowed in denylist:
requirements.append((to_node_label(location_type), "NotIn", [not_allowed]))
return requirements
|
Converts deploy_whitelist and deploy_blacklist to a list of
requirements, which can be converted to node affinities.
|
allowlist_denylist_to_requirements
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def raw_selectors_to_requirements(
raw_selectors: Mapping[str, NodeSelectorConfig]
) -> List[Tuple[str, str, List[str]]]:
"""Converts certain node_selectors into requirements, which can be
converted to node affinities.
"""
requirements: List[Tuple[str, str, List[str]]] = []
for label, configs in raw_selectors.items():
operator_configs: List[NodeSelectorOperator] = []
if type(configs) is not list or len(configs) == 0:
continue
elif type(configs[0]) is str:
# specifying an array/list of strings for a label is shorthand
# for the "In" operator
operator_configs = [
NodeSelectorInNotIn(
{"operator": "In", "values": cast(List[str], configs)}
)
]
else:
# configs should already be a List[NodeSelectorOperator]
operator_configs = cast(List[NodeSelectorOperator], configs)
label = to_node_label(label)
for config in operator_configs:
if config["operator"] in {"In", "NotIn"}:
config = cast(NodeSelectorInNotIn, config)
values = config["values"]
elif config["operator"] in {"Exists", "DoesNotExist"}:
config = cast(NodeSelectorExistsDoesNotExist, config)
values = []
elif config["operator"] in {"Gt", "Lt"}:
config = cast(NodeSelectorGtLt, config)
# config["value"] is validated by jsonschema to be an int. but,
# k8s expects singleton list of the int represented as a str
# for these operators.
values = [str(config["value"])]
else:
raise ValueError(
f"Unknown k8s node affinity operator: {config['operator']}"
)
requirements.append((label, config["operator"], values))
return requirements
|
Converts certain node_selectors into requirements, which can be
converted to node affinities.
|
raw_selectors_to_requirements
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
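A standalone sketch of the operator handling; label names pass through unchanged here, whereas the real code maps them via to_node_label():

def to_requirements(raw_selectors):
    requirements = []
    for label, configs in raw_selectors.items():
        if isinstance(configs, list) and configs and isinstance(configs[0], str):
            # A bare list of strings is shorthand for the "In" operator.
            configs = [{"operator": "In", "values": configs}]
        for config in configs or []:
            op = config["operator"]
            if op in {"In", "NotIn"}:
                values = config["values"]
            elif op in {"Exists", "DoesNotExist"}:
                values = []
            else:  # Gt / Lt carry a single int, sent as a one-element str list
                values = [str(config["value"])]
            requirements.append((label, op, values))
    return requirements

reqs = to_requirements({
    "yelp.com/habitat": ["uswest1a", "uswest1b"],
    "example.com/gpu": [{"operator": "Exists"}],
    "example.com/cpus": [{"operator": "Gt", "value": 4}],
})
assert ("yelp.com/habitat", "In", ["uswest1a", "uswest1b"]) in reqs
assert ("example.com/cpus", "Gt", ["4"]) in reqs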
|
def get_bounce_method(self) -> str:
"""Get the bounce method specified in the service's kubernetes configuration."""
# map existing bounce methods to k8s equivalents.
# but if there's an EBS volume we must downthenup to free up the volume.
# in the future we may support stateful sets to dynamically create the volumes
bounce_method = self.config_dict.get("bounce_method", "crossover")
if self.get_aws_ebs_volumes() and not bounce_method == "downthenup":
raise Exception(
"If service instance defines an EBS volume it must use a downthenup bounce_method"
)
return bounce_method
|
Get the bounce method specified in the service's kubernetes configuration.
|
get_bounce_method
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_autoscaling_scaling_policy(
self,
max_replicas: int,
autoscaling_params: AutoscalingParamsDict,
) -> Dict:
"""Returns the k8s HPA scaling policy in raw JSON. Requires k8s v1.18
to work.
"""
# The HPA scaling algorithm is as follows. Every sync period (default:
# 15 seconds), the HPA will:
# 1. determine what the desired capacity is from metrics
# 2. apply min/max replica scaling limits
# 3. rate-limit the scaling magnitude (e.g. scale down by no more than
# 30% of current replicas)
# 4. constrain the scaling magnitude by the period seconds (e.g. scale
# down by no more than 30% of current replicas per 60 seconds)
# 5. record the desired capacity, then pick the highest capacity from
# the stabilization window (default: last 300 seconds) as the final
# desired capacity.
# - the idea is to stabilize scaling against (heavily) fluctuating
# metrics
policy = {
"scaleDown": {
"stabilizationWindowSeconds": 300,
# the policy in a human-readable way: scale down every 60s by
# at most 30% of current replicas.
"selectPolicy": "Max",
"policies": [{"type": "Percent", "value": 30, "periodSeconds": 60}],
}
}
policy["scaleDown"].update(autoscaling_params.get("scaledown_policies", {}))
return policy
|
Returns the k8s HPA scaling policy as a raw JSON-compatible dict. Requires
k8s v1.18 or newer to work.
|
get_autoscaling_scaling_policy
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
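The scaledown_policies merge is a shallow dict.update() onto the default block, so overridden keys replace defaults while untouched keys survive; the override below is invented:

policy = {
    "scaleDown": {
        "stabilizationWindowSeconds": 300,
        "selectPolicy": "Max",
        "policies": [{"type": "Percent", "value": 30, "periodSeconds": 60}],
    }
}
overrides = {"stabilizationWindowSeconds": 60}  # hypothetical scaledown_policies
policy["scaleDown"].update(overrides)
assert policy["scaleDown"]["stabilizationWindowSeconds"] == 60
assert policy["scaleDown"]["selectPolicy"] == "Max"  # non-overridden keys survive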
|
def get_sanitised_volume_name(self, volume_name: str, length_limit: int = 0) -> str:
"""I know but we really aren't allowed many characters..."""
volume_name = volume_name.rstrip("/")
sanitised = volume_name.replace("/", "slash-").replace(".", "dot-")
sanitised_name = sanitise_kubernetes_name(sanitised)
if length_limit and len(sanitised_name) > length_limit:
sanitised_name = (
sanitised_name[0 : length_limit - 6]
+ "--"
+ hashlib.md5(sanitised_name.encode("ascii")).hexdigest()[:4]
)
return sanitised_name
|
I know but we really aren't allowed many characters...
|
get_sanitised_volume_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
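A runnable sketch of the volume-name sanitisation, with sanitise_kubernetes_name() simplified to lower() (an assumption; the full rules appear later in this section). The md5 suffix keeps truncated names distinct:

import hashlib

def sanitise_volume_name(volume_name, length_limit=0):
    volume_name = volume_name.rstrip("/")
    sanitised = volume_name.replace("/", "slash-").replace(".", "dot-").lower()
    if length_limit and len(sanitised) > length_limit:
        digest = hashlib.md5(sanitised.encode("ascii")).hexdigest()[:4]
        sanitised = sanitised[: length_limit - 6] + "--" + digest
    return sanitised

name = sanitise_volume_name("/nail/etc/habitat", length_limit=15)
assert len(name) == 15
assert "--" in name  # truncation marker plus 4-char md5 suffix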
|
def get_readiness_check_script(
self, system_paasta_config: SystemPaastaConfig
) -> List[str]:
"""Script to check if a service is up in smartstack / envoy"""
enable_envoy_check = self.get_enable_envoy_readiness_check(system_paasta_config)
enable_nerve_check = self.get_enable_nerve_readiness_check(system_paasta_config)
if enable_nerve_check and enable_envoy_check:
return system_paasta_config.get_envoy_nerve_readiness_check_script()
elif enable_envoy_check:
return system_paasta_config.get_envoy_readiness_check_script()
else:
return system_paasta_config.get_nerve_readiness_check_script()
|
Script to check if a service is up in smartstack / envoy
|
get_readiness_check_script
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_env_vars_that_use_secrets(self) -> Tuple[Dict[str, str], Dict[str, str]]:
"""Returns two dictionaries of environment variable name->value; the first is vars that use non-shared
secrets, and the second is vars that use shared secrets.
The values of the dictionaries are the secret refs as formatted in yelpsoa-configs, e.g. "SECRET(foo)"
or "SHARED_SECRET(bar)". These can be decoded with get_secret_name_from_ref.
"""
secret_env_vars = {}
shared_secret_env_vars = {}
for k, v in self.get_env().items():
if is_secret_ref(v):
if is_shared_secret(v):
shared_secret_env_vars[k] = v
else:
secret_env_vars[k] = v
return secret_env_vars, shared_secret_env_vars
|
Returns two dictionaries of environment variable name->value; the first is vars that use non-shared
secrets, and the second is vars that use shared secrets.
The values of the dictionaries are the secret refs as formatted in yelpsoa-configs, e.g. "SECRET(foo)"
or "SHARED_SECRET(bar)". These can be decoded with get_secret_name_from_ref.
|
get_env_vars_that_use_secrets
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
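A sketch of the split with simplified stand-ins for is_secret_ref() and is_shared_secret() (the real predicates live in paasta_tools.secret_tools):

def is_secret_ref(value):
    return value.startswith(("SECRET(", "SHARED_SECRET(")) and value.endswith(")")

def is_shared_secret(value):
    return value.startswith("SHARED_SECRET(")

env = {
    "DB_PASSWORD": "SECRET(db_password)",
    "API_TOKEN": "SHARED_SECRET(api_token)",
    "LOG_LEVEL": "info",  # plain value, ignored by the split
}
secret_vars = {k: v for k, v in env.items()
               if is_secret_ref(v) and not is_shared_secret(v)}
shared_vars = {k: v for k, v in env.items() if is_shared_secret(v)}
assert secret_vars == {"DB_PASSWORD": "SECRET(db_password)"}
assert shared_vars == {"API_TOKEN": "SHARED_SECRET(api_token)"}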
|
def get_hacheck_prestop_sleep_seconds(self) -> int:
"""The number of seconds to sleep between hadown and terminating the hacheck container. We want hacheck to be
up for slightly longer than the main container is, so we default to pre_stop_drain_seconds + 1.
It doesn't super matter if hacheck goes down before the main container -- if it's down, healthchecks will fail
and the service will be removed from smartstack, which is the same effect we get after running hadown.
"""
# Everywhere this value is currently used (hacheck sidecar or gunicorn sidecar), we can pretty safely
# assume that the service is in smartstack.
return self.get_prestop_sleep_seconds(is_in_smartstack=True) + 1
|
The number of seconds to sleep between hadown and terminating the hacheck container. We want hacheck to be
up for slightly longer than the main container is, so we default to pre_stop_drain_seconds + 1.
It doesn't super matter if hacheck goes down before the main container -- if it's down, healthchecks will fail
and the service will be removed from smartstack, which is the same effect we get after running hadown.
|
get_hacheck_prestop_sleep_seconds
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_datastore_credentials_secrets_volume(self) -> V1Volume:
"""
All credentials are stored in a single Kubernetes Secret and are mapped via an item->path
structure to /datastore/<datastore>/<credential>/<password file>.
"""
datastore_credentials = self.get_datastore_credentials()
if not datastore_credentials:
return None
# Assume k8s secret exists if its configmap signature exists
secret_hash = self.get_datastore_credentials_secret_hash()
if not secret_hash:
log.warning(
f"Expected to find datastore_credentials secret signature {self.get_datastore_credentials_secret_name()} for {self.get_service()}.{self.get_instance()} on {self.get_namespace()}"
)
return None
secrets_with_custom_mountpaths = []
for datastore, credentials in datastore_credentials.items():
# mypy loses type hints on '.items' and throws false positives. unfortunately have to type: ignore
# https://github.com/python/mypy/issues/7178
for credential in credentials: # type: ignore
secrets_with_custom_mountpaths.append(
{
"key": get_vault_key_secret_name(
f"secrets/datastore/{datastore}/{credential}"
),
"mode": mode_to_int("0444"),
"path": f"{datastore}/{credential}/credentials",
}
)
return V1Volume(
name=self.get_datastore_secret_volume_name(),
secret=V1SecretVolumeSource(
secret_name=self.get_datastore_credentials_secret_name(),
default_mode=mode_to_int("0444"),
items=secrets_with_custom_mountpaths,
optional=False,
),
)
|
All credentials are stored in a single Kubernetes Secret and are mapped via an item->path
structure to /datastore/<datastore>/<credential>/<password file>.
|
get_datastore_credentials_secrets_volume
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_boto_secret_signature_name(self) -> str:
"""
Keep the following signature naming convention so that pre-existing boto_keys configmap signatures do not trigger bounces; see PAASTA-17910.
Note: Since hashing is done on only a portion of the secret name, the result may still be too long if service or instance names are very long.
"""
secret_instance = limit_size_with_hash(
f"paasta-boto-key-{self.get_sanitised_deployment_name()}"
)
return f"{self.get_namespace()}-secret-{self.get_sanitised_service_name()}-{secret_instance}-signature"
|
Keep the following signature naming convention so that pre-existing boto_keys configmap signatures do not trigger bounces; see PAASTA-17910.
Note: Since hashing is done on only a portion of the secret name, the result may still be too long if service or instance names are very long.
|
get_boto_secret_signature_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_datastore_credentials_signature_name(self) -> str:
"""
All datastore credentials are stored in a single Kubernetes secret, so they share a name
"""
return _get_secret_signature_name(
self.get_namespace(),
"datastore-credentials",
self.get_service(),
# key is on instances, which get their own configurations
key_name=self.get_instance(),
)
|
All datastore credentials are stored in a single Kubernetes secret, so they share a name
|
get_datastore_credentials_signature_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def set_autoscaled_instances(
self, instance_count: int, kube_client: KubeClient
) -> None:
"""Set the number of instances in the same way that the autoscaler does."""
set_instances_for_kubernetes_service(
kube_client=kube_client, service_config=self, instance_count=instance_count
)
|
Set the number of instances in the same way that the autoscaler does.
|
set_autoscaled_instances
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_desired_instances(self) -> int:
"""For now if we have an EBS instance it means we can only have 1 instance
since we can't attach to multiple instances. In the future we might support
statefulsets which are clever enough to manage EBS for you"""
instances = super().get_desired_instances()
if self.get_aws_ebs_volumes() and instances not in [1, 0]:
raise Exception(
"Number of instances must be 1 or 0 if an EBS volume is defined."
)
return instances
|
For now, if we have an EBS volume it means we can only have 1 instance,
since we can't attach it to multiple instances. In the future we might support
statefulsets, which are clever enough to manage EBS for you
|
get_desired_instances
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_enable_nerve_readiness_check(
self, system_paasta_config: SystemPaastaConfig
) -> bool:
"""Enables a k8s readiness check on the Pod to ensure that all registrations
are UP on the local synapse haproxy"""
return self.config_dict.get("bounce_health_params", {}).get(
"check_haproxy", system_paasta_config.get_enable_nerve_readiness_check()
)
|
Enables a k8s readiness check on the Pod to ensure that all registrations
are UP on the local synapse haproxy
|
get_enable_nerve_readiness_check
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_enable_envoy_readiness_check(
self, system_paasta_config: SystemPaastaConfig
) -> bool:
"""Enables a k8s readiness check on the Pod to ensure that all registrations
are UP on the local Envoy"""
return self.config_dict.get("bounce_health_params", {}).get(
"check_envoy", system_paasta_config.get_enable_envoy_readiness_check()
)
|
Enables a k8s readiness check on the Pod to ensure that all registrations
are UP on the local Envoy
|
get_enable_envoy_readiness_check
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_namespace(self) -> str:
"""Get namespace from config, default to 'paasta'"""
return self.config_dict.get(
"namespace", f"paastasvc-{self.get_sanitised_service_name()}"
)
|
Get the namespace from config, defaulting to 'paastasvc-<sanitised service name>'
|
get_namespace
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def format_kubernetes_job(
self,
job_label: str,
deadline_seconds: int = 3600,
keep_routable_ip: bool = False,
include_sidecars: bool = False,
) -> V1Job:
"""Create the config for launching the deployment as a Job
:param str job_label: value to set for the "job type" label
:param int deadline_seconds: maximum allowed duration for the job
:param bool keep_routable_ip: maintain routable IP annotation in pod template
:param bool include_sidecars: do not discard sidecar containers when building pod spec
:return: job object
"""
additional_labels = {paasta_prefixed(JOB_TYPE_LABEL_NAME): job_label}
try:
docker_url = self.get_docker_url()
git_sha = get_git_sha_from_dockerurl(docker_url, long=True)
system_paasta_config = load_system_paasta_config()
image_version = self.get_image_version()
if image_version is not None:
additional_labels[paasta_prefixed("image_version")] = image_version
pod_template = self.get_pod_template_spec(
git_sha=git_sha,
system_paasta_config=system_paasta_config,
restart_on_failure=False,
include_sidecars=include_sidecars,
force_no_routable_ip=not keep_routable_ip,
)
pod_template.metadata.labels.update(additional_labels)
complete_config = V1Job(
api_version="batch/v1",
kind="Job",
metadata=self.get_kubernetes_metadata(git_sha),
spec=V1JobSpec(
active_deadline_seconds=deadline_seconds,
ttl_seconds_after_finished=0, # remove job resource after completion
template=pod_template,
),
)
complete_config.metadata.labels.update(additional_labels)
except Exception as e:
raise InvalidKubernetesConfig(e, self.get_service(), self.get_instance())
log.debug(
f"Complete configuration for job instance is: {complete_config}",
)
return complete_config
|
Create the config for launching the deployment as a Job
:param str job_label: value to set for the "job type" label
:param int deadline_seconds: maximum allowed duration for the job
:param bool keep_routable_ip: maintain routable IP annotation in pod template
:param bool include_sidecars: do not discard sidecar containers when building pod spec
:return: job object
|
format_kubernetes_job
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def format_kubernetes_app(self) -> Union[V1Deployment, V1StatefulSet]:
"""Create the configuration that will be passed to the Kubernetes REST API."""
try:
system_paasta_config = load_system_paasta_config()
docker_url = self.get_docker_url()
git_sha = get_git_sha_from_dockerurl(docker_url, long=True)
complete_config: Union[V1StatefulSet, V1Deployment]
if self.get_persistent_volumes():
complete_config = V1StatefulSet(
api_version="apps/v1",
kind="StatefulSet",
metadata=self.get_kubernetes_metadata(git_sha),
spec=V1StatefulSetSpec(
service_name=self.get_sanitised_deployment_name(),
volume_claim_templates=self.get_volume_claim_templates(),
replicas=self.get_desired_instances(),
revision_history_limit=0,
selector=V1LabelSelector(
match_labels={
"paasta.yelp.com/service": self.get_service(),
"paasta.yelp.com/instance": self.get_instance(),
}
),
template=self.get_pod_template_spec(
git_sha=git_sha, system_paasta_config=system_paasta_config
),
pod_management_policy=self.get_pod_management_policy(),
),
)
else:
complete_config = V1Deployment(
api_version="apps/v1",
kind="Deployment",
metadata=self.get_kubernetes_metadata(git_sha),
spec=V1DeploymentSpec(
replicas=self.get_desired_instances(),
min_ready_seconds=self.get_min_task_uptime(),
selector=V1LabelSelector(
match_labels={
"paasta.yelp.com/service": self.get_service(),
"paasta.yelp.com/instance": self.get_instance(),
}
),
revision_history_limit=0,
template=self.get_pod_template_spec(
git_sha=git_sha, system_paasta_config=system_paasta_config
),
strategy=self.get_deployment_strategy_config(),
),
)
prometheus_shard = self.get_prometheus_shard()
if prometheus_shard:
complete_config.metadata.labels[
"paasta.yelp.com/prometheus_shard"
] = prometheus_shard
image_version = self.get_image_version()
if image_version is not None:
complete_config.metadata.labels[
"paasta.yelp.com/image_version"
] = image_version
# DO NOT ADD LABELS AFTER THIS LINE
config_hash = get_config_hash(
self.sanitize_for_config_hash(complete_config),
force_bounce=self.get_force_bounce(),
)
complete_config.metadata.labels["yelp.com/paasta_config_sha"] = config_hash
complete_config.metadata.labels["paasta.yelp.com/config_sha"] = config_hash
complete_config.spec.template.metadata.labels[
"yelp.com/paasta_config_sha"
] = config_hash
complete_config.spec.template.metadata.labels[
"paasta.yelp.com/config_sha"
] = config_hash
except Exception as e:
raise InvalidKubernetesConfig(e, self.get_service(), self.get_instance())
log.debug("Complete configuration for instance is: %s", complete_config)
return complete_config
|
Create the configuration that will be passed to the Kubernetes REST API.
|
format_kubernetes_app
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def has_routable_ip(
self,
service_namespace_config: ServiceNamespaceConfig,
system_paasta_config: SystemPaastaConfig,
) -> str:
"""Return whether the routable_ip label should be true or false.
Services with a `prometheus_port` defined or that use certain sidecars must have a routable IP
address to allow Prometheus shards to scrape metrics.
"""
if (
self.config_dict.get("routable_ip", False)
or service_namespace_config.is_in_smartstack()
or self.get_prometheus_port() is not None
or self.should_use_metrics_provider(METRICS_PROVIDER_UWSGI)
or self.should_use_metrics_provider(METRICS_PROVIDER_GUNICORN)
):
return "true"
return "false"
|
Return whether the routable_ip label should be true or false.
Services with a `prometheus_port` defined or that use certain sidecars must have a routable IP
address to allow Prometheus shards to scrape metrics.
|
has_routable_ip
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_node_selector(self) -> Mapping[str, str]:
"""Converts simple node restrictions into node selectors. Unlike node
affinities, selectors will show up in `kubectl describe`.
"""
raw_selectors: Mapping[str, Any] = self.config_dict.get("node_selectors", {})
node_selectors = {
to_node_label(label): value
for label, value in raw_selectors.items()
if type(value) is str
}
node_selectors["yelp.com/pool"] = self.get_pool()
return node_selectors
|
Converts simple node restrictions into node selectors. Unlike node
affinities, selectors will show up in `kubectl describe`.
|
get_node_selector
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_node_affinity(
self, pool_node_affinities: Dict[str, Dict[str, List[str]]] = None
) -> Optional[V1NodeAffinity]:
"""Converts deploy_whitelist and deploy_blacklist in node affinities.
note: At the time of writing, `kubectl describe` does not show affinities,
only selectors. To see affinities, use `kubectl get pod -o json` instead.
"""
requirements = allowlist_denylist_to_requirements(
allowlist=self.get_deploy_whitelist(),
denylist=self.get_deploy_blacklist(),
)
node_selectors = self.config_dict.get("node_selectors", {})
requirements.extend(
raw_selectors_to_requirements(
raw_selectors=node_selectors,
)
)
# PAASTA-18198: To improve AZ balance with Karpenter, we temporarily allow specifying zone affinities per pool
if pool_node_affinities and self.get_pool() in pool_node_affinities:
current_pool_node_affinities = pool_node_affinities[self.get_pool()]
# If the service already has a node selector for a zone, we don't want to override it
if current_pool_node_affinities and not contains_zone_label(node_selectors):
requirements.extend(
raw_selectors_to_requirements(
raw_selectors=current_pool_node_affinities,
)
)
preferred_terms = []
for node_selectors_prefered_config_dict in self.config_dict.get(
"node_selectors_preferred", []
):
preferred_terms.append(
V1PreferredSchedulingTerm(
weight=node_selectors_prefered_config_dict["weight"],
preference=V1NodeSelectorTerm(
match_expressions=[
V1NodeSelectorRequirement(
key=key,
operator=op,
values=vs,
)
for key, op, vs in raw_selectors_to_requirements(
raw_selectors=node_selectors_prefered_config_dict[
"preferences"
]
)
]
),
)
)
# package everything into a node affinity - lots of layers :P
if len(requirements) == 0 and len(preferred_terms) == 0:
return None
required_term = (
V1NodeSelectorTerm(
match_expressions=[
V1NodeSelectorRequirement(
key=key,
operator=op,
values=vs,
)
for key, op, vs in requirements
]
)
if requirements
else None
)
if not preferred_terms:
preferred_terms = None
return V1NodeAffinity(
required_during_scheduling_ignored_during_execution=(
V1NodeSelector(node_selector_terms=[required_term])
if required_term
else None
),
preferred_during_scheduling_ignored_during_execution=preferred_terms,
)
|
Converts deploy_whitelist and deploy_blacklist into node affinities.
note: At the time of writing, `kubectl describe` does not show affinities,
only selectors. To see affinities, use `kubectl get pod -o json` instead.
|
get_node_affinity
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_pod_anti_affinity(self) -> Optional[V1PodAntiAffinity]:
"""
Converts the configured anti-affinity conditions on service and instance into pod
anti-affinity terms using the "paasta.yelp.com"-prefixed label selectors.
:return: a V1PodAntiAffinity, or None if no terms are configured
"""
required_terms = self.get_pod_required_anti_affinity_terms()
preferred_terms = self.get_pod_preferred_anti_affinity_terms()
if required_terms is None and preferred_terms is None:
return None
return V1PodAntiAffinity(
required_during_scheduling_ignored_during_execution=required_terms,
preferred_during_scheduling_ignored_during_execution=preferred_terms,
)
|
Converts the configured anti-affinity conditions on service and instance into pod
anti-affinity terms using the "paasta.yelp.com"-prefixed label selectors.
:return: a V1PodAntiAffinity, or None if no terms are configured
|
get_pod_anti_affinity
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def _kube_affinity_condition_to_label_selector(
self, condition: KubeAffinityCondition
) -> Optional[V1LabelSelector]:
"""Converts the given condition to label selectors with paasta prefix"""
labels = {}
if "service" in condition:
labels[PAASTA_ATTRIBUTE_PREFIX + "service"] = condition.get("service")
if "instance" in condition:
labels[PAASTA_ATTRIBUTE_PREFIX + "instance"] = condition.get("instance")
return V1LabelSelector(match_labels=labels) if labels else None
|
Converts the given condition to label selectors with paasta prefix
|
_kube_affinity_condition_to_label_selector
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def sanitize_for_config_hash(
self, config: Union[V1Deployment, V1StatefulSet]
) -> Mapping[str, Any]:
"""Removes some data from config to make it suitable for
calculation of config hash.
:param config: complete_config hash to sanitise
:returns: sanitised copy of complete_config hash
"""
ahash = config.to_dict() # deep convert to dict
ahash["paasta_secrets"] = get_kubernetes_secret_hashes(
service=self.get_service(),
environment_variables=self.get_env(),
namespace=self.get_namespace(),
)
# remove data we don't want used to hash configs
# replica count
if ahash["spec"] is not None:
ahash["spec"].pop("replicas", None)
if ahash["metadata"] is not None:
ahash["metadata"]["namespace"] = None
# soa-configs SHA
try:
for container in ahash["spec"]["template"]["spec"]["containers"]:
container["env"] = [
e
for e in container["env"]
if e.get("name", "") != "PAASTA_SOA_CONFIGS_SHA"
]
except TypeError: # any of the values can be None
pass
return ahash
|
Removes some data from config to make it suitable for
calculation of config hash.
:param config: complete_config hash to sanitise
:returns: sanitised copy of complete_config hash
|
sanitize_for_config_hash
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
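A tiny demonstration of why volatile fields are stripped before hashing: with the replica count popped, two configs differing only in replicas hash identically, so scaling alone does not trigger a bounce. The hash function is a stand-in for get_config_hash():

import hashlib
import json

def config_hash(d):  # stand-in for get_config_hash()
    return hashlib.md5(json.dumps(d, sort_keys=True).encode()).hexdigest()[:8]

a = {"spec": {"replicas": 3, "image": "svc:paasta-abc"}}
b = {"spec": {"replicas": 5, "image": "svc:paasta-abc"}}
for cfg in (a, b):
    cfg["spec"].pop("replicas", None)  # replica count must not force a bounce
assert config_hash(a) == config_hash(b)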
|
def get_termination_grace_period(
self, service_namespace_config: ServiceNamespaceConfig
) -> Optional[int]:
"""Return the number of seconds that kubernetes should wait for pre-stop hooks to finish (or for the main
process to exit after signaling) before forcefully terminating the pod.
For smartstack services, defaults to a value long enough to allow the default pre-stop hook to finish.
For non-smartstack services, defaults to None (kubernetes default of 30s).
"""
if service_namespace_config.is_in_smartstack():
default = self.get_hacheck_prestop_sleep_seconds() + 1
if self.get_pre_stop_wait_for_connections_to_complete(
service_namespace_config
):
# If the max timeout is more than 30 minutes, cap it to 30 minutes.
# Most services with ultra-long timeouts are probably able to handle SIGTERM gracefully anyway.
default += int(
math.ceil(
min(
1800,
service_namespace_config.get_longest_timeout_ms() / 1000,
)
)
)
else:
default = None
return self.get_lifecycle_dict().get(
"termination_grace_period_seconds", default
)
|
Return the number of seconds that kubernetes should wait for pre-stop hooks to finish (or for the main
process to exit after signaling) before forcefully terminating the pod.
For smartstack services, defaults to a value long enough to allow the default pre-stop hook to finish.
For non-smartstack services, defaults to None (kubernetes default of 30s).
|
get_termination_grace_period
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
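A worked example of the arithmetic with assumed numbers: a smartstack service with a 31s hacheck prestop sleep and a 45-minute longest registration timeout ends up with the timeout capped to 30 minutes:

import math

hacheck_prestop_sleep_seconds = 31  # assumed: pre_stop_drain_seconds (30) + 1
longest_timeout_ms = 45 * 60 * 1000  # assumed longest registration timeout

default = hacheck_prestop_sleep_seconds + 1
default += int(math.ceil(min(1800, longest_timeout_ms / 1000)))  # 30-minute cap
assert default == 32 + 1800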
|
def get_all_kubernetes_services_running_here() -> List[Tuple[str, str, int]]:
"""Returns all k8s paasta services, even if not in smartstack. Returns a service, instance, port
tuple to match the return value of other similar functions"""
services = []
try:
pods = get_k8s_pods()
except requests.exceptions.ConnectionError:
log.debug("Failed to connect to the kublet when trying to get pods")
return []
for pod in pods["items"]:
try:
service = pod["metadata"]["labels"]["paasta.yelp.com/service"]
instance = pod["metadata"]["labels"]["paasta.yelp.com/instance"]
services.append((service, instance, 0))
except KeyError:
log.debug(f"Skipping listing what looks like a non-paasta pod: {pod}")
return services
|
Returns all k8s paasta services running here, even if not in smartstack, as a list of
(service, instance, port) tuples to match the return value of other similar functions
|
get_all_kubernetes_services_running_here
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def list_all_paasta_deployments(kube_client: KubeClient) -> Sequence[KubeDeployment]:
"""Gets deployments in all namespaces by passing the service label selector"""
label_selectors = "paasta.yelp.com/service"
return list_deployments_in_all_namespaces(
kube_client=kube_client, label_selector=label_selectors
)
|
Gets deployments in all namespaces by passing the service label selector
|
list_all_paasta_deployments
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def filter_nodes_by_blacklist(
nodes: Sequence[V1Node], blacklist: DeployBlacklist, whitelist: DeployWhitelist
) -> Sequence[V1Node]:
"""Takes an input list of nodes and filters them based on the given blacklist.
The blacklist is in the form of:
[["location_type", "location]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of nodes after the filter
"""
if whitelist:
whitelist = (paasta_prefixed(whitelist[0]), whitelist[1])
blacklist = [(paasta_prefixed(entry[0]), entry[1]) for entry in blacklist]
return [
node
for node in nodes
if host_passes_whitelist(node.metadata.labels, whitelist)
and host_passes_blacklist(node.metadata.labels, blacklist)
]
|
Takes an input list of nodes and filters them based on the given blacklist.
The blacklist is in the form of:
[["location_type", "location]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of nodes after the filter
|
filter_nodes_by_blacklist
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def update_secret(
kube_client: KubeClient,
service_name: str,
secret_name: str,
secret_data: Dict[str, str],
namespace: str,
) -> None:
"""
Expect secret_name to exist, e.g. kubectl get secret
:param service_name: Expect unsanitised service name
:param secret_data: Expect a mapping of string-to-string where values are base64-encoded
:param namespace: Unsanitized namespace of a service that will use the secret
:raises ApiException:
"""
kube_client.core.replace_namespaced_secret(
name=secret_name,
namespace=namespace,
body=V1Secret(
metadata=V1ObjectMeta(
name=secret_name,
labels={
"yelp.com/paasta_service": sanitise_label_value(service_name),
"paasta.yelp.com/service": sanitise_label_value(service_name),
},
),
data=secret_data,
),
)
|
Expect secret_name to exist, e.g. kubectl get secret
:param service_name: Expect unsanitised service name
:param secret_data: Expect a mapping of string-to-string where values are base64-encoded
:param namespace: Unsanitized namespace of a service that will use the secret
:raises ApiException:
|
update_secret
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_secret_signature(
kube_client: KubeClient,
signature_name: str,
namespace: str,
) -> Optional[str]:
"""
:param signature_name: Expect the signature to exist in kubernetes configmap
:return: Kubernetes configmap as a signature
:raises ApiException:
"""
try:
signature = kube_client.core.read_namespaced_config_map(
name=signature_name,
namespace=namespace,
)
except ApiException as e:
if e.status == 404:
return None
else:
raise
if not signature:
return None
else:
return signature.data["signature"]
|
:param signature_name: Expect the signature to exist in kubernetes configmap
:return: Kubernetes configmap as a signature
:raises ApiException:
|
get_secret_signature
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def update_secret_signature(
kube_client: KubeClient,
service_name: str,
signature_name: str,
secret_signature: str,
namespace: str,
) -> None:
"""
:param service_name: Expect unsanitised service_name
:param signature_name: Expect signature_name to exist in kubernetes configmap
:param secret_signature: Signature to replace with
:raises ApiException:
"""
kube_client.core.replace_namespaced_config_map(
name=signature_name,
namespace=namespace,
body=V1ConfigMap(
metadata=V1ObjectMeta(
name=signature_name,
labels={
"yelp.com/paasta_service": sanitise_label_value(service_name),
"paasta.yelp.com/service": sanitise_label_value(service_name),
},
),
data={"signature": secret_signature},
),
)
|
:param service_name: Expect unsanitised service_name
:param signature_name: Expect signature_name to exist in kubernetes configmap
:param secret_signature: Signature to replace with
:raises ApiException:
|
update_secret_signature
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def create_secret_signature(
kube_client: KubeClient,
service_name: str,
signature_name: str,
secret_signature: str,
namespace: str,
) -> None:
"""
:param service_name: Expect unsanitised service_name
:param signature_name: Expected properly formatted signature, see _get_secret_signature_name()
:param secret_signature: Signature value
:param namespace: Unsanitized namespace of a service that will use the signature
"""
kube_client.core.create_namespaced_config_map(
namespace=namespace,
body=V1ConfigMap(
metadata=V1ObjectMeta(
name=signature_name,
labels={
"yelp.com/paasta_service": sanitise_label_value(service_name),
"paasta.yelp.com/service": sanitise_label_value(service_name),
},
),
data={"signature": secret_signature},
),
)
|
:param service_name: Expect unsanitised service_name
:param signature_name: Expected properly formatted signature, see _get_secret_signature_name()
:param secret_signature: Signature value
:param namespace: Unsanitized namespace of a service that will use the signature
|
create_secret_signature
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def sanitise_kubernetes_name(
service: str,
) -> str:
"""
Sanitizes a kubernetes name so that a hyphen (-) can be used as a delimiter
"""
name = service.replace("_", "--")
if name.startswith("--"):
name = name.replace("--", "underscore-", 1)
return name.lower()
|
Sanitizes a kubernetes name so that a hyphen (-) can be used as a delimiter
|
sanitise_kubernetes_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
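Worked examples of the underscore mapping above; doubling underscores to '--' keeps a single '-' unambiguous as a delimiter, and a leading underscore gets a spelled-out prefix:

def sanitise(name):
    name = name.replace("_", "--")
    if name.startswith("--"):
        name = name.replace("--", "underscore-", 1)
    return name.lower()

assert sanitise("example_service") == "example--service"
assert sanitise("_internal") == "underscore-internal"
assert sanitise("PaaSTA") == "paasta"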
|
def create_pod_topology_spread_constraints(
service: str,
instance: str,
topology_spread_constraints: List[TopologySpreadConstraintDict],
) -> List[V1TopologySpreadConstraint]:
"""
Applies cluster-level topology spread constraints to every Pod template.
This allows us to configure default topology spread constraints on EKS where we cannot configure the scheduler.
"""
if not topology_spread_constraints:
return []
selector = V1LabelSelector(
match_labels={
"paasta.yelp.com/service": service,
"paasta.yelp.com/instance": instance,
}
)
pod_topology_spread_constraints = []
for constraint in topology_spread_constraints:
pod_topology_spread_constraints.append(
V1TopologySpreadConstraint(
label_selector=selector,
topology_key=constraint.get(
"topology_key", None
), # ValueError will be raised if unset
max_skew=constraint.get("max_skew", 1),
when_unsatisfiable=constraint.get(
"when_unsatisfiable", "ScheduleAnyway"
),
)
)
return pod_topology_spread_constraints
|
Applies cluster-level topology spread constraints to every Pod template.
This allows us to configure default topology spread constraints on EKS where we cannot configure the scheduler.
|
create_pod_topology_spread_constraints
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
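A short sketch with a single zone-spread constraint; the service/instance values are hypothetical and the constraint dict mirrors the keys read above.

constraints = create_pod_topology_spread_constraints(
    service="example--service",  # expected to match the pod's paasta.yelp.com/service label
    instance="main",
    topology_spread_constraints=[
        {
            "topology_key": "topology.kubernetes.io/zone",
            "max_skew": 1,
            "when_unsatisfiable": "ScheduleAnyway",
        }
    ],
)
# Each V1TopologySpreadConstraint selects this service/instance's pods and asks
# the scheduler to keep them balanced (skew <= 1) across zones.
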
def get_pod_hostname(kube_client: KubeClient, pod: V1Pod) -> str:
"""Gets the hostname of a pod's node from labels"""
    if not pod.spec.node_name:  # can be None if the pod is not yet scheduled
return "NotScheduled"
try:
node = kube_client.core.read_node(name=pod.spec.node_name)
except ApiException:
        # fall back to node name (which has the IP) if the node somehow doesn't exist
return pod.spec.node_name
# if label has disappeared (say we changed it), default to node name
return node.metadata.labels.get("yelp.com/hostname", pod.spec.node_name)
|
Gets the hostname of a pod's node from labels
|
get_pod_hostname
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
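A hedged sketch: fetch a pod and resolve the hostname of its node; the pod name and namespace are hypothetical.

from paasta_tools.kubernetes_tools import KubeClient, get_pod_hostname

kube_client = KubeClient()  # assumes local kubeconfig or in-cluster access
pod = kube_client.core.read_namespaced_pod(
    name="example-service-main-abc123", namespace="paasta"  # hypothetical pod
)
print(get_pod_hostname(kube_client, pod))  # "NotScheduled" if the pod has no node yet
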
def _get_secret_signature_name(
namespace: str, secret_identifier: str, service_name: str, key_name: str
) -> str:
"""
:param namespace: Unsanitised namespace of a service that will use the signature
:param secret_identifier: Identifies the type of secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
    :return: Sanitised signature name, usable as a kubernetes configmap name, at most 253 characters
"""
return limit_size_with_hash(
"-".join(
[
namespace,
secret_identifier,
sanitise_kubernetes_name(service_name),
sanitise_kubernetes_name(key_name),
"signature",
]
),
limit=253,
)
|
:param namespace: Unsanitised namespace of a service that will use the signature
:param secret_identifier: Identifies the type of secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised signature name, usable as a kubernetes configmap name, at most 253 characters
|
_get_secret_signature_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
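A sketch of how the parts compose, assuming limit_size_with_hash returns names under the 253-character limit unchanged:

# module-private helper, called here directly for illustration
name = _get_secret_signature_name(
    namespace="paasta",
    secret_identifier="secret",
    service_name="my_service",
    key_name="api_key",
)
# With short inputs this composes to:
# "paasta-secret-my--service-api--key-signature"
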
def get_paasta_secret_name(namespace: str, service_name: str, key_name: str) -> str:
"""
    Use whenever creating or referencing a PaaSTA secret
    :param namespace: Unsanitised namespace of a service that will use the secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA secret name
"""
return _get_secret_name(
namespace=namespace,
secret_identifier="secret",
service_name=service_name,
key_name=key_name,
)
|
Use whenever creating or referencing a PaaSTA secret
:param namespace: Unsanitised namespace of a service that will use the secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA secret name
|
get_paasta_secret_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
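A small sketch; note that _get_secret_name itself is not shown above, so the exact shape of the result is not reproduced here.

secret_name = get_paasta_secret_name(
    namespace="paasta", service_name="my_service", key_name="api_key"
)
# Use the result wherever a kubernetes secret name is expected, e.g. get_secret() below.
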
def get_paasta_secret_signature_name(
namespace: str, service_name: str, key_name: str
) -> str:
"""
Get PaaSTA signature name stored as kubernetes configmap
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA signature name
"""
return _get_secret_signature_name(
namespace=namespace,
secret_identifier="secret",
service_name=service_name,
key_name=key_name,
)
|
Get PaaSTA signature name stored as kubernetes configmap
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA signature name
|
get_paasta_secret_signature_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
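A sketch; since this delegates to _get_secret_signature_name with secret_identifier="secret", short inputs yield the same composed name shown earlier.

name = get_paasta_secret_signature_name(
    namespace="paasta", service_name="my_service", key_name="api_key"
)
# -> "paasta-secret-my--service-api--key-signature" (for inputs this short)
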
def get_secret(
kube_client: KubeClient,
secret_name: str,
key_name: str,
*,
namespace: str,
decode: bool = True,
) -> Union[str, bytes]:
"""
    :param secret_name: Expect a properly formatted kubernetes secret name for a secret that exists
    :param key_name: Expect key_name to be a key in the secret's data section
    :raises ApiException:
    :raises KeyError: if key_name does not exist in the kubernetes secret's data section
"""
secret_data = kube_client.core.read_namespaced_secret(
name=secret_name, namespace=namespace
).data[key_name]
# String secrets (e.g. yaml config files) need to be decoded
# Binary secrets (e.g. TLS Keystore or binary certificate files) cannot be decoded
if decode:
return base64.b64decode(secret_data).decode("utf-8")
return base64.b64decode(secret_data)
|
:param secret_name: Expect a properly formatted kubernetes secret name for a secret that exists
:param key_name: Expect key_name to be a key in the secret's data section
:raises ApiException:
:raises KeyError: if key_name does not exist in the kubernetes secret's data section
|
get_secret
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
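A minimal sketch of reading a text secret and a binary secret, assuming the secret was created under the PaaSTA naming scheme; the names are hypothetical.

from paasta_tools.kubernetes_tools import KubeClient, get_paasta_secret_name, get_secret

kube_client = KubeClient()  # assumes local kubeconfig or in-cluster access
secret_name = get_paasta_secret_name(
    namespace="paasta", service_name="my_service", key_name="api_key"
)
token = get_secret(kube_client, secret_name, "api_key", namespace="paasta")  # str
blob = get_secret(
    kube_client, secret_name, "api_key", namespace="paasta", decode=False
)  # raw bytes, e.g. for a keystore
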
def patch_namespaced_configmap(
name: str,
body: Dict[str, str],
*,
namespace: str,
kube_client: KubeClient,
) -> V1ConfigMap:
"""
Patches a configmap with the given body. The body should be a dictionary of key-value pairs.
"""
try:
return kube_client.core.patch_namespaced_config_map(
name=name, namespace=namespace, body=body
)
except ApiException as e:
if e.status == 404:
raise ValueError(f"ConfigMap {name} not found in namespace {namespace}")
else:
raise
|
Patches a configmap with the given body. The body should be a dictionary of key-value pairs.
|
patch_namespaced_configmap
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
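A hedged sketch; the docstring says the body is a dict of key-value pairs, but a kubernetes merge patch that updates configmap data usually nests them under "data", so that shape is assumed here.

from paasta_tools.kubernetes_tools import KubeClient, patch_namespaced_configmap

kube_client = KubeClient()  # assumes local kubeconfig or in-cluster access
try:
    patch_namespaced_configmap(
        name="example-configmap",  # hypothetical; must already exist
        body={"data": {"signature": "0123abcd"}},  # assumed merge-patch shape
        namespace="paasta",
        kube_client=kube_client,
    )
except ValueError:
    pass  # raised by the helper when the configmap is missing (404)
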
def get_or_create_namespaced_configmap(
configmap: str,
*,
namespace: str,
kube_client: KubeClient,
) -> Tuple[V1ConfigMap, bool]:
"""
Returns a 2-tuple of (the configmap, a bool representing whether it was just created)
"""
try:
return (
kube_client.core.read_namespaced_config_map(
name=configmap, namespace=namespace
),
False,
)
    except ApiException as e:
        if e.status == 404:
            new_configmap = V1ConfigMap(
                metadata=V1ObjectMeta(name=configmap, namespace=namespace),
                data={},
            )
            return (
                kube_client.core.create_namespaced_config_map(
                    namespace=namespace, body=new_configmap
                ),
                True,
            )
        else:
            raise
|
Returns a 2-tuple of (the configmap, a bool representing whether it was just created)
|
get_or_create_namespaced_configmap
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
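A short sketch of the get-or-create flow; the configmap name and namespace are hypothetical.

from paasta_tools.kubernetes_tools import KubeClient, get_or_create_namespaced_configmap

kube_client = KubeClient()  # assumes local kubeconfig or in-cluster access
configmap, created = get_or_create_namespaced_configmap(
    "example-configmap",
    namespace="paasta",
    kube_client=kube_client,
)
if created:
    print("created a new, empty configmap")
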