code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def create_prometheus_adapter_config(
    paasta_cluster: str, soa_dir: Path
) -> PrometheusAdapterConfig:
    """
    Given a paasta cluster and a soaconfigs directory, create the necessary
    Prometheus adapter config to autoscale services.

    Currently supports the following metrics providers:
        * uwsgi

    :param paasta_cluster: name of the cluster to generate autoscaling rules for
    :param soa_dir: path to the soaconfigs checkout to read from
    :returns: a PrometheusAdapterConfig dict with a deterministically ordered rule list
    """
    rules: List[PrometheusAdapterRule] = []
    # get_services_for_cluster() returns a list of (service, instance) tuples, but this
    # is not great for us: if we were to iterate over that we'd end up getting duplicates
    # for every service as PaastaServiceConfigLoader does not expose a way to get configs
    # for a single instance by name. instead, we get the unique set of service names and then
    # let PaastaServiceConfigLoader iterate over instances for us later.
    # NOTE: both the "kubernetes" and "eks" instance types contribute services, so we
    # collect them in one loop instead of duplicating the set-building code per type.
    services = set()
    for instance_type in ("kubernetes", "eks"):
        services.update(
            service_name
            for service_name, _ in get_services_for_cluster(
                cluster=paasta_cluster,
                instance_type=instance_type,
                soa_dir=str(soa_dir),
            )
        )

    for service_name in services:
        config_loader = PaastaServiceConfigLoader(
            service=service_name, soa_dir=str(soa_dir)
        )
        for instance_type_class in K8S_INSTANCE_TYPE_CLASSES:
            for instance_config in config_loader.instance_configs(
                cluster=paasta_cluster,
                instance_type_class=instance_type_class,
            ):
                rules.extend(
                    get_rules_for_service_instance(
                        service_name=service_name,
                        instance_config=instance_config,
                        paasta_cluster=paasta_cluster,
                    )
                )

    return {
        # we sort our rules so that we can easily compare between two different configmaps
        # as otherwise we'd need to do fancy order-independent comparisons between the two
        # sets of rules later due to the fact that we're not iterating in a deterministic
        # way and can add rules in any arbitrary order
        "rules": sorted(rules, key=lambda rule: rule["name"]["as"]),
    }
Given a paasta cluster and a soaconfigs directory, create the necessary Prometheus adapter config to autoscale services. Currently supports the following metrics providers: * uwsgi
create_prometheus_adapter_config
python
Yelp/paasta
paasta_tools/setup_prometheus_adapter_config.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py
Apache-2.0
def retrieve_haproxy_csv(
    synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str, scope: str
) -> Iterable[Dict[str, str]]:
    """Retrieves the haproxy csv from the haproxy web interface

    :param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer port that this check should contact for replication information.
    :param synapse_haproxy_url_format: The format of the synapse haproxy URL.
    :param scope: scope
    :returns reader: a csv.DictReader object
    """
    synapse_uri = synapse_haproxy_url_format.format(
        host=synapse_host, port=synapse_port, scope=scope
    )

    # timeout after 1 second and retry 3 times. Use the Session as a context
    # manager so its connection pool is always released (the previous version
    # never closed the Session, leaking connections).
    with requests.Session() as session:
        session.headers.update({"User-Agent": get_user_agent()})
        session.mount("http://", requests.adapters.HTTPAdapter(max_retries=3))
        session.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
        haproxy_response = session.get(synapse_uri, timeout=1)

    return csv.DictReader(haproxy_response.text.splitlines())
Retrieves the haproxy csv from the haproxy web interface :param synapse_host: A host that this check should contact for replication information. :param synapse_port: An integer port that this check should contact for replication information. :param synapse_haproxy_url_format: The format of the synapse haproxy URL. :param scope: scope :returns reader: a csv.DictReader object
retrieve_haproxy_csv
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_backends(
    service: str, synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str
) -> List[HaproxyBackend]:
    """Fetch the haproxy CSV and return backends, regardless of their state.

    :param service: if falsy, return backends for all services; otherwise only
        return backends for this particular service
    :param synapse_host: a host that this check should contact for replication information
    :param synapse_port: an integer port that this check should contact for replication information
    :param synapse_haproxy_url_format: the format of the synapse haproxy URL
    :returns: a list of dicts representing the backends of all services or the
        requested service
    """
    return get_multiple_backends(
        [service] if service else None,
        synapse_host=synapse_host,
        synapse_port=synapse_port,
        synapse_haproxy_url_format=synapse_haproxy_url_format,
    )
Fetches the CSV from haproxy and returns a list of backends, regardless of their state. :param service: If None, return backends for all services, otherwise only return backends for this particular service. :param synapse_host: A host that this check should contact for replication information. :param synapse_port: A integer that this check should contact for replication information. :param synapse_haproxy_url_format: The format of the synapse haproxy URL. :returns backends: A list of dicts representing the backends of all services or the requested service
get_backends
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_multiple_backends(
    services: Optional[Collection[str]],
    synapse_host: str,
    synapse_port: int,
    synapse_haproxy_url_format: str,
) -> List[HaproxyBackend]:
    """Fetch the haproxy CSV and return backends for the given services,
    regardless of their state.

    :param services: if None, return backends for all services; otherwise only
        return backends for these particular services
    :param synapse_host: a host that this check should contact for replication information
    :param synapse_port: an integer port that this check should contact for replication information
    :param synapse_haproxy_url_format: the format of the synapse haproxy URL
    :returns: a list of dicts representing the backends of all services or the
        requested services
    """
    if services is not None and len(services) == 1:
        # A single service can be handed to haproxy as the "scope" to limit the CSV.
        (scope,) = services
    else:
        # Maybe if there's like two or three services we could make two queries, or find the longest common substring.
        # For now let's just hope this is rare and fetch all data.
        scope = ""

    rows = retrieve_haproxy_csv(
        synapse_host,
        synapse_port,
        synapse_haproxy_url_format=synapse_haproxy_url_format,
        scope=scope,
    )

    matching: List[HaproxyBackend] = []
    for row in rows:
        # clean up two irregularities of the CSV output, relative to
        # DictReader's behavior there's a leading "# " for no good reason:
        row["pxname"] = row.pop("# pxname")
        # and there's a trailing comma on every line:
        row.pop("")

        # Ignore the fictional FRONTEND/BACKEND hosts. We match on svname prefixes
        # implicitly here: hosts that are UP with 1/X healthchecks to go before
        # going down still get counted as UP.
        backend_host, backend_service = row["svname"], row["pxname"]
        if backend_host in ("FRONTEND", "BACKEND"):
            continue
        if services is not None and backend_service not in services:
            continue
        matching.append(cast(HaproxyBackend, row))
    return matching
Fetches the CSV from haproxy and returns a list of backends, regardless of their state. :param services: If None, return backends for all services, otherwise only return backends for these particular services. :param synapse_host: A host that this check should contact for replication information. :param synapse_port: A integer that this check should contact for replication information. :param synapse_haproxy_url_format: The format of the synapse haproxy URL. :returns backends: A list of dicts representing the backends of all services or the requested service
get_multiple_backends
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def load_smartstack_info_for_service(
    service: str,
    namespace: str,
    blacklist: DeployBlacklist,
    system_paasta_config: SystemPaastaConfig,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, Dict[str, int]]:
    """Retrieve the number of available backends for the given service.

    :param service: a service name
    :param namespace: a Smartstack namespace
    :param blacklist: a list of blacklisted location tuples in the form (location, value)
    :param system_paasta_config: a SystemPaastaConfig object representing the system configuration
    :param soa_dir: SOA dir
    :returns: a dictionary of the form::

        {
          'location_type': {
              'unique_location_name': {
                  'service.instance': <# of available backends>
              },
              'other_unique_location_name': ...
          }
        }
    """
    namespace_config = long_running_service_tools.load_service_namespace_config(
        service=service, namespace=namespace, soa_dir=soa_dir
    )
    return get_smartstack_replication_for_attribute(
        attribute=namespace_config.get_discover(),
        service=service,
        namespace=namespace,
        blacklist=blacklist,
        system_paasta_config=system_paasta_config,
    )
Retrieves number of available backends for given service :param service: A service name :param namespace: A Smartstack namespace :param blacklist: A list of blacklisted location tuples in the form (location, value) :param system_paasta_config: A SystemPaastaConfig object representing the system configuration. :param soa_dir: SOA dir :returns: a dictionary of the form :: { 'location_type': { 'unique_location_name': { 'service.instance': <# of available backends> }, 'other_unique_location_name': ... } }
load_smartstack_info_for_service
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_smartstack_replication_for_attribute(
    attribute: str,
    service: str,
    namespace: str,
    blacklist: DeployBlacklist,
    system_paasta_config: SystemPaastaConfig,
) -> Dict[str, Dict[str, int]]:
    """Load smartstack replication from a host with the specified attribute.

    :param attribute: a Mesos attribute
    :param service: a service name, like 'example_service'
    :param namespace: a particular smartstack namespace to inspect, like 'main'
    :param blacklist: a list of blacklisted location tuples in the form of (location, value)
    :param system_paasta_config: a SystemPaastaConfig object representing the system configuration
    :returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
        (the dictionary will contain keys for all unique attribute values)
    :raises NoSlavesAvailableError: when the blacklist filters out every slave
    """
    eligible_slaves = mesos_tools.get_all_slaves_for_blacklist_whitelist(
        blacklist=blacklist, whitelist=None
    )
    if not eligible_slaves:
        raise NoSlavesAvailableError

    slaves_by_value = mesos_tools.get_mesos_slaves_grouped_by_attribute(
        slaves=eligible_slaves, attribute=attribute
    )
    full_name = compose_job_id(service, namespace)
    # arbitrarily choose the first host with a given attribute value to query
    # for replication stats
    return {
        value: get_replication_for_services(
            synapse_host=hosts[0]["hostname"],
            synapse_port=system_paasta_config.get_synapse_port(),
            synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
            services=[full_name],
        )
        for value, hosts in slaves_by_value.items()
    }
Loads smartstack replication from a host with the specified attribute :param attribute: a Mesos attribute :param service: A service name, like 'example_service' :param namespace: A particular smartstack namespace to inspect, like 'main' :param blacklist: A list of blacklisted location tuples in the form of (location, value) :param system_paasta_config: A SystemPaastaConfig object representing the system configuration. :returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>} (the dictionary will contain keys for unique all attribute values)
get_smartstack_replication_for_attribute
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_replication_for_all_services(
    synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str
) -> Dict[str, int]:
    """Return the replication level for all services known to this synapse haproxy.

    :param synapse_host: the host that this check should contact for replication information
    :param synapse_port: the port that this check should contact for replication information
    :param synapse_haproxy_url_format: the format of the synapse haproxy URL
    :returns: a dictionary mapping service names to an integer number of
        available replicas
    """
    up_service_names = (
        backend["pxname"]
        for backend in get_multiple_backends(
            services=None,
            synapse_host=synapse_host,
            synapse_port=synapse_port,
            synapse_haproxy_url_format=synapse_haproxy_url_format,
        )
        if backend_is_up(backend)
    )
    return collections.Counter(up_service_names)
Returns the replication level for all services known to this synapse haproxy :param synapse_host: The host that this check should contact for replication information. :param synapse_port: The port that this check should contact for replication information. :param synapse_haproxy_url_format: The format of the synapse haproxy URL. :returns available_instance_counts: A dictionary mapping the service names to an integer number of available replicas.
get_replication_for_all_services
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_replication_for_services(
    synapse_host: str,
    synapse_port: int,
    synapse_haproxy_url_format: str,
    services: Collection[str],
) -> Dict[str, int]:
    """Return the replication level for the provided services.

    This check is intended to be used with an haproxy load balancer, and
    relies on the implementation details of that choice.

    :param synapse_host: the host that this check should contact for replication information
    :param synapse_port: the port that this check should contact for replication information
    :param synapse_haproxy_url_format: the format of the synapse haproxy URL
    :param services: a list of strings that are the service names that should
        be checked for replication
    :returns: a dictionary mapping each requested service name to an integer
        number of available replicas (0 when no backend is up)
    """
    backends = get_multiple_backends(
        services=services,
        synapse_host=synapse_host,
        synapse_port=synapse_port,
        synapse_haproxy_url_format=synapse_haproxy_url_format,
    )
    up_counts = collections.Counter(
        backend["pxname"] for backend in backends if backend_is_up(backend)
    )
    return {service_name: up_counts[service_name] for service_name in services}
Returns the replication level for the provided services This check is intended to be used with an haproxy load balancer, and relies on the implementation details of that choice. :param synapse_host: The host that this check should contact for replication information. :param synapse_port: The port that this check should contact for replication information. :param synapse_haproxy_url_format: The format of the synapse haproxy URL. :param services: A list of strings that are the service names that should be checked for replication. :returns available_instance_counts: A dictionary mapping the service names to an integer number of available replicas :returns None: If it cannot connect to the specified synapse host and port
get_replication_for_services
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def ip_port_hostname_from_svname(svname: str) -> Tuple[str, int, str]:
    """Parse the haproxy svname that smartstack creates.

    In old versions of synapse the svname looks like ``ip:port_hostname``; in
    versions newer than dd5843c987740a5d5ce1c83b12b258b7253784a8 it looks like
    ``hostname_ip:port``.

    :param svname: an svname, in either of the formats described above
    :returns: a tuple of (ip, port, hostname)
    """
    # Underscore-separated pieces: exactly one of them splits on ":" into two
    # halves — that's the ip:port pair; whatever remains is the hostname.
    pieces = set(svname.split("_"))
    addr_pieces = {piece for piece in pieces if len(piece.split(":")) == 2}
    hostname = pieces.difference(addr_pieces).pop()
    address, port = addr_pieces.pop().split(":")
    return address, int(port), hostname
This parses the haproxy svname that smartstack creates. In old versions of synapse, this is in the format ip:port_hostname. In versions newer than dd5843c987740a5d5ce1c83b12b258b7253784a8 it is hostname_ip:port :param svname: A svname, in either of the formats described above :returns ip_port_hostname: A tuple of ip, port, hostname.
ip_port_hostname_from_svname
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def match_backends_and_pods(
    backends: Iterable[HaproxyBackend], pods: Iterable[V1Pod]
) -> List[Tuple[Optional[HaproxyBackend], Optional[V1Pod]]]:
    """Return tuples of matching (backend, pod) pairs, as matched by IP.

    Each backend is listed exactly once. A backend without a matching pod
    yields (backend, None); a pod whose IP matches no backend yields
    (None, pod).

    :param backends: an iterable of haproxy backend dictionaries, e.g. the list
        returned by smartstack_tools.get_multiple_backends
    :param pods: an iterable of V1Pod objects
    """
    # Bucket backends by the IP parsed out of their svname: { ip: [backend, ...] }
    by_ip: DefaultDict[str, List[HaproxyBackend]] = collections.defaultdict(list)
    for backend in backends:
        backend_ip, _, _ = ip_port_hostname_from_svname(backend["svname"])
        by_ip[backend_ip].append(backend)

    pairs: List[Tuple[Optional[HaproxyBackend], Optional[V1Pod]]] = []
    for pod in pods:
        # pop() so whatever remains afterwards is exactly the set of backends
        # that matched no pod; an unmatched pod pairs with the [None] default.
        for backend in by_ip.pop(pod.status.pod_ip, [None]):
            pairs.append((backend, pod))

    for unmatched_backends in by_ip.values():
        pairs.extend((backend, None) for backend in unmatched_backends)
    return pairs
Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly once. If a backend does not match with a pod, (backend, None) will be included. If a pod's IP does not match with any backends, (None, pod) will be included. :param backends: An iterable of haproxy backend dictionaries, e.g. the list returned by smartstack_tools.get_multiple_backends. :param pods: An iterable of V1Pod objects.
match_backends_and_pods
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def get_replication_for_instance(
    self, instance_config: LongRunningServiceConfig
) -> Dict[str, Dict[str, Dict[str, int]]]:
    """Return the number of registered instances in each discoverable location,
    per service discovery provider.

    :param instance_config: an instance of LongRunningServiceConfig
    :returns: a dict {'service_discovery_provider': {'location_type': {'service.instance': int}}}
    """
    infos_by_provider = {}
    for provider in self._service_discovery_providers:
        per_location: Dict[str, Dict[str, int]] = {}
        locations_to_hosts = self.get_allowed_locations_and_hosts(instance_config)
        pool = instance_config.get_pool()
        for location, hosts in locations_to_hosts.items():
            # Query hosts in the pool one at a time; only give up (and
            # re-raise) once every candidate host has failed.
            candidates = self.get_hostnames_in_pool(hosts, pool)
            for hostname in candidates:
                try:
                    per_location[location] = self._get_replication_info(
                        location, hostname, instance_config, provider
                    )
                    break
                except Exception as e:
                    log.warning(
                        f"Error while getting replication info for {location} from {hostname}: {e}"
                    )
                    if hostname == candidates[-1]:
                        # Last hostname failed, giving up
                        raise
        infos_by_provider[provider.NAME] = per_location
    return infos_by_provider
Returns the number of registered instances in each discoverable location for each service discovery provider. :param instance_config: An instance of LongRunningServiceConfig. :returns: a dict {'service_discovery_provider': {'location_type': {'service.instance': int}}}
get_replication_for_instance
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def _get_replication_info(
    self,
    location: str,
    hostname: str,
    instance_config: LongRunningServiceConfig,
    provider: ServiceDiscoveryProvider,
) -> Dict[str, int]:
    """Return service.instance and its number of registered instances in
    smartstack at the given location, as a single-entry dict.

    Results are memoized per (location, provider) in self._cache so repeated
    lookups at the same location reuse one query.

    :param location: a string that identifies a habitat, a region, etc.
    :param hostname: a mesos slave hostname to read replication information from
    :param instance_config: an instance of LongRunningServiceConfig
    :returns: a dict {"service.instance": number_of_instances}
    """
    job_id = compose_job_id(instance_config.service, instance_config.instance)
    cache_key = (location, provider.NAME)
    if cache_key not in self._cache:
        self._cache[cache_key] = provider.get_replication_for_all_services(hostname)
    return {job_id: self._cache[cache_key][job_id]}
Returns service.instance and the number of instances registered in smartstack at the location as a dict. :param location: A string that identifies a habitat, a region and etc. :param hostname: A mesos slave hostname to read replication information from. :param instance_config: An instance of LongRunningServiceConfig. :returns: A dict {"service.instance": number_of_instances}.
_get_replication_info
python
Yelp/paasta
paasta_tools/smartstack_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py
Apache-2.0
def setup_volume_mounts(volumes: List[DockerVolume]) -> Dict[str, str]:
    """
    Returns Docker volume mount configurations in the format expected by Spark,
    deduplicating volumes by host path.
    """
    # XXX: why are these necessary?
    implicit_volumes: List[DockerVolume] = cast(
        "List[DockerVolume]",
        [
            {"containerPath": "/etc/passwd", "hostPath": "/etc/passwd", "mode": "RO"},
            {"containerPath": "/etc/group", "hostPath": "/etc/group", "mode": "RO"},
        ],
    )

    conf: Dict[str, str] = {}
    # dedupe volumes, just in case
    mounted_host_paths: Set[str] = set()
    for index, volume in enumerate(volumes + implicit_volumes):
        host_path = volume["hostPath"]
        if host_path in mounted_host_paths:
            log.warning(f"Skipping {host_path} - already added a binding for it.")
            continue
        mounted_host_paths.add(host_path)

        # the names here don't matter too much, so we just use the index in the volume
        # list as an arbitrary name
        prefix = f"spark.kubernetes.executor.volumes.hostPath.{index}"
        conf[f"{prefix}.mount.path"] = volume["containerPath"]
        conf[f"{prefix}.options.path"] = host_path
        conf[f"{prefix}.mount.readOnly"] = str(volume["mode"].lower() == "ro").lower()
    return conf
Returns Docker volume mount configurations in the format expected by Spark.
setup_volume_mounts
python
Yelp/paasta
paasta_tools/spark_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py
Apache-2.0
def get_spark_driver_monitoring_annotations(
    spark_config: Dict[str, str],
) -> Dict[str, str]:
    """
    Returns Spark driver pod annotations - currently used for Prometheus metadata.
    """
    annotations: Dict[str, str] = {}

    ui_port = spark_config.get("spark.ui.port")
    if ui_port:
        annotations["prometheus.io/port"] = ui_port
        annotations["prometheus.io/path"] = "/metrics/prometheus"

    # The executor annotations carry the non-truncated service/instance names;
    # mirror them onto the driver pod only when both are present.
    service = spark_config.get(
        "spark.kubernetes.executor.annotation.paasta.yelp.com/service"
    )
    instance = spark_config.get(
        "spark.kubernetes.executor.annotation.paasta.yelp.com/instance"
    )
    if service and instance:
        annotations["paasta.yelp.com/service"] = service
        annotations["paasta.yelp.com/instance"] = instance

    return annotations
Returns Spark driver pod annotations - currently used for Prometheus metadata.
get_spark_driver_monitoring_annotations
python
Yelp/paasta
paasta_tools/spark_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py
Apache-2.0
def get_spark_driver_monitoring_labels(
    spark_config: Dict[str, str],
    user: str,
) -> Dict[str, str]:
    """
    Returns Spark driver pod labels - generally for Prometheus metric relabeling.
    """
    return {
        "paasta.yelp.com/prometheus_shard": SPARK_PROMETHEUS_SHARD,
        "spark.yelp.com/user": user,
        # empty string when no UI port is configured
        "spark.yelp.com/driver_ui_port": str(spark_config.get("spark.ui.port", "")),
    }
Returns Spark driver pod labels - generally for Prometheus metric relabeling.
get_spark_driver_monitoring_labels
python
Yelp/paasta
paasta_tools/spark_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py
Apache-2.0
def get_cluster_name(self):
    """Return the name of the Tron cluster.

    :raises TronNotConfigured: when the system Tron config has no cluster name
    """
    if "cluster_name" not in self:
        raise TronNotConfigured(
            "Could not find name of Tron cluster in system Tron config"
        )
    return self["cluster_name"]
:returns The name of the Tron cluster
get_cluster_name
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def get_url(self):
    """Return the URL for the Tron master's API.

    :raises TronNotConfigured: when the system Tron config has no URL
    """
    if "url" not in self:
        raise TronNotConfigured(
            "Could not find URL of Tron master in system Tron config"
        )
    return self["url"]
:returns The URL for the Tron master's API
get_url
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def parse_time_variables(
    command: str, parse_time: Optional[datetime.datetime] = None
) -> str:
    """Parses an input string and uses the Tron-style dateparsing to replace
    time variables.

    Currently supports only the date/time variables listed in the tron
    documentation: http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc

    :param command: input string to be parsed
    :param parse_time: reference datetime object used to resolve the date and
        time strings, defaults to now
    :returns: a string with the date and time variables replaced
    """
    if parse_time is None:
        parse_time = datetime.datetime.now()
    # We build up a tron context object that has the right
    # methods to parse tron-style time syntax
    job_context = tron_command_context.JobRunContext(
        tron_command_context.CommandContext()
    )
    # The tron context object needs the run_time attribute set so it knows
    # how to interpret the date strings
    job_context.job_run.run_time = parse_time
    return StringFormatter(job_context).format(command)
Parses an input string and uses the Tron-style dateparsing to replace time variables. Currently supports only the date/time variables listed in the tron documentation: http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc :param input_string: input string to be parsed :param parse_time: Reference Datetime object to parse the date and time strings, defaults to now. :returns: A string with the date and time variables replaced
parse_time_variables
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def _get_tron_k8s_cluster_override(cluster: str) -> Optional[str]:
    """
    Look up the compute cluster that should run a Tronjob in place of the
    given cluster name, or None when no override mapping is present.

    We have certain Tron masters that are named differently from the compute
    cluster that should actually be used (e.g., we might have
    tron-XYZ-test-prod, but instead of scheduling on XYZ-test-prod, we'd like
    to schedule jobs on test-prod). To control this, we have an optional
    config item that we'll puppet onto Tron masters that need this type of
    tron master -> compute cluster override, which this function reads.
    """
    overrides = load_system_paasta_config().get_tron_k8s_cluster_overrides()
    return overrides.get(cluster)
Return the name of a compute cluster if there's a different compute cluster that should be used to run a Tronjob. Will return None if no override mapping is present We have certain Tron masters that are named differently from the compute cluster that should actually be used ( e.g., we might have tron-XYZ-test-prod, but instead of scheduling on XYZ-test-prod, we'd like to schedule jobs on test-prod). To control this, we have an optional config item that we'll puppet onto Tron masters that need this type of tron master -> compute cluster override which this function will read.
_get_tron_k8s_cluster_override
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def get_secret_volumes(self) -> List[TronSecretVolume]:  # type: ignore
    """Attach the secret_volume_name to each secret volume so that tron /
    task_processing can load it downstream without replicating code."""
    tron_volumes = []
    for volume in super().get_secret_volumes():
        tron_volume = TronSecretVolume(
            secret_volume_name=self.get_secret_volume_name(volume["secret_name"]),
            secret_name=volume["secret_name"],
            container_path=volume["container_path"],
            items=volume.get("items", []),
        )
        # we have a different place where the default can come from (tron) and
        # we don't want to insert the wrong default here
        if "default_mode" in volume:
            tron_volume["default_mode"] = volume["default_mode"]
        tron_volumes.append(tron_volume)
    return tron_volumes
Adds the secret_volume_name to the object so tron/task_processing can load it downstream without replicating code.
get_secret_volumes
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def get_node_affinities(self) -> Optional[List[Dict[str, Union[str, List[str]]]]]:
    """Converts deploy_whitelist and deploy_blacklist in node affinities.

    NOTE: At the time of writing, `kubectl describe` does not show affinities,
    only selectors. To see affinities, use `kubectl get pod -o json` instead.

    WARNING: At the time of writing, we only used requiredDuringSchedulingIgnoredDuringExecution
    node affinities in Tron as we currently have no use case for
    preferredDuringSchedulingIgnoredDuringExecution node affinities.

    :returns: a list of {key, operator, value} requirement dicts, or None when
        there are no requirements (so callers can omit the affinity entirely)
    """
    # Start from the deploy allow/deny lists...
    requirements = allowlist_denylist_to_requirements(
        allowlist=self.get_deploy_whitelist(),
        denylist=self.get_deploy_blacklist(),
    )
    # ...then append any explicitly configured node selectors.
    node_selectors = self.config_dict.get("node_selectors", {})
    requirements.extend(
        raw_selectors_to_requirements(
            raw_selectors=node_selectors,
        )
    )
    system_paasta_config = load_system_paasta_config()
    if system_paasta_config.get_enable_tron_tsc():
        # PAASTA-18198: To improve AZ balance with Karpenter, we temporarily allow specifying zone affinities per pool
        pool_node_affinities = system_paasta_config.get_pool_node_affinities()
        if pool_node_affinities and self.get_pool() in pool_node_affinities:
            current_pool_node_affinities = pool_node_affinities[self.get_pool()]
            # If the service already has a node selector for a zone, we don't want to override it
            if current_pool_node_affinities and not contains_zone_label(
                node_selectors
            ):
                requirements.extend(
                    raw_selectors_to_requirements(
                        raw_selectors=current_pool_node_affinities,
                    )
                )
    # None (rather than an empty list) signals "no affinity block needed".
    if not requirements:
        return None
    return [
        {"key": key, "operator": op, "value": value}
        for key, op, value in requirements
    ]
Converts deploy_whitelist and deploy_blacklist in node affinities. NOTE: At the time of writing, `kubectl describe` does not show affinities, only selectors. To see affinities, use `kubectl get pod -o json` instead. WARNING: At the time of writing, we only used requiredDuringSchedulingIgnoredDuringExecution node affinities in Tron as we currently have no use case for preferredDuringSchedulingIgnoredDuringExecution node affinities.
get_node_affinities
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def get_pool(self) -> str:
    """Return the pool for this action, applying system-level defaults.

    Spark executors always run in the system-configured Spark driver pool.
    Otherwise the action's configured ``pool`` wins, falling back to the
    Tron default pool override.  This is useful for environments like spam
    where we want a different default pool without per-action config; the
    defaults are optional config items puppeted onto Tron masters.
    """
    system_config = load_system_paasta_config()
    if self.get_executor() == "spark":
        return system_config.get_default_spark_driver_pool_override()
    return self.config_dict.get(
        "pool", system_config.get_tron_default_pool_override()
    )
Returns the default pool override if pool is not defined in the action configuration. This is useful for environments like spam to allow us to default the pool to spam but allow users to override this value. To control this, we have an optional config item that we'll puppet onto Tron masters which this function will read.
get_pool
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def format_tron_job_dict(job_config: TronJobConfig, k8s_enabled: bool = False):
    """Generate a dict of tronfig for a job, from the TronJobConfig.

    :param job_config: TronJobConfig
    :param k8s_enabled: accepted by the signature but not read anywhere in
        this function body -- NOTE(review): confirm whether it should be
        forwarded to format_tron_action_dict or is kept only for caller
        compatibility.
    """
    # Format each of the job's actions, keyed by action name.
    action_dict = {
        action_config.get_action_name(): format_tron_action_dict(
            action_config=action_config,
        )
        for action_config in job_config.get_actions()
    }

    result = {
        "node": job_config.get_node(),
        "schedule": job_config.get_schedule(),
        "actions": action_dict,
        "monitoring": job_config.get_monitoring(),
        "queueing": job_config.get_queueing(),
        "run_limit": job_config.get_run_limit(),
        "all_nodes": job_config.get_all_nodes(),
        "enabled": job_config.get_enabled(),
        "allow_overlap": job_config.get_allow_overlap(),
        "max_runtime": job_config.get_max_runtime(),
        "time_zone": job_config.get_time_zone(),
        "expected_runtime": job_config.get_expected_runtime(),
    }
    # A cleanup action is optional; when present it is formatted the same
    # way as a regular action.
    cleanup_config = job_config.get_cleanup_action()
    if cleanup_config:
        cleanup_action = format_tron_action_dict(
            action_config=cleanup_config,
        )
        result["cleanup_action"] = cleanup_action

    # Only pass non-None values, so Tron will use defaults for others
    return {key: val for key, val in result.items() if val is not None}
Generate a dict of tronfig for a job, from the TronJobConfig. :param job_config: TronJobConfig
format_tron_job_dict
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def load_tron_service_config_no_cache(
    service,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    for_validation=False,
):
    """Load all configured jobs for a service, and any additional config values."""
    raw_config = read_extra_service_information(
        service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
    )
    # Templates are config-sharing helpers, not runnable jobs; drop them first.
    return [
        TronJobConfig(
            name=job_name,
            service=service,
            cluster=cluster,
            config_dict=job_dict,
            load_deployments=load_deployments,
            soa_dir=soa_dir,
            for_validation=for_validation,
        )
        for job_name, job_dict in filter_templates_from_config(raw_config).items()
    ]
Load all configured jobs for a service, and any additional config values.
load_tron_service_config_no_cache
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def create_complete_config(
    service: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    k8s_enabled: bool = False,
    dry_run: bool = False,
):
    """Generate a namespace configuration file for Tron, for a service.

    :param service: service name to generate tronfig for
    :param cluster: the Tron cluster the config is for
    :param soa_dir: soaconfigs directory to read from
    :param k8s_enabled: forwarded to format_tron_job_dict
    :param dry_run: when True, load configs in validation mode
    :returns: the YAML-serialized namespace configuration
    """
    job_configs = load_tron_service_config(
        service=service,
        cluster=cluster,
        load_deployments=True,
        soa_dir=soa_dir,
        for_validation=dry_run,
    )
    complete_config = {
        "jobs": {
            job_config.get_name(): format_tron_job_dict(
                job_config=job_config, k8s_enabled=k8s_enabled
            )
            for job_config in job_configs
        }
    }
    return yaml.dump(complete_config, Dumper=Dumper, default_flow_style=False)
Generate a namespace configuration file for Tron, for a service.
create_complete_config
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
    """Returns the Tron clusters a service is configured to deploy to."""
    service_dir = os.path.join(soa_dir, service)
    # The cluster name is embedded in the config filename: tron-<cluster>.yaml
    cluster_pattern = re.compile(r"/tron-([0-9a-z-_]*)\.yaml$")
    matches = (
        cluster_pattern.search(filename)
        for filename in glob.glob(f"{service_dir}/*.yaml")
    )
    return [match.group(1) for match in matches if match is not None]
Returns the Tron clusters a service is configured to deploy to.
list_tron_clusters
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
    """Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
    try:
        # Expected shape: service.job.job_run.action.uuid
        service, job, _job_run, action, _uuid = task_id.split(".")
    except Exception as e:
        log.warning(
            f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
        )
        return "unknown_service", "unknown_job.unknown_action"
    return service, f"{job}.{action}"
Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785
parse_service_instance_from_executor_id
python
Yelp/paasta
paasta_tools/tron_tools.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py
Apache-2.0
def get_namespace(self) -> str:
    """Get namespace from config, default to the value from INSTANCE_TYPE_TO_K8S_NAMESPACE for this instance type, 'paasta' if that isn't defined."""
    fallback = INSTANCE_TYPE_TO_K8S_NAMESPACE.get(self.get_instance_type(), "paasta")
    return self.config_dict.get("namespace", fallback)
Get namespace from config, default to the value from INSTANCE_TYPE_TO_K8S_NAMESPACE for this instance type, 'paasta' if that isn't defined.
get_namespace
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_cpu_quota(self) -> float:
    """Gets the --cpu-quota option to be passed to docker

    Calculation: (cpus + cpus_burst_add) * cfs_period_us

    :returns: The number to be passed to the --cpu-quota docker flag"""
    total_cpus = self.get_cpus() + self.get_cpu_burst_add()
    return total_cpus * self.get_cpu_period()
Gets the --cpu-quota option to be passed to docker Calculation: (cpus + cpus_burst_add) * cfs_period_us :returns: The number to be passed to the --cpu-quota docker flag
get_cpu_quota
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_cap_add(self) -> Iterable[DockerParameter]:
    """Get the --cap-add options to be passed to docker

    Generated from the cap_add configuration option, which is a list of
    capabilities, e.g. {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}.

    :returns: A generator of cap_add options to be passed as --cap-add flags"""
    for capability in self.config_dict.get("cap_add", []):
        yield {"key": "cap-add", "value": str(capability)}
Get the --cap-add options to be passed to docker Generated from the cap_add configuration option, which is a list of capabilities. Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']} :returns: A generator of cap_add options to be passed as --cap-add flags
get_cap_add
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_cap_drop(self) -> Iterable[DockerParameter]:
    """Generates --cap-drop options to be passed to docker by default,
    which makes them not able to perform special privilege escalation stuff
    https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
    """
    yield from ({"key": "cap-drop", "value": capability} for capability in CAPS_DROP)
Generates --cap-drop options to be passed to docker by default, which makes them not able to perform special privilege escalation stuff https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
get_cap_drop
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_cap_args(self) -> Iterable[DockerParameter]:
    """Generate all --cap-add/--cap-drop parameters, ensuring not to have overlapping settings"""
    added = list(self.get_cap_add())
    if added and is_using_unprivileged_containers():
        log.warning(
            "Unprivileged containerizer detected, adding capabilities will not work properly"
        )
    yield from added
    # Never drop a capability that was explicitly added.
    added_values = {param["value"] for param in added}
    for param in self.get_cap_drop():
        if param["value"] not in added_values:
            yield param
Generate all --cap-add/--cap-drop parameters, ensuring not to have overlapping settings
get_cap_args
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def format_docker_parameters(
    self,
    with_labels: bool = True,
    system_paasta_config: Optional["SystemPaastaConfig"] = None,
) -> List[DockerParameter]:
    """Formats extra flags for running docker.  Will be added in the format
    `["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
    Note: values must be strings

    :param with_labels: Whether to build docker parameters with or without labels
    :param system_paasta_config: consulted when deciding whether to enforce
        disk quotas via docker
    :returns: A list of parameters to be added to docker run"""
    # Baseline resource parameters: swap, CFS period and quota.
    parameters: List[DockerParameter] = [
        {"key": "memory-swap", "value": self.get_mem_swap()},
        {"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
        {"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
    ]
    if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
        # Enforce the configured disk limit through docker's storage-opt;
        # get_disk() is in MB, docker wants bytes.
        parameters.append(
            {
                "key": "storage-opt",
                "value": f"size={int(self.get_disk() * 1024 * 1024)}",
            }
        )
    if with_labels:
        # Labels let other tooling map a running container back to its
        # paasta service/instance.
        parameters.extend(
            [
                {"key": "label", "value": "paasta_service=%s" % self.service},
                {"key": "label", "value": "paasta_instance=%s" % self.instance},
            ]
        )
    # User-configured extra docker args are appended verbatim.
    extra_docker_args = self.get_extra_docker_args()
    if extra_docker_args:
        for key, value in extra_docker_args.items():
            parameters.extend([{"key": key, "value": value}])
    parameters.extend(self.get_docker_init())
    parameters.extend(self.get_cap_args())
    return parameters
Formats extra flags for running docker. Will be added in the format `["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command Note: values must be strings :param with_labels: Whether to build docker parameters with or without labels :returns: A list of parameters to be added to docker run
format_docker_parameters
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_container_type(self) -> Optional[str]:
    """Get Mesos containerizer type. Default to DOCKER if gpus are not used.

    :returns: Mesos containerizer type, DOCKER or MESOS"""
    # GPU workloads require the MESOS containerizer.
    return "MESOS" if self.get_gpus() is not None else "DOCKER"
Get Mesos containerizer type. Default to DOCKER if gpus are not used. :returns: Mesos containerizer type, DOCKER or MESOS
get_container_type
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_env_dictionary(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """A dictionary of key/value pairs that represent environment variables
    to be injected to the container environment

    :param system_paasta_config: used when deriving the docker URL for the
        PAASTA_GIT_SHA variable
    :returns: env var name -> value; both keys and values coerced to str
    """
    # Baseline PAASTA_* metadata every workload receives.
    env = {
        "PAASTA_SERVICE": self.service,
        "PAASTA_INSTANCE": self.instance,
        "PAASTA_CLUSTER": self.cluster,
        "PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
        "PAASTA_DOCKER_IMAGE": self.get_docker_image(),
        "PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
        "PAASTA_RESOURCE_MEM": str(self.get_mem()),
        "PAASTA_RESOURCE_DISK": str(self.get_disk()),
    }
    if self.get_gpus() is not None:
        env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
    try:
        env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(
            self.get_docker_url(system_paasta_config=system_paasta_config)
        )
    except Exception:
        # Best-effort: the git sha is nice-to-have, so failure to derive it
        # never blocks env construction.
        pass
    image_version = self.get_image_version()
    if image_version is not None:
        env["PAASTA_IMAGE_VERSION"] = image_version
    team = self.get_team()
    if team:
        env["PAASTA_MONITORING_TEAM"] = team
    instance_type = self.get_instance_type()
    if instance_type:
        env["PAASTA_INSTANCE_TYPE"] = instance_type
    # Our workloads interact with AWS quite a lot, so it comes handy to
    # propagate an "application ID" in the user-agent of API requests
    # for debugging purposes (max length is 50 chars from AWS docs).
    env["AWS_SDK_UA_APP_ID"] = f"{self.service}.{self.instance}"[:50]
    # User-provided env wins over every generated value above.
    user_env = self.config_dict.get("env", {})
    env.update(user_env)
    return {str(k): str(v) for (k, v) in env.items()}
A dictionary of key/value pairs that represent environment variables to be injected to the container environment
get_env_dictionary
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_env(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """Basic get_env that simply returns the basic env, other classes might need
    to override this getter for more implementation-specific env getting"""
    env = self.get_env_dictionary(system_paasta_config=system_paasta_config)
    return env
Basic get_env that simply returns the basic env, other classes might need to override this getter for more implementation-specific env getting
get_env
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_args(self) -> Optional[List[str]]:
    """Get the docker args specified in the service's configuration.

    If not specified in the config and if cmd is not specified, defaults to
    an empty array.  If not specified in the config but cmd is specified,
    defaults to null.  If specified in the config and if cmd is also
    specified, throws an exception -- only one may be specified.

    :returns: the configured args; ``[]`` when neither args nor cmd is set;
        None when cmd is set and args is not
    """
    if self.get_cmd() is None:
        return self.config_dict.get("args", [])
    configured_args = self.config_dict.get("args", None)
    if configured_args is None:
        return None
    # TODO validation stuff like this should be moved into a check_*
    raise InvalidInstanceConfig(
        "Instance configuration can specify cmd or args, but not both."
    )
Get the docker args specified in the service's configuration. If not specified in the config and if cmd is not specified, defaults to an empty array. If not specified in the config but cmd is specified, defaults to null. If specified in the config and if cmd is also specified, throws an exception. Only one may be specified. :param service_config: The service instance's configuration dictionary :returns: An array of args specified in the config, ``[]`` if not specified and if cmd is not specified, otherwise None if not specified but cmd is specified
get_args
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_deploy_constraints(
    self,
    blacklist: DeployBlacklist,
    whitelist: DeployWhitelist,
    system_deploy_blacklist: DeployBlacklist,
    system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
    """Return the combination of deploy_blacklist and deploy_whitelist as a
    list of constraints.
    """
    constraints = []
    # Service-level lists first, then system-level lists.
    constraints += deploy_blacklist_to_constraints(blacklist)
    constraints += deploy_whitelist_to_constraints(whitelist)
    constraints += deploy_blacklist_to_constraints(system_deploy_blacklist)
    constraints += deploy_whitelist_to_constraints(system_deploy_whitelist)
    return constraints
Return the combination of deploy_blacklist and deploy_whitelist as a list of constraints.
get_deploy_constraints
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_docker_image(self) -> str:
    """Get the docker image name (with tag) for a given service branch from
    a generated deployments.json file."""
    # No branch_dict means no deployment info; fall back to empty string.
    if self.branch_dict is None:
        return ""
    return self.branch_dict["docker_image"]
Get the docker image name (with tag) for a given service branch from a generated deployments.json file.
get_docker_image
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_image_version(self) -> Optional[str]:
    """Get additional information identifying the Docker image from a
    generated deployments.json file."""
    branch_dict = self.branch_dict
    if branch_dict is None:
        return None
    # image_version is optional metadata; absent key means None.
    return branch_dict.get("image_version")
Get additional information identifying the Docker image from a generated deployments.json file.
get_image_version
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_desired_state(self) -> str:
    """Get the desired state (either 'start' or 'stop') for a given service
    branch from a generated deployments.json file."""
    # Without deployment info, assume the service should be running.
    if self.branch_dict is None:
        return "start"
    return self.branch_dict["desired_state"]
Get the desired state (either 'start' or 'stop') for a given service branch from a generated deployments.json file.
get_desired_state
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_force_bounce(self) -> Optional[str]:
    """Get the force_bounce token for a given service branch from a generated
    deployments.json file.  When this token changes, the instance should be
    recreated and bounced even if no other parameters have changed.  May be
    None or a string, generally a timestamp.
    """
    # No deployment info -> no bounce token.
    if self.branch_dict is None:
        return None
    return self.branch_dict["force_bounce"]
Get the force_bounce token for a given service branch from a generated deployments.json file. This is a token that, when changed, indicates that the instance should be recreated and bounced, even if no other parameters have changed. This may be None or a string, generally a timestamp.
get_force_bounce
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_dependencies(self) -> Optional[Dict]:
    """Get the contents of the dependencies_dict pointed to by the
    dependency_reference, or 'main' if no dependency_reference exists.

    :returns: the list of dicts under the selected reference, or None when
        no dependencies are configured
    """
    dependencies = self.config_dict.get("dependencies")
    if dependencies:
        reference = self.get_dependencies_reference() or "main"
        return dependencies.get(reference)
    return None
Get the contents of the dependencies_dict pointed to by the dependency_reference or 'main' if no dependency_reference exists Defaults to None if not specified in the config. :returns: A list of dictionaries specified in the dependencies_dict, None if not specified
get_dependencies
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_outbound_firewall(self) -> Optional[str]:
    """Return 'block', 'monitor', or None as configured in security->outbound_firewall

    :returns: A string specified in the config, None if not specified"""
    security = self.config_dict.get("security")
    return security.get("outbound_firewall") if security else None
Return 'block', 'monitor', or None as configured in security->outbound_firewall Defaults to None if not specified in the config :returns: A string specified in the config, None if not specified
get_outbound_firewall
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def color_text(color: str, text: str) -> str:
    """Return text that can be printed color.

    Honors the NO_COLOR convention (https://no-color.org): when the NO_COLOR
    environment variable is set to any non-empty value, the text is returned
    unmodified.

    :param color: ANSI color code
    :param text: a string
    :return: a string with ANSI color encoding"""
    # Per no-color.org, ANY non-empty value disables color; the previous
    # check only honored NO_COLOR=1, which is stricter than the convention.
    if os.getenv("NO_COLOR", ""):
        return text
    # any time text returns to default, we want to insert our color.
    replaced = text.replace(PaastaColors.DEFAULT, PaastaColors.DEFAULT + color)
    # then wrap the beginning and end in our color/default.
    return color + replaced + PaastaColors.DEFAULT
Return text that can be printed color. :param color: ANSI color code :param text: a string :return: a string with ANSI color encoding
color_text
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_git_url(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
    """Get the git url for a service.  Assumes that the service's repo matches
    its name and lives under the ``services/`` org -- i.e. calling this with
    'test' yields git@github.yelpcorp.com:services/test by default, unless the
    service config overrides ``git_url``.

    :param service: The service name to get a URL for
    :returns: A git url to the service's repository"""
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    # TODO: PAASTA-16927: get this from system config `.git_config`
    fallback_url = format_git_url(
        "git", "github.yelpcorp.com", f"services/{service}"
    )
    return general_config.get("git_url", fallback_url)
Get the git url for a service. Assumes that the service's repo matches its name, and that it lives in services- i.e. if this is called with the string 'test', the returned url will be git@github.yelpcorp.com:services/test. :param service: The service name to get a URL for :returns: A git url to the service's repository
get_git_url
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def register_log_writer(name: str) -> Callable[[_LogWriterTypeT], _LogWriterTypeT]:
    """Returns a decorator that registers that log writer class at a given name
    so get_log_writer_class can find it."""

    def decorator(writer_class: _LogWriterTypeT) -> _LogWriterTypeT:
        # Record the class in the module registry, then hand it back unchanged.
        _log_writer_classes[name] = writer_class
        return writer_class

    return decorator
Returns a decorator that registers that log writer class at a given name so get_log_writer_class can find it.
register_log_writer
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def configure_log() -> None:
    """We will log to the yocalhost binded scribe."""
    global _log_writer
    writer_config = load_system_paasta_config().get_log_writer()
    writer_class = get_log_writer_class(writer_config["driver"])
    _log_writer = writer_class(**writer_config.get("options", {}))
We will log to the yocalhost binded scribe.
configure_log
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def format_log_line(
    level: str,
    cluster: str,
    service: str,
    instance: str,
    component: str,
    line: str,
    # Fixed annotation: the default is None, so the type is Optional[str],
    # not str (PEP 484).
    timestamp: Optional[str] = None,
) -> str:
    """Accepts a string 'line'.  Returns an appropriately-formatted JSON string
    which contains 'line' plus the identifying metadata.

    :param level: log level label (e.g. 'event' or 'debug')
    :param component: must be a valid log component (validated below)
    :param timestamp: optional pre-computed timestamp; defaults to now
    :raises: whatever validate_log_component raises for a bad component
    """
    validate_log_component(component)
    if not timestamp:
        timestamp = _now()
    # Strip terminal color codes so the logged message is plain text.
    line = remove_ansi_escape_sequences(line.strip())
    message = json.dumps(
        {
            "timestamp": timestamp,
            "level": level,
            "cluster": cluster,
            "service": service,
            "instance": instance,
            "component": component,
            "message": line,
        },
        sort_keys=True,
    )
    return message
Accepts a string 'line'. Returns an appropriately-formatted dictionary which can be serialized to JSON for logging and which contains 'line'.
format_log_line
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def format_audit_log_line(
    cluster: str,
    instance: str,
    user: str,
    host: str,
    action: str,
    # Fixed annotations: these defaults are None, so the types are Optional
    # (PEP 484) -- the previous `dict = None` / `str = None` were wrong.
    action_details: Optional[dict] = None,
    service: Optional[str] = None,
    timestamp: Optional[str] = None,
) -> str:
    """Accepts:

        * a string 'user' describing the user that initiated the action
        * a string 'host' describing the server where the user initiated the action
        * a string 'action' describing an action performed by paasta_tools
        * a dict 'action_details' optional information about the action

    Returns an appropriately-formatted dictionary which can be serialized to
    JSON for logging and which contains details about an action performed on
    a service/instance.
    """
    if not timestamp:
        timestamp = _now()
    if not action_details:
        action_details = {}
    message = json.dumps(
        {
            "timestamp": timestamp,
            "cluster": cluster,
            "service": service,
            "instance": instance,
            "user": user,
            "host": host,
            "action": action,
            "action_details": action_details,
        },
        sort_keys=True,
    )
    return message
Accepts: * a string 'user' describing the user that initiated the action * a string 'host' describing the server where the user initiated the action * a string 'action' describing an action performed by paasta_tools * a dict 'action_details' optional information about the action Returns an appropriately-formatted dictionary which can be serialized to JSON for logging and which contains details about an action performed on a service/instance.
format_audit_log_line
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def log(
    self,
    service: str,
    line: str,
    component: str,
    level: str = DEFAULT_LOGLEVEL,
    cluster: str = ANY_CLUSTER,
    instance: str = ANY_INSTANCE,
) -> None:
    """This expects someone (currently the paasta cli main()) to have already
    configured the log object. We'll just write things to it.
    """
    # Echo the line to the console first: events to stdout, debug to stderr;
    # anything else is a programming error.
    if level == "event":
        print(f"[service {service}] {line}", file=sys.stdout)
    elif level == "debug":
        print(f"[service {service}] {line}", file=sys.stderr)
    else:
        raise NoSuchLogLevel
    stream_name = get_log_name_for_service(service)
    payload = format_log_line(level, cluster, service, instance, component, line)
    clog.log_line(stream_name, payload)
This expects someone (currently the paasta cli main()) to have already configured the log object. We'll just write things to it.
log
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def timed_flock(fd: _AnyIO, seconds: int = 1) -> Iterator[None]: """Attempt to grab an exclusive flock with a timeout. Uses Timeout, so will raise a TimeoutError if `seconds` elapses before the flock can be obtained """ # We don't want to wrap the user code in the timeout, just the flock grab flock_context = flock(fd) with Timeout(seconds=seconds): flock_context.__enter__() try: yield finally: flock_context.__exit__(*sys.exc_info())
Attempt to grab an exclusive flock with a timeout. Uses Timeout, so will raise a TimeoutError if `seconds` elapses before the flock can be obtained
timed_flock
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def _timeout(process: Popen) -> None: """Helper function for _run. It terminates the process. Doesn't raise OSError, if we try to terminate a non-existing process as there can be a very small window between poll() and kill() """ if process.poll() is None: try: # sending SIGKILL to the process process.kill() except OSError as e: # No such process error # The process could have been terminated meanwhile if e.errno != errno.ESRCH: raise
Helper function for _run. It terminates the process. Doesn't raise OSError, if we try to terminate a non-existing process as there can be a very small window between poll() and kill()
_timeout
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_readable_files_in_glob(glob: str, path: str) -> List[str]:
    """
    Returns a sorted list of files that are readable in an input glob by recursively searching a path
    """
    matches = []
    for root, _dirs, files in os.walk(path):
        for name in files:
            full_path = os.path.join(root, name)
            if (
                os.path.isfile(full_path)
                and os.access(full_path, os.R_OK)
                and fnmatch(full_path, glob)
            ):
                matches.append(full_path)
    matches.sort()
    return matches
Returns a sorted list of files that are readable in an input glob by recursively searching a path
get_readable_files_in_glob
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """
    Reads Paasta configs in specified directory in lexicographical order and deep
    merges the dictionaries (last file wins).
    """
    # Fail early with a descriptive error for a missing or unreadable dir.
    if not os.path.isdir(path):
        raise PaastaNotConfiguredError(
            "Could not find system paasta configuration directory: %s" % path
        )
    if not os.access(path, os.R_OK):
        raise PaastaNotConfiguredError(
            "Could not read from system paasta configuration directory: %s" % path
        )
    try:
        readable = get_readable_files_in_glob(glob="*.json", path=path)
        # Pair each file with its stat result so parse results can be cached
        # on file identity/mtime by callers.
        file_stats = frozenset({(fn, os.stat(fn)) for fn in readable})
        return parse_system_paasta_config(file_stats, path)
    except IOError as e:
        raise PaastaNotConfiguredError(
            f"Could not load system paasta config file {e.filename}: {e.strerror}"
        )
Reads Paasta configs in specified directory in lexicographical order and deep merges the dictionaries (last file wins).
load_system_paasta_config
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def optionally_load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """
    Tries to load the system paasta config, but will return an empty configuration if not available,
    without raising.
    """
    try:
        return load_system_paasta_config(path=path)
    except PaastaNotConfiguredError:
        # Swallow only the "not configured" case; an empty config is a valid
        # fallback for callers that can work without system settings.
        return SystemPaastaConfig({}, "")
Tries to load the system paasta config, but will return an empty configuration if not available, without raising.
optionally_load_system_paasta_config
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def parse_system_paasta_config(
    file_stats: FrozenSet[Tuple[str, os.stat_result]], path: str
) -> "SystemPaastaConfig":
    """Merge a set of ``(filename, os.stat_result)`` pairs into one config.

    Files are read in sorted filename order so the merge is deterministic
    (frozenset iteration order is arbitrary). Duplicate keys across files are
    rejected by ``deep_merge_dictionaries``, so ordering cannot silently
    change which value wins.

    :param file_stats: set of (filename, stat result) pairs to load
    :param path: the directory the files came from, recorded on the result
    :returns: a SystemPaastaConfig built from the merged dictionaries
    """
    config: SystemPaastaConfigDict = {}
    # Sort by filename: sets iterate in arbitrary order, and the caller
    # promises lexicographical processing.
    for filename, _ in sorted(file_stats, key=lambda fs: fs[0]):
        with open(filename) as f:
            config = deep_merge_dictionaries(
                json.load(f), config, allow_duplicate_keys=False
            )
    return SystemPaastaConfig(config, path)
Pass in a dictionary of filename -> os.stat_result, and this returns the merged parsed configs
parse_system_paasta_config
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_default_spark_driver_pool_override(self) -> str:
    """Return the pool Spark drivers should run in.

    :returns: ``default_spark_driver_pool_override`` from the paasta
        configuration, or the default Spark driver pool when unset
    """
    if "default_spark_driver_pool_override" in self.config_dict:
        return self.config_dict["default_spark_driver_pool_override"]
    return DEFAULT_SPARK_DRIVER_POOL
If defined, fetches the override for what pool to run a Spark driver in. Otherwise, returns the default Spark driver pool. :returns: The default_spark_driver_pool_override specified in the paasta configuration
get_default_spark_driver_pool_override
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_zk_hosts(self) -> str:
    """Return the cluster's zookeeper connection string, without ``zk://``.

    The prefix, when present, is stripped so the value is usable with Kazoo.

    :returns: The zk_hosts specified in the paasta configuration
    :raises PaastaNotConfiguredError: when no ``zookeeper`` key is configured
    """
    if "zookeeper" not in self.config_dict:
        raise PaastaNotConfiguredError(
            "Could not find zookeeper connection string in configuration directory: %s"
            % self.directory
        )
    hosts = self.config_dict["zookeeper"]
    prefix = "zk://"
    return hosts[len(prefix):] if hosts.startswith(prefix) else hosts
Get the zk_hosts defined in this hosts's cluster config file. Strips off the zk:// prefix, if it exists, for use with Kazoo. :returns: The zk_hosts specified in the paasta configuration
get_zk_hosts
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_system_docker_registry(self) -> str:
    """Return the docker registry from this host's cluster config.

    :returns: The docker_registry specified in the paasta configuration
    :raises PaastaNotConfiguredError: when no ``docker_registry`` is configured
    """
    if "docker_registry" in self.config_dict:
        return self.config_dict["docker_registry"]
    raise PaastaNotConfiguredError(
        "Could not find docker registry in configuration directory: %s"
        % self.directory
    )
Get the docker_registry defined in this host's cluster config file. :returns: The docker_registry specified in the paasta configuration
get_system_docker_registry
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_hacheck_sidecar_volumes(self) -> List[DockerVolume]:
    """Return the hacheck sidecar volumes from the system paasta config.

    :returns: the configured volume dicts, normalized by
        ``_reorder_docker_volumes``
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "hacheck_sidecar_volumes" not in self.config_dict:
        raise PaastaNotConfiguredError(
            "Could not find hacheck_sidecar_volumes in configuration directory: %s"
            % self.directory
        )
    configured_volumes = list(self.config_dict["hacheck_sidecar_volumes"])
    return _reorder_docker_volumes(configured_volumes)
Get the hacheck sidecar volumes defined in this host's hacheck_sidecar_volumes config file. :returns: The list of volumes specified in the paasta configuration
get_hacheck_sidecar_volumes
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_volumes(self) -> Sequence[DockerVolume]:
    """Return the volumes from this host's system paasta config.

    :returns: The list of volumes specified in the paasta configuration
    :raises PaastaNotConfiguredError: when no ``volumes`` key is configured
    """
    if "volumes" in self.config_dict:
        return self.config_dict["volumes"]
    raise PaastaNotConfiguredError(
        "Could not find volumes in configuration directory: %s" % self.directory
    )
Get the volumes defined in this host's volumes config file. :returns: The list of volumes specified in the paasta configuration
get_volumes
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_cluster(self) -> str:
    """Return the cluster name from this host's system paasta config.

    :returns: The name of the cluster defined in the paasta configuration
    :raises PaastaNotConfiguredError: when no ``cluster`` key is configured
    """
    if "cluster" in self.config_dict:
        return self.config_dict["cluster"]
    raise PaastaNotConfiguredError(
        "Could not find cluster in configuration directory: %s" % self.directory
    )
Get the cluster defined in this host's cluster config file. :returns: The name of the cluster defined in the paasta configuration
get_cluster
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_log_writer(self) -> LogWriterConfig:
    """Return the ``log_writer`` section of the global paasta config.

    :returns: The log_writer dictionary.
    :raises PaastaNotConfiguredError: when no ``log_writer`` is configured
    """
    if "log_writer" in self.config_dict:
        return self.config_dict["log_writer"]
    raise PaastaNotConfiguredError(
        "Could not find log_writer in configuration directory: %s" % self.directory
    )
Get the log_writer configuration out of global paasta config :returns: The log_writer dictionary.
get_log_writer
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_log_reader(self) -> LogReaderConfig:
    """Return the ``log_reader`` section of the global paasta config.

    :returns: the log_reader dictionary.
    :raises PaastaNotConfiguredError: when no ``log_reader`` is configured
    """
    if "log_reader" in self.config_dict:
        return self.config_dict["log_reader"]
    raise PaastaNotConfiguredError(
        "Could not find log_reader in configuration directory: %s" % self.directory
    )
Get the log_reader configuration out of global paasta config :returns: the log_reader dictionary.
get_log_reader
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_log_readers(self) -> List[LogReaderConfig]:
    """Return the ``log_readers`` section of the global paasta config.

    :returns: the log_readers list of dicts.
    :raises PaastaNotConfiguredError: when no ``log_readers`` is configured
    """
    if "log_readers" in self.config_dict:
        return self.config_dict["log_readers"]
    raise PaastaNotConfiguredError(
        "Could not find log_readers in configuration directory: %s" % self.directory
    )
Get the log_readers configuration out of global paasta config :returns: the log_readers list of dicts.
get_log_readers
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_metrics_provider(self) -> Optional[str]:
    """Return the configured metrics provider, if any.

    ``deployd_metrics_provider`` takes precedence over ``metrics_provider``.

    :returns: A string identifying the metrics_provider, or None when
        neither key is configured
    """
    provider = self.config_dict.get("deployd_metrics_provider")
    if provider is None:
        provider = self.config_dict.get("metrics_provider")
    return provider
Get the metrics_provider configuration out of global paasta config :returns: A string identifying the metrics_provider
get_metrics_provider
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_synapse_haproxy_url_format(self) -> str:
    """Return the format string for the haproxy-synapse status URL.

    The format string takes ``host`` and ``port`` keyword arguments and
    defaults to "http://{host:s}:{port:d}/;csv;norefresh".

    :returns: A format string for constructing the URL of haproxy-synapse's
        status page.
    """
    if "synapse_haproxy_url_format" in self.config_dict:
        return self.config_dict["synapse_haproxy_url_format"]
    return DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
Get a format string for the URL to query for haproxy-synapse state. This format string gets two keyword arguments, host and port. Defaults to "http://{host:s}:{port:d}/;csv;norefresh". :returns: A format string for constructing the URL of haproxy-synapse's status page.
get_synapse_haproxy_url_format
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_envoy_admin_endpoint_format(self) -> str:
    """Return the URL format string for Envoy's admin interface.

    The format string takes ``host``, ``port`` and ``endpoint`` kwargs.
    """
    default_format = "http://{host:s}:{port:d}/{endpoint:s}"
    return self.config_dict.get("envoy_admin_endpoint_format", default_format)
Get the format string for Envoy's admin interface.
get_envoy_admin_endpoint_format
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_envoy_admin_port(self) -> int:
    """Look up Envoy's admin port in /etc/services.

    The service name defaults to ``envoy-admin`` and may be overridden with
    ``envoy_admin_domain_name`` in the system paasta config.
    """
    service_name = self.config_dict.get("envoy_admin_domain_name", "envoy-admin")
    return socket.getservbyname(service_name)
Get the port that Envoy's admin interface is listening on from /etc/services.
get_envoy_admin_port
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_git_config(self) -> Dict:
    """Return git configuration: repo names and their git servers.

    :returns: the ``git_config`` dict, or a default mapping yelpsoa-configs
        to DEFAULT_SOA_CONFIGS_GIT_URL when unset
    """
    if "git_config" in self.config_dict:
        return self.config_dict["git_config"]
    return {
        "git_user": "git",
        "repos": {
            "yelpsoa-configs": {
                "repo_name": "yelpsoa-configs",
                "git_server": DEFAULT_SOA_CONFIGS_GIT_URL,
                "deploy_server": DEFAULT_SOA_CONFIGS_GIT_URL,
            },
        },
    }
Gets git configuration. Includes repo names and their git servers. :returns: the git config dict
get_git_config
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_gunicorn_exporter_sidecar_image_url(self) -> str:
    """Return the docker image URL for the gunicorn_exporter sidecar container."""
    default_image = (
        "docker-paasta.yelpcorp.com:443/gunicorn_exporter-k8s-sidecar:v0.24.0-yelp0"
    )
    return self.config_dict.get("gunicorn_exporter_sidecar_image_url", default_image)
Get the docker image URL for the gunicorn_exporter sidecar container
get_gunicorn_exporter_sidecar_image_url
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_readiness_check_prefix_template(self) -> List[str]:
    """Return the command prefix prepended to readiness check commands.

    Meant for e.g. ``flock`` and ``timeout``. This works around issues
    discovered in PAASTA-17673: in k8s 1.18 the probe timeout wasn't respected
    at all, and in k8s 1.20 it was only partially respected — k8s stopped
    waiting for a response but didn't kill the command inside the container
    (with the dockershim CRI). ``flock`` prevents multiple readiness probes
    from running at once and burning CPU; the generous ``timeout`` lets a slow
    probe finish while still killing a truly-stuck one so another process can
    retry. Once off dockershim, the probe timeout likely needs increasing, but
    this wrapper can then be removed.
    """
    default_prefix = ["flock", "-n", "/readiness_check_lock", "timeout", "120"]
    return self.config_dict.get("readiness_check_prefix_template", default_prefix)
A prefix that will be added to the beginning of the readiness check command. Meant for e.g. `flock` and `timeout`.
get_readiness_check_prefix_template
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def _run(
    command: Union[str, List[str]],
    env: Mapping[str, str] = os.environ,
    timeout: float = None,
    log: bool = False,
    stream: bool = False,
    stdin: Any = None,
    stdin_interrupt: bool = False,
    popen_kwargs: Dict = {},
    **kwargs: Any,
) -> Tuple[int, str]:
    """Given a command, run it. Return a tuple of the return code and any output.

    :param command: the command to run, as a string (shlex-split) or argv list
    :param env: environment for the child process
    :param timeout: If specified, the command will be terminated after timeout
        seconds.
    :param log: If True, the _log will be handled by _run. If set, it is
        mandatory to pass at least a :service: and a :component: parameter.
        Optionally you can pass :cluster:, :instance: and :loglevel:
        parameters for logging.
    :param stream: if True, print output lines as they arrive instead of
        collecting them
    :param stdin: passed through to Popen's stdin
    :param stdin_interrupt: if True, install SIGINT/SIGTERM handlers that
        write a newline to the child's stdin and wait for it to exit
    :param popen_kwargs: extra keyword arguments for Popen (NOTE: this mutable
        default is shared across calls, and stdout/stderr/stdin/env keys are
        overwritten on every call)

    We wanted to use plumbum instead of rolling our own thing with
    subprocess.Popen but were blocked by
    https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC magic.
    """
    output: List[str] = []
    if log:
        # Mandatory when log=True — a missing key raises KeyError here.
        service = kwargs["service"]
        component = kwargs["component"]
        cluster = kwargs.get("cluster", ANY_CLUSTER)
        instance = kwargs.get("instance", ANY_INSTANCE)
        loglevel = kwargs.get("loglevel", DEFAULT_LOGLEVEL)
    try:
        if not isinstance(command, list):
            command = shlex.split(command)
        # stderr is folded into stdout so both streams show up in `output`,
        # interleaved in arrival order.
        popen_kwargs["stdout"] = PIPE
        popen_kwargs["stderr"] = STDOUT
        popen_kwargs["stdin"] = stdin
        popen_kwargs["env"] = env
        process = Popen(command, **popen_kwargs)

        if stdin_interrupt:
            # On SIGINT/SIGTERM, nudge the child via stdin and wait for it to
            # exit on its own rather than killing it.
            # NOTE(review): this assumes stdin is a writable pipe — confirm
            # callers pass stdin=PIPE when setting stdin_interrupt.
            def signal_handler(signum: int, frame: FrameType) -> None:
                process.stdin.write("\n".encode("utf-8"))
                process.stdin.flush()
                process.wait()

            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)

        # start the timer if we specified a timeout
        if timeout:
            proctimer = threading.Timer(timeout, _timeout, [process])
            proctimer.start()

        # Either stream lines to stdout as they arrive or accumulate them.
        outfn: Any = print if stream else output.append
        for linebytes in iter(process.stdout.readline, b""):
            line = linebytes.decode("utf-8", errors="replace").rstrip("\n")
            outfn(line)
            if log:
                _log(
                    service=service,
                    line=line,
                    component=component,
                    level=loglevel,
                    cluster=cluster,
                    instance=instance,
                )
        # when finished, get the exit code
        process.wait()
        returncode = process.returncode
    except OSError as e:
        # e.g. the executable was not found; report the OS error text as the
        # command's output and its errno as the return code.
        if log:
            _log(
                service=service,
                line=e.strerror.rstrip("\n"),
                component=component,
                level=loglevel,
                cluster=cluster,
                instance=instance,
            )
        output.append(e.strerror.rstrip("\n"))
        returncode = e.errno
    except (KeyboardInterrupt, SystemExit):
        # need to clean up the timing thread here
        if timeout:
            proctimer.cancel()
        raise
    else:
        # Stop the timer
        if timeout:
            proctimer.cancel()
    if returncode == -9:
        # -9 presumably means the child was killed by the _timeout helper's
        # signal — confirm against _timeout's implementation.
        output.append(f"Command '{command}' timed out (longer than {timeout}s)")
    return returncode, "\n".join(output)
Given a command, run it. Return a tuple of the return code and any output. :param timeout: If specified, the command will be terminated after timeout seconds. :param log: If True, the _log will be handled by _run. If set, it is mandatory to pass at least a :service: and a :component: parameter. Optionally you can pass :cluster:, :instance: and :loglevel: parameters for logging. We wanted to use plumbum instead of rolling our own thing with subprocess.Popen but were blocked by https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC magic.
_run
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_umask() -> int:
    """Return the current umask for this process. NOT THREAD SAFE."""
    # The umask can only be read by setting it, so set a throwaway value
    # and immediately restore the original.
    current_umask = os.umask(0o0022)
    os.umask(current_umask)
    return current_umask
Get the current umask for this process. NOT THREAD SAFE.
get_umask
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def compose_job_id(
    name: str,
    instance: str,
    git_hash: Optional[str] = None,
    config_hash: Optional[str] = None,
    spacer: str = SPACER,
) -> str:
    """Join service name, instance, and (optionally) both hashes with ``spacer``.

    :param name: The name of the service
    :param instance: The instance of the service
    :param git_hash: git hash component; requires config_hash as well
    :param config_hash: config hash component; requires git_hash as well
    :returns: ``<name><spacer><instance>`` with ``<spacer><git_hash><spacer><config_hash>``
        appended when both hashes are given
    :raises InvalidJobNameError: if exactly one of the two hashes is given
    """
    if bool(git_hash) != bool(config_hash):
        raise InvalidJobNameError(
            "invalid job id because git_hash (%s) and config_hash (%s) must "
            "both be defined or neither can be defined" % (git_hash, config_hash)
        )
    parts = [name, instance]
    if git_hash and config_hash:
        parts.extend([git_hash, config_hash])
    return spacer.join(parts)
Compose a job/app id by concatenating its name, instance, git hash, and config hash. :param name: The name of the service :param instance: The instance of the service :param git_hash: The git_hash portion of the job_id. If git_hash is set, config_hash must also be set. :param config_hash: The config_hash portion of the job_id. If config_hash is set, git_hash must also be set. :returns: <name><SPACER><instance> if no tag, or <name><SPACER><instance><SPACER><hashes>... if extra hash inputs are provided.
compose_job_id
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def decompose_job_id(job_id: str, spacer: str = SPACER) -> Tuple[str, str, str, str]:
    """Split a composed job id back into its components.

    :param job_id: The composed id of the job/app
    :returns: (service name, instance, git hash, config hash); the two hash
        slots are None for the two-component form
    :raises InvalidJobNameError: when the id has neither 2 nor 4 components
    """
    parts = job_id.split(spacer)
    if len(parts) == 2:
        service, instance = parts
        return (service, instance, None, None)
    if len(parts) == 4:
        return (parts[0], parts[1], parts[2], parts[3])
    raise InvalidJobNameError("invalid job id %s" % job_id)
Break a composed job id into its constituent (service name, instance, git hash, config hash) by splitting with ``spacer``. :param job_id: The composed id of the job/app :returns: A tuple (service name, instance, git hash, config hash) that comprise the job_id
decompose_job_id
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def build_docker_image_name(service: str) -> str:
    """Build the registry-qualified docker image name for a service.

    :returns: ``<docker_registry>/services-<service>`` — the
        sanitized-for-Jenkins (s,/,-,g) version of the service's path in git.
        E.g. for github.yelpcorp.com:services/foo the docker image name is
        docker_registry/services-foo.
    """
    registry = get_service_docker_registry(service)
    return f"{registry}/services-{service}"
docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA will look for your images. :returns: a sanitized-for-Jenkins (s,/,-,g) version of the service's path in git. E.g. For github.yelpcorp.com:services/foo the docker image name is docker_registry/services-foo.
build_docker_image_name
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def check_docker_image(
    service: str,
    commit: str,
    image_version: Optional[str] = None,
) -> bool:
    """Check whether exactly one local docker image carries the expected tag.

    :raises ValueError: if more than one docker image with the tag is found.
    :returns: True if there is exactly one matching image found.
    """
    client = get_docker_client()
    image_name = build_docker_image_name(service)
    docker_tag = build_docker_tag(service, commit, image_version)
    result = []
    for image in client.images(name=image_name):
        # image['RepoTags'] may be None.
        # Fixed upstream but only in docker-py 2.
        # https://github.com/docker/docker-py/issues/1401
        if docker_tag in (image["RepoTags"] or []):
            result.append(image)
    if len(result) > 1:
        raise ValueError(
            f"More than one docker image found with tag {docker_tag}\n{result}"
        )
    return len(result) == 1
Checks whether the given image for :service: with :tag: exists. :raises: ValueError if more than one docker image with :tag: found. :returns: True if there is exactly one matching image found.
check_docker_image
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_files_of_type_in_dir(
    file_type: str,
    service: str = None,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> List[str]:
    """Recursively search a soa_configs directory for files of the given type.

    :param file_type: the file-type prefix (kubernetes, slo, etc.); matches
        ``<file_type>-*.yaml``
    :param service: a service name, or None to search every service
    :param soa_dir: path to a soa_configs directory (None means the default)
    :return: a list of matching file paths
    """
    # TODO: Only use INSTANCE_TYPES as input by making file_type Literal
    service = "**" if service is None else service
    soa_dir = DEFAULT_SOA_DIR if soa_dir is None else soa_dir
    pattern = os.path.join(soa_dir, service, f"{file_type}-*.yaml")
    # glob.glob already returns a list; the original comprehension wrapper
    # added nothing.
    return glob.glob(pattern, recursive=True)
Recursively search path if type of file exists. :param file_type: a string of a type of a file (kubernetes, slo, etc.) :param service: a string of a service :param soa_dir: a string of a path to a soa_configs directory :return: a list
get_files_of_type_in_dir
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def list_clusters(
    service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> List[str]:
    """Return the sorted clusters a service deploys to, or all clusters
    if ``service`` is not specified.

    Includes every cluster that has a ``kubernetes-*.yaml`` or ``tron-*.yaml``
    file associated with it.

    :param service: The service name. If unspecified, clusters running any
        service will be included.
    :returns: A sorted list of cluster names
    """
    deploy_files = get_soa_cluster_deploy_files(
        service=service, soa_dir=soa_dir, instance_type=instance_type
    )
    return sorted({cluster for cluster, _ in deploy_files})
Returns a sorted list of clusters a service is configured to deploy to, or all clusters if ``service`` is not specified. Includes every cluster that has a ``kubernetes-*.yaml`` or ``tron-*.yaml`` file associated with it. :param service: The service name. If unspecified, clusters running any service will be included. :returns: A sorted list of cluster names
list_clusters
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_service_instance_list_no_cache(
    service: str,
    cluster: Optional[str] = None,
    instance_type: str = None,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
    """Enumerate the instances defined for a service as a list of tuples.

    :param service: The service name
    :param cluster: The cluster to read the configuration for; defaults to the
        local cluster from the system paasta config
    :param instance_type: The type of instances to examine: 'kubernetes',
        'tron', or None (default) for both
    :param soa_dir: The SOA config directory to read from
    :returns: A list of tuples of (name, instance)
    """
    if not cluster:
        cluster = load_system_paasta_config().get_cluster()
    # An unrecognized (or None) instance_type means "all types".
    if instance_type in INSTANCE_TYPES:
        instance_types: Tuple[str, ...] = (instance_type,)
    else:
        instance_types = INSTANCE_TYPES
    instance_list: List[Tuple[str, str]] = []
    for current_type in instance_types:
        instance_list.extend(
            read_service_instance_names(
                service=service,
                instance_type=current_type,
                cluster=cluster,
                soa_dir=soa_dir,
            )
        )
    log.debug("Enumerated the following instances: %s", instance_list)
    return instance_list
Enumerate the instances defined for a service as a list of tuples. :param service: The service name :param cluster: The cluster to read the configuration for :param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both :param soa_dir: The SOA config directory to read from :returns: A list of tuples of (name, instance) for each instance defined for the service name
get_service_instance_list_no_cache
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_services_for_cluster(
    cluster: str = None, instance_type: str = None, soa_dir: str = DEFAULT_SOA_DIR
) -> List[Tuple[str, str]]:
    """Retrieve all services and instances defined to run in a cluster.

    :param cluster: The cluster to read the configuration for; defaults to
        the local cluster from the system paasta config
    :param instance_type: The type of instances to examine: 'kubernetes',
        'tron', or None (default) for both
    :param soa_dir: The SOA config directory to read from
    :returns: A list of tuples of (service, instance)
    """
    if not cluster:
        cluster = load_system_paasta_config().get_cluster()
    rootdir = os.path.abspath(soa_dir)
    log.debug(
        "Retrieving all service instance names from %s for cluster %s", rootdir, cluster
    )
    results: List[Tuple[str, str]] = []
    # Each top-level directory under soa_dir is one service.
    for service_dir in os.listdir(rootdir):
        results.extend(
            get_service_instance_list(service_dir, cluster, instance_type, soa_dir)
        )
    return results
Retrieve all services and instances defined to run in a cluster. :param cluster: The cluster to read the configuration for :param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both :param soa_dir: The SOA config directory to read from :returns: A list of tuples of (service, instance)
get_services_for_cluster
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_latest_deployment_tag(
    refs: Dict[str, str], deploy_group: str
) -> Tuple[str, str, Optional[str]]:
    """Find the most recent deploy tag for a deploy group.

    :param refs: A dictionary mapping git refs to shas
    :param deploy_group: The deployment group to return a deploy tag for
    :returns: (ref, sha, image_version) for the deploy tag with the most
        recent timestamp; (None, None, None) when no tag matches
    """
    pattern = re.compile(
        r"^refs/tags/paasta-%s(?:\+(?P<image_version>.*)){0,1}-(?P<dtime>\d{8}T\d{6})-deploy$"
        % deploy_group
    )
    best_dtime = None
    best = (None, None, None)
    for ref_name, sha in refs.items():
        match = pattern.match(ref_name)
        if not match:
            continue
        groups = match.groupdict()
        # Timestamps are zero-padded %Y%m%dT%H%M%S, so plain string
        # comparison orders them chronologically.
        if best_dtime is None or groups["dtime"] > best_dtime:
            best_dtime = groups["dtime"]
            best = (ref_name, sha, groups["image_version"])
    return best
Gets the latest deployment tag and sha for the specified deploy_group :param refs: A dictionary mapping git refs to shas :param deploy_group: The deployment group to return a deploy tag for :returns: A tuple of the form (ref, sha, image_version) where ref is the actual deployment tag (with the most recent timestamp), sha is the sha it points at and image_version provides additional version information about the image
get_latest_deployment_tag
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_config_hash(config: Any, force_bounce: str = None) -> str:
    """Hash a configuration into a short ``configXXXXXXXX`` identifier.

    Anything JSON-serializable works. Only the first 8 hex digits of the MD5
    are kept so the identifier stays short.

    :param config: The configuration to hash
    :param force_bounce: a timestamp (in the form of a string) that is
        appended before hashing and can be used to force a hash change
    :returns: "config" plus the first 8 hex characters of the MD5 digest
    """
    # sort_keys makes the hash independent of dict insertion order.
    digest_input = json.dumps(config, sort_keys=True).encode("UTF-8") + (
        force_bounce or ""
    ).encode("UTF-8")
    return "config%s" % hashlib.md5(digest_input).hexdigest()[:8]
Create an MD5 hash of the configuration dictionary to be sent to Kubernetes. Or anything really, so long as str(config) works. Returns the first 8 characters so things are not really long. :param config: The configuration to hash :param force_bounce: a timestamp (in the form of a string) that is appended before hashing that can be used to force a hash change :returns: A MD5 hash of str(config)
get_config_hash
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
    """Extract the git sha that is encoded in a docker image url.

    :param docker_url: the image url to parse
    :param long: return the full sha instead of the first 8 characters
    """
    if ":paasta-" in docker_url:
        deployment_version = get_deployment_version_from_dockerurl(docker_url)
        git_sha = deployment_version.sha if deployment_version else ""
    else:
        # Fall back to the old behavior if the docker_url does not follow the
        # expected pattern: take whatever follows the final "-".
        git_sha = docker_url.split("/")[-1].split("-")[-1]
    # Further ensure to only grab the image label in case not using paasta images
    git_sha = git_sha.split(":")[-1]
    return git_sha if long else git_sha[:8]
We encode the sha of the code that built a docker image *in* the docker url. This function takes that url as input and outputs the sha.
get_git_sha_from_dockerurl
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_image_version_from_dockerurl(docker_url: str) -> Optional[str]:
    """Extract the optional extra image metadata encoded in a docker url.

    :returns: the image_version component, or None when the url carries none
    """
    deployment_version = get_deployment_version_from_dockerurl(docker_url)
    if not deployment_version:
        return None
    return deployment_version.image_version
We can optionally encode additional metadata about the docker image *in* the docker url. This function takes that url as input and outputs the sha.
get_image_version_from_dockerurl
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def is_under_replicated(
    num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
    """Decide whether availability has dropped below a critical percentage.

    :param num_available: How many things are up
    :param expected_count: How many things you think should be up
    :param crit_threshold: Int from 0-100
    :returns: Tuple of (under_replicated, availability ratio as a percentage)
    """
    if expected_count == 0:
        # Nothing expected means trivially fully replicated.
        ratio = 100.0
    else:
        ratio = num_available / float(expected_count) * 100
    return (ratio < int(crit_threshold), ratio)
Calculates if something is under replicated :param num_available: How many things are up :param expected_count: How many things you think should be up :param crit_threshold: Int from 0-100 :returns: Tuple of (bool, ratio)
is_under_replicated
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def deploy_blacklist_to_constraints(
    deploy_blacklist: DeployBlacklist,
) -> List[Constraint]:
    """Convert blacklisted (attribute, value) pairs into "UNLIKE" constraints.

    :param deploy_blacklist: List of [attribute, value] locations to blacklist
    :returns: List of [attribute, "UNLIKE", value] constraints
    """
    return [
        [location[0], "UNLIKE", location[1]] for location in deploy_blacklist
    ]
Converts a blacklist of locations into tron appropriate constraints. :param blacklist: List of lists of locations to blacklist :returns: List of lists of constraints
deploy_blacklist_to_constraints
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def deploy_whitelist_to_constraints(
    deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
    """Convert a whitelist of locations into a single "LIKE" constraint.

    :param deploy_whitelist: an (attribute, [values]) pair to whitelist,
        or None
    :returns: a one-element list of [attribute, "LIKE", values-regex]
        constraints, or [] when no whitelist is given
    """
    if deploy_whitelist is None:
        return []
    region_type, regions = deploy_whitelist
    return [[region_type, "LIKE", "|".join(regions)]]
Converts a whitelist of locations into tron appropriate constraints :param deploy_whitelist: List of lists of locations to whitelist :returns: List of lists of constraints
deploy_whitelist_to_constraints
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def format_table(
    rows: Iterable[Union[str, Sequence[str]]], min_spacing: int = 2
) -> List[str]:
    """Format rows as an aligned table for command-line output.

    :param rows: each row is either a sequence of cell strings, or a plain
        string that is passed through verbatim. All cell-sequence rows must
        have the same number of columns.
    :param min_spacing: minimum number of spaces between adjacent columns
    :returns: list of formatted lines
    """
    cell_rows = [row for row in rows if not isinstance(row, str)]
    # If every row is a literal string there is nothing to align.
    if not cell_rows:
        return cast(List[str], rows)

    # Each column is as wide as its widest cell (by on-screen width).
    column_widths = [
        max(terminal_len(row[col]) for row in cell_rows)
        for col in range(len(cell_rows[0]))
    ]

    separator = " " * min_spacing
    last_col = len(cell_rows[0]) - 1
    formatted = []
    for row in rows:
        if isinstance(row, str):
            formatted.append(row)
            continue
        # Pad with terminal_len-aware spacing; the last column is unpadded
        # to avoid trailing whitespace.
        cells = [
            cell
            if col == last_col
            else cell + " " * (column_widths[col] - terminal_len(cell))
            for col, cell in enumerate(row)
        ]
        formatted.append(separator.join(cells))
    return formatted
Formats a table for use on the command line. :param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string to be inserted verbatim. Each row (except literal strings) should be the same number of elements as all the others. :returns: A string containing rows formatted as a table.
format_table
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def is_deploy_step(step: str) -> bool:
    """Return True when ``step`` names an instance to deploy to.

    Predefined non-deploy step types (e.g. itest) and ``command-*`` steps
    are not deploy steps.
    """
    if step in DEPLOY_PIPELINE_NON_DEPLOY_STEPS:
        return False
    return not step.startswith("command-")
Returns true if the given step deploys to an instancename Returns false if the step is a predefined step-type, e.g. itest or command-*
is_deploy_step
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def ldap_user_search(
    cn: str,
    search_base: str,
    search_ou: str,
    ldap_host: str,
    username: str,
    password: str,
) -> Set[str]:
    """Connects to LDAP and raises a subclass of LDAPOperationResult when it fails"""
    # Require a verified TLS connection using the system CA bundle.
    tls_config = ldap3.Tls(
        validate=ssl.CERT_REQUIRED, ca_certs_file="/etc/ssl/certs/ca-certificates.crt"
    )
    server = ldap3.Server(ldap_host, use_ssl=True, tls=tls_config)
    # raise_exceptions=True makes bind/search failures raise
    # LDAPOperationResult subclasses instead of returning False.
    conn = ldap3.Connection(
        server, user=username, password=password, raise_exceptions=True
    )
    conn.bind()
    # Members of the given group, excluding disabled accounts
    # (userAccountControl=514 is "normal account, disabled" in Active Directory).
    search_filter = f"(&(memberOf=CN={cn},{search_ou})(!(userAccountControl=514)))"
    # Paged search keeps each server response bounded to 1000 entries and
    # limits the whole operation to 10 seconds.
    entries = conn.extend.standard.paged_search(
        search_base=search_base,
        search_scope=ldap3.SUBTREE,
        search_filter=search_filter,
        attributes=["sAMAccountName"],
        paged_size=1000,
        time_limit=10,
    )
    # Collect the account name of every matching entry.
    return {entry["attributes"]["sAMAccountName"] for entry in entries}
Connects to LDAP and raises a subclass of LDAPOperationResult when it fails
ldap_user_search
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def get_k8s_url_for_cluster(cluster: str) -> Optional[str]:
    """Map a paasta cluster name to its k8s apiserver url, if known.

    Annoyingly, there's two layers of aliases: one to figure out what k8s
    server url to use (this one) and another to figure out what soaconfigs
    filename to use ;_;

    This exists so that we can map something like `--cluster pnw-devc` into
    spark-pnw-devc's k8s apiserver url without needing to update any
    soaconfigs/alter folk's muscle memory.

    Ideally we can get rid of this entirely once spark-run reads soaconfigs
    in a manner more closely aligned to what we do with other paasta
    workloads (i.e., have it automatically determine where to run based on
    soaconfigs filenames - and not rely on explicit config)

    :param cluster: paasta cluster name (possibly an eks alias)
    :return: the apiserver url, or None when the cluster is unknown
    """
    # Load the system config once instead of twice (the original called
    # load_system_paasta_config() for each lookup).
    system_paasta_config = load_system_paasta_config()
    # First resolve eks aliases (e.g. `pnw-devc` -> `spark-pnw-devc`), falling
    # back to the name as given.
    realized_cluster = system_paasta_config.get_eks_cluster_aliases().get(
        cluster, cluster
    )
    return (
        system_paasta_config.get_kube_clusters()
        .get(realized_cluster, {})
        .get("server")
    )
Annoyingly, there's two layers of aliases: one to figure out what k8s server url to use (this one) and another to figure out what soaconfigs filename to use ;_; This exists so that we can map something like `--cluster pnw-devc` into spark-pnw-devc's k8s apiserver url without needing to update any soaconfigs/alter folk's muscle memory. Ideally we can get rid of this entirely once spark-run reads soaconfigs in a manner more closely aligned to what we do with other paasta workloads (i.e., have it automatically determine where to run based on soaconfigs filenames - and not rely on explicit config)
get_k8s_url_for_cluster
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def maybe_load_previous_config(
    filename: str, config_loader: Callable[[TextIO], dict]
) -> Optional[dict]:
    """Best-effort load of an existing configuration file.

    :param str filename: path to load from
    :param Callable[[TextIO], dict] config_loader: parser for the configuration
    :return: configuration data, None if loading fails
    """
    try:
        with open(filename, "r") as config_file:
            return config_loader(config_file)
    except Exception:
        # A missing or unparseable previous config is not an error here.
        return None
Try to load configuration file :param str filename: path to load from :param Callable[[TextIO], dict] config_loader: parser for the configuration :return: configuration data, None if loading fails
maybe_load_previous_config
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def write_json_configuration_file(filename: str, configuration: dict) -> None:
    """Atomically write configuration out as JSON.

    :param str filename: path to write to
    :param dict configuration: configuration data
    """
    # atomic_file_write replaces the destination only after a successful write.
    with atomic_file_write(filename) as output_file:
        json.dump(
            configuration,
            output_file,
            indent=2,
            sort_keys=True,
            separators=(",", ": "),
        )
Atomically write configuration to JSON file :param str filename: path to write to :param dict configuration: configuration data
write_json_configuration_file
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def write_yaml_configuration_file(
    filename: str, configuration: dict, check_existing: bool = True
) -> None:
    """Atomically write configuration out as YAML.

    :param str filename: path to write to
    :param dict configuration: configuration data
    :param bool check_existing: if existing file already matches config, do not overwrite
    """
    if check_existing:
        previous = maybe_load_previous_config(filename, yaml.safe_load)
        # Only skip the write when a non-empty previous config matches exactly.
        if previous and previous == configuration:
            return

    with atomic_file_write(filename) as output_file:
        # Stamp the file so humans know it is generated, and where/when.
        output_file.write(
            "# This file is automatically generated by paasta_tools.\n"
            "# It was automatically generated at {now} on {host}.\n".format(
                host=socket.getfqdn(), now=datetime.datetime.now().isoformat()
            )
        )
        yaml.safe_dump(
            configuration,
            output_file,
            indent=2,
            explicit_start=True,
            default_flow_style=False,
            allow_unicode=False,
        )
Atomically write configuration to YAML file :param str filename: path to write to :param dict configuration: configuration data :param bool check_existing: if existing file already matches config, do not overwrite
write_yaml_configuration_file
python
Yelp/paasta
paasta_tools/utils.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py
Apache-2.0
def __call__(self, request: Request) -> Response:
    """Authorize the incoming request, returning 403 when enforcement denies it."""
    # Keep only the token itself, dropping the "Bearer" scheme prefix if present.
    auth_header = request.headers.get("Authorization", "").strip()
    token = auth_header.split()[-1] if auth_header else ""
    outcome = self.is_request_authorized(
        request.path,
        token,
        request.method,
        request.swagger_data.get("service", None),
    )
    if self.enforce and not outcome.authorized:
        # Surface the denial reason both in the body and in a header.
        return HTTPForbidden(
            body=json.dumps({"reason": outcome.reason}),
            headers={"X-Auth-Failure-Reason": outcome.reason},
            content_type="application/json",
            charset="utf-8",
        )
    return self.handler(request)
Extracts relevant metadata from request, and checks if it is authorized
__call__
python
Yelp/paasta
paasta_tools/api/tweens/auth.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/auth.py
Apache-2.0
def is_request_authorized(
    self,
    path: str,
    token: str,
    method: str,
    service: Optional[str],
) -> AuthorizationOutcome:
    """Ask the external policy endpoint whether this API request is allowed.

    :param str path: API path
    :param str token: authentication token
    :param str method: http method
    :param service: service the request concerns, if any
    :return: auth outcome
    """
    request_payload = {
        "input": {
            "path": path,
            "backend": "paasta",
            "token": token,
            "method": method,
            "service": service,
        },
    }
    try:
        response = self.session.post(
            url=self.endpoint,
            json=request_payload,
            timeout=2,
        ).json()
    except Exception as e:
        # Any transport/parse failure is treated as a denial.
        logger.exception(f"Issue communicating with auth endpoint: {e}")
        return AuthorizationOutcome(False, "Auth backend error")

    allowed = response.get("result", {}).get("allowed")
    if allowed is None:
        # The endpoint replied, but not in the shape we expect.
        return AuthorizationOutcome(False, "Malformed auth response")
    if not allowed:
        return AuthorizationOutcome(False, response["result"].get("reason", "Denied"))
    return AuthorizationOutcome(True, response["result"].get("reason", "Ok"))
Check if API request is authorized :param str path: API path :param str token: authentication token :param str method: http method :return: auth outcome
is_request_authorized
python
Yelp/paasta
paasta_tools/api/tweens/auth.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/auth.py
Apache-2.0
def cprofile_tween_factory(handler, registry):
    """Tween for profiling API requests and sending them to scribe.

    yelp_profiling does define a tween, but it is designed more for PaaSTA
    services. So, we need to define our own.

    :param handler: the next tween/view callable in the pyramid chain
    :param registry: the pyramid registry (provides ``settings``)
    :return: the wrapping tween callable
    """

    def cprofile_tween(request):
        # yelp_profiling is an optional dependency; without it, this tween
        # is a transparent pass-through.
        if yelp_profiling is None:
            return handler(request)

        config = PaastaCProfileConfig(registry.settings)
        processor = YelpSOARequestProcessor(config, registry)
        context_manager = CProfileContextManager(config, processor)
        # uses the config and processor to decide whether or not to cprofile
        # the request
        with context_manager(request):
            processor.begin_request(request)
            # If handler() raises, end_request still runs (via finally) and
            # reports the default 500 status.
            status_code = 500
            try:
                response = handler(request)
                status_code = response.status_code
                return response
            finally:
                processor.end_request(request, status_code)

    return cprofile_tween
Tween for profiling API requests and sending them to scribe. yelp_profiling does define a tween, but it is designed more for PaaSTA services. So, we need to define our own.
cprofile_tween_factory
python
Yelp/paasta
paasta_tools/api/tweens/profiling.py
https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/profiling.py
Apache-2.0