Skip to content

Commit

Permalink
fixing incorrectly merged code
Browse files Browse the repository at this point in the history
  • Loading branch information
MiaAltieri committed Oct 24, 2023
1 parent 1dcab3c commit 5fe28d1
Showing 1 changed file with 1 addition and 172 deletions.
173 changes: 1 addition & 172 deletions src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -1117,178 +1117,7 @@ def _peer_data(self, scope: Scopes):
if not self._peers:
return {}.setdefault(scope, {})
scope_obj = self._scope_obj(scope)
return self._peers.data[scope_obj]

@staticmethod
def _compare_secret_ids(secret_id1: str, secret_id2: str) -> bool:
"""Reliable comparison on secret equality.
NOTE: Secret IDs may be of any of these forms:
- secret://9663a790-7828-4186-8b21-2624c58b6cfe/citb87nubg2s766pab40
- secret:citb87nubg2s766pab40
"""
if not secret_id1 or not secret_id2:
return False

regex = re.compile(".*[^/][/:]")

pure_id1 = regex.sub("", secret_id1)
pure_id2 = regex.sub("", secret_id2)

if pure_id1 and pure_id2:
return pure_id1 == pure_id2
return False

def _juju_secret_set(self, scope: Scopes, key: str, value: str) -> str:
    """Helper function setting Juju secret in Juju versions >3.0.

    Re-uses the cached Secret object for the scope when one exists (fetching
    from Juju is slow); otherwise creates a brand-new secret and records its
    ID in the peer relation data.

    Returns the ID of the secret holding this scope's content.
    Raises SecretNotAddedError if a brand-new secret could not be created.
    """
    peer_data = self._peer_data(scope)
    self._update_juju_secrets_cache(scope)

    secret = self.secrets[scope].get(Config.Secrets.SECRET_LABEL)

    # It's not the first secret for the scope, we can reuse the existing one
    # that was fetched in the previous call, as fetching secrets from juju is
    # slow
    if secret:
        secret_cache = self.secrets[scope][Config.Secrets.SECRET_CACHE_LABEL]

        if secret_cache.get(key) == value:
            logging.debug(f"Key {scope}:{key} has this value defined already")
        else:
            secret_cache[key] = value
            try:
                secret.set_content(secret_cache)
                # Fixed previously garbled log message ("was {key} set").
                logging.debug(f"Secret {scope}:{key} was set")
            except OSError as error:
                logging.error(
                    f"Error in attempt to set '{key}' secret for scope '{scope}'. "
                    f"Existing keys were: {list(secret_cache.keys())}. {error}"
                )

    # We need to create a brand-new secret for this scope
    else:
        scope_obj = self._scope_obj(scope)

        secret = scope_obj.add_secret({key: value})
        if not secret:
            raise SecretNotAddedError(f"Couldn't set secret {scope}:{key}")

        self.secrets[scope][Config.Secrets.SECRET_LABEL] = secret
        self.secrets[scope][Config.Secrets.SECRET_CACHE_LABEL] = {key: value}
        logging.debug(f"Secret {scope}:{key} published (as first). ID: {secret.id}")
        # Persist the secret ID in peer data so later events can locate it.
        peer_data.update({Config.Secrets.SECRET_INTERNAL_LABEL: secret.id})

    return self.secrets[scope][Config.Secrets.SECRET_LABEL].id

def _update_juju_secrets_cache(self, scope: Scopes) -> None:
    """Helper function to retrieve all Juju secrets.

    This function is responsible for direct communication with the Juju Secret
    store to retrieve the Mono Charm's single, unique Secret object's metadata,
    and --on success-- its contents.

    In parallel with retrieving secret information, it's immediately locally cached,
    making sure that we have the snapshot of the secret for the lifetime of the event
    (that's being processed) without additional fetch requests to the Juju Secret Store.

    (Note: metadata, i.e. the Secret object itself is cached as it may be necessary for
    later operations, like updating contents.)

    NOTE(review): despite what an earlier docstring claimed, this function
    always returns None -- it exits early when no secret ID is recorded in
    peer data or the secret cannot be found, and otherwise populates the
    local cache in place.
    """
    peer_data = self._peer_data(scope)

    # No secret has ever been published for this scope -- nothing to fetch.
    if not peer_data.get(Config.Secrets.SECRET_INTERNAL_LABEL):
        return

    # Only hit the Juju store if nothing is cached yet for this event.
    if Config.Secrets.SECRET_CACHE_LABEL not in self.secrets[scope]:
        try:
            # NOTE: Secret contents are not yet available!
            secret = self.model.get_secret(id=peer_data[Config.Secrets.SECRET_INTERNAL_LABEL])
        except SecretNotFoundError as e:
            logging.debug(
                f"No secret found for ID {peer_data[Config.Secrets.SECRET_INTERNAL_LABEL]}, {e}"
            )
            return

        logging.debug(f"Secret {peer_data[Config.Secrets.SECRET_INTERNAL_LABEL]} downloaded")

        # We keep the secret object around -- needed when applying modifications
        self.secrets[scope][Config.Secrets.SECRET_LABEL] = secret

        # We retrieve and cache actual secret data for the lifetime of the event scope
        self.secrets[scope][Config.Secrets.SECRET_CACHE_LABEL] = secret.get_content()

def _get_juju_secrets_cache(self, scope: Scopes):
    """Return the locally cached secret contents for this scope, or None if nothing is cached."""
    return self.secrets[scope].get(Config.Secrets.SECRET_CACHE_LABEL)

def _juju_secret_get(self, scope: Scopes, key: str) -> Optional[str]:
    """Fetch a single secret value for the scope, refreshing the local cache first.

    Returns None when the key is empty, absent from the cache, or marked as
    deleted via the tombstone label.
    """
    if not key:
        return

    self._update_juju_secrets_cache(scope)
    cache = self._get_juju_secrets_cache(scope)
    value = cache.get(key) if cache else None

    # A tombstone value means the key was logically removed earlier
    # (see _juju_secret_remove).
    if value and value != Config.Secrets.SECRET_DELETED_LABEL:
        logging.debug(f"Getting secret {scope}:{key}")
        return value
    logging.debug(f"No value found for secret {scope}:{key}")

def _juju_secret_remove(self, scope: Scopes, key: str) -> None:
    """Remove a Juju 3.x secret.

    The key is not physically dropped from the secret's content; its value is
    overwritten with the "deleted" tombstone label, which readers (e.g.
    _juju_secret_get) treat as absent.
    """
    self._update_juju_secrets_cache(scope)

    secret = self.secrets[scope].get(Config.Secrets.SECRET_LABEL)
    if not secret:
        logging.error(f"Secret {scope}:{key} wasn't deleted: no secrets are available")
        return

    secret_cache = self.secrets[scope].get(Config.Secrets.SECRET_CACHE_LABEL)
    if not secret_cache or key not in secret_cache:
        logging.error(f"No secret {scope}:{key}")
        return

    # Tombstone instead of deleting the key outright, then push the updated
    # content back to Juju.
    secret_cache[key] = Config.Secrets.SECRET_DELETED_LABEL
    secret.set_content(secret_cache)
    # Fixed previously truncated log message (it only printed the key).
    logging.debug(f"Secret {scope}:{key} was deleted")

def check_relation_broken_or_scale_down(self, event: RelationDepartedEvent) -> None:
    """Checks relation departed event is the result of removed relation or scale down.

    Relation departed and relation broken events occur during scaling down or during
    relation removal; only relation departed events have access to metadata to
    determine which case applies, so the outcome is recorded here for later hooks.
    """
    self.set_scaling_down(event)

    if not self.is_scaling_down(event.relation.id):
        return
    logger.info(
        "Scaling down the application, no need to process removed relation in broken hook."
    )

def is_scaling_down(self, rel_id: int) -> bool:
    """Returns True if the application is scaling down."""
    # The flag was stored as a JSON-encoded boolean by set_scaling_down.
    departed_flag = self.unit_peer_data[self._generate_relation_departed_key(rel_id)]
    return json.loads(departed_flag)

def has_departed_run(self, rel_id: int) -> bool:
    """Returns True if the relation departed event has run."""
    # Presence of the key alone signals the departed hook already executed.
    return self._generate_relation_departed_key(rel_id) in self.unit_peer_data

def set_scaling_down(self, event: RelationDepartedEvent) -> None:
    """Sets whether or not the current unit is scaling down."""
    # If the departing unit is this unit, the departure is caused by scaling
    # the application down rather than by removing the relation.
    departing_is_self = event.departing_unit == self.unit
    key = self._generate_relation_departed_key(event.relation.id)
    self.unit_peer_data[key] = json.dumps(departing_is_self)

@staticmethod
def _generate_relation_departed_key(rel_id: int) -> str:
"""Generates the relation departed key for a specified relation id."""
return f"relation_{rel_id}_departed"

# END: helper functions
return self._peers.data[scope_obj] # END: helper functions


if __name__ == "__main__":
Expand Down

0 comments on commit 5fe28d1

Please sign in to comment.