diff --git a/ammico/__init__.py b/ammico/__init__.py
index 3a780334..9ec19fc2 100644
--- a/ammico/__init__.py
+++ b/ammico/__init__.py
@@ -5,7 +5,7 @@
import importlib_metadata as metadata # type: ignore
from ammico.cropposts import crop_media_posts, crop_posts_from_refs
from ammico.display import AnalysisExplorer
-from ammico.faces import EmotionDetector
+from ammico.faces import EmotionDetector, ethical_disclosure
from ammico.multimodal_search import MultimodalSearch
from ammico.summary import SummaryDetector
from ammico.text import TextDetector, TextAnalyzer, PostprocessText
@@ -27,4 +27,5 @@
"PostprocessText",
"find_files",
"get_dataframe",
+ "ethical_disclosure",
]
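
With `ethical_disclosure` exported at package level, the helper can be called directly from the top-level namespace before any `EmotionDetector` run; a minimal sketch, assuming ammico is installed:

```python
import ammico

# Returns True if the disclosure is accepted (interactively or via the
# environment variable named in accept_disclosure), False otherwise.
accepted = ammico.ethical_disclosure(accept_disclosure="DISCLOSURE_AMMICO")
```
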
diff --git a/ammico/colors.py b/ammico/colors.py
index 43cdb0c5..c9581b00 100644
--- a/ammico/colors.py
+++ b/ammico/colors.py
@@ -120,7 +120,7 @@ def rgb2name(
output_color = output_color.lower().replace("grey", "gray")
except ValueError:
delta_e_lst = []
- filtered_colors = webcolors.CSS3_NAMES_TO_HEX
+ filtered_colors = webcolors._definitions._CSS3_NAMES_TO_HEX
for _, img_hex in filtered_colors.items():
cur_clr = webcolors.hex_to_rgb(img_hex)
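
Note that `_CSS3_NAMES_TO_HEX` is a private attribute of webcolors releases newer than 1.13 (hence the `webcolors>1.13` pin in pyproject.toml below). A hedged, version-tolerant variant of this lookup could fall back to the old public constant; a sketch, not part of this change:

```python
import webcolors

try:
    # webcolors > 1.13 keeps the CSS3 name table in a private module
    filtered_colors = webcolors._definitions._CSS3_NAMES_TO_HEX
except AttributeError:
    # older releases exposed the mapping as a public constant
    filtered_colors = webcolors.CSS3_NAMES_TO_HEX
```
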
diff --git a/ammico/display.py b/ammico/display.py
index ed150ab1..ef732c0b 100644
--- a/ammico/display.py
+++ b/ammico/display.py
@@ -101,6 +101,9 @@ def __init__(self, mydict: dict) -> None:
State("setting_Text_revision_numbers", "value"),
State("setting_Emotion_emotion_threshold", "value"),
State("setting_Emotion_race_threshold", "value"),
+ State("setting_Emotion_gender_threshold", "value"),
+ State("setting_Emotion_age_threshold", "value"),
+ State("setting_Emotion_env_var", "value"),
State("setting_Color_delta_e_method", "value"),
State("setting_Summary_analysis_type", "value"),
State("setting_Summary_model", "value"),
@@ -200,6 +203,13 @@ def _create_setting_layout(self):
style={"width": "100%"},
),
),
+ dbc.Col(
+ [
+ html.P(
+ "Select name of the environment variable to accept or reject the disclosure*:"
+ ),
+ ]
+ ),
dbc.Col(
dcc.Input(
type="text",
@@ -246,6 +256,48 @@ def _create_setting_layout(self):
],
align="start",
),
+ dbc.Col(
+ [
+ html.P("Gender threshold"),
+ dcc.Input(
+ type="number",
+ value=50,
+ max=100,
+ min=0,
+ id="setting_Emotion_gender_threshold",
+ style={"width": "100%"},
+ ),
+ ],
+ align="start",
+ ),
+ dbc.Col(
+ [
+ html.P("Age threshold"),
+ dcc.Input(
+ type="number",
+ value=50,
+ max=100,
+ min=0,
+ id="setting_Emotion_age_threshold",
+ style={"width": "100%"},
+ ),
+ ],
+ align="start",
+ ),
+ dbc.Col(
+ [
+ html.P(
+ "Disclosure acceptance environment variable"
+ ),
+ dcc.Input(
+ type="text",
+ value="DISCLOSURE_AMMICO",
+ id="setting_Emotion_env_var",
+ style={"width": "100%"},
+ ),
+ ],
+ align="start",
+ ),
],
style={"width": "100%"},
),
@@ -441,6 +493,9 @@ def _right_output_analysis(
settings_text_revision_numbers: str,
setting_emotion_emotion_threshold: int,
setting_emotion_race_threshold: int,
+ setting_emotion_gender_threshold: int,
+ setting_emotion_age_threshold: int,
+ setting_emotion_env_var: str,
setting_color_delta_e_method: str,
setting_summary_analysis_type: str,
setting_summary_model: str,
@@ -493,8 +548,15 @@ def _right_output_analysis(
elif detector_value == "EmotionDetector":
detector_class = identify_function(
image_copy,
- race_threshold=setting_emotion_race_threshold,
emotion_threshold=setting_emotion_emotion_threshold,
+ race_threshold=setting_emotion_race_threshold,
+ gender_threshold=setting_emotion_gender_threshold,
+ age_threshold=setting_emotion_age_threshold,
+ accept_disclosure=(
+ setting_emotion_env_var
+ if setting_emotion_env_var
+ else "DISCLOSURE_AMMICO"
+ ),
)
elif detector_value == "ColorDetector":
detector_class = identify_function(
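
Because the callback instantiates `EmotionDetector` directly, the disclosure should be answered (or the environment variable set) before the Dash server starts, so that no interactive `input()` prompt fires inside a callback. A minimal sketch, where the data path and port are placeholders:

```python
import os
import ammico

# Accept the disclosure non-interactively before launching the explorer;
# alternatively call ammico.ethical_disclosure() and answer the prompt.
os.environ["DISCLOSURE_AMMICO"] = "True"

image_dict = ammico.find_files(path="data/", limit=15)  # placeholder path
analysis_explorer = ammico.AnalysisExplorer(image_dict)
analysis_explorer.run_server(port=8055)
```
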
diff --git a/ammico/faces.py b/ammico/faces.py
index d3258115..e049a1a5 100644
--- a/ammico/faces.py
+++ b/ammico/faces.py
@@ -80,12 +80,78 @@ def _processor(fname, action, pooch):
)
+def ethical_disclosure(accept_disclosure: str = "DISCLOSURE_AMMICO"):
+ """
+ Asks the user to accept the ethical disclosure.
+
+ Args:
+ accept_disclosure (str): The name of the disclosure variable (default: "DISCLOSURE_AMMICO").
+ """
+ if not os.environ.get(accept_disclosure):
+ accepted = _ask_for_disclosure_acceptance(accept_disclosure)
+ elif os.environ.get(accept_disclosure) == "False":
+ accepted = False
+ elif os.environ.get(accept_disclosure) == "True":
+ accepted = True
+ else:
+ print(
+            "Could not determine disclosure - skipping "
+            "race/ethnicity, gender and age detection."
+ )
+ accepted = False
+ return accepted
+
+
+def _ask_for_disclosure_acceptance(accept_disclosure: str = "DISCLOSURE_AMMICO"):
+ """
+ Asks the user to accept the disclosure.
+ """
+ print("This analysis uses the DeepFace and RetinaFace libraries.")
+ print(
+ """
+ DeepFace and RetinaFace provide wrappers to trained models in face recognition and
+ emotion detection. Age, gender and race / ethnicity models were trained
+ on the backbone of VGG-Face with transfer learning.
+ ETHICAL DISCLOSURE STATEMENT:
+ The Emotion Detector uses RetinaFace to probabilistically assess the gender, age and
+ race of the detected faces. Such assessments may not reflect how the individuals
+ identified by the tool view themselves. Additionally, the classification is carried
+ out in simplistic categories and contains only the most basic classes, for example
+ “male” and “female” for gender. By continuing to use the tool, you certify that you
+ understand the ethical implications such assessments have for the interpretation of
+ the results.
+ """
+ )
+ answer = input("Do you accept the disclosure? (yes/no): ")
+ answer = answer.lower().strip()
+ if answer == "yes":
+ print("You have accepted the disclosure.")
+ print(
+ """Age, gender, race/ethnicity detection will be performed based on the provided
+ confidence thresholds."""
+ )
+ os.environ[accept_disclosure] = "True"
+ accepted = True
+ elif answer == "no":
+ print("You have not accepted the disclosure.")
+ print("No age, gender, race/ethnicity detection will be performed.")
+ os.environ[accept_disclosure] = "False"
+ accepted = False
+ else:
+ print("Please answer with yes or no.")
+        accepted = _ask_for_disclosure_acceptance(accept_disclosure)
+ return accepted
+
+
class EmotionDetector(AnalysisMethod):
def __init__(
self,
subdict: dict,
emotion_threshold: float = 50.0,
race_threshold: float = 50.0,
+ gender_threshold: float = 50.0,
+ age_threshold: float = 50.0,
+ accept_disclosure: str = "DISCLOSURE_AMMICO",
) -> None:
"""
Initializes the EmotionDetector object.
@@ -94,6 +160,10 @@ def __init__(
subdict (dict): The dictionary to store the analysis results.
emotion_threshold (float): The threshold for detecting emotions (default: 50.0).
race_threshold (float): The threshold for detecting race (default: 50.0).
+ gender_threshold (float): The threshold for detecting gender (default: 50.0).
+ age_threshold (float): The threshold for detecting age (default: 50.0).
+            accept_disclosure (str): The name of the disclosure variable that is
+                set upon accepting the disclosure (default: "DISCLOSURE_AMMICO").
"""
super().__init__(subdict)
self.subdict.update(self.set_keys())
@@ -102,8 +172,14 @@ def __init__(
raise ValueError("Emotion threshold must be between 0 and 100.")
if race_threshold < 0 or race_threshold > 100:
raise ValueError("Race threshold must be between 0 and 100.")
+ if gender_threshold < 0 or gender_threshold > 100:
+ raise ValueError("Gender threshold must be between 0 and 100.")
+ if age_threshold < 0 or age_threshold > 100:
+ raise ValueError("Age threshold must be between 0 and 100.")
self.emotion_threshold = emotion_threshold
self.race_threshold = race_threshold
+ self.gender_threshold = gender_threshold
+ self.age_threshold = age_threshold
self.emotion_categories = {
"angry": "Negative",
"disgust": "Negative",
@@ -113,6 +189,7 @@ def __init__(
"surprise": "Neutral",
"neutral": "Neutral",
}
+        self.accept_disclosure = accept_disclosure
+        self.accepted = ethical_disclosure(accept_disclosure)
def set_keys(self) -> dict:
"""
@@ -143,6 +220,44 @@ def analyse_image(self) -> dict:
"""
return self.facial_expression_analysis()
+ def _define_actions(self, fresult: dict) -> list:
+ # Adapt the features we are looking for depending on whether a mask is worn.
+        # White masks interfere with race detection, and emotion detection is unreliable.
+        # Also, depending on the disclosure acceptance, the analysis of gender, age and
+        # ethnicity/race may be skipped.
+ conditional_actions = {
+ "all": ["age", "gender", "race", "emotion"],
+ "all_with_mask": ["age", "gender"],
+ "restricted_access": ["emotion"],
+ "restricted_access_with_mask": [],
+ }
+ if fresult["wears_mask"] and self.accepted:
+ actions = conditional_actions["all_with_mask"]
+ elif fresult["wears_mask"] and not self.accepted:
+ actions = conditional_actions["restricted_access_with_mask"]
+ elif not fresult["wears_mask"] and self.accepted:
+ actions = conditional_actions["all"]
+ elif not fresult["wears_mask"] and not self.accepted:
+ actions = conditional_actions["restricted_access"]
+ else:
+ raise ValueError(
+                "Invalid mask detection {} and disclosure "
+                "acceptance {} result.".format(
+ fresult["wears_mask"], self.accepted
+ )
+ )
+ return actions
+
+ def _ensure_deepface_models(self, actions: list):
+ # Ensure that all data has been fetched by pooch
+ deepface_face_expression_model.get()
+ if "race" in actions:
+ deepface_race_model.get()
+ if "age" in actions:
+ deepface_age_model.get()
+ if "gender" in actions:
+ deepface_gender_model.get()
+
def analyze_single_face(self, face: np.ndarray) -> dict:
"""
Analyzes the features of a single face.
@@ -156,16 +271,8 @@ def analyze_single_face(self, face: np.ndarray) -> dict:
fresult = {}
# Determine whether the face wears a mask
fresult["wears_mask"] = self.wears_mask(face)
- # Adapt the features we are looking for depending on whether a mask is worn.
- # White masks screw race detection, emotion detection is useless.
- actions = ["age", "gender"]
- if not fresult["wears_mask"]:
- actions = actions + ["race", "emotion"]
- # Ensure that all data has been fetched by pooch
- deepface_age_model.get()
- deepface_face_expression_model.get()
- deepface_gender_model.get()
- deepface_race_model.get()
+ actions = self._define_actions(fresult)
+ self._ensure_deepface_models(actions)
# Run the full DeepFace analysis
fresult.update(
DeepFace.analyze(
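
A minimal usage sketch of the reworked `EmotionDetector`, with an image path placeholder and the default environment variable name; all four thresholds are optional and default to 50:

```python
import os
import ammico

# Pre-set the variable to skip the interactive prompt (e.g. in batch jobs);
# leave it unset to be asked once per session instead.
os.environ["DISCLOSURE_AMMICO"] = "True"

mydict = {"filename": "example.jpg"}  # placeholder image
mydict.update(
    ammico.EmotionDetector(
        mydict,
        emotion_threshold=50,
        race_threshold=50,
        gender_threshold=50,
        age_threshold=50,
        accept_disclosure="DISCLOSURE_AMMICO",
    ).analyse_image()
)
print(mydict["wears_mask"], mydict["emotion"])
```
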
diff --git a/ammico/notebooks/DemoNotebook_ammico.ipynb b/ammico/notebooks/DemoNotebook_ammico.ipynb
index 12bd6ea7..e1835326 100644
--- a/ammico/notebooks/DemoNotebook_ammico.ipynb
+++ b/ammico/notebooks/DemoNotebook_ammico.ipynb
@@ -166,7 +166,7 @@
"source": [
"image_dict = ammico.find_files(\n",
" # path=\"/content/drive/MyDrive/misinformation-data/\",\n",
- " path=data_path.as_posix(),\n",
+ " path=str(data_path),\n",
" limit=15,\n",
")"
]
@@ -177,7 +177,30 @@
"source": [
"## Step 2: Inspect the input files using the graphical user interface\n",
"A Dash user interface is to select the most suitable options for the analysis, before running a complete analysis on the whole data set. The options for each detector module are explained below in the corresponding sections; for example, different models can be selected that will provide slightly different results. This way, the user can interactively explore which settings provide the most accurate results. In the interface, the nested `image_dict` is passed through the `AnalysisExplorer` class. The interface is run on a specific port which is passed using the `port` keyword; if a port is already in use, it will return an error message, in which case the user should select a different port number. \n",
- "The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run."
+ "The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run.\n",
+ "\n",
+ "### Ethical disclosure statement\n",
+ "\n",
+    "If you want to run an analysis using the EmotionDetector detector type, you first have to respond to an ethical disclosure statement. This disclosure statement ensures that you only use the full capabilities of the EmotionDetector after you have been made aware of its shortcomings.\n",
+ "\n",
+    "For this, answer \"yes\" or \"no\" to the prompt below. This will set an environment variable with the name given in `accept_disclosure`. To re-run the disclosure prompt, unset the variable by uncommenting the line `os.environ.pop(accept_disclosure, None)`. To permanently set this environment variable, add it to your shell via your `.profile` or `.bashrc` file.\n",
+ "\n",
+    "If the disclosure statement is accepted, the EmotionDetector will perform age, gender and race/ethnicity classification depending on the provided thresholds. If the disclosure is rejected, only the presence of faces and the emotion (if no mask is worn) are detected."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# respond to the disclosure statement\n",
+ "# this will set an environment variable for you\n",
+ "# if you do not want to re-accept the disclosure every time, you can set this environment variable in your shell\n",
+    "# to re-run the disclosure prompt, uncomment the line below to unset the environment variable\n",
+ "accept_disclosure = \"DISCLOSURE_AMMICO\"\n",
+ "# os.environ.pop(accept_disclosure, None)\n",
+ "_ = ammico.ethical_disclosure(accept_disclosure=accept_disclosure)"
]
},
{
@@ -843,7 +866,7 @@
"metadata": {},
"source": [
"## Detection of faces and facial expression analysis\n",
- "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface.\n",
+    "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. First, RetinaFace detects whether faces are present in the image, followed by an analysis of whether face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
"\n",
"
\n",
"\n",
@@ -853,10 +876,11 @@
"\n",
"From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
"\n",
- "A similar threshold as for the emotion recognition is set for the race detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "Similar thresholds as for the emotion recognition are set for the race/ethnicity, gender and age detection: `race_threshold`, `gender_threshold`, `age_threshold`, with the defaults set to 50%, so that only predictions with a confidence above 0.5 are returned in the analysis.\n",
+ "\n",
+ "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
"\n",
- "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold` and \n",
- "`race_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, and `age_threshold` are optional:"
]
},
{
@@ -866,7 +890,9 @@
"outputs": [],
"source": [
"for key in image_dict.keys():\n",
- " image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50).analyse_image()"
+ " image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
+ " gender_threshold=50, age_threshold=50, \n",
+ " accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
]
},
{
@@ -1371,7 +1397,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.5"
+ "version": "3.9.16"
}
},
"nbformat": 4,
diff --git a/ammico/test/test_display.py b/ammico/test/test_display.py
index 8b19073a..102f9731 100644
--- a/ammico/test/test_display.py
+++ b/ammico/test/test_display.py
@@ -42,7 +42,8 @@ def test_AnalysisExplorer(get_AE, get_options):
assert get_AE.update_picture(None) is None
-def test_right_output_analysis_summary(get_AE, get_options):
+def test_right_output_analysis_summary(get_AE, get_options, monkeypatch):
+ monkeypatch.setenv("OTHER_VAR", "True")
get_AE._right_output_analysis(
2,
get_options[3],
@@ -53,6 +54,9 @@ def test_right_output_analysis_summary(get_AE, get_options):
None,
50,
50,
+ 50,
+ 50,
+ "OTHER_VAR",
"CIE 1976",
"summary_and_questions",
"base",
@@ -60,7 +64,8 @@ def test_right_output_analysis_summary(get_AE, get_options):
)
-def test_right_output_analysis_emotions(get_AE, get_options):
+def test_right_output_analysis_emotions(get_AE, get_options, monkeypatch):
+ monkeypatch.setenv("OTHER_VAR", "True")
get_AE._right_output_analysis(
2,
get_options[3],
@@ -71,6 +76,9 @@ def test_right_output_analysis_emotions(get_AE, get_options):
None,
50,
50,
+ 50,
+ 50,
+ "OTHER_VAR",
"CIE 1976",
"summary_and_questions",
"base",
diff --git a/ammico/test/test_faces.py b/ammico/test/test_faces.py
index 7baa6665..53e2a115 100644
--- a/ammico/test/test_faces.py
+++ b/ammico/test/test_faces.py
@@ -1,14 +1,51 @@
import ammico.faces as fc
import json
import pytest
+import os
-def test_set_keys():
- ed = fc.EmotionDetector({})
+def test_init_EmotionDetector(monkeypatch):
+ # standard input
+ monkeypatch.setattr("builtins.input", lambda _: "yes")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
assert ed.subdict["face"] == "No"
assert ed.subdict["multiple_faces"] == "No"
assert ed.subdict["wears_mask"] == ["No"]
assert ed.subdict["emotion"] == [None]
+ assert ed.subdict["age"] == [None]
+ assert ed.emotion_threshold == 50
+ assert ed.age_threshold == 50
+ assert ed.gender_threshold == 50
+ assert ed.race_threshold == 50
+ assert ed.emotion_categories["angry"] == "Negative"
+ assert ed.emotion_categories["happy"] == "Positive"
+ assert ed.emotion_categories["surprise"] == "Neutral"
+ assert ed.accept_disclosure == "OTHER_VAR"
+ assert os.environ.get(ed.accept_disclosure) == "True"
+ assert ed.accepted
+ monkeypatch.delenv(ed.accept_disclosure, raising=False)
+ # different thresholds
+ ed = fc.EmotionDetector(
+ {},
+ emotion_threshold=80,
+ race_threshold=30,
+ gender_threshold=70,
+ age_threshold=90,
+ accept_disclosure="OTHER_VAR",
+ )
+ assert ed.emotion_threshold == 80
+ assert ed.race_threshold == 30
+ assert ed.gender_threshold == 70
+ assert ed.age_threshold == 90
+ monkeypatch.delenv(ed.accept_disclosure, raising=False)
+ # do not accept disclosure
+ monkeypatch.setattr("builtins.input", lambda _: "no")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ assert os.environ.get(ed.accept_disclosure) == "False"
+ assert not ed.accepted
+ monkeypatch.delenv(ed.accept_disclosure, raising=False)
+ # now test the exceptions: thresholds
+ monkeypatch.setattr("builtins.input", lambda _: "yes")
with pytest.raises(ValueError):
fc.EmotionDetector({}, emotion_threshold=150)
with pytest.raises(ValueError):
@@ -17,13 +54,56 @@ def test_set_keys():
fc.EmotionDetector({}, race_threshold=150)
with pytest.raises(ValueError):
fc.EmotionDetector({}, race_threshold=-50)
+ with pytest.raises(ValueError):
+ fc.EmotionDetector({}, gender_threshold=150)
+ with pytest.raises(ValueError):
+ fc.EmotionDetector({}, gender_threshold=-50)
+ with pytest.raises(ValueError):
+ fc.EmotionDetector({}, age_threshold=150)
+ with pytest.raises(ValueError):
+ fc.EmotionDetector({}, age_threshold=-50)
+ # test pre-set variables: disclosure
+ monkeypatch.delattr("builtins.input", raising=False)
+ monkeypatch.setenv("OTHER_VAR", "something")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ assert not ed.accepted
+ monkeypatch.setenv("OTHER_VAR", "False")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ assert not ed.accepted
+ monkeypatch.setenv("OTHER_VAR", "True")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ assert ed.accepted
+
+
+def test_define_actions(monkeypatch):
+ monkeypatch.setenv("OTHER_VAR", "True")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ actions = ed._define_actions({"wears_mask": True})
+ assert actions == ["age", "gender"]
+ actions = ed._define_actions({"wears_mask": False})
+ assert actions == ["age", "gender", "race", "emotion"]
+ monkeypatch.setenv("OTHER_VAR", "False")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ actions = ed._define_actions({"wears_mask": True})
+ assert actions == []
+ actions = ed._define_actions({"wears_mask": False})
+ assert actions == ["emotion"]
+
+
+def test_ensure_deepface_models(monkeypatch):
+ monkeypatch.setenv("OTHER_VAR", "True")
+ ed = fc.EmotionDetector({}, accept_disclosure="OTHER_VAR")
+ ed._ensure_deepface_models(["age", "gender", "race", "emotion"])
-def test_analyse_faces(get_path):
+def test_analyse_faces(get_path, monkeypatch):
mydict = {
"filename": get_path + "pexels-pixabay-415829.jpg",
}
- mydict.update(fc.EmotionDetector(mydict).analyse_image())
+ monkeypatch.setenv("OTHER_VAR", "True")
+ mydict.update(
+ fc.EmotionDetector(mydict, accept_disclosure="OTHER_VAR").analyse_image()
+ )
with open(get_path + "example_faces.json", "r") as file:
out_dict = json.load(file)
diff --git a/docs/source/notebooks/DemoNotebook_ammico.ipynb b/docs/source/notebooks/DemoNotebook_ammico.ipynb
index 292a93d5..f6a53daa 100644
--- a/docs/source/notebooks/DemoNotebook_ammico.ipynb
+++ b/docs/source/notebooks/DemoNotebook_ammico.ipynb
@@ -166,7 +166,7 @@
"source": [
"image_dict = ammico.find_files(\n",
" # path=\"/content/drive/MyDrive/misinformation-data/\",\n",
- " path=data_path.as_posix(),\n",
+ " path=str(data_path),\n",
" limit=15,\n",
")"
]
@@ -177,7 +177,30 @@
"source": [
"## Step 2: Inspect the input files using the graphical user interface\n",
"A Dash user interface is to select the most suitable options for the analysis, before running a complete analysis on the whole data set. The options for each detector module are explained below in the corresponding sections; for example, different models can be selected that will provide slightly different results. This way, the user can interactively explore which settings provide the most accurate results. In the interface, the nested `image_dict` is passed through the `AnalysisExplorer` class. The interface is run on a specific port which is passed using the `port` keyword; if a port is already in use, it will return an error message, in which case the user should select a different port number. \n",
- "The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run."
+ "The interface opens a dash app inside the Jupyter Notebook and allows selection of the input file in the top left dropdown menu, as well as selection of the detector type in the top right, with options for each detector type as explained below. The output of the detector is shown directly on the right next to the image. This way, the user can directly inspect how updating the options for each detector changes the computed results, and find the best settings for a production run.\n",
+ "\n",
+ "### Ethical disclosure statement\n",
+ "\n",
+    "If you want to run an analysis using the EmotionDetector detector type, you first have to respond to an ethical disclosure statement. This disclosure statement ensures that you only use the full capabilities of the EmotionDetector after you have been made aware of its shortcomings.\n",
+ "\n",
+    "For this, answer \"yes\" or \"no\" to the prompt below. This will set an environment variable with the name given in `accept_disclosure`. To re-run the disclosure prompt, unset the variable by uncommenting the line `os.environ.pop(accept_disclosure, None)`. To permanently set this environment variable, add it to your shell via your `.profile` or `.bashrc` file.\n",
+ "\n",
+    "If the disclosure statement is accepted, the EmotionDetector will perform age, gender and race/ethnicity classification depending on the provided thresholds. If the disclosure is rejected, only the presence of faces and the emotion (if no mask is worn) are detected."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# respond to the disclosure statement\n",
+ "# this will set an environment variable for you\n",
+ "# if you do not want to re-accept the disclosure every time, you can set this environment variable in your shell\n",
+    "# to re-run the disclosure prompt, uncomment the line below to unset the environment variable\n",
+ "accept_disclosure = \"DISCLOSURE_AMMICO\"\n",
+ "# os.environ.pop(accept_disclosure, None)\n",
+ "_ = ammico.ethical_disclosure(accept_disclosure=accept_disclosure)"
]
},
{
@@ -822,7 +845,7 @@
"metadata": {},
"source": [
"## Detection of faces and facial expression analysis\n",
- "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. Initially, it is detected if faces are present on the image using RetinaFace, followed by analysis if face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface.\n",
+    "Faces and facial expressions are detected and analyzed using the `EmotionDetector` class from the `faces` module. First, RetinaFace detects whether faces are present in the image, followed by an analysis of whether face masks are worn (Face-Mask-Detection). The detection of age, gender, race, and emotions is carried out with deepface, but only if the disclosure statement has been accepted (see above).\n",
"\n",
"
\n",
"\n",
@@ -832,10 +855,11 @@
"\n",
"From the seven facial expressions, an overall dominating emotion category is identified: negative, positive, or neutral emotion. These are defined with the facial expressions angry, disgust, fear and sad for the negative category, happy for the positive category, and surprise and neutral for the neutral category.\n",
"\n",
- "A similar threshold as for the emotion recognition is set for the race detection, `race_threshold`, with the default set to 50% so that a confidence for the race above 0.5 only will return a value in the analysis. \n",
+    "Similar thresholds as for the emotion recognition are set for the race/ethnicity, gender and age detection: `race_threshold`, `gender_threshold`, `age_threshold`, with the defaults set to 50%, so that only predictions with a confidence above 0.5 are returned in the analysis.\n",
+ "\n",
+ "You may also pass the name of the environment variable that determines if you accept or reject the ethical disclosure statement. By default, the variable is named `DISCLOSURE_AMMICO`.\n",
"\n",
- "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold` and \n",
- "`race_threshold` are optional:"
+    "Summarizing, the face detection is carried out using the following method call and keywords, where `emotion_threshold`, `race_threshold`, `gender_threshold`, and `age_threshold` are optional:"
]
},
{
@@ -845,7 +869,9 @@
"outputs": [],
"source": [
"for key in image_dict.keys():\n",
- " image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50).analyse_image()"
+ " image_dict[key] = ammico.EmotionDetector(image_dict[key], emotion_threshold=50, race_threshold=50,\n",
+ " gender_threshold=50, age_threshold=50, \n",
+ " accept_disclosure=\"DISCLOSURE_AMMICO\").analyse_image()"
]
},
{
diff --git a/pyproject.toml b/pyproject.toml
index 1ca61cfb..47fd782b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,7 +52,7 @@ dependencies = [
"google-cloud-vision",
"dash_bootstrap_components",
"colorgram.py",
- "webcolors",
+ "webcolors>1.13",
"colour-science",
"scikit-learn>1.3.0",
"tqdm"