-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Still missing the DOI, HAL link (and eventually link to IEEE, depending on the conference proceedings).
- Loading branch information
Showing
3 changed files
with
41 additions
and
0 deletions.
There are no files selected for viewing
Binary file added
BIN
+351 KB
content/publication/ictai2023/Chaput_Learning_identify_settle_dilemmas_paper.pdf
Binary file not shown.
Binary file added
BIN
+915 KB
content/publication/ictai2023/Chaput_Learning_identify_settle_dilemmas_slides.pdf
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,41 @@
+++
title = "Learning to identify and settle dilemmas through contextual user preferences"
date = 2023-11-06
authors = ["Rémy Chaput", "Laetitia Matignon", "Mathieu Guillermin"]
profile = false

publication_types = ["1"]
publication = "*The 35th IEEE International Conference on Tools with Artificial Intelligence (ICTAI)*"
publication_short = "ICTAI2023"

abstract = """
Artificial Intelligence systems have a significant impact on human lives.
Machine Ethics tries to align these systems with human values, by integrating
"ethical considerations". However, most approaches consider a single objective,
and thus cannot accommodate different, contextual human preferences.
Multi-Objective Reinforcement Learning algorithms account for various preferences,
but they often are not intelligible nor contextual (e.g., weighted preferences).
Our novel approach identifies dilemmas, presents them to users, and learns to
settle them, based on intelligible and contextualized preferences over actions.
We intend to maximize understandability and opportunities for user-system
co-construction by showing dilemmas, and triggering interactions, thus empowering
users. The block-based architecture enables leveraging simple mechanisms that
can be updated and improved. Validation on a Smart Grid use-case shows that our
algorithm finds actions for various trade-offs, and quickly learns to settle
dilemmas, reducing the cognitive load on users.
"""

summary = """
This paper presents a novel Multi-Objective Reinforcement Learning approach to
settle dilemmas, by putting humans in the loop.
"""

tags = ["Machine Ethics", "Multi-Objective Reinforcement Learning", "Moral Dilemmas",
"Human Preferences"]
featured = false

url_pdf = "Chaput_Learning_identify_settle_dilemmas_paper.pdf"

url_slides = "Chaput_Learning_identify_settle_dilemmas_slides.pdf"

+++