diff --git a/index.html b/index.html
index 4a241f9..70e39f5 100644
--- a/index.html
+++ b/index.html
@@ -105,6 +105,351 @@
Total number of rows: XX |
+
+
+
+
+
+
+
+
+ Title |
+ Year |
+ Venue |
+ Target Explanations |
+ Attacks |
+ Defenses |
+ Code |
+
+
+
+
+
+
+ Please Tell Me More: Privacy Impact of Explainability through the Lens of Membership Inference Attack |
+ 2024 |
+ SP |
+ Feature-based |
+ Membership Inference |
+ Differential Privacy, Privacy-Preserving Models, DP-SGD |
+ - |
+
+
+ On the Privacy Risks of Algorithmic Recourse |
+ 2023 |
+ AISTATS |
+ Counterfactual |
+ Membership Inference |
+ Differential Privacy |
+ - |
+
+
+ The Privacy Issue of Counterfactual Explanations: Explanation Linkage Attacks |
+ 2023 |
+ TIST |
+ Counterfactual |
+ Linkage |
+ Anonymisation |
+ - |
+
+
+ Feature-based Learning for Diverse and Privacy-Preserving Counterfactual Explanations |
+ 2023 |
+ KDD |
+ Counterfactual |
+ - |
+ Perturbation |
+ [Code] |
+
+
+ Private Graph Extraction via Feature Explanations |
+ 2023 |
+ PETS |
+ Feature-based |
+ Graph Extraction |
+ Perturbation |
+ [Code] |
+
+
+ Privacy-Preserving Algorithmic Recourse |
+ 2023 |
+ ICAIF |
+ Counterfactual |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Accurate, Explainable, and Private Models: Providing Recourse While Minimizing Training Data Leakage |
+ 2023 |
+ ICML-Workshop |
+ Counterfactual |
+ Membership Inference |
+ Differential Privacy |
+ - |
+
+
+ Probabilistic Dataset Reconstruction from Interpretable Models |
+ 2023 |
+ arXiv |
+ Interpretable Surrogates |
+ Data Reconstruction |
+ - |
+ [Code] |
+
+
+ DeepFixCX: Explainable privacy-preserving image compression for medical image analysis |
+ 2023 |
+ WIREs-DMKD |
+ Case-based |
+ Identity Recognition |
+ Anonymisation |
+ [Code] |
+
+
+ XorSHAP: Privacy-Preserving Explainable AI for Decision Tree Models |
+ 2023 |
+ Preprint |
+ Shapley |
+ - |
+ Multi-party Computation |
+ - |
+
+
+ - |
+ 2023 |
+ Github |
+ ALE plot |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Inferring Sensitive Attributes from Model Explanations |
+ 2022 |
+ CIKM |
+ Gradient-based, Perturbation-based |
+ Attribute Inference |
+ - |
+ [Code] |
+
+
+ Model explanations with differential privacy |
+ 2022 |
+ FAccT |
+ Feature-based |
+ - |
+ Differential Privacy |
+ - |
+
+
+ DualCF: Efficient Model Extraction Attack from Counterfactual Explanations |
+ 2022 |
+ FAccT |
+ Counterfactual |
+ Model Extraction |
+ - |
+ - |
+
+
+ Feature Inference Attack on Shapley Values |
+ 2022 |
+ CCS |
+ Shapley |
+ Attribute/Feature Inference |
+ Low-dimensional |
+ - |
+
+
+ Evaluating the privacy exposure of interpretable global explainers |
+ 2022 |
+ CogMI |
+ Interpretable Surrogates |
+ Membership Inference |
+ - |
+ - |
+
+
+ Privacy-Preserving Case-Based Explanations: Enabling Visual Interpretability by Protecting Privacy |
+ 2022 |
+ IEEE Access |
+ Example-based |
+ - |
+ Anonymisation |
+ - |
+
+
+ On the amplification of security and privacy risks by post-hoc explanations in machine learning models |
+ 2022 |
+ arXiv |
+ Feature-based |
+ Membership Inference |
+ - |
+ - |
+
+
+ Differentially Private Counterfactuals via Functional Mechanism |
+ 2022 |
+ arXiv |
+ Counterfactual |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Differentially Private Shapley Values for Data Evaluation |
+ 2022 |
+ arXiv |
+ Shapley |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Exploiting Explanations for Model Inversion Attacks |
+ 2021 |
+ ICCV |
+ Gradient-based, Interpretable Surrogates |
+ Model Inversion |
+ - |
+ - |
+
+
+ On the Privacy Risks of Model Explanations |
+ 2021 |
+ AIES |
+ Feature-based, Shapley, Counterfactual |
+ Membership Inference |
+ - |
+ - |
+
+
+ Adversarial XAI Methods in Cybersecurity |
+ 2021 |
+ TIFS |
+ Counterfactual |
+ Membership Inference |
+ - |
+ - |
+
+
+ MEGEX: Data-Free Model Extraction Attack against Gradient-Based Explainable AI |
+ 2021 |
+ arXiv |
+ Gradient-based |
+ Model Extraction |
+ - |
+ [Code] |
+
+
+ Robust Counterfactual Explanations for Privacy-Preserving SVM |
+ 2021 |
+ ICML-Workshop |
+ Counterfactual |
+ - |
+ Private SVM |
+ [Code] |
+
+
+ When Differential Privacy Meets Interpretability: A Case Study |
+ 2021 |
+ RCV-CVPR |
+ Interpretable Models |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Differentially Private Quantiles |
+ 2021 |
+ ICML |
+ Quantiles |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ FOX: Fooling with Explanations: Privacy Protection with Adversarial Reactions in Social Media |
+ 2021 |
+ PST |
+ - |
+ Attribute Inference |
+ Privacy-Protecting Explanation |
+ - |
+
+
+ Privacy-preserving generative adversarial network for case-based explainability in medical image analysis |
+ 2021 |
+ IEEE Access |
+ Example-based |
+ - |
+ Generative Anonymisation |
+ - |
+
+
+ Interpretable and Differentially Private Predictions |
+ 2020 |
+ AAAI |
+ Locally linear maps |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Model extraction from counterfactual explanations |
+ 2020 |
+ arXiv |
+ Counterfactual |
+ Model Extraction |
+ - |
+ [Code] |
+
+
+ Model Reconstruction from Model Explanations |
+ 2019 |
+ FAT* |
+ Gradient-based |
+ Model Reconstruction, Model Extraction |
+ - |
+ - |
+
+
+ Interpret Federated Learning with Shapley Values |
+ 2019 |
+ - |
+ Shapley |
+ - |
+ Federated |
+ [Code] |
+
+
+ Collaborative Explanation of Deep Models with Limited Interaction for Trade Secret and Privacy Preservation |
+ 2019 |
+ WWW |
+ Feature-based |
+ - |
+ Collaborative rule-based model |
+ - |
+
+
+ Model inversion attacks that exploit confidence information and basic countermeasures |
+ 2015 |
+ CCS |
+ Confidence scores |
+ Reconstruction, Model Inversion |
+ - |
+ - |
+
+
+
+
+
+ |
+
+
+
+
+
III. Citations
diff --git a/script/input.txt b/script/input.txt
index 00a7d70..9dff662 100644
--- a/script/input.txt
+++ b/script/input.txt
@@ -1,141 +1,35 @@
-Model-Agnostic
-| [Towards Adversarial Evaluations for Inexact Machine Unlearning](https://arxiv.org/abs/2201.06640) | 2023 | Goel et al. | _arXiv_ | EU-k, CF-k | [[Code]](https://github.com/shash42/Evaluating-Inexact-Unlearning) |
-| [KGA: A General Machine Unlearning Framework Based on Knowledge Gap Alignment](https://arxiv.org/abs/2305.06535) | 2023 | Wang et al. | _arXiv_ | KGA | [[Code]](https://github.com/Lingzhi-WANG/KGAUnlearn) | |
-| [On the Trade-Off between Actionable Explanations and the Right to be Forgotten](https://openreview.net/pdf?id=HWt4BBZjVW) | 2023 | Pawelczyk et al. | _arXiv_ | - | - | |
-| [Towards Unbounded Machine Unlearning](https://arxiv.org/pdf/2302.09880) | 2023 | Kurmanji et al. | _arXiv_ | SCRUB | [[Code]](https://github.com/Meghdad92/SCRUB) | approximate unlearning |
-| [Netflix and Forget: Efficient and Exact Machine Unlearning from Bi-linear Recommendations](https://arxiv.org/abs/2302.06676) | 2023 | Xu et al. | _arXiv_ | Unlearn-ALS | - | Exact Unlearning |
-| [To Be Forgotten or To Be Fair: Unveiling Fairness Implications of Machine Unlearning Methods](https://arxiv.org/abs/2302.03350) | 2023 | Zhang et al. | _arXiv_ | - | [[Code]](https://github.com/cleverhans-lab/machine-unlearning) | |
-| [Sequential Informed Federated Unlearning: Efficient and Provable Client Unlearning in Federated Optimization](https://arxiv.org/abs/2211.11656) | 2022 | Fraboni et al. | _arXiv_ | SIFU | - | |
-| [Certified Data Removal in Sum-Product Networks](https://arxiv.org/abs/2210.01451) | 2022 | Becker and Liebig | _ICKG_ | UNLEARNSPN | [[Code]](https://github.com/ROYALBEFF/UnlearnSPN) | Certified Removal Mechanisms |
-| [Learning with Recoverable Forgetting](https://arxiv.org/abs/2207.08224) | 2022 | Ye et al. | _ECCV_ | LIRF | - | |
-| [Continual Learning and Private Unlearning](https://arxiv.org/abs/2203.12817) | 2022 | Liu et al. | _CoLLAs_ | CLPU | [[Code]](https://github.com/Cranial-XIX/Continual-Learning-Private-Unlearning) | |
-| [Verifiable and Provably Secure Machine Unlearning](https://arxiv.org/abs/2210.09126) | 2022 | Eisenhofer et al. | _arXiv_ | - | [[Code]](https://github.com/cleverhans-lab/verifiable-unlearning) | Certified Removal Mechanisms |
-| [VeriFi: Towards Verifiable Federated Unlearning](https://arxiv.org/abs/2205.12709) | 2022 | Gao et al. | _arXiv_ | VERIFI | - | Certified Removal Mechanisms |
-| [FedRecover: Recovering from Poisoning Attacks in Federated Learning using Historical Information](https://arxiv.org/abs/2210.10936) | 2022 | Cao et al. | _S&P_ | FedRecover | - | recovery method |
-| [Fast Yet Effective Machine Unlearning](https://arxiv.org/abs/2111.08947) | 2022 | Tarun et al. | _arXiv_ | UNSIR | - | |
-| [Membership Inference via Backdooring](https://arxiv.org/abs/2206.04823) | 2022 | Hu et al. | _IJCAI_ | MIB | [[Code]](https://github.com/HongshengHu/membership-inference-via-backdooring) | Membership Inferencing |
-| [Forget Unlearning: Towards True Data-Deletion in Machine Learning](https://arxiv.org/abs/2210.08911) | 2022 | Chourasia et al. | _ICLR_ | - | - | noisy gradient descent |
-| [Zero-Shot Machine Unlearning](https://arxiv.org/abs/2201.05629) | 2022 | Chundawat et al. | _arXiv_ | - | - | |
-| [Efficient Attribute Unlearning: Towards Selective Removal of Input Attributes from Feature Representations](https://arxiv.org/abs/2202.13295) | 2022 | Guo et al. | _arXiv_ | attribute unlearning | - | |
-| [Few-Shot Unlearning](https://download.huan-zhang.com/events/srml2022/accepted/yoon22fewshot.pdf) | 2022 | Yoon et al. | _ICLR_ | - | - | |
-| [Federated Unlearning: How to Efficiently Erase a Client in FL?](https://arxiv.org/abs/2207.05521) | 2022 | Halimi et al. | _UpML Workshop_ | - | - | federated learning |
-| [Machine Unlearning Method Based On Projection Residual](https://arxiv.org/abs/2209.15276) | 2022 | Cao et al. | _DSAA_ | - | - | Projection Residual Method |
-| [Hard to Forget: Poisoning Attacks on Certified Machine Unlearning](https://ojs.aaai.org/index.php/AAAI/article/view/20736) | 2022 | Marchant et al. | _AAAI_ | - | [[Code]](https://github.com/ngmarchant/attack-unlearning) | Certified Removal Mechanisms |
-| [Athena: Probabilistic Verification of Machine Unlearning](https://web.archive.org/web/20220721061150id_/https://petsymposium.org/popets/2022/popets-2022-0072.pdf) | 2022 | Sommer et al. | _PoPETs_ | ATHENA | - | |
-| [FP2-MIA: A Membership Inference Attack Free of Posterior Probability in Machine Unlearning](https://link.springer.com/chapter/10.1007/978-3-031-20917-8_12) | 2022 | Lu et al. | _ProvSec_ | FP2-MIA | - | inference attack |
-| [Deletion Inference, Reconstruction, and Compliance in Machine (Un)Learning](https://arxiv.org/abs/2202.03460) | 2022 | Gao et al. | _PETS_ | - | - | |
-| [Prompt Certified Machine Unlearning with Randomized Gradient Smoothing and Quantization](https://openreview.net/pdf?id=ue4gP8ZKiWb) | 2022 | Zhang et al. | _NeurIPS_ | PCMU | - | Certified Removal Mechanisms |
-| [The Right to be Forgotten in Federated Learning: An Efficient Realization with Rapid Retraining](https://arxiv.org/abs/2203.07320) | 2022 | Liu et al. | _INFOCOM_ | - | [[Code]](https://github.com/yiliucs/federated-unlearning) | |
-| [Backdoor Defense with Machine Unlearning](https://arxiv.org/abs/2201.09538) | 2022 | Liu et al. | _INFOCOM_ | BAERASER | - | Backdoor defense |
-| [Markov Chain Monte Carlo-Based Machine Unlearning: Unlearning What Needs to be Forgotten](https://dl.acm.org/doi/abs/10.1145/3488932.3517406) | 2022 | Nguyen et al. | _ASIA CCS_ | MCU | - | MCMC Unlearning |
-| [Federated Unlearning for On-Device Recommendation](https://arxiv.org/abs/2210.10958) | 2022 | Yuan et al. | _arXiv_ | - | - | |
-| [Can Bad Teaching Induce Forgetting? Unlearning in Deep Networks using an Incompetent Teacher](https://arxiv.org/abs/2205.08096) | 2022 | Chundawat et al. | _arXiv_ | - | - | Knowledge Adaptation |
-| [ Efficient Two-Stage Model Retraining for Machine Unlearning](https://openaccess.thecvf.com/content/CVPR2022W/HCIS/html/Kim_Efficient_Two-Stage_Model_Retraining_for_Machine_Unlearning_CVPRW_2022_paper.html) | 2022 | Kim and Woo | _CVPR Workshop_ | - | - | |
-| [Learn to Forget: Machine Unlearning Via Neuron Masking](https://ieeexplore.ieee.org/abstract/document/9844865?casa_token=_eowH3BTt1sAAAAA:X0uCpLxOwcFRNJHoo3AtA0ay4t075_cSptgTMznsjusnvgySq-rJe8GC285YhWG4Q0fUmP9Sodw0) | 2021 | Ma et al. | _IEEE_ | Forsaken | - | Mask Gradients |
-| [Adaptive Machine Unlearning](https://proceedings.neurips.cc/paper/2021/hash/87f7ee4fdb57bdfd52179947211b7ebb-Abstract.html) | 2021 | Gupta et al. | _NeurIPS_ | - | [[Code]](https://github.com/ChrisWaites/adaptive-machine-unlearning) | Differential Privacy |
-| [Descent-to-Delete: Gradient-Based Methods for Machine Unlearning](https://proceedings.mlr.press/v132/neel21a.html) | 2021 | Neel et al. | _ALT_ | - | - | Certified Removal Mechanisms |
-| [Remember What You Want to Forget: Algorithms for Machine Unlearning](https://arxiv.org/abs/2103.03279) | 2021 | Sekhari et al. | _NeurIPS_ | - | - | |
-| [FedEraser: Enabling Efficient Client-Level Data Removal from Federated Learning Models](https://ieeexplore.ieee.org/abstract/document/9521274) | 2021 | Liu et al. | _IWQoS_ | FedEraser | - | |
-| [Federated Unlearning](https://arxiv.org/abs/2012.13891) | 2021 | Liu et al. | _IWQoS_ | FedEraser | [[Code]](https://www.dropbox.com/s/1lhx962axovbbom/FedEraser-Code.zip?dl=0) | |
-| [Machine Unlearning via Algorithmic Stability](https://proceedings.mlr.press/v134/ullah21a.html) | 2021 | Ullah et al. | _COLT_ | TV | - | Certified Removal Mechanisms |
-| [EMA: Auditing Data Removal from Trained Models](https://link.springer.com/chapter/10.1007/978-3-030-87240-3_76) | 2021 | Huang et al. | _MICCAI_ | EMA | [[Code]](https://github.com/Hazelsuko07/EMA) | Certified Removal Mechanisms |
-| [Knowledge-Adaptation Priors](https://proceedings.neurips.cc/paper/2021/hash/a4380923dd651c195b1631af7c829187-Abstract.html) | 2021 | Khan and Swaroop | _NeurIPS_ | K-prior | [[Code]](https://github.com/team-approx-bayes/kpriors) | Knowledge Adaptation |
-| [PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models](https://dl.acm.org/doi/abs/10.1145/3318464.3380571) | 2020 | Wu et al. | _NeurIPS_ | PrIU | - | Knowledge Adaptation |
-| [Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks](https://arxiv.org/abs/1911.04933) | 2020 | Golatkar et al. | _CVPR_ | - | - | Certified Removal Mechanisms |
-| [Learn to Forget: User-Level Memorization Elimination in Federated Learning](https://www.researchgate.net/profile/Ximeng-Liu-5/publication/340134612_Learn_to_Forget_User-Level_Memorization_Elimination_in_Federated_Learning/links/5e849e64a6fdcca789e5f955/Learn-to-Forget-User-Level-Memorization-Elimination-in-Federated-Learning.pdf) | 2020 | Liu et al. | _arXiv_ | Forsaken | - | |
-| [Certified Data Removal from Machine Learning Models](https://proceedings.mlr.press/v119/guo20c.html) | 2020 | Guo et al. | _ICML_ | - | - | Certified Removal Mechanisms |
-| [Class Clown: Data Redaction in Machine Unlearning at Enterprise Scale](https://arxiv.org/abs/2012.04699) | 2020 | Felps et al. | _arXiv_ | - | - | Decremental Learning |
-| [A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine](https://link.springer.com/article/10.1007/s10586-018-1772-4) | 2019 | Chen et al. | _Cluster Computing_ | - | - | Decremental Learning |
-| [Making AI Forget You: Data Deletion in Machine Learning](https://papers.nips.cc/paper/2019/hash/cb79f8fa58b91d3af6c9c991f63962d3-Abstract.html) | 2019 | Ginart et al. | _NeurIPS_ | - | - | Decremental Learning |
-| [Lifelong Anomaly Detection Through Unlearning](https://dl.acm.org/doi/abs/10.1145/3319535.3363226) | 2019 | Du et al. | _CCS_ | - | - | |
-| [Learning Not to Learn: Training Deep Neural Networks With Biased Data](https://openaccess.thecvf.com/content_CVPR_2019/html/Kim_Learning_Not_to_Learn_Training_Deep_Neural_Networks_With_Biased_CVPR_2019_paper.html) | 2019 | Kim et al. | _CVPR_ | - | - | |
-| [Efficient Repair of Polluted Machine Learning Systems via Causal Unlearning](https://dl.acm.org/citation.cfm?id=3196517) | 2018 | Cao et al. | _ASIACCS_ | KARMA | [[Code]](https://github.com/CausalUnlearning/KARMA) | |
-| [Understanding Black-box Predictions via Influence Functions](https://proceedings.mlr.press/v70/koh17a.html) | 2017 | Koh et al. | _ICML_ | - | [[Code]](https://github.com/kohpangwei/influence-release) | Certified Removal Mechanisms |
-| [Towards Making Systems Forget with Machine Unlearning](https://ieeexplore.ieee.org/abstract/document/7163042) | 2015 | Cao and Yang | _S&P_ | - | |
-| [Towards Making Systems Forget with Machine Unlearning](https://dl.acm.org/doi/10.1109/SP.2015.35) | 2015 | Cao et al. | _S&P_ | - | - | Statistical Query Learning |
-| [Incremental and decremental training for linear classification](https://dl.acm.org/doi/10.1145/2623330.2623661) | 2014 | Tsai et al. | _KDD_ | - | [[Code]](https://www.csie.ntu.edu.tw/~cjlin/papers/ws/) | Decremental Learning |
-| [Multiple Incremental Decremental Learning of Support Vector Machines](https://dl.acm.org/doi/10.5555/2984093.2984196) | 2009 | Karasuyama et al. | _NIPS_ | - | - | Decremental Learning |
-| [Incremental and Decremental Learning for Linear Support Vector Machines](https://dl.acm.org/doi/10.5555/1776814.1776838) | 2007 | Romero et al. | _ICANN_ | - | - | Decremental Learning |
-| [Decremental Learning Algorithms for Nonlinear Langrangian and Least Squares Support Vector Machines](https://www.semanticscholar.org/paper/Decremental-Learning-Algorithms-for-Nonlinear-and-Duan-Li/312c677f0882d0dfd60bfd77346588f52aefd10f) | 2007 | Duan et al. | _OSB_ | - | - | Decremental Learning |
-| [Multicategory Incremental Proximal Support Vector Classifiers](https://link.springer.com/chapter/10.1007/978-3-540-45224-9_54) | 2003 | Tveit et al. | _KES_ | - | - | Decremental Learning |
-| [Incremental and Decremental Proximal Support Vector Classification using Decay Coefficients](https://link.springer.com/chapter/10.1007/978-3-540-45228-7_42) | 2003 | Tveit et al. | _DaWak_ | - | - | Decremental Learning |
-| [Incremental and Decremental Support Vector Machine Learning](https://dl.acm.org/doi/10.5555/3008751.3008808) | 2000 | Cauwenberg et al. | _NeurIPS_ | - | - | Decremental Learning |
-Model-Intrinsic
-| [Heterogeneous Federated Knowledge Graph Embedding Learning and Unlearning](https://arxiv.org/abs/2302.02069) | 2023 | Zhu et al. | _WWW_ | FedLU | [[Code]](https://github.com/nju-websoft/FedLU/) | GNN-based Models |
-| [One-Shot Machine Unlearning with Mnemonic Code](https://arxiv.org/abs/2306.05670) | 2023 | Yamashita | _arXiv_ | One-Shot MU | - | |
-| [Inductive Graph Unlearning](https://arxiv.org/pdf/2304.03093.pdf) | 2023 | Wang et al. | _USENIX_ | GUIDE | [[Code]](https://github.com/Happy2Git/GUIDE) | GNN-based Models |
-| [ERM-KTP: Knowledge-level Machine Unlearning via Knowledge Transfer](https://openaccess.thecvf.com/content/CVPR2023/papers/Lin_ERM-KTP_Knowledge-Level_Machine_Unlearning_via_Knowledge_Transfer_CVPR_2023_paper.pdf) | 2023 | Lin et al. | _CVPR_ | ERM-KTP | [[Code]](https://github.com/RUIYUN-ML/ERM-KTP) | |
-| [GNNDelete: A General Strategy for Unlearning in Graph Neural Networks](https://arxiv.org/abs/2302.13406) | 2023 | Cheng et al. | _ICLR_ | GNNDELETE | [[Code]](https://github.com/mims-harvard/GNNDelete) | |
-| [Unfolded Self-Reconstruction LSH: Towards Machine Unlearning in Approximate Nearest Neighbour Search](https://arxiv.org/pdf/2304.02350.pdf) | 2023 | Tan et al. | _arXiv_ | USR-LSH | [[Code]](https://anonymous.4open.science/r/ann-benchmarks-3786/README.md) | |
-| [Efficiently Forgetting What You Have Learned in Graph Representation Learning via Projection](https://arxiv.org/abs/2302.08990) | 2023 | Cong and Mahdavi | _AISTATS_ | PROJECTOR | [[Code]](https://github.com/CongWeilin/Projector) | GNN-based Models |
-| [Unrolling SGD: Understanding Factors Influencing Machine Unlearning](https://ieeexplore.ieee.org/abstract/document/9797378) | 2022 | Thudi et al. | _EuroS&P_ | - | [[Code]](https://github.com/cleverhans-lab/unrolling-sgd) | SGD |
-| [Graph Unlearning](https://arxiv.org/abs/2103.14991) | 2022 | Chen et al. | _CCS_ | GraphEraser | [[Code]](https://github.com/MinChen00/Graph-Unlearning) | Graph Neural Networks |
-| [Certified Graph Unlearning](https://arxiv.org/abs/2206.09140) | 2022 | Chien et al. | _GLFrontiers Workshop_ | - | [[Code]](https://github.com/thupchnsky/sgc_unlearn) | Graph Neural Networks |
-| [Skin Deep Unlearning: Artefact and Instrument Debiasing in the Context of Melanoma Classification](https://arxiv.org/abs/2109.09818) | 2022 | Bevan and Atapour-Abarghouei | _ICML_ | - | [[Code]](https://github.com/pbevan1/Skin-Deep-Unlearning) | CNN Models |
-| [Near-Optimal Task Selection for Meta-Learning with Mutual Information and Online Variational Bayesian Unlearning](https://proceedings.mlr.press/v151/chen22h.html) | 2022 | Chen et al. | _AISTATS_ | - | - | Bayensian Models |
-| [Unlearning Protected User Attributes in Recommendations with Adversarial Training](https://arxiv.org/abs/2206.04500) | 2022 | Ganhor et al. | _SIGIR_ | ADV-MULTVAE | [[Code]](https://github.com/CPJKU/adv-multvae) | Autoencoder-based Model |
-| [Recommendation Unlearning](https://dl.acm.org/doi/abs/10.1145/3485447.3511997) | 2022 | Chen et al. | _TheWebConf_ | RecEraser | [[Code]](https://github.com/chenchongthu/Recommendation-Unlearning) | Attention-based Model |
-| [Knowledge Neurons in Pretrained Transformers](https://arxiv.org/abs/2104.08696) | 2022 | Dai et al. | _ACL_ | - | [[Code]](https://github.com/Hunter-DDM/knowledge-neurons) | Transformers
-| [Memory-Based Model Editing at Scale](https://proceedings.mlr.press/v162/mitchell22a/mitchell22a.pdf) | 2022 | Mitchell et al. | _MLR_ | SERAC | [[Code]](https://sites.google.com/view/serac-editing) | DNN-based Models |
-| [Forgetting Fast in Recommender Systems](https://arxiv.org/abs/2208.06875) | 2022 | Liu et al. | _arXiv_ | AltEraser | - | recommendation system |
-| [Unlearning Nonlinear Graph Classifiers in the Limited Training Data Regime](https://arxiv.org/abs/2211.03216) | 2022 | Pan et al. | _arXiv_ | - | - | GNN-based Models |
-| [Deep Regression Unlearning](https://arxiv.org/abs/2210.08196) | 2022 | Tarun et al. | _arXiv_ | Blindspot | - | Regression Model |
-| [Quark: Controllable Text Generation with Reinforced Unlearning](https://arxiv.org/abs/2205.13636) | 2022 | Lu et al. | _arXiv_ | Quark | [[Code]](https://github.com/GXimingLu/Quark) | language models |
-| [Forget-SVGD: Particle-Based Bayesian Federated Unlearning](https://ieeexplore.ieee.org/abstract/document/9820602) | 2022 | Gong et al. | _DSL Workshop_ | Forget-SVGD | - | Bayensian Models |
-| [Machine Unlearning of Federated Clusters](https://arxiv.org/abs/2210.16424) | 2022 | Pan et al. | _arXiv_ | SCMA | - | Federated clustering |
-| [Machine Unlearning for Image Retrieval: A Generative Scrubbing Approach](https://dl.acm.org/doi/abs/10.1145/3503161.3548378) | 2022 | Zhang et al. | _MM_ | - | - | DNN-based Models |
-| [Machine Unlearning: Linear Filtration for Logit-based Classifiers](https://link.springer.com/article/10.1007/s10994-022-06178-9) | 2022 | Baumhauer et al. | _Machine Learning_ | normalizing filtration | - | Softmax classifiers |
-| [Deep Unlearning via Randomized Conditionally Independent Hessians](https://openaccess.thecvf.com/content/CVPR2022/html/Mehta_Deep_Unlearning_via_Randomized_Conditionally_Independent_Hessians_CVPR_2022_paper.html) | 2022 | Mehta et al. | _CVPR_ | L-CODEC | [[Code]](https://github.com/vsingh-group/LCODEC-deep-unlearning) | DNN-based Models |
-| [Challenges and Pitfalls of Bayesian Unlearning](https://arxiv.org/abs/2207.03227) | 2022 | Rawat et al. | _UPML Workshop_ | - | - | Bayesian Models |
-| [Federated Unlearning via Class-Discriminative Pruning](https://arxiv.org/abs/2110.11794) | 2022 | Wang et al. | _WWW_ | - | - | CNN-Based |
-| [Active forgetting via influence estimation for neural networks](https://onlinelibrary.wiley.com/doi/abs/10.1002/int.22981) | 2022 | Meng et al. | _Int. J. Intel. Systems_ | SCRUBBER | - | Neural Network |
-| [Variational Bayesian unlearning](https://dl.acm.org/doi/abs/10.5555/3495724.3497068) | 2022 | Nguyen et al. | _NeurIPS_ | VI | - | Bayesian Models |
-| [Revisiting Machine Learning Training Process for Enhanced Data Privacy](https://dl.acm.org/doi/abs/10.1145/3474124.3474208) | 2021 | Goyal et al. | _IC3_ | - | - | DNN-based Models |
-| [Knowledge Removal in Sampling-based Bayesian Inference](https://openreview.net/forum?id=dTqOcTUOQO) | 2021 | Fu et al. | _ICLR_ | - | [[Code]](https://github.com/fshp971/mcmc-unlearning) | Bayesian Models |
-| [Mixed-Privacy Forgetting in Deep Networks](https://openaccess.thecvf.com/content/CVPR2021/html/Golatkar_Mixed-Privacy_Forgetting_in_Deep_Networks_CVPR_2021_paper.html) | 2021 | Golatkar et al. | _CVPR_ | - | - | DNN-based Models |
-| [HedgeCut: Maintaining Randomised Trees for Low-Latency Machine Unlearning](https://dl.acm.org/doi/abs/10.1145/3448016.3457239) | 2021 | Schelter et al. | _SIGMOD_ | HedgeCut | [[Code]](https://github.com/schelterlabs/hedgecut) | Tree-based Models |
-| [A Unified PAC-Bayesian Framework for Machine Unlearning via Information Risk Minimization](https://ieeexplore.ieee.org/abstract/document/9596170) | 2021 | Jose et al. | _MLSP_ | PAC-Bayesian| - | Bayesian Models |
-| [DeepObliviate: A Powerful Charm for Erasing Data Residual Memory in Deep Neural Networks](https://arxiv.org/abs/2105.06209) | 2021 | He et al. | _arXiv_ | DEEPOBLIVIATE | - | DNN-based Models |
-| [Approximate Data Deletion from Machine Learning Models: Algorithms and Evaluations](https://arxiv.org/abs/2002.10077) | 2021 | Izzo et al. | _AISTATS_ | PRU | [[Code]](https://github.com/zleizzo/datadeletion) | Linear/Logistics models |
-| [Bayesian Inference Forgetting](https://arxiv.org/abs/2101.06417) | 2021 | Fu et al. | _arXiv_ | BIF | [[Code]](https://github.com/fshp971/BIF) | Bayesian Models |
-| [Approximate Data Deletion from Machine Learning Models](https://proceedings.mlr.press/v130/izzo21a.html) | 2021 | Izzo et al. | _AISTATS_ | PRU | [[Code]](https://github.com/zleizzo/datadeletion) | Linear Models |
-| [Online Forgetting Process for Linear Regression Models](https://proceedings.mlr.press/v130/li21a.html) | 2021 | Li et al. | _AISTATS_ | FIFD-OLS | - | Linear Models |
-| [RevFRF: Enabling Cross-domain Random Forest Training with Revocable Federated Learning](https://ieeexplore.ieee.org/abstract/document/9514457) | 2021 | Liu et al. | _IEEE_ | RevFRF | - | Random Forrests |
-| [Coded Machine Unlearning](https://ieeexplore.ieee.org/abstract/document/9458237) | 2021 | Aldaghri et al. | _IEEE Access_ | - | - | Deep Learning Models |
-| [Machine Unlearning for Random Forests](http://proceedings.mlr.press/v139/brophy21a.html) | 2021 | Brophy and Lowd | _ICML_ | DaRE RF | - | Random Forrest |
-| [Bayesian Variational Federated Learning and Unlearning in Decentralized Networks](https://ieeexplore.ieee.org/abstract/document/9593225) | 2021 | Gong et al. | _SPAWC_ | - | - | Bayesian Models |
-| [Forgetting Outside the Box: Scrubbing Deep Networks of Information Accessible from Input-Output Observations](https://link.springer.com/chapter/10.1007/978-3-030-58526-6_23) | 2020 | Golatkar et al. | _ECCV_ | - | - | DNN-based Models |
-| [Influence Functions in Deep Learning Are Fragile](https://www.semanticscholar.org/paper/Influence-Functions-in-Deep-Learning-Are-Fragile-Basu-Pope/098076a2c90e42c81b843bf339446427c2ff02ed) | 2020 | Basu et al. | _arXiv_ | - | - | DNN-based Models |
-| [Deep Autoencoding Topic Model With Scalable Hybrid Bayesian Inference](https://ieeexplore.ieee.org/document/9121755) | 2020 | Zhang et al. | _IEEE_ | DATM | - | Bayesian Models |
-| [Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks](https://arxiv.org/abs/1911.04933) | 2020 | Golatkar et al. | _CVPR_ | - | - | DNN-based Models |
-| [Uncertainty in Neural Networks: Approximately Bayesian Ensembling](https://proceedings.mlr.press/v108/pearce20a.html) | 2020 | Pearce et al. | _AISTATS_ | - | [[Code]](https://teapearce.github.io/portfolio/github_io_1_ens/) | Bayesian Models |
-| [Certified Data Removal from Machine Learning Models](https://proceedings.mlr.press/v119/guo20c.html) | 2020 | Guo et al. | _ICML_ | - | - | DNN-based Models |
-| [DeltaGrad: Rapid retraining of machine learning models](https://proceedings.mlr.press/v119/wu20b.html) | 2020 | Wu et al. | _ICML_ | DeltaGrad | [[Code]](https://github.com/thuwuyinjun/DeltaGrad) | DNN-based Models |
-| [Making AI Forget You: Data Deletion in Machine Learning](https://papers.nips.cc/paper/2019/hash/cb79f8fa58b91d3af6c9c991f63962d3-Abstract.html) | 2019 | Ginart et al. | _NeurIPS_ | - | - | Linear Models |
-| [“Amnesia” – Towards Machine Learning Models That Can Forget User Data Very Fast](http://cidrdb.org/cidr2020/papers/p32-schelter-cidr20.pdf) | 2019 | Schelter | _AIDB Workshop_ | - | [[Code]](https://github.com/schelterlabs/projects-amnesia) | Collaborative Filtering |
-| [A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine](https://link.springer.com/article/10.1007/s10586-018-1772-4) | 2019 | Chen et al. | _Cluster Computing_ | - | - | SVM |
-| [Neural Text Degeneration With Unlikelihood Training](https://arxiv.org/abs/1908.04319) | 2019 | Welleck et al. | _arXiv_ | unlikelihood training | [[Code]](https://github.com/facebookresearch/unlikelihood_training) | DNN-based |
-| [Bayesian Neural Networks with Weight Sharing Using Dirichlet Processes](https://ieeexplore.ieee.org/document/8566011) | 2018 | Roth et al. | _IEEE_ | DP | [[Code]](https://github.com/wroth8/dp-bnn) | Bayesian Models |
-Data-Driven
-| [Hidden Poison: Machine Unlearning Enables Camouflaged Poisoning Attacks](https://arxiv.org/abs/2212.10717) | 2022 | Di et al. | _NeurIPS-TSRML_ | - | [[Code]](https://github.com/Jimmy-di/camouflage-poisoning) | Data Poisoning |
-| [Forget Unlearning: Towards True Data Deletion in Machine Learning](https://arxiv.org/pdf/2210.08911.pdf) | 2022 | Chourasia et al. | _ICLR_ | - | - | Data Influence |
-| [ARCANE: An Efficient Architecture for Exact Machine Unlearning](https://www.ijcai.org/proceedings/2022/0556.pdf) | 2022 | Yan et al. | _IJCAI_ | ARCANE | - | Data Partition |
-| [PUMA: Performance Unchanged Model Augmentation for Training Data Removal](https://ojs.aaai.org/index.php/AAAI/article/view/20846) | 2022 | Wu et al. | _AAAI_ | PUMA | - | Data Influence |
-| [Certifiable Unlearning Pipelines for Logistic Regression: An Experimental Study](https://www.mdpi.com/2504-4990/4/3/28) | 2022 | Mahadevan and Mathioudakis | _MAKE_ | - | [[Code]](https://version.helsinki.fi/mahadeva/unlearning-experiments) | Data Influence |
-| [Zero-Shot Machine Unlearning](https://arxiv.org/abs/2201.05629) | 2022 | Chundawat et al. | _arXiv_ | - | - | Data Influence |
-| [GRAPHEDITOR: An Efficient Graph Representation Learning and Unlearning Approach](https://congweilin.github.io/CongWeilin.io/files/GraphEditor.pdf) | 2022 | Cong and Mahdavi | - | GRAPHEDITOR | [[Code]](https://anonymous.4open.science/r/GraphEditor-NeurIPS22-856E/README.md) | Data Influence |
-| [Fast Model Update for IoT Traffic Anomaly Detection with Machine Unlearning](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9927728) | 2022 | Fan et al. | _IEEE IoT-J_ | ViFLa | - | Data Partition |
-| [Learning to Refit for Convex Learning Problems](https://arxiv.org/abs/2111.12545) | 2021 | Zeng et al. | _arXiv_ | OPTLEARN | - | Data Influence |
-| [Fast Yet Effective Machine Unlearning](https://arxiv.org/abs/2111.08947) | 2021 | Ayush et al. | _arXiv_ | - | - | Data Augmentation |
-| [Learning with Selective Forgetting](https://www.ijcai.org/proceedings/2021/0137.pdf) | 2021 | Shibata et al. | _IJCAI_ | - | - | Data Augmentation |
-| [SSSE: Efficiently Erasing Samples from Trained Machine Learning Models](https://openreview.net/forum?id=GRMKEx3kEo) | 2021 | Peste et al. | _NeurIPS-PRIML_ | SSSE | - | Data Influence |
-| [How Does Data Augmentation Affect Privacy in Machine Learning?](https://arxiv.org/abs/2007.10567) | 2021 | Yu et al. | _AAAI_ | - | [[Code]](https://github.com/dayu11/MI_with_DA) | Data Augmentation |
-| [Coded Machine Unlearning](https://ieeexplore.ieee.org/document/9458237) | 2021 | Aldaghri et al. | _IEEE_ | - | - | Data Partitioning |
-| [Machine Unlearning](https://ieeexplore.ieee.org/document/9519428) | 2021 | Bourtoule et al. | _IEEE_ | SISA | [[Code]](https://github.com/cleverhans-lab/machine-unlearning) | Data Partitioning |
-| [How Does Data Augmentation Affect Privacy in Machine Learning?](https://ojs.aaai.org/index.php/AAAI/article/view/17284/) | 2021 | Yu et al. | _AAAI_ | - | [[Code]](https://github.com/dayu11/MI_with_DA) | Data Augmentation |
-| [Amnesiac Machine Learning](https://ojs.aaai.org/index.php/AAAI/article/view/17371) | 2021 | Graves et al. | _AAAI_ | AmnesiacML | [[Code]](https://github.com/lmgraves/AmnesiacML) | Data Influence |
-| [Unlearnable Examples: Making Personal Data Unexploitable](https://arxiv.org/abs/2101.04898) | 2021 | Huang et al. | _ICLR_ | - | [[Code]](https://github.com/HanxunH/Unlearnable-Examples) | Data Augmentation |
-| [Descent-to-Delete: Gradient-Based Methods for Machine Unlearning](https://proceedings.mlr.press/v132/neel21a.html) | 2021 | Neel et al. | _ALT_ | - | - | Data Influence |
-| [Fawkes: Protecting Privacy against Unauthorized Deep Learning Models](https://dl.acm.org/doi/abs/10.5555/3489212.3489302) | 2020 | Shan et al. | _USENIX Sec. Sym._ | Fawkes | [[Code]](https://github.com/Shawn-Shan/fawkes) | Data Augmentation |
-| [PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models](https://dl.acm.org/doi/abs/10.1145/3318464.3380571) | 2020 | Wu et al. | _SIGMOD_ | PrIU/PrIU-opt | - | Data Influence |
-| [DeltaGrad: Rapid retraining of machine learning models](https://proceedings.mlr.press/v119/wu20b.html) | 2020 | Wu et al. | _ICML_ | DeltaGrad | [[Code]](https://github.com/thuwuyinjun/DeltaGrad) | Data Influence |
\ No newline at end of file
+| [Please Tell Me More: Privacy Impact of Explainability through the Lens of Membership Inference Attack](https://www.computer.org/csdl/proceedings-article/sp/2024/313000a120/1Ub23teQ7PG) | 2024 | _SP_ | Feature-based | Membership Inference | Differential Privacy, Privacy-Preserving Models, DP-SGD | - |
+| [On the Privacy Risks of Algorithmic Recourse](https://proceedings.mlr.press/v206/pawelczyk23a.html) | 2023 | _AISTATS_ | Counterfactual | Membership Inference | Differential Privacy | - |
+| [The Privacy Issue of Counterfactual Explanations: Explanation Linkage Attacks](https://dl.acm.org/doi/full/10.1145/3608482) | 2023 | _TIST_ | Counterfactual | Linkage | Anonymisation | - |
+| [Feature-based Learning for Diverse and Privacy-Preserving Counterfactual Explanations](https://dl.acm.org/doi/abs/10.1145/3580305.3599343) | 2023 | _KDD_ | Counterfactual | - | Perturbation | [[Code]](https://github.com/isVy08/L2C/) |
+| [Private Graph Extraction via Feature Explanations](https://petsymposium.org/popets/2023/popets-2023-0041.pdf) | 2023 | _PETS_ | Feature-based | Graph Extraction | Perturbation | [[Code]](https://github.com/iyempissy/graph-stealing-attacks-with-explanation) |
+| [Privacy-Preserving Algorithmic Recourse](https://arxiv.org/abs/2311.14137) | 2023 | _ICAIF_ | Counterfactual | - | Differential Privacy | - |
+| [Accurate, Explainable, and Private Models: Providing Recourse While Minimizing Training Data Leakage](https://arxiv.org/abs/2308.04341) | 2023 | _ICML-Workshop_ | Counterfactual | Membership Inference | Differential Privacy | - |
+| [Probabilistic Dataset Reconstruction from Interpretable Models](https://arxiv.org/abs/2308.15099) | 2023 | _arXiv_ | Interpretable Surrogates | Data Reconstruction | - | [[Code]](https://github.com/ferryjul/ProbabilisticDatasetsReconstruction) |
+| [DeepFixCX: Explainable privacy-preserving image compression for medical image analysis](https://wires.onlinelibrary.wiley.com/doi/full/10.1002/widm.1495) | 2023 | _WIREs-DMKD_ | Case-based | Identity recognition | Anonymisation | [[Code]](https://github.com/adgaudio/DeepFixCX) |
+| [XorSHAP: Privacy-Preserving Explainable AI for Decision Tree Models](https://eprint.iacr.org/2023/1859) | 2023 | _Preprint_ | Shapley | - | Multi-party Computation | - |
+| DP-XAI | 2023 | _Github_ | ALE plot | - | Differential Privacy | [[Code]](https://github.com/lange-martin/dp-global-xai) |
+| [Inferring Sensitive Attributes from Model Explanations](https://dl.acm.org/doi/abs/10.1145/3511808.3557362) | 2022 | _CIKM_ | Gradient-based, Perturbation-based | Attribute Inference | - | [[Code]](https://github.com/vasishtduddu/AttInfExplanations) |
+| [Model explanations with differential privacy](https://dl.acm.org/doi/abs/10.1145/3531146.3533235) | 2022 | _FAccT_ | Feature-based | - | Differential Privacy | - |
+| [DualCF: Efficient Model Extraction Attack from Counterfactual Explanations](https://dl.acm.org/doi/10.1145/3531146.3533188) | 2022 | _FAccT_ | Counterfactual | Model Extraction | - | - |
+| [Feature Inference Attack on Shapley Values](https://dl.acm.org/doi/abs/10.1145/3548606.3560573) | 2022 | _CCS_ | Shapley | Attribute/Feature Inference | Low-dimensional | - |
+| [Evaluating the privacy exposure of interpretable global explainers](https://ieeexplore.ieee.org/abstract/document/10063510/), [Privacy Risk of Global Explainers](https://ebooks.iospress.nl/doi/10.3233/FAIA220206) | 2022 | _CogMI_ | Interpretable Surrogates | Membership Inference | - | - |
+| [Privacy-Preserving Case-Based Explanations: Enabling Visual Interpretability by Protecting Privacy](https://ieeexplore.ieee.org/document/9729808/) | 2022 | _IEEE Access_ | Example-based | - | Anonymisation | - |
+| [On the amplification of security and privacy risks by post-hoc explanations in machine learning models](https://arxiv.org/abs/2206.14004) | 2022 | _arXiv_ | Feature-based | Membership Inference | - | - |
+| [Differentially Private Counterfactuals via Functional Mechanism](https://arxiv.org/abs/2208.02878) | 2022 | _arXiv_ | Counterfactual | - | Differential Privacy | - |
+| [Differentially Private Shapley Values for Data Evaluation](https://arxiv.org/abs/2206.00511) | 2022 | _arXiv_ | Shapley | - | Differential Privacy | [[Code]](https://github.com/amiratag/DataShapley) |
+| [Exploiting Explanations for Model Inversion Attacks](https://openaccess.thecvf.com/content/ICCV2021/html/Zhao_Exploiting_Explanations_for_Model_Inversion_Attacks_ICCV_2021_paper.html) | 2021 | _ICCV_ | Gradient-based, Interpretable Surrogates | Model Inversion | - | - |
+| [On the Privacy Risks of Model Explanations](https://dl.acm.org/doi/abs/10.1145/3461702.3462533) | 2021 | _AIES_ | Feature-based, Shapley, Counterfactual | Membership Inference | - | - |
+| [Adversarial XAI Methods in Cybersecurity](https://ieeexplore.ieee.org/abstract/document/9555622) | 2021 | _TIFS_ | Counterfactual | Membership Inference | - | - |
+| [MEGEX: Data-Free Model Extraction Attack against Gradient-Based Explainable AI](https://arxiv.org/abs/2107.08909) | 2021 | _arXiv_ | Gradient-based | Model Extraction | - | [[Code]](https://github.com/cake-lab/datafree-model-extraction) |
+| [Robust Counterfactual Explanations for Privacy-Preserving SVM](https://www.diva-portal.org/smash/record.jsf?pid=diva2%3A1581005&dswid=5229), [Robust Explanations for Private Support Vector Machines](https://arxiv.org/abs/2102.03785) | 2021 | _ICML-Workshop_ | Counterfactual | - | Private SVM | [[Code]](https://github.com/rami-mochaourab/robust-explanation-SVM) |
+| [When Differential Privacy Meets Interpretability: A Case Study](https://arxiv.org/abs/2106.13203) | 2021 | _RCV-CVPR_ | Interpretable Models | - | Differential Privacy | - |
+| [Differentially Private Quantiles](https://proceedings.mlr.press/v139/gillenwater21a.html) | 2021 | _ICML_ | Quantiles | - | Differential Privacy | [[Code]](https://github.com/google-research/google-research/tree/master/dp_multiq) |
+| [FOX: Fooling with Explanations: Privacy Protection with Adversarial Reactions in Social Media](https://ieeexplore.ieee.org/document/9647778) | 2021 | _PST_ | - | Attribute Inference | Privacy-Protecting Explanation | - |
+| [Privacy-preserving generative adversarial network for case-based explainability in medical image analysis](https://ieeexplore.ieee.org/abstract/document/9598877/) | 2021 | _IEEE Access_ | Example-based | - | Generative Anonymisation | - |
+| [Interpretable and Differentially Private Predictions](https://ojs.aaai.org/index.php/AAAI/article/view/5827) | 2020 | _AAAI_ | Locally linear maps | - | Differential Privacy | [[Code]](https://github.com/frhrdr/dp-llm) |
+| [Model extraction from counterfactual explanations](https://arxiv.org/abs/2009.01884) | 2020 | _arXiv_ | Counterfactual | Model Extraction | - | [[Code]](https://github.com/aivodji/mrce) |
+| [Model Reconstruction from Model Explanations](https://dl.acm.org/doi/10.1145/3287560.3287562) | 2019 | _FAT*_ | Gradient-based | Model Reconstruction, Model Extraction | - | - |
+| [Interpret Federated Learning with Shapley Values](https://arxiv.org/abs/1905.04519) | 2019 | _arXiv_ | Shapley | - | Federated | [[Code]](https://github.com/crownpku/federated_shap) |
+| [Collaborative Explanation of Deep Models with Limited Interaction for Trade Secret and Privacy Preservation](https://dl.acm.org/doi/10.1145/3308560.3317586) | 2019 | _WWW_ | Feature-based | - | Collaborative rule-based model | - |
+| [Model inversion attacks that exploit confidence information and basic countermeasures](https://dl.acm.org/doi/abs/10.1145/2810103.2813677) | 2015 | _CCS_ | Confidence scores | Reconstruction, Model Inversion | - | - |
\ No newline at end of file
diff --git a/script/output.txt b/script/output.txt
index ca8f93f..9e9394e 100644
--- a/script/output.txt
+++ b/script/output.txt
@@ -1,969 +1,317 @@
-
- |
- Towards Adversarial Evaluations for Inexact Machine Unlearning |
- arXiv |
- 2023 |
- [Code] |
- Model-Agnostic |
-
-
- KGA: A General Machine Unlearning Framework Based on Knowledge Gap Alignment |
- arXiv |
- 2023 |
- [Code] |
- Model-Agnostic |
-
-
- On the Trade-Off between Actionable Explanations and the Right to be Forgotten |
- arXiv |
- 2023 |
- - |
- Model-Agnostic |
-
-
- Towards Unbounded Machine Unlearning |
- arXiv |
- 2023 |
- [Code] |
- Model-Agnostic |
-
-
- Netflix and Forget: Efficient and Exact Machine Unlearning from Bi-linear Recommendations |
- arXiv |
- 2023 |
- - |
- Model-Agnostic |
-
-
- To Be Forgotten or To Be Fair: Unveiling Fairness Implications of Machine Unlearning Methods |
- arXiv |
- 2023 |
- [Code] |
- Model-Agnostic |
-
-
- Sequential Informed Federated Unlearning: Efficient and Provable Client Unlearning in Federated Optimization |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Certified Data Removal in Sum-Product Networks |
- ICKG |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- Learning with Recoverable Forgetting |
- ECCV |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Continual Learning and Private Unlearning |
- CoLLAs |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- Verifiable and Provably Secure Machine Unlearning |
- arXiv |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- VeriFi: Towards Verifiable Federated Unlearning |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- FedRecover: Recovering from Poisoning Attacks in Federated Learning using Historical Information |
- S&P |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Fast Yet Effective Machine Unlearning |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Membership Inference via Backdooring |
- IJCAI |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- Forget Unlearning: Towards True Data-Deletion in Machine Learning |
- ICLR |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Zero-Shot Machine Unlearning |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Efficient Attribute Unlearning: Towards Selective Removal of Input Attributes from Feature Representations |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Few-Shot Unlearning |
- ICLR |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Federated Unlearning: How to Efficiently Erase a Client in FL? |
- UpML Workshop |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Machine Unlearning Method Based On Projection Residual |
- DSAA |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Hard to Forget: Poisoning Attacks on Certified Machine Unlearning |
- AAAI |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- Athena: Probabilistic Verification of Machine Unlearning |
- PoPETs |
- 2022 |
- - |
- Model-Agnostic |
-
-
- FP2-MIA: A Membership Inference Attack Free of Posterior Probability in Machine Unlearning |
- ProvSec |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Deletion Inference, Reconstruction, and Compliance in Machine (Un)Learning |
- PETS |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Prompt Certified Machine Unlearning with Randomized Gradient Smoothing and Quantization |
- NeurIPS |
- 2022 |
- - |
- Model-Agnostic |
-
-
- The Right to be Forgotten in Federated Learning: An Efficient Realization with Rapid Retraining |
- INFOCOM |
- 2022 |
- [Code] |
- Model-Agnostic |
-
-
- Backdoor Defense with Machine Unlearning |
- INFOCOM |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Markov Chain Monte Carlo-Based Machine Unlearning: Unlearning What Needs to be Forgotten |
- ASIA CCS |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Federated Unlearning for On-Device Recommendation |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Can Bad Teaching Induce Forgetting? Unlearning in Deep Networks using an Incompetent Teacher |
- arXiv |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Efficient Two-Stage Model Retraining for Machine Unlearning |
- CVPR Workshop |
- 2022 |
- - |
- Model-Agnostic |
-
-
- Learn to Forget: Machine Unlearning Via Neuron Masking |
- IEEE |
- 2021 |
- - |
- Model-Agnostic |
-
-
- Adaptive Machine Unlearning |
- NeurIPS |
- 2021 |
- [Code] |
- Model-Agnostic |
-
-
- Descent-to-Delete: Gradient-Based Methods for Machine Unlearning |
- ALT |
- 2021 |
- - |
- Model-Agnostic |
-
-
- Remember What You Want to Forget: Algorithms for Machine Unlearning |
- NeurIPS |
- 2021 |
- - |
- Model-Agnostic |
-
-
- FedEraser: Enabling Efficient Client-Level Data Removal from Federated Learning Models |
- IWQoS |
- 2021 |
- - |
- Model-Agnostic |
-
-
- Federated Unlearning |
- IWQoS |
- 2021 |
- [Code] |
- Model-Agnostic |
-
-
- Machine Unlearning via Algorithmic Stability |
- COLT |
- 2021 |
- - |
- Model-Agnostic |
-
-
- EMA: Auditing Data Removal from Trained Models |
- MICCAI |
- 2021 |
- [Code] |
- Model-Agnostic |
-
-
- Knowledge-Adaptation Priors |
- NeurIPS |
- 2021 |
- [Code] |
- Model-Agnostic |
-
-
- PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models |
- NeurIPS |
- 2020 |
- - |
- Model-Agnostic |
-
-
- Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks |
- CVPR |
- 2020 |
- - |
- Model-Agnostic |
-
-
- Learn to Forget: User-Level Memorization Elimination in Federated Learning |
- arXiv |
- 2020 |
- - |
- Model-Agnostic |
-
-
- Certified Data Removal from Machine Learning Models |
- ICML |
- 2020 |
- - |
- Model-Agnostic |
-
-
- Class Clown: Data Redaction in Machine Unlearning at Enterprise Scale |
- arXiv |
- 2020 |
- - |
- Model-Agnostic |
-
-
- A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine |
- Cluster Computing |
- 2019 |
- - |
- Model-Agnostic |
-
-
- Making AI Forget You: Data Deletion in Machine Learning |
- NeurIPS |
- 2019 |
- - |
- Model-Agnostic |
-
-
- Lifelong Anomaly Detection Through Unlearning |
- CCS |
- 2019 |
- - |
- Model-Agnostic |
-
-
- Learning Not to Learn: Training Deep Neural Networks With Biased Data |
- CVPR |
- 2019 |
- - |
- Model-Agnostic |
-
-
- Efficient Repair of Polluted Machine Learning Systems via Causal Unlearning |
- ASIACCS |
- 2018 |
- [Code] |
- Model-Agnostic |
-
-
- Understanding Black-box Predictions via Influence Functions |
- ICML |
- 2017 |
- [Code] |
- Model-Agnostic |
-
-
- Towards Making Systems Forget with Machine Unlearning |
- S&P |
- 2015 |
- - |
- Model-Agnostic |
-
-
- Towards Making Systems Forget with Machine Unlearning |
- S&P |
- 2015 |
- - |
- Model-Agnostic |
-
-
- Incremental and decremental training for linear classification |
- KDD |
- 2014 |
- [Code] |
- Model-Agnostic |
-
-
- Multiple Incremental Decremental Learning of Support Vector Machines |
- NIPS |
- 2009 |
- - |
- Model-Agnostic |
-
-
- Incremental and Decremental Learning for Linear Support Vector Machines |
- ICANN |
- 2007 |
- - |
- Model-Agnostic |
-
-
- Decremental Learning Algorithms for Nonlinear Langrangian and Least Squares Support Vector Machines |
- OSB |
- 2007 |
- - |
- Model-Agnostic |
-
-
- Multicategory Incremental Proximal Support Vector Classifiers |
- KES |
- 2003 |
- - |
- Model-Agnostic |
-
-
- Incremental and Decremental Proximal Support Vector Classification using Decay Coefficients |
- DaWak |
- 2003 |
- - |
- Model-Agnostic |
-
-
- Incremental and Decremental Support Vector Machine Learning |
- NeurIPS |
- 2000 |
- - |
- Model-Agnostic |
-
-
-
- Heterogeneous Federated Knowledge Graph Embedding Learning and Unlearning |
- WWW |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- One-Shot Machine Unlearning with Mnemonic Code |
- arXiv |
- 2023 |
- - |
- Model-Intrinsic |
-
-
- Inductive Graph Unlearning |
- USENIX |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- ERM-KTP: Knowledge-level Machine Unlearning via Knowledge Transfer |
- CVPR |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- GNNDelete: A General Strategy for Unlearning in Graph Neural Networks |
- ICLR |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- Unfolded Self-Reconstruction LSH: Towards Machine Unlearning in Approximate Nearest Neighbour Search |
- arXiv |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- Efficiently Forgetting What You Have Learned in Graph Representation Learning via Projection |
- AISTATS |
- 2023 |
- [Code] |
- Model-Intrinsic |
-
-
- Unrolling SGD: Understanding Factors Influencing Machine Unlearning |
- EuroS&P |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Graph Unlearning |
- CCS |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Certified Graph Unlearning |
- GLFrontiers Workshop |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Skin Deep Unlearning: Artefact and Instrument Debiasing in the Context of Melanoma Classification |
- ICML |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Near-Optimal Task Selection for Meta-Learning with Mutual Information and Online Variational Bayesian Unlearning |
- AISTATS |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Unlearning Protected User Attributes in Recommendations with Adversarial Training |
- SIGIR |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Recommendation Unlearning |
- TheWebConf |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Knowledge Neurons in Pretrained Transformers |
- ACL |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Memory-Based Model Editing at Scale |
- MLR |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Forgetting Fast in Recommender Systems |
- arXiv |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Unlearning Nonlinear Graph Classifiers in the Limited Training Data Regime |
- arXiv |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Deep Regression Unlearning |
- arXiv |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Quark: Controllable Text Generation with Reinforced Unlearning |
- arXiv |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Forget-SVGD: Particle-Based Bayesian Federated Unlearning |
- DSL Workshop |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Machine Unlearning of Federated Clusters |
- arXiv |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Machine Unlearning for Image Retrieval: A Generative Scrubbing Approach |
- MM |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Machine Unlearning: Linear Filtration for Logit-based Classifiers |
- Machine Learning |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Deep Unlearning via Randomized Conditionally Independent Hessians |
- CVPR |
- 2022 |
- [Code] |
- Model-Intrinsic |
-
-
- Challenges and Pitfalls of Bayesian Unlearning |
- UPML Workshop |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Federated Unlearning via Class-Discriminative Pruning |
- WWW |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Active forgetting via influence estimation for neural networks |
- Int. J. Intel. Systems |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Variational Bayesian unlearning |
- NeurIPS |
- 2022 |
- - |
- Model-Intrinsic |
-
-
- Revisiting Machine Learning Training Process for Enhanced Data Privacy |
- IC3 |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Knowledge Removal in Sampling-based Bayesian Inference |
- ICLR |
- 2021 |
- [Code] |
- Model-Intrinsic |
-
-
- Mixed-Privacy Forgetting in Deep Networks |
- CVPR |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- HedgeCut: Maintaining Randomised Trees for Low-Latency Machine Unlearning |
- SIGMOD |
- 2021 |
- [Code] |
- Model-Intrinsic |
-
-
- A Unified PAC-Bayesian Framework for Machine Unlearning via Information Risk Minimization |
- MLSP |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- DeepObliviate: A Powerful Charm for Erasing Data Residual Memory in Deep Neural Networks |
- arXiv |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Approximate Data Deletion from Machine Learning Models: Algorithms and Evaluations |
- AISTATS |
- 2021 |
- [Code] |
- Model-Intrinsic |
-
-
- Bayesian Inference Forgetting |
- arXiv |
- 2021 |
- [Code] |
- Model-Intrinsic |
-
-
- Approximate Data Deletion from Machine Learning Models |
- AISTATS |
- 2021 |
- [Code] |
- Model-Intrinsic |
-
-
- Online Forgetting Process for Linear Regression Models |
- AISTATS |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- RevFRF: Enabling Cross-domain Random Forest Training with Revocable Federated Learning |
- IEEE |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Coded Machine Unlearning |
- IEEE Access |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Machine Unlearning for Random Forests |
- ICML |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Bayesian Variational Federated Learning and Unlearning in Decentralized Networks |
- SPAWC |
- 2021 |
- - |
- Model-Intrinsic |
-
-
- Forgetting Outside the Box: Scrubbing Deep Networks of Information Accessible from Input-Output Observations |
- ECCV |
- 2020 |
- - |
- Model-Intrinsic |
-
-
- Influence Functions in Deep Learning Are Fragile |
- arXiv |
- 2020 |
- - |
- Model-Intrinsic |
-
-
- Deep Autoencoding Topic Model With Scalable Hybrid Bayesian Inference |
- IEEE |
- 2020 |
- - |
- Model-Intrinsic |
-
-
- Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks |
- CVPR |
- 2020 |
- - |
- Model-Intrinsic |
-
-
- Uncertainty in Neural Networks: Approximately Bayesian Ensembling |
- AISTATS |
- 2020 |
- [Code] |
- Model-Intrinsic |
-
-
- Certified Data Removal from Machine Learning Models |
- ICML |
- 2020 |
- - |
- Model-Intrinsic |
-
-
- DeltaGrad: Rapid retraining of machine learning models |
- ICML |
- 2020 |
- [Code] |
- Model-Intrinsic |
-
-
- Making AI Forget You: Data Deletion in Machine Learning |
- NeurIPS |
- 2019 |
- - |
- Model-Intrinsic |
-
-
- “Amnesia” – Towards Machine Learning Models That Can Forget User Data Very Fast |
- AIDB Workshop |
- 2019 |
- [Code] |
- Model-Intrinsic |
-
-
- A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine |
- Cluster Computing |
- 2019 |
- - |
- Model-Intrinsic |
-
-
- Neural Text Degeneration With Unlikelihood Training |
- arXiv |
- 2019 |
- [Code] |
- Model-Intrinsic |
-
-
- Bayesian Neural Networks with Weight Sharing Using Dirichlet Processes |
- IEEE |
- 2018 |
- [Code] |
- Model-Intrinsic |
-
-
-
- Hidden Poison: Machine Unlearning Enables Camouflaged Poisoning Attacks |
- NeurIPS-TSRML |
- 2022 |
- [Code] |
- Data-Driven |
-
-
- Forget Unlearning: Towards True Data Deletion in Machine Learning |
- ICLR |
- 2022 |
- - |
- Data-Driven |
-
-
- ARCANE: An Efficient Architecture for Exact Machine Unlearning |
- IJCAI |
- 2022 |
- - |
- Data-Driven |
-
-
- PUMA: Performance Unchanged Model Augmentation for Training Data Removal |
- AAAI |
- 2022 |
- - |
- Data-Driven |
-
-
- Certifiable Unlearning Pipelines for Logistic Regression: An Experimental Study |
- MAKE |
- 2022 |
- [Code] |
- Data-Driven |
-
-
- Zero-Shot Machine Unlearning |
- arXiv |
- 2022 |
- - |
- Data-Driven |
-
-
- GRAPHEDITOR: An Efficient Graph Representation Learning and Unlearning Approach |
- - |
- 2022 |
- [Code] |
- Data-Driven |
-
-
- Fast Model Update for IoT Traffic Anomaly Detection with Machine Unlearning |
- IEEE IoT-J |
- 2022 |
- - |
- Data-Driven |
-
-
- Learning to Refit for Convex Learning Problems |
- arXiv |
- 2021 |
- - |
- Data-Driven |
-
-
- Fast Yet Effective Machine Unlearning |
- arXiv |
- 2021 |
- - |
- Data-Driven |
-
-
- Learning with Selective Forgetting |
- IJCAI |
- 2021 |
- - |
- Data-Driven |
-
-
- SSSE: Efficiently Erasing Samples from Trained Machine Learning Models |
- NeurIPS-PRIML |
- 2021 |
- - |
- Data-Driven |
-
-
- How Does Data Augmentation Affect Privacy in Machine Learning? |
- AAAI |
- 2021 |
- [Code] |
- Data-Driven |
-
-
- Coded Machine Unlearning |
- IEEE |
- 2021 |
- - |
- Data-Driven |
-
-
- Machine Unlearning |
- IEEE |
- 2021 |
- [Code] |
- Data-Driven |
-
-
- How Does Data Augmentation Affect Privacy in Machine Learning? |
- AAAI |
- 2021 |
- [Code] |
- Data-Driven |
-
-
- Amnesiac Machine Learning |
- AAAI |
- 2021 |
- [Code] |
- Data-Driven |
-
-
- Unlearnable Examples: Making Personal Data Unexploitable |
- ICLR |
- 2021 |
- [Code] |
- Data-Driven |
-
-
- Descent-to-Delete: Gradient-Based Methods for Machine Unlearning |
- ALT |
- 2021 |
- - |
- Data-Driven |
-
-
- Fawkes: Protecting Privacy against Unauthorized Deep Learning Models |
- USENIX Sec. Sym. |
- 2020 |
- [Code] |
- Data-Driven |
-
-
- PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models |
- SIGMOD |
- 2020 |
- - |
- Data-Driven |
-
-
- DeltaGrad: Rapid retraining of machine learning models |
- ICML |
- 2020 |
- [Code] |
- Data-Driven |
-
+
+
+ Please Tell Me More: Privacy Impact of Explainability through the Lens of Membership Inference Attack |
+ 2024 |
+ SP |
+ Feature-based |
+ Membership Inference |
+ Differential Privacy, Privacy-Preserving Models, DP-SGD |
+ - |
+
+
+ On the Privacy Risks of Algorithmic Recourse |
+ 2023 |
+ AISTATS |
+ Counterfactual |
+ Membership Inference |
+ Differential Privacy |
+ - |
+
+
+ The Privacy Issue of Counterfactual Explanations: Explanation Linkage Attacks |
+ 2023 |
+ TIST |
+ Counterfactual |
+ Linkage |
+ Anonymisation |
+ - |
+
+
+ Feature-based Learning for Diverse and Privacy-Preserving Counterfactual Explanations |
+ 2023 |
+ KDD |
+ Counterfactual |
+ - |
+ Perturbation |
+ [Code] |
+
+
+ Private Graph Extraction via Feature Explanations |
+ 2023 |
+ PETS |
+ Feature-based |
+ Graph Extraction |
+ Perturbation |
+ [Code] |
+
+
+ Privacy-Preserving Algorithmic Recourse |
+ 2023 |
+ ICAIF |
+ Counterfactual |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Accurate, Explainable, and Private Models: Providing Recourse While Minimizing Training Data Leakage |
+ 2023 |
+ ICML-Workshop |
+ Counterfactual |
+ Membership Inference |
+ Differential Privacy |
+ - |
+
+
+ Probabilistic Dataset Reconstruction from Interpretable Models |
+ 2023 |
+ arXiv |
+ Interpretable Surrogates |
+ Data Reconstruction |
+ - |
+ [Code] |
+
+
+ DeepFixCX: Explainable privacy-preserving image compression for medical image analysis |
+ 2023 |
+ WIREs-DMKD |
+ Case-based |
+ Identity recognition |
+ Anonymisation |
+ [Code] |
+
+
+ XorSHAP: Privacy-Preserving Explainable AI for Decision Tree Models |
+ 2023 |
+ Preprint |
+ Shapley |
+ - |
+ Multi-party Computation |
+ - |
+
+
+ - |
+ 2023 |
+ Github |
+ ALE plot |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Inferring Sensitive Attributes from Model Explanations |
+ 2022 |
+ CIKM |
+ Gradient-based, Perturbation-based |
+ Attribute Inference |
+ - |
+ [Code] |
+
+
+ Model explanations with differential privacy |
+ 2022 |
+ FAccT |
+ Feature-based |
+ - |
+ Differential Privacy |
+ - |
+
+
+ DualCF: Efficient Model Extraction Attack from Counterfactual Explanations |
+ 2022 |
+ FAccT |
+ Counterfactual |
+ Model Extraction |
+ - |
+ - |
+
+
+ Feature Inference Attack on Shapley Values |
+ 2022 |
+ CCS |
+ Shapley |
+ Attribute/Feature Inference |
+ Low-dimensional |
+ - |
+
+
+ Evaluating the privacy exposure of interpretable global explainers |
+ 2022 |
+ CogMI |
+ Interpretable Surrogates |
+ Membership Inference |
+ - |
+ - |
+
+
+ Privacy-Preserving Case-Based Explanations: Enabling Visual Interpretability by Protecting Privacy |
+ 2022 |
+ IEEE Access |
+ Example-based |
+ - |
+ Anonymisation |
+ - |
+
+
+ On the amplification of security and privacy risks by post-hoc explanations in machine learning models |
+ 2022 |
+ arXiv |
+ Feature-based |
+ Membership Inference |
+ - |
+ - |
+
+
+ Differentially Private Counterfactuals via Functional Mechanism |
+ 2022 |
+ arXiv |
+ Counterfactual |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Differentially Private Shapley Values for Data Evaluation |
+ 2022 |
+ arXiv |
+ Shapley |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Exploiting Explanations for Model Inversion Attacks |
+ 2021 |
+ ICCV |
+ Gradient-based, Interpretable Surrogates |
+ Model Inversion |
+ - |
+ - |
+
+
+ On the Privacy Risks of Model Explanations |
+ 2021 |
+ AIES |
+ Feature-based, Shapley, Counterfactual |
+ Membership Inference |
+ - |
+ - |
+
+
+ Adversarial XAI Methods in Cybersecurity |
+ 2021 |
+ TIFS |
+ Counterfactual |
+ Membership Inference |
+ - |
+ - |
+
+
+ MEGEX: Data-Free Model Extraction Attack against Gradient-Based Explainable AI |
+ 2021 |
+ arXiv |
+ Gradient-based |
+ Model Extraction |
+ - |
+ [Code] |
+
+
+ Robust Counterfactual Explanations for Privacy-Preserving SVM |
+ 2021 |
+ ICML-Workshop |
+ Counterfactual |
+ - |
+ Private SVM |
+ [Code] |
+
+
+ When Differential Privacy Meets Interpretability: A Case Study |
+ 2021 |
+ RCV-CVPR |
+ Interpretable Models |
+ - |
+ Differential Privacy |
+ - |
+
+
+ Differentially Private Quantiles |
+ 2021 |
+ ICML |
+ Quantiles |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ FOX: Fooling with Explanations: Privacy Protection with Adversarial Reactions in Social Media |
+ 2021 |
+ PST |
+ - |
+ Attribute Inference |
+ Privacy-Protecting Explanation |
+ - |
+
+
+ Privacy-preserving generative adversarial network for case-based explainability in medical image analysis |
+ 2021 |
+ IEEE Access |
+ Example-based |
+ - |
+ Generative Anonymisation |
+ - |
+
+
+ Interpretable and Differentially Private Predictions |
+ 2020 |
+ AAAI |
+ Locally linear maps |
+ - |
+ Differential Privacy |
+ [Code] |
+
+
+ Model extraction from counterfactual explanations |
+ 2020 |
+ arXiv |
+ Counterfactual |
+ Model Extraction |
+ - |
+ [Code] |
+
+
+ Model Reconstruction from Model Explanations |
+ 2019 |
+ FAT* |
+ Gradient-based |
+ Model Reconstruction, Model Extraction |
+ - |
+ - |
+
+
+ Interpret Federated Learning with Shapley Values |
+ 2019 |
+ - |
+ Shapley |
+ - |
+ Federated |
+ [Code] |
+
+
+ Collaborative Explanation of Deep Models with Limited Interaction for Trade Secret and Privacy Preservation |
+ 2019 |
+ WWW |
+ Feature-based |
+ - |
+ Collaborative rule-based model |
+ - |
+
+
+ Model inversion attacks that exploit confidence information and basic countermeasures |
+ 2015 |
+ CCS |
+ Confidence scores |
+ Reconstruction, Model Inversion |
+ - |
+ - |
+
+
\ No newline at end of file
diff --git a/script/translator.ipynb b/script/translator.ipynb
index 4b8df9b..8437165 100644
--- a/script/translator.ipynb
+++ b/script/translator.ipynb
@@ -36,1044 +36,63 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
"id": "84f9273a",
"metadata": {
"scrolled": false
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "----\n",
- "Towards Adversarial Evaluations for Inexact Machine Unlearning\n",
- "https://arxiv.org/abs/2201.06640\n",
- "arXiv\n",
- "2023\n",
- "https://github.com/shash42/Evaluating-Inexact-Unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "KGA: A General Machine Unlearning Framework Based on Knowledge Gap Alignment\n",
- "https://arxiv.org/abs/2305.06535\n",
- "arXiv\n",
- "2023\n",
- "https://github.com/Lingzhi-WANG/KGAUnlearn\n",
- "Model-Agnostic\n",
- "----\n",
- "On the Trade-Off between Actionable Explanations and the Right to be Forgotten\n",
- "https://openreview.net/pdf?id=HWt4BBZjVW\n",
- "arXiv\n",
- "2023\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Towards Unbounded Machine Unlearning\n",
- "https://arxiv.org/pdf/2302.09880\n",
- "arXiv\n",
- "2023\n",
- "https://github.com/Meghdad92/SCRUB\n",
- "Model-Agnostic\n",
- "----\n",
- "Netflix and Forget: Efficient and Exact Machine Unlearning from Bi-linear Recommendations\n",
- "https://arxiv.org/abs/2302.06676\n",
- "arXiv\n",
- "2023\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "To Be Forgotten or To Be Fair: Unveiling Fairness Implications of Machine Unlearning Methods\n",
- "https://arxiv.org/abs/2302.03350\n",
- "arXiv\n",
- "2023\n",
- "https://github.com/cleverhans-lab/machine-unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "Sequential Informed Federated Unlearning: Efficient and Provable Client Unlearning in Federated Optimization\n",
- "https://arxiv.org/abs/2211.11656\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Certified Data Removal in Sum-Product Networks\n",
- "https://arxiv.org/abs/2210.01451\n",
- "ICKG\n",
- "2022\n",
- "https://github.com/ROYALBEFF/UnlearnSPN\n",
- "Model-Agnostic\n",
- "----\n",
- "Learning with Recoverable Forgetting\n",
- "https://arxiv.org/abs/2207.08224\n",
- "ECCV\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Continual Learning and Private Unlearning\n",
- "https://arxiv.org/abs/2203.12817\n",
- "CoLLAs\n",
- "2022\n",
- "https://github.com/Cranial-XIX/Continual-Learning-Private-Unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "Verifiable and Provably Secure Machine Unlearning\n",
- "https://arxiv.org/abs/2210.09126\n",
- "arXiv\n",
- "2022\n",
- "https://github.com/cleverhans-lab/verifiable-unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "VeriFi: Towards Verifiable Federated Unlearning\n",
- "https://arxiv.org/abs/2205.12709\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "FedRecover: Recovering from Poisoning Attacks in Federated Learning using Historical Information\n",
- "https://arxiv.org/abs/2210.10936\n",
- "S&P\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Fast Yet Effective Machine Unlearning\n",
- "https://arxiv.org/abs/2111.08947\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Membership Inference via Backdooring\n",
- "https://arxiv.org/abs/2206.04823\n",
- "IJCAI\n",
- "2022\n",
- "https://github.com/HongshengHu/membership-inference-via-backdooring\n",
- "Model-Agnostic\n",
- "----\n",
- "Forget Unlearning: Towards True Data-Deletion in Machine Learning\n",
- "https://arxiv.org/abs/2210.08911\n",
- "ICLR\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Zero-Shot Machine Unlearning\n",
- "https://arxiv.org/abs/2201.05629\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Efficient Attribute Unlearning: Towards Selective Removal of Input Attributes from Feature Representations\n",
- "https://arxiv.org/abs/2202.13295\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Few-Shot Unlearning\n",
- "https://download.huan-zhang.com/events/srml2022/accepted/yoon22fewshot.pdf\n",
- "ICLR\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Federated Unlearning: How to Efficiently Erase a Client in FL?\n",
- "https://arxiv.org/abs/2207.05521\n",
- "UpML Workshop\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Machine Unlearning Method Based On Projection Residual\n",
- "https://arxiv.org/abs/2209.15276\n",
- "DSAA\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Hard to Forget: Poisoning Attacks on Certified Machine Unlearning\n",
- "https://ojs.aaai.org/index.php/AAAI/article/view/20736\n",
- "AAAI\n",
- "2022\n",
- "https://github.com/ngmarchant/attack-unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "Athena: Probabilistic Verification of Machine Unlearning\n",
- "https://web.archive.org/web/20220721061150id_/https://petsymposium.org/popets/2022/popets-2022-0072.pdf\n",
- "PoPETs\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "FP2-MIA: A Membership Inference Attack Free of Posterior Probability in Machine Unlearning\n",
- "https://link.springer.com/chapter/10.1007/978-3-031-20917-8_12\n",
- "ProvSec\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Deletion Inference, Reconstruction, and Compliance in Machine (Un)Learning\n",
- "Un\n",
- "PETS\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Prompt Certified Machine Unlearning with Randomized Gradient Smoothing and Quantization\n",
- "https://openreview.net/pdf?id=ue4gP8ZKiWb\n",
- "NeurIPS\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "The Right to be Forgotten in Federated Learning: An Efficient Realization with Rapid Retraining\n",
- "https://arxiv.org/abs/2203.07320\n",
- "INFOCOM\n",
- "2022\n",
- "https://github.com/yiliucs/federated-unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "Backdoor Defense with Machine Unlearning\n",
- "https://arxiv.org/abs/2201.09538\n",
- "INFOCOM\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Markov Chain Monte Carlo-Based Machine Unlearning: Unlearning What Needs to be Forgotten\n",
- "https://dl.acm.org/doi/abs/10.1145/3488932.3517406\n",
- "ASIA CCS\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Federated Unlearning for On-Device Recommendation\n",
- "https://arxiv.org/abs/2210.10958\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Can Bad Teaching Induce Forgetting? Unlearning in Deep Networks using an Incompetent Teacher\n",
- "https://arxiv.org/abs/2205.08096\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- " Efficient Two-Stage Model Retraining for Machine Unlearning\n",
- "https://openaccess.thecvf.com/content/CVPR2022W/HCIS/html/Kim_Efficient_Two-Stage_Model_Retraining_for_Machine_Unlearning_CVPRW_2022_paper.html\n",
- "CVPR Workshop\n",
- "2022\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Learn to Forget: Machine Unlearning Via Neuron Masking\n",
- "https://ieeexplore.ieee.org/abstract/document/9844865?casa_token=_eowH3BTt1sAAAAA:X0uCpLxOwcFRNJHoo3AtA0ay4t075_cSptgTMznsjusnvgySq-rJe8GC285YhWG4Q0fUmP9Sodw0\n",
- "IEEE\n",
- "2021\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Adaptive Machine Unlearning\n",
- "https://proceedings.neurips.cc/paper/2021/hash/87f7ee4fdb57bdfd52179947211b7ebb-Abstract.html\n",
- "NeurIPS\n",
- "2021\n",
- "https://github.com/ChrisWaites/adaptive-machine-unlearning\n",
- "Model-Agnostic\n",
- "----\n",
- "Descent-to-Delete: Gradient-Based Methods for Machine Unlearning\n",
- "https://proceedings.mlr.press/v132/neel21a.html\n",
- "ALT\n",
- "2021\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Remember What You Want to Forget: Algorithms for Machine Unlearning\n",
- "https://arxiv.org/abs/2103.03279\n",
- "NeurIPS\n",
- "2021\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "FedEraser: Enabling Efficient Client-Level Data Removal from Federated Learning Models\n",
- "https://ieeexplore.ieee.org/abstract/document/9521274\n",
- "IWQoS\n",
- "2021\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Federated Unlearning\n",
- "https://arxiv.org/abs/2012.13891\n",
- "IWQoS\n",
- "2021\n",
- "https://www.dropbox.com/s/1lhx962axovbbom/FedEraser-Code.zip?dl=0\n",
- "Model-Agnostic\n",
- "----\n",
- "Machine Unlearning via Algorithmic Stability\n",
- "https://proceedings.mlr.press/v134/ullah21a.html\n",
- "COLT\n",
- "2021\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "EMA: Auditing Data Removal from Trained Models\n",
- "https://link.springer.com/chapter/10.1007/978-3-030-87240-3_76\n",
- "MICCAI\n",
- "2021\n",
- "https://github.com/Hazelsuko07/EMA\n",
- "Model-Agnostic\n",
- "----\n",
- "Knowledge-Adaptation Priors\n",
- "https://proceedings.neurips.cc/paper/2021/hash/a4380923dd651c195b1631af7c829187-Abstract.html\n",
- "NeurIPS\n",
- "2021\n",
- "https://github.com/team-approx-bayes/kpriors\n",
- "Model-Agnostic\n",
- "----\n",
- "PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models\n",
- "https://dl.acm.org/doi/abs/10.1145/3318464.3380571\n",
- "NeurIPS\n",
- "2020\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks\n",
- "https://arxiv.org/abs/1911.04933\n",
- "CVPR\n",
- "2020\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Learn to Forget: User-Level Memorization Elimination in Federated Learning\n",
- "https://www.researchgate.net/profile/Ximeng-Liu-5/publication/340134612_Learn_to_Forget_User-Level_Memorization_Elimination_in_Federated_Learning/links/5e849e64a6fdcca789e5f955/Learn-to-Forget-User-Level-Memorization-Elimination-in-Federated-Learning.pdf\n",
- "arXiv\n",
- "2020\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Certified Data Removal from Machine Learning Models\n",
- "https://proceedings.mlr.press/v119/guo20c.html\n",
- "ICML\n",
- "2020\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Class Clown: Data Redaction in Machine Unlearning at Enterprise Scale\n",
- "https://arxiv.org/abs/2012.04699\n",
- "arXiv\n",
- "2020\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine\n",
- "https://link.springer.com/article/10.1007/s10586-018-1772-4\n",
- "Cluster Computing\n",
- "2019\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Making AI Forget You: Data Deletion in Machine Learning\n",
- "https://papers.nips.cc/paper/2019/hash/cb79f8fa58b91d3af6c9c991f63962d3-Abstract.html\n",
- "NeurIPS\n",
- "2019\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Lifelong Anomaly Detection Through Unlearning\n",
- "https://dl.acm.org/doi/abs/10.1145/3319535.3363226\n",
- "CCS\n",
- "2019\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Learning Not to Learn: Training Deep Neural Networks With Biased Data\n",
- "https://openaccess.thecvf.com/content_CVPR_2019/html/Kim_Learning_Not_to_Learn_Training_Deep_Neural_Networks_With_Biased_CVPR_2019_paper.html\n",
- "CVPR\n",
- "2019\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Efficient Repair of Polluted Machine Learning Systems via Causal Unlearning\n",
- "https://dl.acm.org/citation.cfm?id=3196517\n",
- "ASIACCS\n",
- "2018\n",
- "https://github.com/CausalUnlearning/KARMA\n",
- "Model-Agnostic\n",
- "----\n",
- "Understanding Black-box Predictions via Influence Functions\n",
- "https://proceedings.mlr.press/v70/koh17a.html\n",
- "ICML\n",
- "2017\n",
- "https://github.com/kohpangwei/influence-release\n",
- "Model-Agnostic\n",
- "----\n",
- "Towards Making Systems Forget with Machine Unlearning\n",
- "https://ieeexplore.ieee.org/abstract/document/7163042\n",
- "S&P\n",
- "2015\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Towards Making Systems Forget with Machine Unlearning\n",
- "https://dl.acm.org/doi/10.1109/SP.2015.35\n",
- "S&P\n",
- "2015\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Incremental and decremental training for linear classification\n",
- "https://dl.acm.org/doi/10.1145/2623330.2623661\n",
- "KDD\n",
- "2014\n",
- "https://www.csie.ntu.edu.tw/~cjlin/papers/ws/\n",
- "Model-Agnostic\n",
- "----\n",
- "Multiple Incremental Decremental Learning of Support Vector Machines\n",
- "https://dl.acm.org/doi/10.5555/2984093.2984196\n",
- "NIPS\n",
- "2009\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Incremental and Decremental Learning for Linear Support Vector Machines\n",
- "https://dl.acm.org/doi/10.5555/1776814.1776838\n",
- "ICANN\n",
- "2007\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Decremental Learning Algorithms for Nonlinear Langrangian and Least Squares Support Vector Machines\n",
- "https://www.semanticscholar.org/paper/Decremental-Learning-Algorithms-for-Nonlinear-and-Duan-Li/312c677f0882d0dfd60bfd77346588f52aefd10f\n",
- "OSB\n",
- "2007\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Multicategory Incremental Proximal Support Vector Classifiers\n",
- "https://link.springer.com/chapter/10.1007/978-3-540-45224-9_54\n",
- "KES\n",
- "2003\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Incremental and Decremental Proximal Support Vector Classification using Decay Coefficients\n",
- "https://link.springer.com/chapter/10.1007/978-3-540-45228-7_42\n",
- "DaWak\n",
- "2003\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Incremental and Decremental Support Vector Machine Learning\n",
- "https://dl.acm.org/doi/10.5555/3008751.3008808\n",
- "NeurIPS\n",
- "2000\n",
- "-\n",
- "Model-Agnostic\n",
- "----\n",
- "Heterogeneous Federated Knowledge Graph Embedding Learning and Unlearning\n",
- "https://arxiv.org/abs/2302.02069\n",
- "WWW\n",
- "2023\n",
- "https://github.com/nju-websoft/FedLU/\n",
- "Model-Intrinsic\n",
- "----\n",
- "One-Shot Machine Unlearning with Mnemonic Code\n",
- "https://arxiv.org/abs/2306.05670\n",
- "arXiv\n",
- "2023\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Inductive Graph Unlearning\n",
- "https://arxiv.org/pdf/2304.03093.pdf\n",
- "USENIX\n",
- "2023\n",
- "https://github.com/Happy2Git/GUIDE\n",
- "Model-Intrinsic\n",
- "----\n",
- "ERM-KTP: Knowledge-level Machine Unlearning via Knowledge Transfer\n",
- "https://openaccess.thecvf.com/content/CVPR2023/papers/Lin_ERM-KTP_Knowledge-Level_Machine_Unlearning_via_Knowledge_Transfer_CVPR_2023_paper.pdf\n",
- "CVPR\n",
- "2023\n",
- "https://github.com/RUIYUN-ML/ERM-KTP\n",
- "Model-Intrinsic\n",
- "----\n",
- "GNNDelete: A General Strategy for Unlearning in Graph Neural Networks\n",
- "https://arxiv.org/abs/2302.13406\n",
- "ICLR\n",
- "2023\n",
- "https://github.com/mims-harvard/GNNDelete\n",
- "Model-Intrinsic\n",
- "----\n",
- "Unfolded Self-Reconstruction LSH: Towards Machine Unlearning in Approximate Nearest Neighbour Search\n",
- "https://arxiv.org/pdf/2304.02350.pdf\n",
- "arXiv\n",
- "2023\n",
- "https://anonymous.4open.science/r/ann-benchmarks-3786/README.md\n",
- "Model-Intrinsic\n",
- "----\n",
- "Efficiently Forgetting What You Have Learned in Graph Representation Learning via Projection\n",
- "https://arxiv.org/abs/2302.08990\n",
- "AISTATS\n",
- "2023\n",
- "https://github.com/CongWeilin/Projector\n",
- "Model-Intrinsic\n",
- "----\n",
- "Unrolling SGD: Understanding Factors Influencing Machine Unlearning\n",
- "https://ieeexplore.ieee.org/abstract/document/9797378\n",
- "EuroS&P\n",
- "2022\n",
- "https://github.com/cleverhans-lab/unrolling-sgd\n",
- "Model-Intrinsic\n",
- "----\n",
- "Graph Unlearning\n",
- "https://arxiv.org/abs/2103.14991\n",
- "CCS\n",
- "2022\n",
- "https://github.com/MinChen00/Graph-Unlearning\n",
- "Model-Intrinsic\n",
- "----\n",
- "Certified Graph Unlearning\n",
- "https://arxiv.org/abs/2206.09140\n",
- "GLFrontiers Workshop\n",
- "2022\n",
- "https://github.com/thupchnsky/sgc_unlearn\n",
- "Model-Intrinsic\n",
- "----\n",
- "Skin Deep Unlearning: Artefact and Instrument Debiasing in the Context of Melanoma Classification\n",
- "https://arxiv.org/abs/2109.09818\n",
- "ICML\n",
- "2022\n",
- "https://github.com/pbevan1/Skin-Deep-Unlearning\n",
- "Model-Intrinsic\n",
- "----\n",
- "Near-Optimal Task Selection for Meta-Learning with Mutual Information and Online Variational Bayesian Unlearning\n",
- "https://proceedings.mlr.press/v151/chen22h.html\n",
- "AISTATS\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Unlearning Protected User Attributes in Recommendations with Adversarial Training\n",
- "https://arxiv.org/abs/2206.04500\n",
- "SIGIR\n",
- "2022\n",
- "https://github.com/CPJKU/adv-multvae\n",
- "Model-Intrinsic\n",
- "----\n",
- "Recommendation Unlearning\n",
- "https://dl.acm.org/doi/abs/10.1145/3485447.3511997\n",
- "TheWebConf\n",
- "2022\n",
- "https://github.com/chenchongthu/Recommendation-Unlearning\n",
- "Model-Intrinsic\n",
- "----\n",
- "Knowledge Neurons in Pretrained Transformers\n",
- "https://arxiv.org/abs/2104.08696\n",
- "ACL\n",
- "2022\n",
- "https://github.com/Hunter-DDM/knowledge-neurons\n",
- "Model-Intrinsic\n",
- "----\n",
- "Memory-Based Model Editing at Scale\n",
- "https://proceedings.mlr.press/v162/mitchell22a/mitchell22a.pdf\n",
- "MLR\n",
- "2022\n",
- "https://sites.google.com/view/serac-editing\n",
- "Model-Intrinsic\n",
- "----\n",
- "Forgetting Fast in Recommender Systems\n",
- "https://arxiv.org/abs/2208.06875\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Unlearning Nonlinear Graph Classifiers in the Limited Training Data Regime\n",
- "https://arxiv.org/abs/2211.03216\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Deep Regression Unlearning\n",
- "https://arxiv.org/abs/2210.08196\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Quark: Controllable Text Generation with Reinforced Unlearning\n",
- "https://arxiv.org/abs/2205.13636\n",
- "arXiv\n",
- "2022\n",
- "https://github.com/GXimingLu/Quark\n",
- "Model-Intrinsic\n",
- "----\n",
- "Forget-SVGD: Particle-Based Bayesian Federated Unlearning\n",
- "https://ieeexplore.ieee.org/abstract/document/9820602\n",
- "DSL Workshop\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Machine Unlearning of Federated Clusters\n",
- "https://arxiv.org/abs/2210.16424\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Machine Unlearning for Image Retrieval: A Generative Scrubbing Approach\n",
- "https://dl.acm.org/doi/abs/10.1145/3503161.3548378\n",
- "MM\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Machine Unlearning: Linear Filtration for Logit-based Classifiers\n",
- "https://link.springer.com/article/10.1007/s10994-022-06178-9\n",
- "Machine Learning\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Deep Unlearning via Randomized Conditionally Independent Hessians\n",
- "https://openaccess.thecvf.com/content/CVPR2022/html/Mehta_Deep_Unlearning_via_Randomized_Conditionally_Independent_Hessians_CVPR_2022_paper.html\n",
- "CVPR\n",
- "2022\n",
- "https://github.com/vsingh-group/LCODEC-deep-unlearning\n",
- "Model-Intrinsic\n",
- "----\n",
- "Challenges and Pitfalls of Bayesian Unlearning\n",
- "https://arxiv.org/abs/2207.03227\n",
- "UPML Workshop\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Federated Unlearning via Class-Discriminative Pruning\n",
- "https://arxiv.org/abs/2110.11794\n",
- "WWW\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Active forgetting via influence estimation for neural networks\n",
- "https://onlinelibrary.wiley.com/doi/abs/10.1002/int.22981\n",
- "Int. J. Intel. Systems\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Variational Bayesian unlearning\n",
- "https://dl.acm.org/doi/abs/10.5555/3495724.3497068\n",
- "NeurIPS\n",
- "2022\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Revisiting Machine Learning Training Process for Enhanced Data Privacy\n",
- "https://dl.acm.org/doi/abs/10.1145/3474124.3474208\n",
- "IC3\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Knowledge Removal in Sampling-based Bayesian Inference\n",
- "https://openreview.net/forum?id=dTqOcTUOQO\n",
- "ICLR\n",
- "2021\n",
- "https://github.com/fshp971/mcmc-unlearning\n",
- "Model-Intrinsic\n",
- "----\n",
- "Mixed-Privacy Forgetting in Deep Networks\n",
- "https://openaccess.thecvf.com/content/CVPR2021/html/Golatkar_Mixed-Privacy_Forgetting_in_Deep_Networks_CVPR_2021_paper.html\n",
- "CVPR\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "HedgeCut: Maintaining Randomised Trees for Low-Latency Machine Unlearning\n",
- "https://dl.acm.org/doi/abs/10.1145/3448016.3457239\n",
- "SIGMOD\n",
- "2021\n",
- "https://github.com/schelterlabs/hedgecut\n",
- "Model-Intrinsic\n",
- "----\n",
- "A Unified PAC-Bayesian Framework for Machine Unlearning via Information Risk Minimization\n",
- "https://ieeexplore.ieee.org/abstract/document/9596170\n",
- "MLSP\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "DeepObliviate: A Powerful Charm for Erasing Data Residual Memory in Deep Neural Networks\n",
- "https://arxiv.org/abs/2105.06209\n",
- "arXiv\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Approximate Data Deletion from Machine Learning Models: Algorithms and Evaluations\n",
- "https://arxiv.org/abs/2002.10077\n",
- "AISTATS\n",
- "2021\n",
- "https://github.com/zleizzo/datadeletion\n",
- "Model-Intrinsic\n",
- "----\n",
- "Bayesian Inference Forgetting\n",
- "https://arxiv.org/abs/2101.06417\n",
- "arXiv\n",
- "2021\n",
- "https://github.com/fshp971/BIF\n",
- "Model-Intrinsic\n",
- "----\n",
- "Approximate Data Deletion from Machine Learning Models\n",
- "https://proceedings.mlr.press/v130/izzo21a.html\n",
- "AISTATS\n",
- "2021\n",
- "https://github.com/zleizzo/datadeletion\n",
- "Model-Intrinsic\n",
- "----\n",
- "Online Forgetting Process for Linear Regression Models\n",
- "https://proceedings.mlr.press/v130/li21a.html\n",
- "AISTATS\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "RevFRF: Enabling Cross-domain Random Forest Training with Revocable Federated Learning\n",
- "https://ieeexplore.ieee.org/abstract/document/9514457\n",
- "IEEE\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Coded Machine Unlearning\n",
- "https://ieeexplore.ieee.org/abstract/document/9458237\n",
- "IEEE Access\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Machine Unlearning for Random Forests\n",
- "http://proceedings.mlr.press/v139/brophy21a.html\n",
- "ICML\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Bayesian Variational Federated Learning and Unlearning in Decentralized Networks\n",
- "https://ieeexplore.ieee.org/abstract/document/9593225\n",
- "SPAWC\n",
- "2021\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Forgetting Outside the Box: Scrubbing Deep Networks of Information Accessible from Input-Output Observations\n",
- "https://link.springer.com/chapter/10.1007/978-3-030-58526-6_23\n",
- "ECCV\n",
- "2020\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Influence Functions in Deep Learning Are Fragile\n",
- "https://www.semanticscholar.org/paper/Influence-Functions-in-Deep-Learning-Are-Fragile-Basu-Pope/098076a2c90e42c81b843bf339446427c2ff02ed\n",
- "arXiv\n",
- "2020\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Deep Autoencoding Topic Model With Scalable Hybrid Bayesian Inference\n",
- "https://ieeexplore.ieee.org/document/9121755\n",
- "IEEE\n",
- "2020\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Eternal Sunshine of the Spotless Net: Selective Forgetting in Deep Networks\n",
- "https://arxiv.org/abs/1911.04933\n",
- "CVPR\n",
- "2020\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Uncertainty in Neural Networks: Approximately Bayesian Ensembling\n",
- "https://proceedings.mlr.press/v108/pearce20a.html\n",
- "AISTATS\n",
- "2020\n",
- "https://teapearce.github.io/portfolio/github_io_1_ens/\n",
- "Model-Intrinsic\n",
- "----\n",
- "Certified Data Removal from Machine Learning Models\n",
- "https://proceedings.mlr.press/v119/guo20c.html\n",
- "ICML\n",
- "2020\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "DeltaGrad: Rapid retraining of machine learning models\n",
- "https://proceedings.mlr.press/v119/wu20b.html\n",
- "ICML\n",
- "2020\n",
- "https://github.com/thuwuyinjun/DeltaGrad\n",
- "Model-Intrinsic\n",
- "----\n",
- "Making AI Forget You: Data Deletion in Machine Learning\n",
- "https://papers.nips.cc/paper/2019/hash/cb79f8fa58b91d3af6c9c991f63962d3-Abstract.html\n",
- "NeurIPS\n",
- "2019\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "“Amnesia” – Towards Machine Learning Models That Can Forget User Data Very Fast\n",
- "http://cidrdb.org/cidr2020/papers/p32-schelter-cidr20.pdf\n",
- "AIDB Workshop\n",
- "2019\n",
- "https://github.com/schelterlabs/projects-amnesia\n",
- "Model-Intrinsic\n",
- "----\n",
- "A Novel Online Incremental and Decremental Learning Algorithm Based on Variable Support Vector Machine\n",
- "https://link.springer.com/article/10.1007/s10586-018-1772-4\n",
- "Cluster Computing\n",
- "2019\n",
- "-\n",
- "Model-Intrinsic\n",
- "----\n",
- "Neural Text Degeneration With Unlikelihood Training\n",
- "https://arxiv.org/abs/1908.04319\n",
- "arXiv\n",
- "2019\n",
- "https://github.com/facebookresearch/unlikelihood_training\n",
- "Model-Intrinsic\n",
- "----\n",
- "Bayesian Neural Networks with Weight Sharing Using Dirichlet Processes\n",
- "https://ieeexplore.ieee.org/document/8566011\n",
- "IEEE\n",
- "2018\n",
- "https://github.com/wroth8/dp-bnn\n",
- "Model-Intrinsic\n",
- "----\n",
- "Hidden Poison: Machine Unlearning Enables Camouflaged Poisoning Attacks\n",
- "https://arxiv.org/abs/2212.10717\n",
- "NeurIPS-TSRML\n",
- "2022\n",
- "https://github.com/Jimmy-di/camouflage-poisoning\n",
- "Data-Driven\n",
- "----\n",
- "Forget Unlearning: Towards True Data Deletion in Machine Learning\n",
- "https://arxiv.org/pdf/2210.08911.pdf\n",
- "ICLR\n",
- "2022\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "ARCANE: An Efficient Architecture for Exact Machine Unlearning\n",
- "https://www.ijcai.org/proceedings/2022/0556.pdf\n",
- "IJCAI\n",
- "2022\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "PUMA: Performance Unchanged Model Augmentation for Training Data Removal\n",
- "https://ojs.aaai.org/index.php/AAAI/article/view/20846\n",
- "AAAI\n",
- "2022\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Certifiable Unlearning Pipelines for Logistic Regression: An Experimental Study\n",
- "https://www.mdpi.com/2504-4990/4/3/28\n",
- "MAKE\n",
- "2022\n",
- "https://version.helsinki.fi/mahadeva/unlearning-experiments\n",
- "Data-Driven\n",
- "----\n",
- "Zero-Shot Machine Unlearning\n",
- "https://arxiv.org/abs/2201.05629\n",
- "arXiv\n",
- "2022\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "GRAPHEDITOR: An Efficient Graph Representation Learning and Unlearning Approach\n",
- "https://congweilin.github.io/CongWeilin.io/files/GraphEditor.pdf\n",
- "-\n",
- "2022\n",
- "https://anonymous.4open.science/r/GraphEditor-NeurIPS22-856E/README.md\n",
- "Data-Driven\n",
- "----\n",
- "Fast Model Update for IoT Traffic Anomaly Detection with Machine Unlearning\n",
- "https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9927728\n",
- "IEEE IoT-J\n",
- "2022\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Learning to Refit for Convex Learning Problems\n",
- "https://arxiv.org/abs/2111.12545\n",
- "arXiv\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Fast Yet Effective Machine Unlearning\n",
- "https://arxiv.org/abs/2111.08947\n",
- "arXiv\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Learning with Selective Forgetting\n",
- "https://www.ijcai.org/proceedings/2021/0137.pdf\n",
- "IJCAI\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "SSSE: Efficiently Erasing Samples from Trained Machine Learning Models\n",
- "https://openreview.net/forum?id=GRMKEx3kEo\n",
- "NeurIPS-PRIML\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "How Does Data Augmentation Affect Privacy in Machine Learning?\n",
- "https://arxiv.org/abs/2007.10567\n",
- "AAAI\n",
- "2021\n",
- "https://github.com/dayu11/MI_with_DA\n",
- "Data-Driven\n",
- "----\n",
- "Coded Machine Unlearning\n",
- "https://ieeexplore.ieee.org/document/9458237\n",
- "IEEE\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Machine Unlearning\n",
- "https://ieeexplore.ieee.org/document/9519428\n",
- "IEEE\n",
- "2021\n",
- "https://github.com/cleverhans-lab/machine-unlearning\n",
- "Data-Driven\n",
- "----\n",
- "How Does Data Augmentation Affect Privacy in Machine Learning?\n",
- "https://ojs.aaai.org/index.php/AAAI/article/view/17284/\n",
- "AAAI\n",
- "2021\n",
- "https://github.com/dayu11/MI_with_DA\n",
- "Data-Driven\n",
- "----\n",
- "Amnesiac Machine Learning\n",
- "https://ojs.aaai.org/index.php/AAAI/article/view/17371\n",
- "AAAI\n",
- "2021\n",
- "https://github.com/lmgraves/AmnesiacML\n",
- "Data-Driven\n",
- "----\n",
- "Unlearnable Examples: Making Personal Data Unexploitable\n",
- "https://arxiv.org/abs/2101.04898\n",
- "ICLR\n",
- "2021\n",
- "https://github.com/HanxunH/Unlearnable-Examples\n",
- "Data-Driven\n",
- "----\n",
- "Descent-to-Delete: Gradient-Based Methods for Machine Unlearning\n",
- "https://proceedings.mlr.press/v132/neel21a.html\n",
- "ALT\n",
- "2021\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "Fawkes: Protecting Privacy against Unauthorized Deep Learning Models\n",
- "https://dl.acm.org/doi/abs/10.5555/3489212.3489302\n",
- "USENIX Sec. Sym.\n",
- "2020\n",
- "https://github.com/Shawn-Shan/fawkes\n",
- "Data-Driven\n",
- "----\n",
- "PrIU: A Provenance-Based Approach for Incrementally Updating Regression Models\n",
- "https://dl.acm.org/doi/abs/10.1145/3318464.3380571\n",
- "SIGMOD\n",
- "2020\n",
- "-\n",
- "Data-Driven\n",
- "----\n",
- "DeltaGrad: Rapid retraining of machine learning models\n",
- "https://proceedings.mlr.press/v119/wu20b.html\n",
- "ICML\n",
- "2020\n",
- "https://github.com/thuwuyinjun/DeltaGrad\n",
- "Data-Driven\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "for line in lines:\n",
- " if line==\"Model-Agnostic\\n\" or line==\"Model-Intrinsic\\n\" or line==\"Data-Driven\\n\":\n",
- " types=line[:-1]\n",
- " else:\n",
- " print('----')\n",
- " ### title\n",
- " title = re.search(r'\\[(.*?)\\]', line).group()[1:-1]\n",
- "\n",
- " ### link to paper\n",
- " link = re.search(r'\\((.*?)\\)', line).group()[1:-1]\n",
- "\n",
- " try:\n",
- " ### venue\n",
- " venue = re.search(r'\\|\\s\\_(.*?)\\_', line).group()[3:-1]\n",
- " except:\n",
- " venue = '-'\n",
- "\n",
- " ### year\n",
- " year = re.search(r'\\s\\d{4}\\s', line).group()[1:-1]\n",
- "\n",
- " ### code\n",
- " res = re.search(r'\\]\\]\\((.*?)\\)', line)\n",
- " if res is None:\n",
- " code = '-'\n",
- " else:\n",
- " code = res.group()[3:-1]\n",
- "\n",
- "# ### type\n",
- "# type_slash_idx = findOccurrences(line, '|')[-2:]\n",
- "# types = line[type_slash_idx[0]:type_slash_idx[1]][2:-1].lstrip()\n",
+ "import re\n",
"\n",
- " print(title)\n",
- " print(link)\n",
- " print(venue)\n",
- " print(year)\n",
- " print(code)\n",
- " print(types)\n",
+ "def markdown_to_html(input_file, output_file):\n",
+ " with open(input_file, 'r') as md, open(output_file, 'w') as html:\n",
+ " html.write('\\n') # Start of HTML table\n",
+ " \n",
+ " for line in md:\n",
+ " # Ignore empty lines and lines that do not contain markdown table syntax\n",
+ " if '|' not in line.strip():\n",
+ " continue\n",
+ " \n",
+ " # Split the line into columns based on '|' and strip whitespace\n",
+ " columns = [col.strip() for col in line.split('|') if col.strip()]\n",
+ " \n",
+ " # Start of the table row\n",
+ " html.write(' \\n')\n",
+ " \n",
+ " # Loop through columns and handle each type of data\n",
+ " for idx, col in enumerate(columns):\n",
+ " if idx == 0: # First column with link and title\n",
+ " match = re.search(r'\\[(.*?)\\]\\((.*?)\\)', col)\n",
+ " if match:\n",
+ " title = match.group(1)\n",
+ " link = match.group(2)\n",
+ " html.write(f' {title} | \\n')\n",
+ " else:\n",
+ " html.write(' - | \\n')\n",
+ " elif idx == 6: # Last column might have a code link\n",
+ " code_match = re.search(r'\\[(.*?)\\]\\((.*?)\\)', col)\n",
+ " if code_match:\n",
+ " code_link = code_match.group(2)\n",
+ " html.write(f' [Code] | \\n')\n",
+ " else:\n",
+ " html.write(' - | \\n')\n",
+ " else: # Other columns\n",
+ " col = col.replace('_', '').strip() # Remove markdown italic markers\n",
+ " if col == '-': # If column is explicitly empty, maintain the placeholder\n",
+ " html.write(' - | \\n')\n",
+ " elif not col: # If column is empty, output a placeholder with 'code' class\n",
+ " html.write(' - | \\n')\n",
+ " else:\n",
+ " html.write(f' {col} | \\n')\n",
+ " \n",
+ " # End of the table row\n",
+ " html.write('
\\n')\n",
+ " \n",
+ " html.write('
') # End of HTML table\n",
"\n",
- " ### formation\n",
- " f.write(\" \")\n",
- " f.write(\"\\n\")\n",
- " f.write(\" {} | \".format(link, title))\n",
- " f.write(\"\\n\")\n",
- " f.write(\" {} | \".format(venue))\n",
- " f.write(\"\\n\")\n",
- " f.write(\" {} | \".format(year))\n",
- " f.write(\"\\n\")\n",
- " if code == '-':\n",
- " f.write(\" - | \".format(code))\n",
- " else:\n",
- " f.write(\" [Code] | \".format(code))\n",
- " f.write(\"\\n\")\n",
- " f.write(\" {} | \".format(types))\n",
- " f.write(\"\\n\")\n",
- " f.write(\"
\")\n",
- " f.write(\"\\n\")\n",
- " \n",
- "f.close()"
+ "# Usage\n",
+ "markdown_to_html('input.txt', 'output.txt')\n"
]
},
{
@@ -1087,7 +106,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "Python 3",
"language": "python",
"name": "python3"
},