{"payload":{"header_redesign_enabled":false,"results":[{"id":"170393886","archived":false,"color":"#DA5B0B","followers":124,"has_funding_file":false,"hl_name":"laura-rieger/deep-explanation-penalization","hl_trunc_description":"Code for using CDEP from the paper \"Interpretations are useful: penalizing explanations to align neural networks with prior knowledge\" ht…","language":"Jupyter Notebook","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":170393886,"name":"deep-explanation-penalization","owner_id":10225326,"owner_login":"laura-rieger","updated_at":"2021-03-22T02:10:12.827Z","has_issues":true}},"sponsorable":false,"topics":["python","data-science","machine-learning","ai","deep-learning","neural-network","jupyter-notebook","ml","pytorch","artificial-intelligence","convolutional-neural-network","fairness","interpretability","cdep","feature-importance","recurrent-neural-network","interpretable-deep-learning","explainable-ai","explainability","fairness-ml"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":72,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Alaura-rieger%252Fdeep-explanation-penalization%2B%2Blanguage%253A%2522Jupyter%2BNotebook%2522","metadata":null,"csrf_tokens":{"/laura-rieger/deep-explanation-penalization/star":{"post":"vGDmQdW2QDREOU4hy6PKJieoy6fgY4NAXEvLOaw6j4z5N27UP4W2do0vM0mwavUvKcS4rkJy9eFvhFCuI2xRvQ"},"/laura-rieger/deep-explanation-penalization/unstar":{"post":"kxBozaJYx50l4msAxNPQ36m7ncBzNxWvjc9E-n2BCyiGCXB3OOMxTuS062qVkBlwv8g0MDGykE4dZci4ONa1jA"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"x7EKPMXzgp443UTzadjKBTVUjWfg6hF2ZfzeZiBFv-_uaAk-X7PcV7K3mZE1e88UbIdoJkHgnM1oRQRCo0tmEw"}}},"title":"Repository search results"}