-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathvoorstel.bib
executable file
·274 lines (245 loc) · 16.7 KB
/
voorstel.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
% Encoding: UTF-8
@Online{Greenfield2018,
  author   = {Greenfield, Sam},
  editor   = {{Google}},
  title    = {Picture what the cloud can do: How the New York Times is using Google Cloud to find untold stories in millions of archived photos},
  date     = {2018-11-09},
  url      = {https://cloud.google.com/blog/products/ai-machine-learning/how-the-new-york-times-is-using-google-cloud-to-find-untold-stories-in-millions-of-archived-photos},
  urldate  = {2018-12-08},
  abstract = {Google Cloud has teamed up with The New York Times to help them digitize their vast photo collection. It’s making use of numerous tools within Google Cloud Platform that allow them to securely store their images, provide them with a better interface for finding photos, and find new insights even from the data locked on the backs of images.},
  comment  = {Stand van zaken - Achtergrond},
  keywords = {Google Cloud Vision},
}
@Online{MOMA2018?,
  author       = {{MOMA}},
  title        = {Identifying art through machine learning},
  subtitle     = {A project with Google Arts \& Culture Lab},
  organization = {Museum of Modern Art},
  year         = {2018},
  ids          = {MOMA2018},
  url          = {https://www.moma.org/calendar/exhibitions/history/identifying-art},
  urldate      = {2018-12-08},
  abstract     = {Given years of experience and some diligent research, identifying each work of art in an old exhibition photo doesn’t sound so hard, does it? Now imagine you have tens of thousands of photos, dating back to 1929. MoMA’s Digital Media team and Google Arts \& Culture Lab set out to face this daunting challenge—or at least get a head start—using machine learning and computer vision technology.},
  comment      = {stand van zaken - achtergrond},
  keywords     = {MOMA, Google Cloud Vision, Google Arts & Culture Lab},
}
@Report{Blessings2013,
  author      = {Blessings, Alexander and Wen, Kai},
  title       = {Using Machine Learning for Identification of Art Paintings},
  type        = {resreport},
  institution = {Stanford University},
  year        = {2013},
  pagetotal   = {5},
  abstract    = {Machine learning applications have been suggested for many tasks. We have investigated the suitability of applying machine learning to the problem of art identification, which we believe to be a new, but promising field. Our approach focuses on classifying works of seven different artists, by using a multi-class SVM with state-of-the-art features. Our results indicate that machine learning has good potential to classify art works. We conclude this paper by analyzing our results.},
  comment     = {nog lezen},
  file        = {:Blessings2013.pdf:PDF},
  keywords    = {paintings},
}
@Misc{Wevers2018,
  author     = {Wevers, Melvin and Smits, Thomas},
  title      = {Seeing History: The Visual Side of the Digital Turn},
  year       = {2018},
  eventtitle = {Digital Humanities Benelux Amsterdam 2018},
  eventdate  = {2018-12-09},
  abstract   = {How can computers help us to explore and analyze large collection of historical visual material},
  comment    = {stand van zaken - achtergrond},
  file       = {:Wevers2018.pdf:PDF},
  keywords   = {kranten, datasets, algoritmes},
}
@Misc{Wevers2018a,
  author       = {Wevers, Melvin and Smits, Thomas},
  title        = {Seeing history: analyzing large-scale historical visual datasets using deep neural networks},
  year         = {2018},
  organization = {DHLAB, KNAW},
  abstract     = {Scholars are increasingly applying computational methods to analyze the visual aspects of large scale digitized visual datasets Inspiring examples are the work of Seguin on visual pattern discovery in large databases of paintings and Moretti’s and Impett’s large scale analysis of body postures in Aby Warburg’s Atlas Mnemosyne. In our paper, we will present two datasets of historical images and accompanying metadata harvested from Dutch digitized newspapers and reflect on ways to improve existing neural networks for historical research. We will discuss how large historical visual datasets can be used for historical research using neural networks. We will do this by describing two case studies, and will end our paper by arguing for the need for a benchmarked dataset with historical visual material.},
  file         = {:Wevers2018a.pdf:PDF},
  keywords     = {visual dataset, algoritmes},
}
@Report{Mensink2014,
  author      = {Mensink, Thomas and van Gemert, Jan},
  title       = {The Rijksmuseum Challenge: Museum-Centered Visual Recognition},
  type        = {resreport},
  institution = {ISLA Lab - Informatics Institute University of Amsterdam},
  year        = {2014},
  pagetotal   = {4},
  abstract    = {This paper offers a challenge for visual classification and content-based retrieval of artistic content. The challenge is posed from a museum-centric point of view offering a wide range of object types including paintings, photographs, ceramics, furniture, etc. The freely available dataset consists of 112,039 photographic reproductions of the artworks exhibited in the Rijksmuseum in Amsterdam, the Netherlands. We offer four automatic visual recognition challenges consisting of predicting the artist, type, material and creation year. We include a set of baseline results, and make available state-of-the-art image features encoded with the Fishervector. Progress on this challenge improves the tools of a museum curator while improving content-based exploration by online visitors of the museum collection.},
  comment     = {stand van zaken - achtergrond},
  file        = {:Mensink2014.pdf:PDF},
  keywords    = {Rijksmuseum dataset, theoretische studie},
}
@Online{UniAntwerpen2017?,
  editor   = {{Universiteit Antwerpen}},
  title    = {INSIGHT},
  year     = {2017},
  ids      = {UniAntwerpen2017},
  url      = {http://uahost.uantwerpen.be/insight/index.php/about/},
  urldate  = {2018-12-08},
  abstract = {INSIGHT is a research project that targets the digital assets of two museum clusters in Brussels: Royal Museums of Fine Arts of Belgium and Royal Museums of Art and History. This project aims to deploy the recent advances in Artificial Intelligence (language technology and computer vision in particular) to support the enrichment of these collections with descriptive metadata. An important focus of this project is the issue of transferring knowledge from open collections, such as The Rijksmuseum Dataset, to other players in the field. To this end, we investigate issues relating to multimodality or the way in which we can simultaneously model different information streams about digital heritage objects (e.g. in different languages, or across different media). Apart from multimodality, multilinguality will be another crucial aspect of our research, which is of course important in the context of federal heritage collections in Belgium. The end goal of this project is to develop and release a series of practical Machine Learning tools for managing digital collections. A major outcome of this project will be an export of the digital collections involved as a “Europeana-ready” linked open data set, which will contribute to the broader accessibility of these collections.},
  comment  = {stand van zaken},
}
@Report{Sabatteli2018,
  author      = {Sabatelli, Matthia and Kestemont, Mike and Daelemans, Walter and Geurts, Pierre},
  title       = {Deep Transfer Learning for Art Classification Problems},
  type        = {resreport},
  institution = {Universiteit Antwerpen and Université de Liège},
  year        = {2018},
  pagetotal   = {16},
  ids         = {Sabatelli2018},
  abstract    = {In this paper we investigate whether Deep Convolutional Neural Networks (DCNNs), which have obtained state of the art results on the ImageNet challenge, are able to perform equally well on three different art classification problems. In particular, we assess whether it is beneficial to fine tune the networks instead of just using them as off the shelf feature extractors for a separately trained softmax classifier. Our experiments show how the first approach yields significantly better results and allows the DCNNs to develop new selective attention mechanisms over the images, which provide powerful insights about which pixel regions allow the networks successfully tackle the proposed classification challenges. Furthermore, we also show how DCNNs, which have been fine tuned on a large artistic collection, outperform the same architectures which are pre-trained on the ImageNet dataset only, when it comes to the classification of heritage objects from a different dataset.},
  file        = {:Sabatelli2018.pdf:PDF},
  keywords    = {theoretische studie, algoritmes, visual datasets, Deep Convolutional Neural Networks, Art Classification, Transfer Learning, Visual Attention},
}
@Manual{AKE2014,
  editor   = {{Agentschap Kunsten en Erfgoed}},
  title    = {Handleiding bij het Cultureel-Erfgoeddecreet},
  subtitle = {Het Kwaliteitslabel},
  year     = {2014},
  abstract = {Met het toekennen van een kwaliteitslabel aan collectiebeherende cultureel-erfgoedorganisaties wil de Vlaamse Gemeenschap de kwaliteitsvolle werking van deze organisaties erkennen. Het doel van het kwaliteitslabel is het zichtbaar maken, het bewaken en het verbeteren van de kwaliteit van de werking van collectiebeherende cultureel-erfgoedorganisaties in Vlaanderen. De Vlaamse Gemeenschap kent kwaliteitslabels toe aan musea, culturele archiefinstellingen en erfgoedbibliotheken. Om een kwaliteitslabel te krijgen moet de collectiebeherende cultureel-erfgoedorganisatie aan minimale kwaliteitsnormen voldoen. Deze kwaliteitsnormen zitten vervat in de voorwaarden voor de toekenning van het kwaliteitslabel.},
}
@Misc{Gatz2014,
  author       = {Gatz, Sven},
  editor       = {{Vlaams Minister van Cultuur, Jeugd en Media}},
  title        = {Beleidsnota Cultuur 2014--2019},
  date         = {2014-10},
  organization = {De Vlaamse Regering},
  file         = {:Gatz2014.pdf:PDF},
  keywords     = {cultuurbeleid},
}
@Misc{Gatz2016,
  author       = {Gatz, Sven},
  title        = {Conceptnota aan de Vlaamse Regering. Naar een duurzame cultureel-erfgoedwerking in Vlaanderen},
  subtitle     = {Een langetermijnvisie voor cultureel erfgoed en cultureel-erfgoedwerking in Vlaanderen},
  date         = {2016-03},
  organization = {Vlaamse Regering},
  keywords     = {cultuurbeleid},
}
@Misc{JeugdMediaC2018,
  author       = {{Departement Cultuur, Jeugd en Media}},
  title        = {Visienota: Een Vlaams cultuurbeleid in het digitale tijdperk},
  date         = {2018-06},
  organization = {Vlaamse Regering},
  file         = {:JeugdMediaC2018.pdf:PDF},
  keywords     = {cultuurbeleid},
}
@Online{JeugdMediaC2018a,
  author   = {{Departement Cultuur, Jeugd en Media}},
  title    = {Inhaalbeweging voor digitale collectieregistratie},
  date     = {2018-07-12},
  url      = {http://www.kunstenenerfgoed.be/nl/nieuws/inhaalbeweging-voor-digitale-collectieregistratie},
  urldate  = {2018-12-08},
  abstract = {De Vlaamse minister van Cultuur heeft zich tot doel gesteld om de achterstanden op het vlak van collectieregistratie binnen de cultureel-erfgoedsector weg te werken. Met een nieuw subsidiereglement voor digitale collectieregistratie wil de minister de eerste stappen zetten om hier concreet werk van te maken.},
}
@Manual{JeugdMediaC2018b,
  editor   = {{Departement Cultuur, Jeugd en Media}},
  title    = {Subsidies voor inhaalbeweging digitale collectieregistratie bij collectiebeherende cultureel-erfgoedorganisaties},
  subtitle = {Reglement},
  date     = {2018-07-09},
  file     = {:JeugdMediaC2018b.pdf:PDF},
  keywords = {cultuurbeleid},
}
@Online{Nasjonalmuseet2017?,
  author   = {{Nasjonalmuseet}},
  title    = {Project: ``Principal Components''},
  year     = {2017},
  ids      = {Nasjonalmuseet2017},
  url      = {http://www.nasjonalmuseet.no/en/collections_and_research/collection_management/digital_collection_management/Project%3A+%C2%ABPrincipal+Components%C2%BB.b7C_wJjU4L.ips},
  urldate  = {2018-12-08},
  abstract = {In this project, we have tried out artificial intelligence in principal component analysis on our images, by using neural networks and algorithms. Two of the results in the project are described in more detail below. The algorithms that show compositional similarities for us in a new user interface on our website. The algorithm that classifies the National Museum's art by subject keywords.},
  keywords = {Iconclass, Caffe, gelijkenissen, gezichten, kleuren},
}
@Online{Westvang2017?,
  author   = {Westvang, Even},
  title    = {Principal Components},
  subtitle = {Machine learning in search of the uncanny},
  year     = {2017},
  ids      = {Westvang2017},
  url      = {http://bengler.no/principalcomponents},
  urldate  = {2018-12-08},
  abstract = {Following on the Repcol project, Principal Components looks at applying a diverse set of machine learning technologies to museum collections. It looks at how machine learning can give easier access to collections through better metadata and explorative interfaces. Concurrently it also explores the strange and uncanny artifacts and errors that arise from machine learning processes and errors.},
  keywords = {Nasjonalmuseet, Caffe, classificeren},
}
@Online{Smith2017,
  author   = {Smith, Ryan P.},
  title    = {How Artificial Intelligence Could Revolutionize Archival Museum Research},
  date     = {2017-11-03},
  url      = {https://www.smithsonianmag.com/smithsonian-institution/how-artificial-intelligence-could-revolutionize-museum-research-180967065/},
  urldate  = {2018-12-08},
  abstract = {When you think of artificial intelligence, the field of botany probably isn't uppermost in your mind. When you picture settings for cutting-edge computational research, century-old museums may not top the list. And yet, a just-published article in the Biodiversity Data Journal shows that some of the most exciting and portentous innovation in machine learning is taking place at none other than the National Herbarium of the National Museum of Natural History in Washington, D.C.},
  keywords = {planten, deep learning},
}
@Online{Hindle2017,
  author   = {Hindle, Adrian},
  title    = {Automated image analysis with IIIF},
  subtitle = {Using Artificial Intelligence for bulk image analysis},
  date     = {2017-06-20},
  url      = {https://blog.cogapp.com/automated-image-analysis-with-iiif-6594ff5b2b32},
  urldate  = {2018-12-08},
  abstract = {In this article we’ll show how to use the IIIF Presentation and Image APIs to gather inputs including:
              Finding interesting images
              Image recognition and automatic tagging
              Colour analysis
              Finding similar images
              Term extraction
              The best image analysis API
              And we will show the interesting, valuable, and occasionally hilarious outcomes from these techniques for bulk image analysis.},
  keywords = {IIIF, VRS},
}
@Misc{Roddis2018,
  author     = {Roddis, Tristan},
  title      = {When automated analysis goes wrong},
  date       = {2018-05-16},
  eventtitle = {EuropeanaTech Conference 2018},
  url        = {https://www.slideshare.net/Europeana/when-automated-analysis-goes-wrong-by-tristan-roddis-europeanatech-conference-2018},
  urldate    = {2018-12-08},
  keywords   = {IIIF, VRS, fouten},
}
@Online{Fraser2018,
  author   = {Fraser, Matt},
  title    = {Using Google Cloud AutoML to classify poisonous Australian spiders},
  date     = {2018-03-14},
  url      = {https://shinesolutions.com/2018/03/14/using-google-cloud-automl-vision-to-classify-poisonous-australian-spiders/},
  urldate  = {2018-12-09},
  keywords = {Google Cloud Vision},
}
@Online{Lardinois2018,
  author  = {Lardinois, Frederic},
  editor  = {TechCrunch},
  title   = {Google’s AutoML lets you train custom machine learning models without having to code},
  year    = {2018},
  url     = {https://techcrunch.com/2018/01/17/googles-automl-lets-you-train-custom-machine-learning-models-without-having-to-code},
  urldate = {2018-12-09},
}
@Online{Oberoi2016,
  author  = {Oberoi, Gaurav},
  title   = {Comparing the Top Five Computer Vision APIs},
  date    = {2016-07-11},
  url     = {https://goberoi.com/comparing-the-top-five-computer-vision-apis-98e3e3d7c647},
  urldate = {2018-12-09},
}
@Online{Wiericx2011,
  author = {Wiercx, Bram},
  title  = {Crowdsourcing in het Huis van Alijn},
  date   = {2011-08-09},
  url    = {https://faro.be/blogs/bram-wiercx/crowdsourcing-in-het-huis-van-alijn},
}
@Collection{Bordoni2016,
  editor = {Bordoni, Luciana and Mele, Francesco and Sorgente, Antonio},
  title  = {Artificial Intelligence for Cultural Heritage},
  year   = {2016},
}
@Comment{jabref-meta: databaseType:biblatex;}