@article{Wilkinson2016,
abstract = {There is an urgent need to improve the infrastructure supporting the reuse of scholarly data. A diverse set of stakeholders—representing academia, industry, funding agencies, and scholarly publishers—have come together to design and jointly endorse a concise and measureable set of principles that we refer to as the FAIR Data Principles. The intent is that these may act as a guideline for those wishing to enhance the reusability of their data holdings. Distinct from peer initiatives that focus on the human scholar, the FAIR Principles put specific emphasis on enhancing the ability of machines to automatically find and use the data, in addition to supporting its reuse by individuals. This Comment is the first formal publication of the FAIR Principles, and includes the rationale behind them, and some exemplar implementations in the community.},
author = {Wilkinson, Mark D and Dumontier, Michel and Aalbersberg, IJsbrand Jan and Appleton, Gabrielle and Axton, Myles and Baak, Arie and Blomberg, Niklas and Boiten, Jan-Willem and {da Silva Santos}, Luiz Bonino and Bourne, Philip E and Bouwman, Jildau and Brookes, Anthony J and Clark, Tim and Crosas, Merc{\`{e}} and Dillo, Ingrid and Dumon, Olivier and Edmunds, Scott and Evelo, Chris T and Finkers, Richard and Gonzalez-Beltran, Alejandra and Gray, Alasdair J G and Groth, Paul and Goble, Carole and Grethe, Jeffrey S and Heringa, Jaap and {'t Hoen}, Peter A C and Hooft, Rob and Kuhn, Tobias and Kok, Ruben and Kok, Joost and Lusher, Scott J and Martone, Maryann E and Mons, Albert and Packer, Abel L and Persson, Bengt and Rocca-Serra, Philippe and Roos, Marco and van Schaik, Rene and Sansone, Susanna-Assunta and Schultes, Erik and Sengstag, Thierry and Slater, Ted and Strawn, George and Swertz, Morris A and Thompson, Mark and van der Lei, Johan and van Mulligen, Erik and Velterop, Jan and Waagmeester, Andra and Wittenburg, Peter and Wolstencroft, Katherine and Zhao, Jun and Mons, Barend},
doi = {10.1038/sdata.2016.18},
issn = {2052-4463},
journal = {Scientific Data},
number = {1},
pages = {160018},
title = {{The FAIR Guiding Principles for scientific data management and stewardship}},
url = {https://doi.org/10.1038/sdata.2016.18},
volume = {3},
year = {2016}
}
@article{mons2017,
abstract = {The FAIR Data Principles propose that all scholarly output should be Findable, Accessible, Interoperable, and Reusable. As a set of guiding principles, expressing only the kinds of behaviours that researchers should expect from contemporary data resources},
author = {Mons, Barend and Neylon, Cameron and Velterop, Jan and Dumontier, Michel and {da Silva Santos}, Luiz Olavo Bonino and Wilkinson, Mark D},
doi = {10.3233/ISU-170824},
issn = {0167-5265},
journal = {Information Services \& Use},
keywords = {imported},
number = {1},
pages = {49--56},
title = {{Cloudy, increasingly {FAIR}; revisiting the {FAIR} {Data} guiding principles for the {European} {Open} {Science} {Cloud}}},
url = {https://content.iospress.com/articles/information-services-and-use/isu824},
volume = {37},
year = {2017}
}
@article{Bejoy2010,
abstract = {The possibilities and limits of participation at the ‘bottom' (represented, for example, by PRA and PLA) have been well articulated in development literature. However, the emergence of the Knowledge Society has opened up spaces for what we could call participation at the ‘top' (free software, wiki, open access), the implications of which Development Studies is only beginning to grapple with. Building upon recent debates on the issue, we take the cases of the free software movement and participatory development, arguing that they share common ground in several ways. We aim to offer a few pointers on conceptualising development in the Knowledge Society.},
author = {Thomas, Bejoy K},
doi = {10.1080/09614520903566509},
journal = {Development in Practice},
number = {2},
pages = {270--276},
publisher = {Routledge},
title = {{Participation in the Knowledge Society: the Free and Open Source Software (FOSS) movement compared with participatory development}},
url = {https://doi.org/10.1080/09614520903566509},
volume = {20},
year = {2010}
}
@article{Stockhause2017,
abstract = {Data citations have become widely accepted. Technical infrastructures as well as principles and recommendations for data citation are in place but best practices or guidelines for their implementation are not yet available. On the other hand, the scientific climate community requests early citations on evolving data for credit, e.g. for CMIP6 (Coupled Model Intercomparison Project Phase 6). The data citation concept for CMIP6 is presented. The main challenges lie in limited resources, a strict project timeline and the dependency on changes of the data dissemination infrastructure ESGF (Earth System Grid Federation) to meet the data citation requirements. Therefore a pragmatic, flexible and extendible approach for the CMIP6 data citation service was developed, consisting of a citation for the full evolving data superset and a data cart approach for citing the concrete used data subset. This two citation approach can be implemented according to the RDA recommendations for evolving data. Because of resource constraints and missing project policies, the implementation of the second part of the citation concept is postponed to CMIP7.},
author = {Stockhause, Martina and Lautenschlager, Michael},
doi = {10.5334/dsj-2017-030},
issn = {16831470},
journal = {Data Science Journal},
keywords = {CMIP6,Climate data,Data publishing,Earth system modeling,Evolving data,IPCC-DDC},
pages = {30},
title = {{CMIP6 data citation of evolving data}},
volume = {16},
year = {2017}
}
@article{Weigel2013,
abstract = {Several scientific communities relying on e-science infrastructures are in need of persistent identifiers for data and contextual information. In this article, we present a framework for persistent identification that fundamentally supports context information. It is installed as a number of low-level requirements and abstract data type descriptions, flexible enough to envelope context information while remaining compatible with existing definitions and infrastructures. The abstract data type definitions we draw from the requirements and exemplary use cases can act as an evaluation tool for existing implementations or as a blueprint for future persistent identification infrastructures. A prototypic implementation based on the Handle System is briefly introduced. We also lay the groundwork for establishing a graph of persistent entities that can act as a base layer for more sophisticated information schemas to preserve context information.},
author = {Weigel, Tobias and Lautenschlager, Michael and Toussaint, Frank and Kindermann, Stephan},
doi = {10.2481/dsj.12-036},
issn = {16831470},
journal = {Data Science Journal},
keywords = {Data-intensive science,E-Science infrastructures,Long-term archival,Persistent identifiers,Scientific data management,Unique identifiers},
number = {March},
pages = {10--22},
title = {{A framework for extended persistent identification of scientific assets}},
volume = {12},
year = {2013}
}
@article{hempelmann2018,
title = {Web processing service for climate impact and extreme weather event analyses. Flyingpigeon (Version 1.0)},
journal = {Computers \& Geosciences},
volume = {110},
number = {Supplement C},
pages = {65--72},
year = {2018},
issn = {0098-3004},
doi = {10.1016/j.cageo.2017.10.004},
url = {http://www.sciencedirect.com/science/article/pii/S0098300416302801},
author = {Hempelmann, N. and Ehbrecht, C. and Alvarez-Castro, C. and Brockmann, P. and Falk, W. and Hoffmann, J. and Kindermann, S. and Koziol, B. and Nangini, C. and Radanovics, S. and Vautard, R. and Yiou, P.},
keywords = {Web processing service, Climate impact, Extreme weather events, Birdhouse, OGC}
}
@article{ehbrecht2018,
author = {Ehbrecht, C. and Landry, T. and Hempelmann, N. and Huard, D. and Kindermann, S.},
title = {{Projects Based on the Web Processing Service Framework Birdhouse}},
journal = {ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume = {XLII-4/W8},
year = {2018},
pages = {43--47},
url = {https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XLII-4-W8/43/2018/},
doi = {10.5194/isprs-archives-XLII-4-W8-43-2018}
}
@book{Jung2017,
title = {Helmholtz Portfolio Theme Large-Scale Data Management and Analysis (LSDMA)},
doi = {10.5445/KSP/1000071931},
editor = {Jung, Christopher and Meyer, J{\"{o}}rg and Streit, Achim},
isbn = {978-3-7315-0695-9},
keywords = {Big Data, data science, data life cycle, data management, data analysis},
language = {english},
note = {46.12.02; LK 01},
pagetotal = {259},
publisher = {{KIT Scientific Publishing, Karlsruhe}},
year = {2017}
}
@conference{HEMPELMANN2014,
author = {Hempelmann, N. and Ehbrecht, C.},
title = {Web Processing Services for Climate Data - with Examples for Impact Modelers},
booktitle = {EGI Community Forum 2014},
year = {2014},
abstract = {Impact modeling forced by climate data is often connected with big
data processing. But impact modelers are often not equipped with
appropriate hardware (computing and storage facilities) or appropriate
programming experience. Web Processing Service (WPS) is an open standard
defined by the Open Geospatial Consortium (OGC). It is an interface
to perform processes over the HTTP network protocol. This tutorial
is an introduction to an early stage of the ClimDaPs project. ClimDaPs
uses WPS for climate data processing. It is based on the PyWPS
implementation of WPS and additionally provides a simple web-based
user interface to access and combine climate data processes. It provides
access to the climate data archive of the Earth System Grid Federation
(ESGF) for CMIP5 and CORDEX data. Processes ranging from simple operations
on climate data to complex impact models are already available within
ClimDaPs. One can also visualize climate data and processed results.
Besides introducing the existing processing possibilities, we
will show how you can add your own climate data processes to ClimDaPs
and other WPS services.},
owner = {nils},
timestamp = {2016.06.07},
url = {https://indico.egi.eu/indico/event/1994/session/47/timetable.pdf}
}
@article{Jung2014,
author = {C. Jung and M. Gasthuber and A. Giesler and M. Hardt and J. Meyer
and F. Rigoll and K. Schwarz and R. Stotzka and A. Streit},
title = {Optimization of data life cycles},
journal = {Journal of Physics: Conference Series},
year = {2014},
volume = {513},
pages = {032047},
number = {3},
abstract = {Data play a central role in most fields of science. In recent years,
the amount of data from experiment, observation, and simulation has
increased rapidly and data complexity has grown. Also, communities
and shared storage have become geographically more distributed. Therefore,
methods and techniques applied to scientific data need to be revised
and partially be replaced, while keeping the community-specific needs
in focus. The German Helmholtz Association project "Large Scale Data
Management and Analysis" (LSDMA) aims to maximize the efficiency
of data life cycles in different research areas, ranging from high
energy physics to systems biology. In its five Data Life Cycle Labs
(DLCLs), data experts closely collaborate with the communities in
joint research and development to optimize the respective data life
cycle. In addition, the Data Services Integration Team (DSIT) provides
data analysis tools and services which are common to several DLCLs.
This paper describes the various activities within LSDMA and focuses
on the work performed in the DLCLs.},
owner = {nils},
timestamp = {2016.06.07},
url = {http://stacks.iop.org/1742-6596/513/i=3/a=032047}
}
@article{Bahamdain2015,
title = {Open Source Software (OSS) Quality Assurance: A Survey Paper},
journal = {Procedia Computer Science},
volume = {56},
pages = {459--464},
year = {2015},
note = {The 10th International Conference on Future Networks and Communications (FNC 2015) / The 12th International Conference on Mobile Systems and Pervasive Computing (MobiSPC 2015) Affiliated Workshops},
issn = {1877-0509},
doi = {10.1016/j.procs.2015.07.236},
url = {https://www.sciencedirect.com/science/article/pii/S1877050915017172},
author = {Salem S. Bahamdain},
keywords = {OSS, Open Source, Quality Assurance, Open Source Development Model, OSSD},
abstract = {Open source software (OSS) is a software product with the source code made public so that anyone can read, analyze, and change or improve the code. The use of this software is under a license, like Apache, GNU, MIT, Mozilla Public, and Eclipse Public License. Open source software development (OSSD) provides high quality assurance through user testing and peer reviews. The quality of these products depends on the size of the product community. This paper discusses the stakeholders of the OSS community, the quality assurance frameworks and models proposed in some studies, some statistics about OSS, the problems that affect the quality of OSSD, and the advantages and disadvantages of OSS compared to closed source software. This allows us to understand how we can achieve and improve the quality assurance and quality control of OSSD.}
}
@book{Brown2019,
author = {Brown, Valerie and Smith, David Ingle and Wiseman, Rob and Handmer, John},
publisher = {Routledge},
title = {{Risks and opportunities: managing environmental conflict and change}},
volume = {5},
year = {2019}
}
@book{Raymond2001,
address = {Sebastopol, CA, USA},
author = {Raymond, Eric S},
isbn = {0596001088},
publisher = {O'Reilly \& Associates, Inc.},
title = {{The Cathedral and the Bazaar: Musings on Linux and Open Source by an Accidental Revolutionary}},
year = {2001}
}
@article{Hempelmann2022,
author = {Hempelmann, N. and Ehbrecht, C. and Plesiat, E. and Hobona, G. and Simoes, J. and Huard, D. and Smith, T. J. and McKnight, U. S. and Pechlivanidis, I. G. and Alvarez-Castro, C.},
title = {{Deployment of AI-Enhanced Services in Climate Resilience Information Systems}},
journal = {The International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume = {XLVIII-4/W1-2022},
year = {2022},
pages = {187--194},
url = {https://isprs-archives.copernicus.org/articles/XLVIII-4-W1-2022/187/2022/},
doi = {10.5194/isprs-archives-XLVIII-4-W1-2022-187-2022}
}