diff --git a/.github/workflows/on_candidate_push.yml b/.github/workflows/on_candidate_push.yml deleted file mode 100644 index 3fb5bdb..0000000 --- a/.github/workflows/on_candidate_push.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Create a release when pushing to a candidate branch. - -on: - push: - branches: - - '*_candidate*' - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - uses: rlespinasse/github-slug-action@v2.x - - - name: Create version number - run: echo ::set-env name=VERSION::candidate-$(echo ${{ env.GITHUB_SHA_SHORT }}) - - - name: Build the Docker image that creates the RFC-PDFs - run: docker build $GITHUB_WORKSPACE/build -t time-machine-project/publish_pdfs - - - name: Create the output directories - run: mkdir -p candidates - - - name: Create the candidate pdfs - run: docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files/candidates:/opt/input -v $GITHUB_WORKSPACE/candidates:/opt/output time-machine-project/publish_pdfs - - - name: Zip the candidate pdfs - run: zip -r -j candidate_pdf_files.zip candidates - - - name: Create a new release - id: create_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.VERSION }} - release_name: Candidate Release ${{ env.VERSION }} - draft: false - prerelease: true - - - name: Upload released pdfs as release asset - id: upload-release-asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./candidate_pdf_files.zip - asset_name: candidate_pdf_files.zip - asset_content_type: application/zip diff --git a/.github/workflows/on_push.yml b/.github/workflows/on_push.yml index 030aad3..20e6747 100644 --- a/.github/workflows/on_push.yml +++ b/.github/workflows/on_push.yml @@ -7,23 +7,30 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - - uses: rlespinasse/github-slug-action@v2.x - - - name: Create version number - run: echo ::set-env name=VERSION::draft-${{ env.GITHUB_SHA_SHORT }} - - - name: Build the Docker image that creates the RFC-PDFs - run: docker build $GITHUB_WORKSPACE/build -t time-machine-project/publish_pdfs - - - name: Create the output directories - run: mkdir -p drafts - - - name: Create the draft PDFs - run: docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files/drafts:/opt/input -v $GITHUB_WORKSPACE/drafts:/opt/output time-machine-project/publish_pdfs - - - uses: actions/upload-artifact@v2 - with: - name: rfc_draft_pdf_files - path: drafts + - uses: actions/checkout@v2 + + - uses: rlespinasse/github-slug-action@v3.x + + - name: Create version number + run: echo "VERSION=commit-${{ env.GITHUB_SHA_SHORT }}" >> $GITHUB_ENV + + - name: Build the Docker image that creates the RFC-PDFs + run: docker build $GITHUB_WORKSPACE/build -f $GITHUB_WORKSPACE/build/Dockerfile.single -t time-machine-project/publish_pdfs + + - name: Create the drafts output directories + run: mkdir -p out/drafts + + - name: Create the releases output directories + run: mkdir -p out/releases + + - name: Create the draft PDFs + run: + docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files/drafts:/opt/input -v $GITHUB_WORKSPACE/out/drafts:/opt/output time-machine-project/publish_pdfs + + - name: Create the release PDFs + run: docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files/releases:/opt/input -v $GITHUB_WORKSPACE/out/releases:/opt/output 
time-machine-project/publish_pdfs + + - uses: actions/upload-artifact@v2 + with: + name: rfc_pdf_files + path: out diff --git a/.github/workflows/on_release.yml b/.github/workflows/on_release.yml new file mode 100644 index 0000000..50d4ad8 --- /dev/null +++ b/.github/workflows/on_release.yml @@ -0,0 +1,31 @@ +name: Compile and attach artifact to release on release publication + +on: + release: + types: [released] + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Create version number + run: echo "VERSION=${{ github.event.release.tag_name }}" >> $GITHUB_ENV + + - name: Build the Docker image that creates the PDF + run: docker build $GITHUB_WORKSPACE/build -f $GITHUB_WORKSPACE/build/Dockerfile.book -t time-machine-project/publish_book + + - name: Create the output directory + run: mkdir -p out + + - name: Create the PDF + run: docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files:/opt/input -v $GITHUB_WORKSPACE/out:/opt/output time-machine-project/publish_book + + - name: Upload the artifacts + uses: alexellis/upload-assets@0.2.2 + with: + asset_paths: '["./out/*.pdf"]' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/on_release_push.yml b/.github/workflows/on_release_push.yml deleted file mode 100644 index 26e37d3..0000000 --- a/.github/workflows/on_release_push.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Create a release when pushing to a release branch. - -on: - push: - branches: - - '*_release*' - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - uses: rlespinasse/github-slug-action@v2.x - - name: Create version number - run: echo ::set-env name=release-VERSION::$(echo ${{ env.GITHUB_SHA_SHORT }}) - - - name: Build the Docker image that creates the RFC-PDFs - run: docker build $GITHUB_WORKSPACE/build -t time-machine-project/publish_pdfs - - - name: Create the output directories - run: mkdir -p releases - - - name: Create the release pdfs - run: docker run -e VERSION=${{ env.VERSION }} -v $GITHUB_WORKSPACE/files/releases:/opt/input -v $GITHUB_WORKSPACE/releases:/opt/output time-machine-project/publish_pdfs - - - name: Zip the release pdfs - run: zip -r -j rfc_release_pdf_files.zip releases - - - name: Create a new release - id: create_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.VERSION }} - release_name: Release ${{ env.VERSION }} - draft: false - prerelease: false - - - name: Upload released pdfs as release asset - id: upload-release-asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./rfc_release_pdf_files.zip - asset_name: rfc_release_pdf_files.zip - asset_content_type: application/zip diff --git a/README.md b/README.md index 244358c..8112c2a 100644 --- a/README.md +++ b/README.md @@ -2,31 +2,22 @@ # Requests for Comments -This repository is the main location for work on and release of finalised Time Machine _Request for Comments_. +This repository is the main location for work on and release of finalised Time +Machine _Request for Comments_. 
-## Drafts in preparation +## RFCs in preparation -| ID | Title | Draft pull request | -| -------- | ---------------------------------- | ------------------------------------------------------------------------------------------------ | -| RFC-0004 | RFC on the RFC Editorial Committee | [New version of RFC-0004](https://github.com/time-machine-project/requests-for-comments/pull/31) | -| RFC-0005 | RFC on LTM | [Current draft of RFC5](https://github.com/time-machine-project/requests-for-comments/pull/32) | +| ID | Title | Draft file | Draft pull request | +| -------- | ------------------------------------------- | ------------------------------------------------ | ------------------ | +| RFC-0033 | RFC on Map and Cadaster Processing pipeline | [RFC-0033.md](files/drafts/RFC-0000/RFC-0033.md) | | -## Current open drafts - -| ID | Contribution deadline | Main file | -| -------- | --------------------- | ------------------------------------------------ | -| RFC-0000 | 2020-11-08 | [RFC-0000.md](files/drafts/RFC-0000/RFC-0000.md) | -| RFC-0001 | 2020-11-08 | [RFC-0001.md](files/drafts/RFC-0001/RFC-0001.md) | -| RFC-0002 | 2020-11-08 | [RFC-0002.md](files/drafts/RFC-0002/RFC-0002.md) | -| RFC-0003 | 2020-11-08 | [RFC-0003.md](files/drafts/RFC-0003/RFC-0003.md) | - -## Current open candidates - -| ID | Review deadline | Release | Peer Review URL | -| --- | --------------- | ------- | --------------- | - -## Published releases - -| ID | File | Release | Release date | -| --- | ---- | ------- | ------------ | +## Published RFCs +| ID | File | +| -------- | -------------------------------------------------- | +| RFC-0000 | [RFC-0000.md](files/releases/RFC-0000/RFC-0000.md) | +| RFC-0001 | [RFC-0001.md](files/releases/RFC-0001/RFC-0001.md) | +| RFC-0002 | [RFC-0002.md](files/releases/RFC-0002/RFC-0002.md) | +| RFC-0003 | [RFC-0003.md](files/releases/RFC-0003/RFC-0003.md) | +| RFC-0004 | [RFC-0004.md](files/releases/RFC-0004/RFC-0004.md) | +| RFC-0005 | [RFC-0005.md](files/releases/RFC-0005/RFC-0005.md) | diff --git a/build/Dockerfile b/build/Dockerfile deleted file mode 100644 index 45ef335..0000000 --- a/build/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM pandoc/latex:2.9.2.1 - -ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD true -ENV PATH="/data/node_modules/.bin:/usr/lib/chromium:${PATH}" -ENV MERMAID_BIN="/data/node_modules/.bin/mmdc" -ENV PUPPETEER_CFG="/data/puppeteer-config.json" -ENV SCALE=5 - -RUN apk update && apk upgrade && apk add --no-cache \ - bash \ - dos2unix \ - git \ - python \ - py-pip \ - chromium \ - nss \ - freetype \ - freetype-dev \ - harfbuzz \ - ca-certificates \ - ttf-freefont \ - nodejs \ - npm - -RUN echo "{\"args\": [\"--no-sandbox\", \"--disable-setuid-sandbox\"], \"executablePath\": \"/usr/lib/chromium/chrome\"}" > puppeteer-config.json - -RUN npm install mermaid @mermaid-js/mermaid-cli - -RUN pip install git+https://github.com/time-machine-project/pandoc-mermaid-filter.git - -COPY ./convert_to_pdf.sh /opt/script/ - -RUN dos2unix /opt/script/convert_to_pdf.sh - -RUN chmod +x /opt/script/convert_to_pdf.sh - -ENTRYPOINT ["/opt/script/convert_to_pdf.sh"] diff --git a/build/Dockerfile.book b/build/Dockerfile.book new file mode 100644 index 0000000..5c515bd --- /dev/null +++ b/build/Dockerfile.book @@ -0,0 +1,22 @@ +FROM pandoc/latex:2.16 + +RUN apk update && apk upgrade && apk add --no-cache \ + bash \ + dos2unix \ + git \ + python3 \ + py3-pip \ + nss \ + freetype \ + freetype-dev \ + harfbuzz \ + ca-certificates \ + ttf-freefont + +COPY ./convert_book.sh /opt/script/ + +RUN 
dos2unix /opt/script/convert_book.sh + +RUN chmod +x /opt/script/convert_book.sh + +ENTRYPOINT ["/opt/script/convert_book.sh"] diff --git a/build/Dockerfile.single b/build/Dockerfile.single new file mode 100644 index 0000000..af08728 --- /dev/null +++ b/build/Dockerfile.single @@ -0,0 +1,22 @@ +FROM pandoc/latex:2.16 + +RUN apk update && apk upgrade && apk add --no-cache \ + bash \ + dos2unix \ + git \ + python3 \ + py3-pip \ + nss \ + freetype \ + freetype-dev \ + harfbuzz \ + ca-certificates \ + ttf-freefont + +COPY ./convert_single.sh /opt/script/ + +RUN dos2unix /opt/script/convert_single.sh + +RUN chmod +x /opt/script/convert_single.sh + +ENTRYPOINT ["/opt/script/convert_single.sh"] diff --git a/build/convert_book.sh b/build/convert_book.sh new file mode 100644 index 0000000..7e98698 --- /dev/null +++ b/build/convert_book.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Initialize variables +inpath=$1 +outpath=$2 +version=$3 +if [ -z "$inpath" ]; then + inpath=/opt/input +fi +if [ -z "$outpath" ]; then + outpath=/opt/output +fi +if [ -z "$version" ]; then + version=${VERSION:=custom-build} +fi +pdfpath="$outpath"/RFC-Book_"$version".pdf +tmppath="$outpath"/tmp +mkdir -p "$tmppath" + +# Copy all markdown files into a single folder so relative +# links to files still work in the final markdown +cd "$inpath" || exit +shopt -s globstar +for i in **/*.md; do + cp -r "$(dirname "$i")"/* "$tmppath" +done + +cd "$tmppath" || exit + +# Create basic markdown content with yaml header +read -r -d '' text < - -[^IETF_RFC_791]: diff --git a/files/drafts/RFC-0002/RFC-0002.md b/files/drafts/RFC-0002/RFC-0002.md deleted file mode 100644 index 0b0cacb..0000000 --- a/files/drafts/RFC-0002/RFC-0002.md +++ /dev/null @@ -1,1342 +0,0 @@ ---- -# Don't change this header section -title: 'RFC on RFC Tree' -subtitle: 'Time Machine RFC-0002' -author: -- François Ballaud -- Frédéric Kaplan -- Isabella di Lenardo -- Kevin Baumer -- Daniel Jeller - -# Don't change the following lines -header-includes: -- \usepackage{fancyhdr} -- \pagestyle{fancy} -- \fancyhead[R]{} -- \fancyfoot[L]{-release-version-} -colorlinks: true -output: pdf_document ---- - - - -# Motivation - -The process for writing RFCs requires long-term planning instruments which will enable writers to not only access constantly updated versions of current RFCs but also to have a view on future planned RFCs. The **RFC Tree** as described in this RFC will provide an up-to-date description of all planned RFCs, including a short textual motivation for each and listing their dependencies with other RFCs. - -# Definition - -**RFC Tree** is the metaphorical description and the hierarchical representation of the **Time Machine Request for Comments** development plan that will be used as a baseline scenario to help monitor the progress and achievements towards completion of the **Time Machine Horizon** over the coming 10 years. - -# Behaviour - -While the **RFC Tree** itself results from the progressive completion of RFCs over time, it also acts as a blueprint for defining the incremental steps used to build the Time Machine Infrastructure. - -**RFC Tree** behaves as a body of law for Time Machine, containing all RFC documentation and documenting the production process, as well as a progress indicator showing the progress of upgrades to Time Machine components in real time. - -**RFC Tree** is a macro-architecture plan that shapes dependencies between RFCs. 
It could be divided into micro thematic or productive arcs, as each RFC belongs to a specific sequence and is classified in one of the following categories: Framework, Infrastructure, Data, Local Time Machines. - -Singular RFCs are the basic units of the **RFC Tree**. The 70+ initial RFCs are listed at the end of this document. This initial set is an indicative path to completion of the **Time Machine Horizon** and may be expanded/modified/edited as needed. The tree-like structure and subsequent dependency chains which connect singular RFC guarantee that the **Time Machine Horizon** will be realised, as each RFC edit/removal/addition will cause dynamic adjustments and reorganisation. - -This RFC provides the most up-to-date version of the **RFC Tree**. - -\newpage - -# RFC Tree - -\newpage - -## RFC-0000 - RFC on RFCs {#rfc-0000} - -Reaching consensus on the technology options to pursue in a programme as large as Time Machine is a complex issue. To ensure open development and evaluation of work, a process inspired by the Request for Comments (RFC) that was used for the development of the Internet protocol is being adapted to the needs of Time Machine. Time Machine Requests for Comments are freely accessible publications, identified with a unique ID, that constitute the main process for establishing rules, recommendations and core architectural choices for Time Machine components. - -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -\newpage - -## RFC-0001 - RFC on TM Glossary {#rfc-0001} - -The Time Machine Glossary's function is to provide clear definitions of the terms used in the RFCs and throughout related Time Machine documentation. It should be updated regularly as new RFCs are introduced and published. - -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0000](#rfc-0000) - -\newpage - -## RFC-0002 - RFC on RFC Tree {#rfc-0002} - -The process for writing RFCs requires long-term planning instruments which will enable writers to not only access constantly updated versions of current RFCs but also to have a view on future planned RFCs. The RFC Tree as described in this RFC will provide an up-to-date description of all planned RFCs, including a short textual motivation for each and listing their dependencies with other RFCs. - -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0000](#rfc-0000) - -\newpage - -## RFC-0003 - RFC on Publication Platform {#rfc-0003} - -This Request for Comments (RFC) describes the inner workings and technical details of the RFC platform itself. It aims to provide the technical framework for authorship, review, community contribution and publication of all future Time Machine RFCs. - -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0000](#rfc-0000) -* [RFC-0001](#rfc-0001) -* [RFC-0004](#rfc-0004) - -\newpage - -## RFC-0004 - RFC on RFC Editorial Committee {#rfc-0004} - -This RFC outlines the basic policies and procedures related to the RFC Editorial Committee, many of which have been inspired by the IEEE Signal Processing Society. The main items that this document defines are: the organisation of the RFC Editorial Committee, how members are appointed, procedure for applications, length of term and main duties. 
- -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0000](#rfc-0000) -* [RFC-0001](#rfc-0001) -* [RFC-0003](#rfc-0003) - -\newpage - -## RFC-0005 - RFC on Local Time Machines {#rfc-0005} - -In order to build a planetary-scale Time Machine, it is necessary to define an organic incremental strategy. To succeed, the Time Machine must be able to progressively anchor itself in local territories, bringing added value to local activities directly, spurring the creation of new projects to mine information of the past contained in surviving objects and documents. Local Time Machines (LTM) can be defined as zones of higher density of activities towards reconstruction of the past. This RFC defines the dynamics that will: enable bootstrapping of Local Time Machines, facilitate the onboarding of new projects, valorise the data extracted, facilitate the involvement of the local population, develop use cases for exploitation avenues, and eventually enable the development of sustainable structures where Big Data of the Past are fruitfully exploited thus leading to a constant increase of such activities. - -### Status - -* Draft - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0000](#rfc-0000) -* [RFC-0001](#rfc-0001) -* [RFC-0006](#rfc-0006) -* [RFC-0007](#rfc-0007) - -\newpage - -## RFC-0006 - RFC on Technical Charter {#rfc-0006} - -The goal of the Technical Charter is to guarantee a first level of standardisation for data and processes, in order to remain light and usable by most, and the charter also encourages the use of universal and open interfaces and references that do not require central coordination - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -\newpage - -## RFC-0007 - RFC on Vision Mission and Values Charter {#rfc-0007} - -In order to protect Time Machine's overarching purpose, fundamental values and ethical principles, a common charter will be created. Its duty will be to protect the core of TM and sustain its future. Becoming a TM network member implies ratification of the Vision, Mission and Values Charter - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -\newpage - -## RFC-0008 - RFC on Intellectual Property Rights and Licenses {#rfc-0008} - -Defining licenses to preserve intellectual property rights (regulating data acquisition, sharing and publishing) and to sustain the interoperability and accessibility of the TM. The proposed solutions could be based on the Creative Commons copyright licenses and should be further developed with the help of cultural-heritage networks (e.g. Europeana) who are experienced with these issues and are already proposing solutions. Means of monitoring the openness process of data should also be taken into consideration. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0006](#rfc-0006) -* [RFC-0007](#rfc-0007) - -\newpage - -## RFC-0009 - RFC on Training {#rfc-0009} - -Complying with TM Rules and Recommendations, legal settings, using TM components and understanding TM infrastructures will require specific training. A proper set of documentation, tutorials, videos and online courses will be offered to partners. This RFC will set the general rules for development of training materials, and these principles will subsequently be used by other more specific training RFCs. 
- -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0008](#rfc-0008) - -\newpage - -## RFC-0010 - RFC on LTM Value Scale {#rfc-0010} - -The proposed LTM Value Scale (Key concepts and global overview – Local Time Machines), based on density criteria, will be outlined and its relevance and organisation further developed. Value scales concerning Projects will be created based on previously defined bricks and other criteria required in order to foster the development of Projects (e.g. collaboration, cooperation metrics). As some of the measures will relate to qualitative processes, a dedicated RFC on collaboration indicators will focus on creating a suitable system of metrics. The labelling system may require a third-party certification to assess its efficiency and accuracy. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0005](#rfc-0005) - -\newpage - -## RFC-0011 - RFC on LTM Training {#rfc-0011} - -Specific RFC rooted in the general principles established by **RFC on Training** but adapted to the framework developed in the **RFC on LTM**. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0005](#rfc-0005) -* [RFC-0009](#rfc-0009) - -\newpage - -## RFC-0012 - RFC on Definition of Typologies of Digitisation Interventions {#rfc-0012} - -A typology of digitisation interventions will be established grouping: - - 1. Collections that can be moved and processed in digital hubs (large, non-fragile collections), collections or objects that need local intervention (e.g. very fragile document, statues, buildings) - 2. Processes that can be performed by volunteers using mobile technology (e.g. scanning campaign across cities, on-the-fly digitisation in reading rooms), processes that can be performed using robots and drones, etc. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -\newpage - -## RFC-0013 - RFC on Standardisation and Homologation {#rfc-0013} - -Definition of the terms and contracts enabling digitisation partners to become part of the Time Machine network. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0012](#rfc-0012) - -\newpage - -## RFC-0014 - RFC on Digitisation Priorities and Data Selection {#rfc-0014} - -A data selection model based on the identification of performance criteria will help partners to focus on priority aspects and to take decisions accordingly. For reference, the National Information Standards Organisation (NISO) has already proposed a framework for guidance on Building Good Digital Collections. Examples of performance criteria include: Significance of Content to Internal Stakeholders (degree to which a collection, once digitised supports the both immediate and long-term research and the teaching needs of the institution), Significance of Content to External Stakeholders (a highly successful digital collection is of interest to researchers and users outside of the university), Uniqueness (many unique institutional resources such as original photographs, archival materials, grey literature such as university technical reports and conference proceedings have not yet been digitised), Exposure (degree to which a digital collection garners positive recognition and press for an institution and assessing the potential for digital availability of the collection to result in grants and other funding). This digitisation recommendation will also be in line with the LTM coordinated strategy for digitisation. 
- -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0012](#rfc-0012) - -\newpage - -## RFC-0015 - RFC on Open Hardware {#rfc-0015} - -Definition of the open hardware strategy for Time Machine including licensing terms and catalogues. - -### Status - -* Proposal - -### Schedule - -* 2020 - -### Dependencies - -* [RFC-0012](#rfc-0012) - -\newpage - -## RFC-0016 - RFC on Data Lifecycle {#rfc-0016} - -The data lifecycle within the TM begins with the concept of documents and data selection. The goal is to assist partners in selecting proper documents or collections to be processed by the TM pipelines and then subsequently which data within the documents. The data selection is closely related to the LTM or project perimeters (see LTM/Framework) and should be stated prior to any project launch. Criteria such as intellectual property rights, obtaining copyright permissions, digitisation, OCR processing or metadata creation costs must also be taken into account. Extending the results of the **RFC of Digitisation Priorities and Data selection**, the RFC will also detail data acquisition, data sharing and data publishing. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0005](#rfc-0005) -* [RFC-0014](#rfc-0014) - -\newpage - -## RFC-0017 - RFC on Operation Graph {#rfc-0017} - -Definition of the format of the operation graph describing the operations currently pursued in the TM partners, as monitored by the **Time Machine Organisation**. It includes both automatic processes and human interventions. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0016](#rfc-0016) - -\newpage - -## RFC-0018 - RFC on TM Data Graph {#rfc-0018} - -The Time Machine **Data Graph** contains all information modelled in the Time Machine. The graph is constructed both manually and automatically through the processing of the **Digital Content Processor**. The **Data Graph** is intrinsically composed of two sub-parts: (1) The bright graph, which is composed of information that has been manually mapped and integrated with other large databases or used in a publication. This information is integrated with the current sum of digital human knowledge. It can be considered actual. (2) The dark graph, which is composed of information extracted automatically from (massive) documentation which has to this point been used in a stand alone fashion as individual historic items. It can be considered virtual. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0016](#rfc-0016) - -\newpage - -## RFC-0019 - RFC for TM APIs {#rfc-0019} - -Algorithms and software integrated into the Time Machine must be able to communicate with each other, thus a definition of joint APIs is required. It is likely that TM Services will be built on top of REST interfaces. In order to meet TM's needs, these will have to be adapted towards use in large-scale machine learning. A probable addition is the option to provide the gradient information of a specific module that is integrated using the API, for example. This way remote services can also be integrated into large-scale training processes. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0017](#rfc-0017) - -\newpage - -## RFC-0020 - RFC for Classification and Planning of Languages to Address {#rfc-0020} - -Definition of Time Machine's multilingual strategy. 
This RFC impacts the exploitation platforms, as the TM will handle documents in multiple European languages and dialects. Some of these languages might be more complicated to address than others due to pre-existing tools for modern variants or availability of materials. A working plan for natural language processing (NLP) tool development should be conceived by taking into consideration the materials, the locations of the LTMs and the Digitisation Hubs, and the features of the languages. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0018](#rfc-0018) - -\newpage - -## RFC-0021 - RFC on Annotation {#rfc-0021} - -Definition of the annotation protocols used for the documenting of TM Data Graph. This RFC will be used by exploitation platforms. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0018](#rfc-0018) - -\newpage - -## RFC-0022 - RFC on Digital Content Processor Development and Testing {#rfc-0022} - -Digital Content Processors (DCP) are automatic processes for extracting information from documents (images, video, sound, etc.) As such, the following pipeline may be envisioned: (1) Development of a DCP in a dedicated “Sandbox” (a place where trial and error tests can be undertaken without compromising the entire functioning of the Time Machine architecture). Training will be done on existing labelled documents. (2) Submission of the DCP to the Time Machine Organisation's dedicated service. (3) After some benchmark and assessments of performance, resulting in acceptance or rejection, the suitable DCP becomes an Official TM Component. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0019](#rfc-0019) -* [RFC-0027](#rfc-0027) - -\newpage - -## RFC-0023 - RFC on Digital Content Processor (DCP) of Level 1 {#rfc-0023} - -DCP are automatic processes for extracting information from documents (images, video, sound, etc.) DCP of Level 1 only label mentions of entities. Each processing is fully traceable and reversible, and the results of the processing make up the core dataset of the Big Data of the Past and are integrated into the TM Data Graph. The document should define: - -1. The technical conditions for implementing DCP that can be inserted in the Time Machine Operation Graph. -2. The requirements for hosting DCP in the TM Super Computing Infrastructure. The process by which DCP are developed, tested, labelled, published and put in operation. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0018](#rfc-0018) -* [RFC-0022](#rfc-0022) - -\newpage - -## RFC-0024 - RFC on Digital Content Processor (DCP) of Level 2 {#rfc-0024} - -DCP are automatic processes for extracting information from documents (images, video, sound, etc.) DCP of Level 2 label relations between entities. Each processing is fully traceable and reversible, and the results of the processing make up the core dataset of the Big Data of the Past and are integrated into the TM Data Graph. The document should define: - -1. The technical conditions for implementing DCP that can be inserted in the Time Machine Operation Graph. -2. The requirements for hosting DCP in the TM Super Computing Infrastructure. The process by which DCP are developed, tested, labelled, published and put in operation. 
- -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0023](#rfc-0023) - -\newpage - -## RFC-0025 - RFC on Digital Content Processor (DCP) of Level 3 {#rfc-0025} - -DCP are automatic processes for extracting information from documents (images, video, sound, etc.) DCP of Level 3 label rules. Each processing is fully traceable and reversible, and the results of the processing constitute the core dataset of the Big Data of the Past and are integrated in the TM Data Graph. The document should define: - -1. The technical conditions for implementing DCP that can be inserted in the Time Machine Operation Graph. -2. The requirements for hosting DCP in the TM Super Computing Infrastructure. The process by which DCP are developed, tested, labelled, published and put in operation. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0024](#rfc-0024) - -\newpage - -## RFC-0026 - RFC on Synergy and interaction in EU Research Infrastructure {#rfc-0026} - -The TM digitisation network will build upon existing EU Research Infrastructures (DARIAH, CLARIN) and infrastructure providing access to Cultural Heritage (Europeana, Archive Portal Europe, etc.) TM will introduce new processing pipelines for transforming and integrating Cultural Heritage data into such infrastructures. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -\newpage - -## RFC-0027 - RFC on General Standards for the Super Computing Architecture {#rfc-0027} - -This document will define the general rules that the TM Network partners must follow to integrate their computing resources into the TM Super Computing Architecture and the routing processes managing the data pipelines. In particular, this document will define: - -1. The hardware and software standards that the computing resources will follow across the entire distributed Super Computing Architecture. -2. The routing protocols of the TM Operation Graphs. -3. The processes for naming and renewing the administrators of the administrators -4. The role of the TMO for managing of infrastructure. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0017](#rfc-0017) -* [RFC-0018](#rfc-0018) -* [RFC-0026](#rfc-0026) - -\newpage - -## RFC-0028 - RFC on Time Machine Box {#rfc-0028} - -Also meant to cover storage needs for data, the TM Box should help partners involved in a data acquisition, sharing or publishing process to conform to the metadata specifications and delivery – harvesting protocols as stated by the data model. One of its goals is to smoothen the process of and contribute to the automatization of the digitisation process (offering for instance a way of monitoring the digitisation tasks). This hardware is part of the Time Machine Official Components. The RFC will define how the production of the Time Machine Box should be managed in the long-term. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0017](#rfc-0017) - -\newpage - -## RFC-0029 - RFC Digitisation Hubs {#rfc-0029} - -Defining the functioning and business model for the Digitisation Hubs. In order for the Digitisation Hubs to be implemented, standards in terms of resolution, file formats and metadata during acquisition need to be defined beforehand (RFC on Technical Charts). These standards must be consensual and simple in order to ensure easy implementation and fit into existing practices. 
The RFC must also evaluate relevant technologies and recommend affordable technologies that do not damage the objects, while also providing the best possible results at the same time. Time Machine aims to distribute affordable technology on a large scale using, for example, open design hardware. More costly and dedicated scanning solutions ,such as scan robots and tomographic methods, should be made available in specialised centres spread across the European Union such that their services are available to a maximum number of users. The objective of achieving affordable and wide-spread digitisation should be a priority in this RFC. - -### Status - -* Proposal - -### Schedule - -* 2021 - -### Dependencies - -* [RFC-0015](#rfc-0015) -* [RFC-0028](#rfc-0028) - -\newpage - -## RFC-0030 - RFC for Named Entity Recognition {#rfc-0030} - -This RFC defines named entity recognition for older European languages and variants. The results of the tagging of entities will feed the Dark Data Graph with new information. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0021](#rfc-0021) -* [RFC-0023](#rfc-0023) - -\newpage - -## RFC-0031 - RFC on Text Recognition and Processing Pipeline {#rfc-0031} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0032 - RFC on Structured Document Pipeline {#rfc-0032} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0033 - RFC on Map and Cadaster Processing pipeline {#rfc-0033} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0034 - RFC on Audio Processing Pipeline {#rfc-0034} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0035 - RFC on Video Processing Pipeline {#rfc-0035} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0036 - RFC on Music Score Pipeline {#rfc-0036} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0037 - RFC on Photographic Processing Pipeline {#rfc-0037} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. 
- -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0038 - RFC on Photogrammetric Pipeline {#rfc-0038} - -This RFC defines the general architecture for this particular kind of media. It must be aligned with the LTM central services. - -### Status - -* Proposal - -### Schedule - -* 2022 - -### Dependencies - -* [RFC-0023](#rfc-0023) -* [RFC-0024](#rfc-0024) -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0039 - RFC on Enhancing Collaboration {#rfc-0039} - -Investigating how to support partnerships across Time Machine member networks (e.g. Europeana, Icarus) or external cultural-heritage networks. The reflection should focus on a number of different aspects: how to enhance collaboration internally and externally, both at the level of the LTM and the TMO (e.g. with other LTM or partner networks); how to enhance exchange of best practices; how to share content and enhance collaboration between already existing cultural heritage networks and associations. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0005](#rfc-0005) - -\newpage - -## RFC-0040 - RFC on Franchise System {#rfc-0040} - -A franchise model clarifying the financial relationship between LTMs and the TMO’s services will be put in place, in accordance with the financial needs and costs of Time Machine's technical and coordination infrastructures. The franchise system must be scalable and adaptable, as the network will grow along with its reputation in the public eye and the financial benefit generated for LTMs. The franchise system is meant to be complementary to that which is established for the TMO partners. To enter a LTM, an institution should at minimum be required to become a member of the TMO. One of the subtasks of this RFC is to also assess and to further design the role of the TMO as a “Financial, Economic, Intelligence and Watch services office. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0005](#rfc-0005) - -\newpage - -## RFC-0041 - RFC on Solidarity {#rfc-0041} - -How to select, align and finance the “redocumentation” project, for potential projects compatible with TM goals but placed in a stand-by stage for a period of time, will be the main task of this RFC. What would it take (training, formation) to ensure the project scalability? - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0005](#rfc-0005) - -\newpage - -## RFC-0042 - RFC on Top-Down initiatives {#rfc-0042} - -Defining what local and national measures might contribute to the creation of LTMs. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0005](#rfc-0005) - -\newpage - -## RFC-0043 - RFC on Distributed Storage {#rfc-0043} - -The TM distributed storage system will target offering an alternative solution to current HTTP-based storage. While in a first phase, most documents and data will be stored on specific servers accessible through standard protocols (e.g. 
images on IIIF servers), the aim of the project is to develop a storage solution that would have the following objectives: (1) Giving access to high volume of data with high efficiency, (2) Optimising storage to store more data, (3) Implementing long-term preservation of data, preventing accidental or deliberate deletion and keeping a fully versioned history of the data stored, (4) Guaranteeing authenticity of the data stored and preventing the inclusion of fake sources. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0017](#rfc-0017) -* [RFC-0018](#rfc-0018) -* [RFC-0026](#rfc-0026) -* [RFC-0028](#rfc-0028) - -\newpage - -## RFC-0044 - RFC on Distributed storage System for Public Data {#rfc-0044} - -This document will define the infrastructure principles for a decentralized solution of public datasets based on Creative Commons licences like CC-0, CC-BY, CC-BY-NC and the Europeana rights declarations. Storage will be maintained for the resources shared by the partners of the TM Infrastructure Alliance. A distributed system like IPFS (InterPlanetary File System) and the work done by the IPFS Consortium for persistence of IPFS objects may be a good starting point, as such types of file systems do not identify a resource by its location but by a unique identification number. Routing algorithms optimised through P2P algorithms are the most efficient ways to bring the data to the visualisation or computing processes. This also speeds up the process when the host is a region with low connectivity. It is critical that redundancy and long-term resilience can be guaranteed, which means that the system must be designed to make any public data content that it stores un-deletable in practice. In turn, this will make it difficult to censor content, and for this reason it is especially well adapted for public data associated with creative common licenses. Systems like IPFS also provide the possibility for each node of the network to choose the categories of data they accept to replicate. This provides some flexibility in the negotiation of a common strategy by the Time Machine Infrastructure Alliance. To ensure the authenticity of the data stored, a blockchain type solution could be the solution. The interaction between the distributed file system and the authentication solution will be defined by the RFC. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0043](#rfc-0043) - -\newpage - -## RFC-0045 - RFC on Distributed Storage System for Private Data {#rfc-0045} - -Private datasets could be stored in either: (1) A specific layer of the distributed storage system, provided reliable cryptographic and authentication systems are in place, (2) A “fenced” location offered by partners of the Time Machine Network as storage solution. In both cases, the RFC should define how such closed datasets could use Time Machine infrastructure services and under which conditions. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0043](#rfc-0043) - -\newpage - -## RFC-0046 - RFC on On-demand Digitisation {#rfc-0046} - -This RFC defines the process for on-demand digitisation, enabling any user to request the digitisation of a specific document. An alignment with the archival description system will be necessary. 
- -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0017](#rfc-0017) -* [RFC-0018](#rfc-0018) - -\newpage - -## RFC-0047 - RFC on Global Optimization of Digitisation Process {#rfc-0047} - -Definition of the strategy for optimising the digitisation processes. The objective of this RFC is, among others, to avoid digitisation of redundant printed material via a synchronization of all digitisation initiatives. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0014](#rfc-0014) -* [RFC-0026](#rfc-0026) -* [RFC-0044](#rfc-0044) -* [RFC-0046](#rfc-0046) - -\newpage - -## RFC-0048 - RFC for Orthographic Normalisation {#rfc-0048} - -This RFC defines the orthographic normalisation of older European language variants. The results will improve the search functionality of the databases. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0020](#rfc-0020) - -\newpage - -## RFC-0049 - RFC on Content Filtering {#rfc-0049} - -Content filtering may be necessary to control the exposure of users of Time Machine services to unsolicited content. Finding the right technologies which allow such controls without enabling the possibly of abusive censorship operations will be the challenge of this RFC. - -### Status - -* Proposal - -### Schedule - -* 2023 - -### Dependencies - -* [RFC-0007](#rfc-0007) - -\newpage - -## RFC-0050 - RFC on Knowledge Transfer {#rfc-0050} - -Investigating how to support achievements and knowledge transfer inside the TM network, ensuring a global research collaboration at a European scale. There are only rare examples of large-scale research data management models, dealing with similar complexity levels as the TM; however, some guidelines can be found such as: “Guidance Document Presenting a Framework for Discipline-specific Research Data Management” (Science Europe, January 2018), “Practical Guide to the International Alignment of Research Data Management” (Science Europe, November 2018). - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0039](#rfc-0039) - -\newpage - -## RFC-0051 - RFC on Smart Cluster {#rfc-0051} - -Defining the rules to be followed by the future smart clusters (for instance compliance with LTM rules and recommendations), ensuring means for the creation of such a space for creativity, supporting inter-disciplinary exchanges, political involvement and job creation, defining what types of relations could be built between the participants of the smart clusters and the partners of the LTM, and how to monitor, evaluate and revise or update the process. - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0005](#rfc-005) - -\newpage - -## RFC-0052 - RFC on Collaboration Indicators {#rfc-0052} - -This RFC defines key performance indicators to evaluate the level of collaboration among TM partners, both at a local and global level. It will also develop "affinity maps" to suggest potential future collaborations. - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0039](#rfc-0039) - -\newpage - -## RFC-0053 - RFC on Large-Scale Inference Engine {#rfc-0053} - -The Large-Scale Inference Engine is capable of inferring the consequences of chaining any information in the database. This enables Time Machine to induce new logical consequences of existing data. 
The Large-Scale Inference Engine is used to shape and to assess the coherence of the 4D simulations based on human-understandable concepts and constraints. Its origin derives from more traditional logic-based AI technology, slightly overlooked since the recent success of the deep learning architecture, that can, nevertheless, play a key role in an initiative like TM. This document will specify the various types of rules that the Large-Scale Inference Engine can process including: rules extracted from documents by DCP, implicit rules made explicit, and rules (statistical or not) induced from the data. The document should define: the process by which rules are submitted, tested and integrated in the engine, and the processes for managing conflicting rules or results from various rules. The document will also motivate implementation of solutions in relation with existing deployed systems like Wolfram Alpha or IBM Watson and standards like OWL, SKOS. - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0025](#rfc-0025) - -\newpage - -## RFC-0054 - RFC on the 4D Grid {#rfc-0054} - -Through a hierarchical division of space (3D) and time, the system organises a multi-resolution 4D grid which serves as a general spatiotemporal index. Each “cube” in the grid indexes all the information relevant for these particular spatiotemporal elements. It offers an efficient perspective for organising large datasets and performing collective curation through manual and automatic processes. Each element of the grid will also potentially be labelled according to other various multidimensional criteria, some of them being AI-based descriptors (e.g. descriptors for architectural style detection in images). - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0018](#rfc-0018) - -\newpage - -## RFC-0055 - RFC on 4D Simulations {#rfc-0055} - -The 4D grid is sparse, as there are many places/times in the world that are not directly associated with existing archival data. A central research challenge is to develop AI systems capable of extending the information of the data grid in space and time through continuous extrapolation and interpolation, and developing new ways of visualising which parts of the content are anchored in sourced data, simulated or unknown. Extensions of current deep-learning generative methods, originally developed for 2D imaging, can be envisioned to deal with the richness of the 4D datasets. Many 4D simulations can be associated with the same 4D grid, and one central challenge is to manage this multiplicity of worlds and their specific resolution levels for various services of the Time Machine (e.g. entertainment, policy planning). - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0054](#rfc-0054) - -\newpage - -## RFC-0056 - RFC TM Tools for History Research {#rfc-0056} - -To engage researchers in social sciences and humanities is to productively use the Big Data of the Past, and the TM can offer researchers a series of tools that facilitate analyses. These tools will be enhanced by the Digital Content Processor and the Simulation Engines, which will enable scholars to work with historical data in an unprecedented way. 
- -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0053](#rfc-0053) -* [RFC-0055](#rfc-0055) - -\newpage - -## RFC-0057 - RFC on New Scanning Technology {#rfc-0057} - -Cutting-edge technologies, such as automatic scanning machines with low human supervision, scanning robots and solutions for scanning films and books without the need to unroll/open them, should be considered and fostered by the TM. A specific scheme to incentivise these technologies will be created. The goal is to reach an appropriate mix of dedicated specialized scanning centres and aid the development of mobile special use hardware, e.g. mobile CT scanners that are mounted on trucks. - -### Status - -* Proposal - -### Schedule - -* 2024 - -### Dependencies - -* [RFC-0015](#rfc-0015) -* [RFC-0029](#rfc-0029) - -\newpage - -## RFC-0058 - RFC on 4D Simulator {#rfc-0058} - -The 4D Simulator manages a continuous spatiotemporal simulation of all possible pasts and futures that are compatible with the data. The 4D Simulator includes a multi-scale hierarchical architecture for dividing space and time into discrete volumes with a unique identifier: a simulation engine for producing new datasets based on the information stored. Each possible spatiotemporal multi-scale simulation corresponds to a multidimensional representation in the 4D computing infrastructure. When sufficient spatiotemporal density of data is reached, it can produce a 3D representation of the place at a chosen moment in European history. In navigating the representation space, one can also navigate in alternative past and future simulations. Uncertainty and incoherence are managed at each stage of the process and directly associated with the corresponding reconstructions of the past and the future. The document should specify the interaction of the 4D simulator with the rest of the architecture, answering questions like: How can an entity of the TM Data Graph be associated with a particular element of the 4D Grid?, How can 4D simulations be run and cached for future use?, How can the system be used directly in exploitation platforms? - -### Status - -* Proposal - -### Schedule - -* 2025 - -### Dependencies - -* [RFC-0055](#rfc-0055) - -\newpage - -## RFC-0059 - RFC on Universal Representation Engine {#rfc-0059} - -The Universal Representation Engine manages a multidimensional representation of a space resulting from the integration of a pattern of extremely diverse types of digital cultural artefacts (text, images, videos, 3D and time) and permitting new types of data generation based on trans-modal pattern understanding. In such a space, the surface structure of any complex cultural artefact, landscape or situation is seen as a point in a multidimensional vector space. On this basis, it could generate a statue or a building or produce a piece of music or a painting, based only on its description, geographical origins and age. The document will specify the integration of the URE in the global architecture, outlining for example how a given node in the TM Data Graph can be associated with a parametric representation space. - -### Status - -* Proposal - -### Schedule - -* 2025 - -### Dependencies - -* [RFC-0019](#rfc-0019) - -\newpage - -## RFC-0060 - RFC for Machine Translation {#rfc-0060} - -Definition of the architecture for multilingual diachronic machine translation. Existing algorithms for machine translation will be adapted to older language variants of European languages. 
This will densify the Data Graph and provide more input to the Large-Scale Inference Engine. - -### Status - -* Proposal - -### Schedule - -* 2025 - -### Dependencies - -* [RFC-0030](#rfc-0030) -* [RFC-0048](#rfc-0048) -* [RFC-0053](#rfc-0053) - -\newpage - -## RFC-0061 - RFC on Mirror World Prototyping {#rfc-0061} - -Definition of the implementation strategy for the first working prototype of Mirror World using TM engines. This first prototype is likely to be developed on the most advanced LTM. This RFC will define how to safely experiment with this technology. - -### Status - -* Proposal - -### Schedule - -* 2025 - -### Dependencies - -* [RFC-0053](#rfc-0053) -* [RFC-0058](#rfc-0058) -* [RFC-0059](#rfc-0059) - -\newpage - -## RFC-0062 - RFC on Legal issues linked with Mirror World {#rfc-0062} - -Mirror Worlds are linked with specific legal issues which will need to be addressed. This particularly concerns privacy and confidentiality. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0061](#rfc-0061) - -\newpage - -## RFC-0063 - RFC on Mirror World Extension Strategy {#rfc-0063} - -The Mirror World extension strategy takes into account all the technical and legal choices defined by the RFC on Scaling in order to address the extension of the Mirror World to cover a larger, mostly European, scale. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0061](#rfc-0061) - -\newpage - -## RFC-0064 - RFC on Mirror World Technical Standards {#rfc-0064} - -This RFC defines the specific technical standards needed for the Mirror World extension, based on the experience provided by the Mirror World prototype. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0061](#rfc-0061) - -\newpage - -## RFC-0065 - RFC on Virtual/Augmented Reality and Discovery {#rfc-0065} - -Definition the standards that should be adopted in order to enable the development of Virtual/Augmented Reality services and the discovery module on top of the TM Data Graph. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0061](#rfc-0061) - -\newpage - -## RFC-0066 - RFC on 4D Mirror World {#rfc-0066} - -This RFC defines how the Mirror World can be continuously synchronized with the TM DataGraph, enabling direct access and manipulation of 4D data. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0061](#rfc-0061) - -\newpage - -## RFC-0067 - RFC for Improved Simulation using TM Simulation Engines {#rfc-0067} - -Researchers will be able to use the TM simulation engines to perform simulation studies without having to rely on external models and tools. The simulation engines have the capacity to improve the performance and the reach of computational simulations for historical research. - -### Status - -* Proposal - -### Schedule - -* 2026 - -### Dependencies - -* [RFC-0058](#rfc-0058) - -\newpage - -## RFC-0068 - RFC on Large Scale Mirror World {#rfc-0068} - -This RFC consolidates the vision, strategy and framework conditions for the development of a World-Wide Mirror World. 
- -### Status - -* Proposal - -### Schedule - -* 2027 - -### Dependencies - -* [RFC-0062](#rfc-0062) -* [RFC-0063](#rfc-0063) -* [RFC-0064](#rfc-0064) diff --git a/files/drafts/RFC-0003/RFC-0003.md b/files/drafts/RFC-0003/RFC-0003.md deleted file mode 100644 index c4673e2..0000000 --- a/files/drafts/RFC-0003/RFC-0003.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: "RFC on RFC Platform" -subtitle: "Time Machine RFC-0003" -author: - - Daniel Jeller - - Frédéric Kaplan - - Kevin Baumer - -# Don't change the following lines -header-includes: - - \usepackage{fancyhdr} - - \pagestyle{fancy} - - \fancyhead[R]{} - - \fancyfoot[L]{-release-version-} -output: pdf_document ---- - -# Motivation - -This Request for Comments (RFC) describes the inner workings and technical details of the RFC platform itself. It aims to provide the technical framework for authorship, review, community contribution and publication of all future Time Machine RFCs. - -# Introduction - -The Time Machine **Requests for Comments** (RFC) workflow is based on Git[^git_website], a tool initially designed to track changes to source code by multiple developers, and GitHub[^github_about], currently the leading place to host open-source projects and to create and collaborate on software and many other kinds of projects. The contribution and review process used by RFCs builds on the basic _forking_ workflow that "is most often seen in public open source projects"[^bitbucket_forking]. This ensures that contributions will be tracked indefinitely, review decisions are documented correctly and it is possible to permanently access and reference older versions of the RFC drafts. - -The choice of this solution is motivated by the possibility to scale the number of users and contributions over time. It is likely that releasing and updating RFCs will ultimately be akin to maintaining a large source code base. - -In addition to the Git-based workflow, TM RFCs will also enable people who are not familiar with Git and the related tools to participate in additional ways, for instance by accessing candidate RFCs published as papers on the open peer review platform OpenReview.net. - -# Definitions - -Before describing the workflow in detail, this section gives an overview of the most important design decisions and distinct parts of the platform. - -## RFC Editorial Committee and RFC Team - -All strategic aspects of the RFCs are managed by the **RFC Editorial Committee** appointed by the Time Machine Organisation board; the technical management, editorial work and support for RFC authors and other contributors are handled by the **RFC Team**. - -## Authorship - -An RFC author can be an individual person or a group of authors working together on a single RFC draft. -_Note_: This document doesn't differentiate between individual authors and groups of authors when talking about _RFC authors_. - -## New RFC proposal - -RFCs can be proposed by either the RFC Editorial Committee, RFC authors appointed by the Time Machine Organisation or any other interested public author. An up-to-date list of planned RFCs and their interconnection can be seen in RFC-0002. - -## Identifiers - -Any accepted RFC proposal will be assigned an identifier by the RFC Team. Identifiers follow the pattern `RFC-[number]`. The numbers are padded with leading zeros to a length of four digits. An example is this RFC itself, called `RFC-0003`, pronounced as _RFC-three_.
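For illustration, the padding rule described above can be reproduced with a single shell command (a sketch only; the actual numbers are assigned by the RFC Team):

```bash
# Pad an RFC number to four digits, following the RFC-[number] pattern above.
printf 'RFC-%04d\n' 3   # prints: RFC-0003
```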
- -## Document format - -RFCs are drafted in Markdown[^orig_markdown_syntax] -- more specifically, in the extended syntax used by Pandoc[^pandoc_markdown], a tool to convert texts between different file formats. - -_Note_: A short introduction to the most important features of Markdown can be found in the _RFC-template_ document[^template]. - -## RFC content and contribution repository - -RFC documents are managed in Git and are hosted in a single repository on GitHub. The individual RFCs are stored in directories and files named after the RFC number, for example `RFC-0003/RFC-0003.md`. Accompanying files like images are stored alongside the main RFC documents. Public contributions will be possible via issues, comments and pull requests; the RFC Team is responsible for the immediate interaction with the community as well as the maintenance of the repository. - -The drafting process takes place on, and is therefore documented on, the `master` branch of the main RFC repository[^main_repo_url]. Content contributions to an RFC currently being drafted can be made by creating a fork[^github_forking] of the official RFC repository and submitting all modifications in the form of a GitHub Pull-Request[^github_pullrequest] containing the relevant changes to the RFC repository. - -_Note_: A Pull-Request is only allowed to contain changes to a single RFC (main document and accompanying files), otherwise it will be rejected by the RFC Team. Contributions outside an active drafting phase will also be rejected. - -## Document lifecycle - -The lifecycle phases for an RFC are `draft`, `release candidate` and `release`. Draft phases will be open to the public for two weeks. Review phases will have a duration of four weeks. Drafts are stored in the `files/drafts` folder, release candidates in the `files/candidates` folder and released RFCs in the `files/releases/` folder of the RFC repository. - -## Candidate and release publication - -In addition to the Markdown documents in the RFC repository, candidates and finalised releases will be converted into PDF files using the above-mentioned Pandoc. These PDF files will be amended with the time of conversion as well as a unique release number, then stored in the form of a GitHub Release[^github_release] with the PDF files attached as release assets. - -Release candidates will be published and reviewed on OpenReview.net[^openreview_about]. Releases will be provided with a DOI and published on the open-access repository Zenodo[^zenodo]. - -_Note_: It is to be expected that the automatic file preview for Markdown files on GitHub (and possibly in other tools as well) will differ from the final PDF files, as Pandoc enables advanced features like footnotes which are not supported by GitHub and its preferred Markdown dialect[^gfm]. - -# Workflow phases - -This section describes the progression of an RFC from an initial idea to its final release as a citable PDF paper. - -## Phase 1: Conception - -![Conception flow](images/phase_1.png) - -Ideas for RFCs can come from both the RFC Editorial Committee and public authors, either as individuals or as groups of authors working together. Potential authors with an idea for an RFC should contact the RFC Editorial Committee to coordinate the initial conception and drafting process. After accepting an RFC idea, the RFC Editorial Committee will assign an identifier and the RFC Team will prepare the draft file from a standardised template in the official RFC repository in the `files/drafts/[RFC-id]/` folder.
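As a rough illustration of the Pandoc conversion described under "Candidate and release publication" above, the following sketch renders a single RFC locally; the automated workflow performs this inside a dedicated build environment, and the flags and the version-stamping step below are assumptions, not a specification:

```bash
# Minimal local sketch: render one RFC to PDF and replace the footer
# placeholder -release-version- with a version string and the conversion date.
RFC=RFC-0003
VERSION="draft-$(git rev-parse --short HEAD)"   # example version string
sed "s/-release-version-/${VERSION}, $(date -u +%Y-%m-%d)/" "files/drafts/${RFC}/${RFC}.md" \
  | pandoc --from markdown --resource-path "files/drafts/${RFC}" --output "${RFC}.pdf"
```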
- -Following this step, the draft author will create a new fork of the official repository or pull the latest changes into their pre-existing fork, then begin to work on the initial version of the RFC draft. - -After the initial drafting process is finished and the draft is ready to be shared with the public, the draft author will create a pull request containing the initial version of the draft from their private fork to the official repository. - -The RFC Team will then conduct a brief internal review of the initial draft to ensure the formal correctness of the document. After this is concluded and necessary changes are made by the initial draft author in the scope of the original pull request, the pull request is merged in the master branch of the official repository and the public drafting phase will start. - -## Phase 2: Public drafting - -![Drafting flow](images/phase_2.png) - -The drafting of an RFC is designed as a process that enables direct contributions by public participants as well as the official RFC Editorial Committee and Team in a similar manner. Over the course of a fixed timespan all contributors can work on improvements of the draft text and propose and review changes. The fork-and-pull-request-workflow on GitHub makes sure that each contribution is registered and stored in the official repository indefinitely. - -The official drafting phase begins with the merging of the initial RFC draft by the RFC Team and extends over a period of two weeks. The main RFC Markdown document as well as accompanying files, for example images, are stored in the `files/drafts/[RFC-id]` folder for the duration of the drafting phase. - -Persons interested in contributing to the text directly, either by additions or changes to the existing content, can create a new fork (a full copy) of the official repository or pull the current state of its master branch into their own pre-existing fork. They can then either: use the inline editor on the GitHub website to change the content of the RFC document in their own forked repository, or clone their repository to their computer and use a Markdown editor / Git client of their choice to work with the files. - -Changes to the RFC draft can be submitted at any time during the drafting phase in the form of pull requests from the forked repository to the so-called upstream repository, that is the official RFC repository. This enables the RFC Editorial Committee, RFC Team and other contributors to review the proposed changes, suggest editions or point out problems directly next to the provided version of the draft document. - -All pull requests to the official RFC repository during the drafting phase can be reviewed by any contributor but to be accepted and to be merged, favourable reviews by at least three appointed reviewers will be necessary. These reviewers are appointed by the RFC Editorial Committee based on the area of expertise the RFC falls into. After a pull request passes the review, it is accepted and will be merged by the RFC Team into the master branch of the RFC repository. This makes the merged version the most recent official version of the draft. Other pull requests open at the same time will automatically be compared to this new version and will have to be amended by the contributors if necessary to solve possible conflicts with the then-official version. 
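A minimal sketch of the fork-and-pull-request steps described above, using plain Git plus the GitHub CLI; the repository slug, branch name and file path are placeholders:

```bash
# Sketch of a contribution to an RFC draft: fork, edit, open a pull request.
UPSTREAM="example-org/rfc-repository"   # placeholder: slug of the official RFC repository
gh repo fork "$UPSTREAM" --clone        # create a personal fork and clone it locally
cd rfc-repository
git checkout -b rfc-0003-typo-fixes                # work on a topic branch
"$EDITOR" files/drafts/RFC-0003/RFC-0003.md        # edit the draft document
git commit -am "Fix typos in the RFC-0003 draft"
git push --set-upstream origin rfc-0003-typo-fixes
gh pr create --fill                                # open the pull request against the upstream repository
```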
- -Merged pull requests will trigger a `GitHub Action`[^github_action] that compiles the current version of all open draft documents and attaches them as a ZIP file accessible in workflow job details[^github_job_details]. These preview files are available for 90 days. - -Another way for potential contributors not familiar with Markdown writing and the Git-based workflow to contribute are so-called GitHub Issues[^github_issues]. These are commonly used to give users the possibility to report software bugs to project developers and to track tasks to be done. In the TM RFCs issues can be created by any person that wants to just report a problem with the current version of the draft or to discuss specific topics or ideas related to the RFC. It is the responsibility of the RFC author to react to these issues and up to the RFC Team to moderate the issues as necessary. - -## Phase 3: Release candidate review - -![Review flow](images/phase_3.png) - -After the end of the drafting phase and after all open pull requests containing changes to the draft are either merged or rejected, the final version of the draft will be turned into the first release candidate by the RFC Team. If necessary, the TM Editorial Committee, in combination with the appointed reviewers, can abort the RFC drafting process. Otherwise, the RFC Team will update the list of authors / contributors in the document and move the folder the RFC and all accompanying documents are located in from `files/drafts` to `files/candidates`. - -To trigger the automatic release of the candidate the RFC Team commits and pushes the draft-turned-candidate on a new branch[^github_branches] in the main repository. The branch name will follow the pattern `[RFC-id]_candidate_[version_number]`. The version number will be assigned by the RFC Team and will follow semantic versioning[^semver]. This step triggers an automatic process in GitHub that converts the Markdown document into a PDF RFC candidate document with integrated version and timestamp and publish it in the form of a GitHub release. - -The PDF file created in this step will be uploaded by the RFC Team as a submission in the RFC section of the virtual venue "TimeMachine" on the open peer review platform OpenReview.net[^openreview_timemachine]. The submission will be open for four weeks during which users of OpenReview will have the possibility to use the integrated tools to review the candidate or comment on it. This step ensures a scientifically sound RFC release and enables possible reviewers to use the tools they are familiar with from other paper review processes. - -Contributors are encouraged to participate on OpenReview during this time, and discussions in GitHub issues are still possible. However, pull requests on release candidates will not be accepted. - -Depending on the result of the release candidate review process, the RFC will either progress to the official release phase, return to another drafting and refinement phase or be cancelled altogether if necessary. If an additional drafting phase is necessary, the document will be moved back to `files/drafts` by the RFC Team and pull requests on the `master` branch will again be considered for merging. - -## Phase 4: Official release - -![Release flow](images/phase_4.png) - -If an RFC candidate is accepted by the community during the review phase on OpenReview, it will be prepared for the official release by the RFC Team. 
Again, the list of contributors will be updated as necessary and the document folder will be moved to `files/releases`. This change will be pushed onto a new Git branch that follows the naming pattern `[RFC-id]_release_[version_number]`. Analogous to the candidate phase, the version number will be assigned by the RFC Team and will follow semantic versioning. This step will again trigger the conversion of the document into a versioned and timestamped PDF document that will be published as an asset on a GitHub release. - -The finalised release PDF will then be published as an official publication by the Time Machine on Zenodo. - -## Phase 5: Updates at a later point - -Because the RFCs describe all aspects of the Time Machine, additions, amendments or changes to existing RFCs can become necessary. In this case the RFC Team will move the RFC file from the `files/releases` folder back to the `files/drafts` folder and a new drafting phase can start based on an amended draft produced by the new draft authors. The RFC will then again progress through all the different phases as described above, resulting in a new release version. - -# Q&A - -## Question: Do I need a GitHub or an OpenReview.net account to contribute to an RFC? - -Yes, you will need a GitHub account if you would like to contribute directly to the text of an RFC, to discuss issues or to comment on pull requests. If you would like to comment on a review candidate on OpenReview.net you will need an account on OpenReview.net. Both tools serve to attribute content and comments to individual persons and to help ensure scientific standards for the drafting and review process. - -## Question: I have an idea for an RFC. What do I do? - -Please contact the RFC Editorial Committee with your idea. They will assist you with the creation of an initial draft version of the idea, if it is deemed to be suitable and feasible. - -## Question: I would like to fix some errors in an RFC draft. How do I do that? - -The easiest way is for you to sign in to your GitHub account, create a fork of the main RFC repository into your own account, fix the error in your forked version of the document, commit it into your repository and open a pull request outlining your changes to the main RFC repository. It will then be visible, can be reviewed and eventually merged into the main draft document if the changes are accepted by the reviewers. - -## Question: I have worked on a draft document myself and would like to preview the final PDF document to see how it would look. How can I do this? - -If you follow the official contribution process by forking the main RFC repository, anything you push to the `master` branch in your own repository will be automatically converted into a PDF document by triggering a GitHub action. You can access the ZIP file with the compiled draft documents on the GitHub job details page in the Actions section. - -## Question: How do I see which drafts are currently available for contributions or release candidates open for review? - -You can see the current plan of work in the `README` file of the main RFC repository.
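To make the branch conventions above concrete, here is a sketch of the promotion step performed by the RFC Team; the folder moves and branch naming patterns follow the text above, while the version number and remote name are examples:

```bash
# Sketch: promote a draft to a release candidate and trigger the automated
# PDF build by pushing the conventionally named branch.
RFC=RFC-0003
VERSION=1.0.0   # example; assigned by the RFC Team, semantic versioning
git checkout -b "${RFC}_candidate_${VERSION}"
git mv "files/drafts/${RFC}" "files/candidates/${RFC}"
git commit -m "Promote ${RFC} to release candidate ${VERSION}"
git push origin "${RFC}_candidate_${VERSION}"   # triggers the candidate PDF build and GitHub release

# The official release later follows the same pattern with the other folder and branch name:
#   files/candidates/${RFC} -> files/releases/${RFC}, branch ${RFC}_release_${VERSION}
```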
- - - - - -[^bitbucket_forking]: -[^gfm]: GitHub Flavored Markdown (_GFM_) -[^git_website]: -[^github_about]: -[^github_action]: -[^github_branches]: -[^github_forking]: -[^github_issues]: -[^github_job_details]: -[^github_pullrequest]: -[^github_release]: -[^main_repo_url]: -[^openreview_about]: -[^openreview_timemachine]: -[^orig_markdown_syntax]: -[^pandoc_markdown]: -[^semver]: -[^template]: -[^zenodo]: diff --git a/files/drafts/RFC-0003/images/phase_3.drawio b/files/drafts/RFC-0003/images/phase_3.drawio deleted file mode 100644 index 137dd57..0000000 --- a/files/drafts/RFC-0003/images/phase_3.drawio +++ /dev/null @@ -1 +0,0 @@ -7VtLc6M4EP41OcbFyxgfN3Yeh2QnlaR216eUDG3QLiBWCD/m168EEmDLSTyz5YA9OcS2Wg16dH+fuhtyYU+S9S1FWfRAAogvLCNYX9jTC8syDc/lX0KyqSSeIwUhxYFUagTP+DuoK6W0wAHkW4qMkJjhbFvokzQFn23JEKVkta22IPH2qBkKQRM8+yjWpX/igEVyFUOjkd8BDiM1smnIngQpZSnIIxSQVUtkX1/YE0oIq34l6wnEYvPUvlTX3bzRW0+MQsoOucB6uET3Vuw9TWE2+ff2j9n3b/PLoZwb26gFQ8DXL5uEsoiEJEXxdSO9oqRIAxB3NXir0bknJONCkwv/BsY20pioYISLIpbEsneB43hCYkLLEW0wgyGMuDxnlPwDrZ6xO7KRW/coA/Ctu9JXLzckJwX14Z0lKy9CNAT2jp5T6Yn9aA0g9/YWSAKMbrgChRgxvNz2FyTdLqz1GsvwH9I4P2Aoed8ligs50gNZ4jQUK4lAAIyiBSvRUYt8lAY4QIzjp/J8oJq5G2MKy6wizOA5Q+UGrjiiP9FwS6AM1u9utepVkJKc4sjmqgForRK1wOkaRzKO3QWK+P7RzV/y+rIxE43BUDWn63bndCNb3aLPOhB9Zq/QZ2noe7rhdzJeACWa7TOCUwb0esk3J5fGqslfWCBAeVRbvmXRty3wls1iNIf4keSYYZLyPh/EyLxDgAnzU+x+R2FOGCNJS+G3GIeigwmvuyIFi3HKp6AOUzFDJFXqm/PFZGKtyToUh/6ALBbYh0GRA82rz2Mh39kB/lADvgov2rgfHgv3425x30B9toX0PuLeORD3o17h3uyE2U/DUma/KNrRKPqF4jAEWgVJiP/dYnZXzKsZAcpFiDQXE8yKPGq06qhJdFOU+tFZBE2W17OgyesCWvXR2z3O3JNkRFeDmULVVxRURkE8/Fny73wgFCmKXxOUohAS3nqVfZ8UHZmurUF8aO4Jj5wj+cpI85UJBe6jray1TbZZsBCzSAPxyRjyJSmXGa3O3WfIyabbNSmbnRSEfjaV7RGbq3rlx2GT1Ss+V/NugfRbBukTLDGsBilfyhex7xI7SRfAPcSHV5Sm3Pf9Lti95o42u4/3sPv4WJ7j/Hq5kcp5Tg7levn4sZjHWOU9VTXrcXqzeyRzJFD+zcFlGR/wwikewM5uQanz83f0df7+HDIPLiy7/UKmXlq+I7mMkV8eamy2UZkX8wTnOS5RWUI0P0s0WqNxx3C0rFOCY8cIPLSgoSzYFwTqJY3ybPTFucePPLEdUJ6CtDz8RDs6lwzUNfoGObNXD1XMfkPOO9FDz9MgVwWWe15Q+FXTzeppqk+SpEj5rcWIR3usao93ouA9qaWjc8DQO9axa2v+8TvphBbWmLVYgbfqJ638d8MJoqEo4TSpxDq0fqXu+CGVSN+6NAajkSo7SwerRzuYbuTtHwUZtFQ4VnKZirb9rZ7F/3DBbgqh0t/MtrfVvveRv51StHi4v/XrPTxLjxZnoKdfn+ArujV+3KJ9sl/DF8MtslABYY+5Qq+hV0k78n3ImDDyjZ4tRCSZF/nJZAojdydTcPVMwduTKHhHy8331DQp8DVWtZN2wWRBRPomXu5Iy/ytfFe2UjufVG7Ut1qmpb+S81S/eNMyzxnsved9XhrNm80/DVQU1vzrhX39Hw== \ No newline at end of file diff --git a/files/drafts/RFC-0003/images/phase_3.png b/files/drafts/RFC-0003/images/phase_3.png deleted file mode 100644 index 1bd744c..0000000 Binary files a/files/drafts/RFC-0003/images/phase_3.png and /dev/null differ diff --git a/files/drafts/RFC-0003/images/phase_4.drawio b/files/drafts/RFC-0003/images/phase_4.drawio deleted file mode 100644 index 1ec3f05..0000000 --- a/files/drafts/RFC-0003/images/phase_4.drawio +++ /dev/null @@ -1 +0,0 @@ 
-7ZlLc9MwEIB/TY50/Eic9NimrwPMdCgMcGIUe2MLZK+R5Dz49Ui25EdFSzikCSmHPLS7tqRdfauVPQrn+eaWkzJ7hwmwUeAlm1F4NQoC35tF6kdLto1kNjaClNPEGHWCB/oT7JVGWtEExMBQIjJJy6EwxqKAWA5khHNcD82WyIa9liQFR/AQE+ZKP9FEZmYWE6+T3wFNM9uz7xlNTqyxEYiMJLjuicLrUTjniLL5l2/mwLTzrF+a626e0LYD41DIXS7YXEy98+xiDD/Ekr+ZldHFgr2ZmLHJrZ0wJGr+polcZphiQdh1J73kWBUJ6Lt6qtXZvEUsldBXwm8g5dYEk1QSlSiTOTPaJWVsjgx53WMIfjKBqZILyfE79DTn0TQkUauxAVCuu3RnbxwisOIxPDNlu4oIT0E+Yxc2dtofvQ6Mb28Bc5B8qww4MCLparheiFl2aWvXRUb9McH5i0CZ+64Iq0xP9xzU9GmRNmMAIkBzhnGVa5+oMRSJXoe4amze38xreDpzYYAA7qyCLsY6YOuMSngoSe3XtQL9BeO5Ai5h82wErNaSZlLN2DTXHbetSdZjNvL2FLPx64Mr2BEu/6jgChy4Glg+AMmdGJZICwn8eqWcI4zT29yug5YQkbUR7EXm6Qg8FTNGFsDuUVBJsVC6GHTPSqGhoGqTevvIYIFSYt4zuGA01QqpV88lVpLRQg3B7pV6hMSYtDdXkyn1XPNNqvf0M1wuaQxnlQAumu99ETx+BPDEAdhWD31+J/vi1z8IwO3yOTzN4a40B0eFs3+QquYfjNv0qMIWOln4A6dpCqbGIepzS+VdtRgUPAs9wLISWWfVU3JSxNlJVDfB7Miqm9krhyz6JyGLHMgsU//LnLrMUfXNSv2KM23ICfuak0Id8vWZ6qvRvVD540ehg/jE/039M97TWpk6a2XOQa3ROtHKDAap9v7qpj1zEilJbBIylfbU6SbvE0zLfnTovOz7jmNfV2I+37Vq9Y4qM587tN1XC6ayW530lCvUIJbI/x9InUy9hoVNzS91NA1mgYN59BvKJ9G+KJ++csotvX/GPDoqzO24e5zfoZD9Z7SlWu6n8Uh27GBz8N0xPAQ3upTcftbXn01s84u5Xd242gxa28eZ+wh42/nR7nFtq3bcPd4+lgxJ0hDXlK2GuKZOLXfbdk+Bx4M/RPDd82iTA6l+OUVitaMLumDQhoVt21MGhyUoX8Sk0RNhHv/UhjU3p5RKI+/lUqlqdu+la13v7X54/Qs= \ No newline at end of file diff --git a/files/drafts/RFC-0003/images/phase_4.png b/files/drafts/RFC-0003/images/phase_4.png deleted file mode 100644 index 85b0800..0000000 Binary files a/files/drafts/RFC-0003/images/phase_4.png and /dev/null differ diff --git a/files/drafts/RFC-0004/RFC-0004.md b/files/drafts/RFC-0004/RFC-0004.md deleted file mode 100644 index d2fd206..0000000 --- a/files/drafts/RFC-0004/RFC-0004.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -# Don't change this header section -title: 'RFC on the RFC Editorial Committee' -subtitle: 'Time Machine RFC-0004' -author: -- Author 1 -header-includes: -- \usepackage{fancyhdr} -- \pagestyle{fancy} -- \fancyhead[R]{} -- \fancyfoot[L]{-release-version-} -output: pdf_document ---- - -# Motivation - -This file is the template to be used for new RFCs. It is intended to be both a starting point for new RFCs and an example of what can be achieved within the limits of a markdown file. - -# File structure - -## YAML-Header - -To aid the generation of the final release-PDF-files, this Markdown file contains a header in the `YAML`-format[^yaml]. Apart from other things it states the RFC-title and subtitle as well as information about the authors of the RFC. - -Please keep the header exactly as-is, its contents will be modified by the RFC-Editorial Team. - -## Sections - -An RFC document should always contain the YAML header, a *Motivation* section directly below the header as the first section in the document, as many author-defined sections as necessary as the main content, a *questions and answers*-section (*Q&A*) and the footnotes at the bottom of the document. Writers can delete the pre-existing section structure (with the exception of the motivation and q&a sections) in this file and add new sections as they see fit. - -The *Motivation* section is intended to show the general reason for the writing of the RFC. It should be concise. - -The *Q&A* section is intended to provide a view on the RFC from a different angle than the one of a traditional paper on a subject. It gives RFC writers the possibility to test their main structures, methods and drawbacks from the point of view of outside readers of the document. It should follow the general form outlined below in the q&a section of this template file. 
- -# TM Glossary - -Terms that are of special significance to the Time Machine must be written in **bold** on their first usage in an RFC document. Definitions and important terms are listed in *RFC-0001 on RFC Glossary*. - -# Markdown - -It is allowed to use the whole range of Markdown features as well as everything supported by Pandoc out of the box. A good overview and introduction can be found in the Markdown specification itself[^daring_markdown] and the Pandoc user documentation[^pandoc_markdown]. - -## Hyperlinks - -RFCs are released as PDF documents. While it is possible to set hyperlinks in Markdown that will also work in PDF documents it is advised to add the actual links in footnotes. This way they can be easily read. Footnotes (similar to the one in the previous paragraph can be created by adding `[^footnote_name]` where the footnote is to be placed and `[^footnote_name]: Content of the footnote` at the end of this file. Only alphanumeric characters and underscores are allowed. Hyperlinks in footnotes should be added in angled brackets: ``. - -## Tables - -It is possible to use the different ways to create table using Pandoc markdown[^pandoc_tables]. The following example uses the simple syntax: - - Right Left Center Default -------- ------ ---------- ------- - 12 12 12 12 - 123 123 123 123 - 1 1 1 1 - -Table: Demonstration of simple table syntax. - -## Images - -It is possible to include images. They should be added in an `images` folder directly next to the markdown document. In the text they are referenced like this: `![Image caption](./images/image_name_including_file_ending.png)`. The path is relative to the location of the Markdown file. - -## Diagrams - -In addition to pre-created images, it is also possible to add so-called *Mermaid*[^mermaid] diagrams and flowcharts to the document. This are two examples: - -```mermaid -graph LR - draft[RFC Draft] --> candidate[RFC Candidate] --> draft - candidate --> release{RFC release} - release -.updates on existing RFCs.-> draft -``` - -```mermaid -pie - title Time spent on RFCs - "Preparation" : 10 - "Draft" : 60 - "Release Candidate" : 30 -``` - -# Q&A - -## Question: Am I allowed to modify the YAML header on top of the file? - -No, the header is aimed at the creation of the release PDF files and should only be changed by the RFC-Editorial Team. - -## Question: Can I omit the Motivation or Q&A sections? - -No, both sections are important for reviewers and implementers to understand the direction of the RFC, it's motivation and possibly problematic points / implications for other parts of the Time Machine. - -## Question: Why doesn't my file look the same in my editor, the GitHub repository and the PDF files? - -The PDF files are created by an automated process using *Pandoc*[^pandoc], a tool to convert between different text formats. It provides more extensive capabilities for text structuring and formatting (for instance footnotes). This additional parts are not understood by the GitHub Markdown parser and might be previewed differently by various markdown editors. - -## Question: Where can I preview how my RFC document will look at the end? - -If you have started your document by forking the official GitHub RFC repository[^rfc_repo] you can see the current version of the RFC drafts in the *Action* section of your own GitHub RFC repository[^github_manage_action] after you pushed a change to it. 
- - - -[^daring_markdown]: -[^github_manage_action]: -[^mermaid]: -[^pandoc]: -[^pandoc_markdown]: -[^pandoc_tables]: -[^rfc_repo]: -[^yaml]: diff --git a/files/drafts/RFC-0005/RFC-0005.md b/files/drafts/RFC-0005/RFC-0005.md deleted file mode 100644 index 914d1ad..0000000 --- a/files/drafts/RFC-0005/RFC-0005.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -# Don't change this header section -title: 'RFC on LTM' -subtitle: 'Time Machine RFC-0005' -author: -- Author 1 -header-includes: -- \usepackage{fancyhdr} -- \pagestyle{fancy} -- \fancyhead[R]{} -- \fancyfoot[L]{-release-version-} -output: pdf_document ---- - -# Motivation - -This file is the template to be used for new RFCs. It is intended to be both a starting point for new RFCs and an example of what can be achieved within the limits of a markdown file. - -# File structure - -## YAML-Header - -To aid the generation of the final release-PDF-files, this Markdown file contains a header in the `YAML`-format[^yaml]. Apart from other things it states the RFC-title and subtitle as well as information about the authors of the RFC. - -Please keep the header exactly as-is, its contents will be modified by the RFC-Editorial Team. - -## Sections - -An RFC document should always contain the YAML header, a *Motivation* section directly below the header as the first section in the document, as many author-defined sections as necessary as the main content, a *questions and answers*-section (*Q&A*) and the footnotes at the bottom of the document. Writers can delete the pre-existing section structure (with the exception of the motivation and q&a sections) in this file and add new sections as they see fit. - -The *Motivation* section is intended to show the general reason for the writing of the RFC. It should be concise. - -The *Q&A* section is intended to provide a view on the RFC from a different angle than the one of a traditional paper on a subject. It gives RFC writers the possibility to test their main structures, methods and drawbacks from the point of view of outside readers of the document. It should follow the general form outlined below in the q&a section of this template file. - -# TM Glossary - -Terms that are of special significance to the Time Machine must be written in **bold** on their first usage in an RFC document. Definitions and important terms are listed in *RFC-0001 on RFC Glossary*. - -# Markdown - -It is allowed to use the whole range of Markdown features as well as everything supported by Pandoc out of the box. A good overview and introduction can be found in the Markdown specification itself[^daring_markdown] and the Pandoc user documentation[^pandoc_markdown]. - -## Hyperlinks - -RFCs are released as PDF documents. While it is possible to set hyperlinks in Markdown that will also work in PDF documents it is advised to add the actual links in footnotes. This way they can be easily read. Footnotes (similar to the one in the previous paragraph can be created by adding `[^footnote_name]` where the footnote is to be placed and `[^footnote_name]: Content of the footnote` at the end of this file. Only alphanumeric characters and underscores are allowed. Hyperlinks in footnotes should be added in angled brackets: ``. - -## Tables - -It is possible to use the different ways to create table using Pandoc markdown[^pandoc_tables]. The following example uses the simple syntax: - - Right Left Center Default -------- ------ ---------- ------- - 12 12 12 12 - 123 123 123 123 - 1 1 1 1 - -Table: Demonstration of simple table syntax. 
- -## Images - -It is possible to include images. They should be added in an `images` folder directly next to the markdown document. In the text they are referenced like this: `![Image caption](./images/image_name_including_file_ending.png)`. The path is relative to the location of the Markdown file. - -## Diagrams - -In addition to pre-created images, it is also possible to add so-called *Mermaid*[^mermaid] diagrams and flowcharts to the document. This are two examples: - -```mermaid -graph LR - draft[RFC Draft] --> candidate[RFC Candidate] --> draft - candidate --> release{RFC release} - release -.updates on existing RFCs.-> draft -``` - -```mermaid -pie - title Time spent on RFCs - "Preparation" : 10 - "Draft" : 60 - "Release Candidate" : 30 -``` - -# Q&A - -## Question: Am I allowed to modify the YAML header on top of the file? - -No, the header is aimed at the creation of the release PDF files and should only be changed by the RFC-Editorial Team. - -## Question: Can I omit the Motivation or Q&A sections? - -No, both sections are important for reviewers and implementers to understand the direction of the RFC, it's motivation and possibly problematic points / implications for other parts of the Time Machine. - -## Question: Why doesn't my file look the same in my editor, the GitHub repository and the PDF files? - -The PDF files are created by an automated process using *Pandoc*[^pandoc], a tool to convert between different text formats. It provides more extensive capabilities for text structuring and formatting (for instance footnotes). This additional parts are not understood by the GitHub Markdown parser and might be previewed differently by various markdown editors. - -## Question: Where can I preview how my RFC document will look at the end? - -If you have started your document by forking the official GitHub RFC repository[^rfc_repo] you can see the current version of the RFC drafts in the *Action* section of your own GitHub RFC repository[^github_manage_action] after you pushed a change to it. - - - -[^daring_markdown]: -[^github_manage_action]: -[^mermaid]: -[^pandoc]: -[^pandoc_markdown]: -[^pandoc_tables]: -[^rfc_repo]: -[^yaml]: diff --git a/files/drafts/RFC-0033/RFC-0033.md b/files/drafts/RFC-0033/RFC-0033.md new file mode 100644 index 0000000..eb6193b --- /dev/null +++ b/files/drafts/RFC-0033/RFC-0033.md @@ -0,0 +1,131 @@ +--- +# Don't change this header section +title: "RFC on Map and Cadaster Processing pipeline" +subtitle: "Time Machine RFC-0033" +author: + - Author 1 +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead[R]{} + - \fancyfoot[L]{-release-version-} +output: pdf_document +--- + +# Motivation + +This file is the template to be used for new RFCs. It is intended to be both a +starting point for new RFCs and an example of what can be achieved within the +limits of a markdown file. + +# File structure + +## YAML-Header + +To aid the generation of the final release-PDF-files, this markdown file +contains a header in the `YAML`-format[^yaml]. Apart from other things it states +the RFC-title and subtitle as well as information about the authors of the RFC. + +Please keep the header exactly as-is, its contents will be modified by the +RFC-Editorial Team. 
+ +## Sections + +An RFC document should always contain the YAML header, a _Motivation_ section +directly below the header as the first section in the document, as many +author-defined sections as necessary as the main content, a _questions and +answers_-section (_Q&A_) and the footnotes at the bottom of the document. +Writers can delete the pre-existing section structure (with the exception of the +motivation and q&a sections) in this file and add new sections as they see fit. + +The _Motivation_ section is intended to show the general reason for the writing +of the RFC. It should be concise. + +The _Q&A_ section is intended to provide a view on the RFC from a different +angle than the one of a traditional paper on a subject. It gives RFC writers the +possibility to test their main structures, methods and drawbacks from the point +of view of outside readers of the document. It should follow the general form +outlined below in the q&a section of this template file. + +# TM Glossary + +Terms that are of special significance to the Time Machine must be written in +**bold** on their first usage in an RFC document. Definitions and important +terms are listed in _RFC-0001 on RFC Glossary_. + +# Markdown + +It is allowed to use the whole range of markdown features as well as everything +supported by Pandoc out of the box. A good overview and introduction can be +found in the markdown specification itself[^daring_markdown] and the Pandoc user +documentation[^pandoc_markdown]. + +## Hyperlinks + +RFCs are released as PDF documents. While it is possible to set hyperlinks in +markdown that will also work in PDF documents it is advised to add the actual +links in footnotes. This way they can be easily read. Footnotes (similar to the +one in the previous paragraph can be created by adding `[^footnote_name]` where +the footnote is to be placed and `[^footnote_name]: Content of the footnote` at +the end of this file. Only alphanumeric characters and underscores are allowed. +Hyperlinks in footnotes should be added in angled brackets: +``. + +## Tables + +It is possible to use the different ways to create table using Pandoc +markdown[^pandoc_tables]. The following example uses the simple syntax: + +| Column 1 | Column 2 | Column 3 | +| -------- | -------- | -------- | +| Foo | Bar | Baz | +| Alpha | Beta | Gamma | + +Table: Demonstration of simple table syntax. + +## Images + +It is possible to include images. They should be added in an `images` folder +directly next to the markdown document. In the text they are referenced like +this: `![Image caption](./images/image_name_including_file_ending.png)`. The +path is relative to the location of the markdown file. + +# Q&A + +## Question: Am I allowed to modify the YAML header on top of the file? + +No, the header is aimed at the creation of the release PDF files and should only +be changed by the RFC-Editorial Team. + +## Question: Can I omit the Motivation or Q&A sections? + +No, both sections are important for reviewers and implementers to understand the +direction of the RFC, it's motivation and possibly problematic points / +implications for other parts of the Time Machine. + +## Question: Why doesn't my file look the same in my editor, the GitHub repository and the PDF files? + +The PDF files are created by an automated process using _Pandoc_[^pandoc], a +tool to convert between different text formats. It provides more extensive +capabilities for text structuring and formatting (for instance footnotes). 
This +additional parts are not understood by the GitHub markdown parser and might be +previewed differently by various markdown editors. + +## Question: Where can I preview how my RFC document will look at the end? + +If you have started your document by forking the official GitHub RFC +repository[^rfc_repo] you can see the current version of the RFC drafts in the +_Action_ section of your own GitHub RFC repository[^github_manage_action] after +you pushed a change to it. + + + +[^daring_markdown]: +[^github_manage_action]: + + +[^pandoc]: +[^pandoc_markdown]: +[^pandoc_tables]: +[^rfc_repo]: +[^yaml]: diff --git a/files/preface/preface.md b/files/preface/preface.md new file mode 100644 index 0000000..f564c8f --- /dev/null +++ b/files/preface/preface.md @@ -0,0 +1,12 @@ +# Preface + +This document summarizes the efforts of the Time Machine _Request for Comments_ +effort. It is automatically created from the contents of the TM RFC +repository[^tm_rfc_repo] and published following a loose semi-annual schedule +with additional releases on important milestones. All RFCs are written by the +Time Machine RFC Team and Editorial Committee as well as all individual RFC +authors in a joint effort. + +-- _The Time Machine RFC Editorial Committee_ + +[^tm_rfc_repo]: diff --git a/files/releases/RFC-0000/RFC-0000.md b/files/releases/RFC-0000/RFC-0000.md new file mode 100644 index 0000000..fa487d1 --- /dev/null +++ b/files/releases/RFC-0000/RFC-0000.md @@ -0,0 +1,111 @@ +--- +# Don't change this header section +title: "RFC on RFCs" +subtitle: "Time Machine RFC-0000" +author: + - Frédéric Kaplan + - Kevin Baumer + - Mike Kestemont + - Daniel Jeller +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead[R]{} + - \fancyfoot[L]{-release-version-} +output: pdf_document +--- + +# Motivation + +Reaching consensus on the technology options to pursue in a programme as large +as Time Machine is a complex issue. To ensure the open development and +evaluation of work, a process inspired by the Request for Comments (RFC) that +was used for the development of the Internet protocol[^ietf_rfc_791] is being +adapted to the needs of Time Machine. Time Machine Requests for Comments are +freely accessible publications, identified with a unique ID, that constitute the +main process for establishing rules, recommendations and core architectural +choices for Time Machine components. + +# Approach + +The Time Machine RFCs are based on the following principles: + +1. Accessibility. **RFCs** are freely accessible, at no cost. +2. Openness. Anybody can write an **RFC**. +3. Identification. Each **RFC**, once published, has a unique ID and version + number. It can nevertheless be revised over time as a living document, being + republished with the same ID and a different version number. +4. Incrementalism. Each **RFC** should be useful in its own right and act as a + building block for others. Each **RFC** must be intended as a contribution + to, extension or revision of the Time Machine Infrastructure. +5. Standardisation. **RFCs** should aim to make use of standardised terms to + improve the clarity level of its recommendation. +6. Scope. **RFCs** are designed contributions and implementation solutions for + solving practical problems. **RFCs** are not research papers and may not + necessarily contain experimental evidence. RFCs cover not only the technical + infrastructure but the data standards, legal frameworks, and values and + principles of Time Machine. +7. Self-defining process. 
As used for the development of the Internet, **RFCs** + are the main process for establishing Time Machine Infrastructure and + Processes and also the processes and roles for managing **RFCs** themselves. + +# RFC Publication Process + +![75 % center](rfc_process.jpg) + +The **RFC Editorial Committee** organises the publication process of the RFCs, +maintains the consistency of the RFC System, appoints RFC teams to organise new +RFCs and to improve existing RFCs, keeps track of RFC versioning and ensures the +timely and regular publication of RFCs. The governance and organisation of the +**RFC Editorial Committee** is defined in **RFC-0004**. + +The publication process is the following : + +1. The **RFC Editorial Committee** appoints authors to write the RFCs planned in + the **RFC tree** (RFC-0002). Alternatively, authors may contact the **RFC + Editorial Committee** to submit their candidature to write an RFC (planned in + the **RFC tree** or not). +2. The authors produce an RFC draft which is reviewed, first by the **RFC + Editorial Committee** for coherence with the rest of the RFC corpus and then + by a larger community. The RFC is revised and possibly sent for review again. +3. Once accepted by the **RFC Editorial Committee**, an RFC receives an official + identifier and is officially published comparable to a peer-reviewed + publication with proper scholarly credits assigned to the original author(s). +4. If necessary, the **RFC tree** is adapted to include the published RFC and + any possible sub-RFCs planned during the writing of the RFC. + +# RFC Format + +The RFC Format and Guidelines are established iteratively by the **RFC Editorial +Committee**. The most-up-to-date version can be found in the **RFC-0000**. + +Current Format + +1. Motivation section +2. Series of sections describing the approach and solution +3. Question and answers (Q&A) section +4. Linked RFCs section + +# Question and Answers + +## What are the main differences between Time Machine RFCs and Internet Society RFCs? + +The Time Machine RFCs are being developed over 50 years after the RFCs that +shaped in the Internet. The main differences are the following: + +1. Time Machine RFCs are exclusively used to describe motivated solutions and + not general communication. +2. Time Machine RFCs can be revised and are redefined iteratively, whereas + significant improvement on an Internet Society RFC leads to the creation of a + new RFC. + +# Linked RFCs + +- The **RFC Tree** is kept up to date in **RFC-0002**. +- The details of the RFC platform are defined in the **RFC-0003**. +- The governance and function of the **RFC Editorial Committee** is defined in + **RFC-0004**. 
+ + + +[^ietf_rfc_791]: diff --git a/files/drafts/RFC-0000/rfc_process.jpg b/files/releases/RFC-0000/rfc_process.jpg similarity index 100% rename from files/drafts/RFC-0000/rfc_process.jpg rename to files/releases/RFC-0000/rfc_process.jpg diff --git a/files/drafts/RFC-0001/RFC-0001.md b/files/releases/RFC-0001/RFC-0001.md similarity index 85% rename from files/drafts/RFC-0001/RFC-0001.md rename to files/releases/RFC-0001/RFC-0001.md index b1f4db4..ca09613 100644 --- a/files/drafts/RFC-0001/RFC-0001.md +++ b/files/releases/RFC-0001/RFC-0001.md @@ -1,92 +1,89 @@ --- -title: 'RFC on RFC Glossary' -subtitle: 'Time Machine RFC-0001' +# Don't change this header section +title: "RFC on RFC Glossary" +subtitle: "Time Machine RFC-0001" author: -- Frédéric Kaplan -- Kevin Baumer -- Daniel Jeller - -# Don't change the following lines + - Frédéric Kaplan + - Kevin Baumer + - Daniel Jeller header-includes: -- \usepackage{fancyhdr} -- \pagestyle{fancy} -- \fancyhead[R]{} -- \fancyfoot[L]{-release-version-} + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead[R]{} + - \fancyfoot[L]{-release-version-} output: pdf_document --- -# RFC-0001: RFC on RFC Glossary - -## Motivation +# Motivation -The Time Machine Glossary's function is to provide clear definitions of the terms used in the RFCs and throughout related Time Machine documentation. It should be updated regularly as new RFCs are introduced and published. +The Time Machine Glossary's function is to provide clear definitions of the terms used in the RFCs and throughout related Time Machine documentation. It should be updated regularly as new RFCs are introduced and published. -## Glossary +# Glossary 4D Map -: A core element of the Time Machine which is used to locate resources, services and reconstructions. It is both the map where activities can be followed and the map that aggregates results. It is used for plotting **Local Time Machines** in particular. The density of the 4D Maps is not uniform, in particular some zones may be modelled only in 3D, 2D and even 1D, such as a list of included elements. +: A core element of the Time Machine which is used to locate resources, services and reconstructions. It is both the map where activities can be followed and the map that aggregates results. It is used for plotting **Local Time Machines** in particular. The density of the 4D Maps is not uniform, in particular some zones may be modelled only in 3D, 2D and even 1D, such as a list of included elements. -4D Simulator -: One of the Time Machine **Engines**. It manages a continuous spatiotemporal simulation of all possible pasts and futures that are compatible with the data. The 4D Simulator includes a multiscale hierarchical architecture for dividing space and time into discrete volumes with unique identifiers. It is a simulation engine for producing new datasets based on the information stored, and each possible spatiotemporal multiscale simulation corresponds to a multidimensional representation in the 4D computing infrastructure. When a sufficient spatiotemporal density of data is reached, it can produce a 3D representation of the place at a chosen moment in History. In navigating the representation space, one can also navigate in alternative simulations of the past and future. Uncertainty and incoherence are managed at each stage of the process and are directly associated with the corresponding reconstructions of the past and the future. +4D Simulator +: One of the Time Machine **Engines**. 
It manages a continuous spatiotemporal simulation of all possible pasts and futures that are compatible with the data. The 4D Simulator includes a multiscale hierarchical architecture for dividing space and time into discrete volumes with unique identifiers. It is a simulation engine for producing new datasets based on the information stored, and each possible spatiotemporal multiscale simulation corresponds to a multidimensional representation in the 4D computing infrastructure. When a sufficient spatiotemporal density of data is reached, it can produce a 3D representation of the place at a chosen moment in History. In navigating the representation space, one can also navigate in alternative simulations of the past and future. Uncertainty and incoherence are managed at each stage of the process and are directly associated with the corresponding reconstructions of the past and the future. Annotators -: Dedicated **Apps** for annotating images and documents. +: Dedicated **Apps** for annotating images and documents. API -: The abbreviation of *Application programming interface*. It is a contract and technical implementation that defines the ways to programmatically interact with a particular piece of software. +: The abbreviation of *Application programming interface*. It is a contract and technical implementation that defines the ways to programmatically interact with a particular piece of software. App -: An application, either web-based or not, that performs operations on Time Machine **Components**. Apps can be developed by the **Time Machine Organisation** or by third parties. Apps are pieces of software (in general built as part of official **Projects** but not necessarily) that enables users to experience and edit the information in the **Data Graph** and the **4D Map**. They can be grouped into families of Apps like the **Navigators** or the **Annotators**. +: An application, either web-based or not, that performs operations on Time Machine **Components**. Apps can be developed by the **Time Machine Organisation** or by third parties. Apps are pieces of software (in general built as part of official **Projects** but not necessarily) that enables users to experience and edit the information in the **Data Graph** and the **4D Map**. They can be grouped into families of Apps like the **Navigators** or the **Annotators**. Big Data of the Past -: A huge distributed digital information system mapping social, cultural and geographical evolution. A key objective of Time Machine is to ensure that there is a system to bring together dense, interoperable, standardised (linked data, preferably open) and localised (marked up with spatial-temporal information) social, cultural and geographical heritage resources. +: A huge distributed digital information system mapping social, cultural and geographical evolution. A key objective of Time Machine is to ensure that there is a system to bring together dense, interoperable, standardised (linked data, preferably open) and localised (marked up with spatial-temporal information) social, cultural and geographical heritage resources. Code Library -: A collection of software components written in Python (and possibly later other languages) regrouping key **Operators** for processing Data in the Time Machine Environment. +: A collection of software components written in Python (and possibly later other languages) regrouping key **Operators** for processing Data in the Time Machine Environment. 
-Component -: A part of the Time Machine that is itself a Machine in the sense of systems reacting in a predictable manner to input given an internal State. Each component can react or be acted upon through a defined set of inputs / operations. Some of these operations can be internal, others can be external. Internal Operations change the internal structure or state of the Machine and therefore update its history. External Operations changes are characterised by the usage of a Component, producing an external output based on the current State of the Machine. Sequences of consecutive states permit the reconstruction of the current state of the component. +Component +: A part of the Time Machine that is itself a Machine in the sense of systems reacting in a predictable manner to input given an internal State. Each component can react or be acted upon through a defined set of inputs / operations. Some of these operations can be internal, others can be external. Internal Operations change the internal structure or state of the Machine and therefore update its history. External Operations changes are characterised by the usage of a Component, producing an external output based on the current State of the Machine. Sequences of consecutive states permit the reconstruction of the current state of the component. Conflict Detectors -: Specialized **Apps** that detect incoherence in the **Data Graph** and help users to solve them. +: Specialized **Apps** that detect incoherence in the **Data Graph** and help users to solve them. Data Graph : A formal representation of knowledge using semantic web technologies and extracted by human or automatic processes. Data Synchronisation -: An automated process that compares data documented by a project with conflicting data from another project or an inference. +: An automated process that compares data documented by a project with conflicting data from another project or an inference. Digital Content Processor -: An automated process extracting information from documents (images, video, sound, etc.). Level 1 Digital Content Processor labels mentions of entities. Level 2 Digital Content Processor labels relations between entities. The Digital Content Processor of Level 3 labels rules. Each process is fully traceable and reversible. The results of the processing constitute the core dataset of the **Big Data of the Past** and are integrated into the **Data Graph**. +: An automated process extracting information from documents (images, video, sound, etc.). Level 1 Digital Content Processor labels mentions of entities. Level 2 Digital Content Processor labels relations between entities. The Digital Content Processor of Level 3 labels rules. Each process is fully traceable and reversible. The results of the processing constitute the core dataset of the **Big Data of the Past** and are integrated into the **Data Graph**. Digitisation Hubs -: Decentral and local structures that offer digitisation services based on open-hardware technology. They are an example of a **Service to Local Time Machines** for digitisation (e.g. scanning of documents, streets, 3D scanning). Contractual aspects are dealt with via **Standard Contracts**. The Digitisation Hubs will enable Time Machine to seamlessly aggregate new documents and metadata into a **Data Graph** with the appropriate standards in terms of resolution, file formats and metadata assured during acquisition. 
As a service, it can be associated with a **Zone of Coverage** and will use **Standard Metrics** to determine the exact price of operations. As Digitisation Hubs are based on open-hardware technology they can be duplicated anywhere and can thus progressively create a dense network of service providers. +: Decentral and local structures that offer digitisation services based on open-hardware technology. They are an example of a **Service to Local Time Machines** for digitisation (e.g. scanning of documents, streets, 3D scanning). Contractual aspects are dealt with via **Standard Contracts**. The Digitisation Hubs will enable Time Machine to seamlessly aggregate new documents and metadata into a **Data Graph** with the appropriate standards in terms of resolution, file formats and metadata assured during acquisition. As a service, it can be associated with a **Zone of Coverage** and will use **Standard Metrics** to determine the exact price of operations. As Digitisation Hubs are based on open-hardware technology they can be duplicated anywhere and can thus progressively create a dense network of service providers. Engine : An automated software mechanism that operates on the **Data Graph** to enrich it. Examples of Engines include: the **4D Simulator**, the **Large-Scale Inference Engine** and the **Universal Representation Engine**. Frontier Year -: Assuming a strict **Retrochronological approach**, it is the oldest year in the past for which the **Information Skeleton** of a given place is established. This may, for instance, correspond to the data extracted from the most ancient cadastral map of a city. +: Assuming a strict **Retrochronological approach**, it is the oldest year in the past for which the **Information Skeleton** of a given place is established. This may, for instance, correspond to the data extracted from the most ancient cadastral map of a city. GeoEntities : Existing geographical entities (e.g. a list of Places documented in **OpenStreetMap**) defined as standard Geographical Information System objects (points - lines - polygons). -Importers -: **Apps** that enable linking of existing datasets to the **Data Graph**, for example: - + IIIF Source Importer - + Collection creator - + Geographical Layer Importer (e.g. GeoJSON importer) - + Datatable importer (e.g. CSV) - + Cadastral Parcels Importer - + Cloud Point Importer - + Spherical Image Importer - + 3D Model Importer +Importers +: **Apps** that enable linking of existing datasets to the **Data Graph**, for example: + + IIIF Source Importer + + Collection creator + + Geographical Layer Importer (e.g. GeoJSON importer) + + Datatable importer (e.g. CSV) + + Cadastral Parcels Importer + + Cloud Point Importer + + Spherical Image Importer + + 3D Model Importer Information Skeleton -: A data structure that contains the core information of the **Data Graph**. It should be reconstructed first. +: A data structure that contains the core information of the **Data Graph**. It should be reconstructed first. Large-Scale Inference Engine -: One of the Time Machine **Engines**, capable of inferring the consequences of chaining any information in the database. It enables Time Machine to induce new logical consequences of existing data and is used to shape and to assess the coherence of the **4D Simulations** based on human-understandable concepts and constraints. +: One of the Time Machine **Engines**, capable of inferring the consequences of chaining any information in the database. 
It enables Time Machine to induce new logical consequences of existing data and is used to shape and to assess the coherence of the **4D Simulations** based on human-understandable concepts and constraints. Local Datasets : Data produced and published about a particular place. Published Datasets are Open Access and can be freely downloaded, even if they are not yet integrated with the global **Data Graph**. They have authors and are possibly derived from other datasets. The history of their production must be documented, hence they cannot be unpublished once made available, and they appear in in the **Local Time Machine Webspace** @@ -104,53 +101,56 @@ Local Time Machine Density of Operations : A key data point that characterises the number of activities underway in a given **Local Time Machine** at the given time. It characterises both the activities that are part of **Projects** and those related to Citizen Scientist activities. Local Time Machine Operations -: A list of basic operations produced by **Projects** and concerning particular **GeoEntitites**. They appear as a log of activities in the **Local Time Machines Webspace**. +: A list of basic operations produced by **Projects** and concerning particular **GeoEntities**. They appear as a log of activities in the **Local Time Machines Webspace**. Local Time Machine Visual Plan -: An automatically generated representation of the spatiotemporal focus of different **Projects** participating in a **Local Time Machine**. +: An automatically generated representation of the spatiotemporal focus of different **Projects** participating in a **Local Time Machine**. Local Time Machine Webspace : A part of the **Time Machine Website** dedicated to the **Local Time Machines**. It includes information on **Local Time Machine Activities**, **Services to Local Time Machines** and the **Local Datasets**. It also features information on all **Local Time Machine** **Projects** linked to a given **Municipality**. Long-term Preservation Service -: Services offered by operators to enable the long-term preservation of particular datasets (e.g. IIIF repository). Some of these services may include innovative technology such as DNA storage. +: Services offered by operators to enable the long-term preservation of particular datasets (e.g. IIIF repository). Some of these services may include innovative technology such as DNA storage. Memory Store -: The aspect of physical artifacts as stores of complex information from and about the past. Local Time Machines use extraction tools to redocument the content of these memory stores into the **Data Graph**. +: The aspect of physical artifacts as stores of complex information from and about the past. Local Time Machines use extraction tools to redocument the content of these memory stores into the **Data Graph**. Metadata : Data about data in its most narrow sense. This includes data about file formats or modification dates for digital files or information about the authorship of a book, creation date of a work of art or various handwriting patterns in a written text. Metadata Generation -: The automatic generation of **Metadata** using methods of pattern recognition, machine learning and artificial intelligence. Such automated extraction methods are already being explored for hand-written text recognition and entity extraction. Yet this is only a first step, as today’s systems fail in identifying, for example, text in complex 2-D scenes such as paintings and maps and on 3-D objects such as vases and sculptures. 
Significant amounts of metadata currently being created by curators in museums and archives could, however, be generated automatically, such as classification of materials and object sizes, as well as linking to other public data sources such as wiki data given appropriate digitisation workflows and sensors. By doing so, Time Machine strives to create a new dimension of accessibility in digital history and cultural heritage. By indexing objects by their content, it will also be possible to link collection items at large scale, thus providing the capability to propagate metadata through these connections. In the current design, this help for metadatation will be provided through **Time Machine Tools and API**. This will enable Time Machine to combine automatic suggestions of metadata with expert curation. +: The automatic generation of **Metadata** using methods of pattern recognition, machine learning and artificial intelligence. Such automated extraction methods are already being explored for hand-written text recognition and entity extraction. Yet this is only a first step, as today’s systems fail in identifying, for example, text in complex 2-D scenes such as paintings and maps and on 3-D objects such as vases and sculptures. Significant amounts of metadata currently being created by curators in museums and archives could, however, be generated automatically, such as classification of materials and object sizes, as well as linking to other public data sources such as Wikidata, given appropriate digitisation workflows and sensors. By doing so, Time Machine strives to create a new dimension of accessibility in digital history and cultural heritage. By indexing objects by their content, it will also be possible to link collection items at large scale, thus providing the capability to propagate metadata through these connections. In the current design, this help with metadata creation will be provided through **Time Machine Tools and API**. This will enable Time Machine to combine automatic suggestions of metadata with expert curation. Municipalities -: A geo-spatial division of the surface of earth into administratively non-ambiguous zones. The list of **Municipalities** is therefore that of potential Local Time Machines and is fixed and predetermined. Thus, this sets the granularity of the **Local Time Machines**. +: A geo-spatial division of the surface of the Earth into administratively non-ambiguous zones. The list of **Municipalities** is therefore that of potential Local Time Machines and is fixed and predetermined. Thus, this sets the granularity of the **Local Time Machines**. -Navigators -: Tools to view and query the data in Time Machine similar to the web browser for the World Wide Web. They interpret the **Data Graph** and the **4D Map** to create different processes for exploration. Various typologies of Navigators can be designed: Search Engines, Collection Browsers, 4D Browser (web-based or application-based for better performances), Data Graph Inspector / Editor, Pattern Finder / Navigators, Genealogical Explorers, etc. +Navigators +: Tools to view and query the data in Time Machine similar to the web browser for the World Wide Web. They interpret the **Data Graph** and the **4D Map** to create different processes for exploration. Various typologies of Navigators can be designed: Search Engines, Collection Browsers, 4D Browser (web-based or application-based for better performance), Data Graph Inspector / Editor, Pattern Finder / Navigators, Genealogical Explorers, etc.
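As a purely illustrative aside on the **Data Graph** and **Navigators** entries above: since the Data Graph is described as "a formal representation of knowledge using semantic web technologies", a Navigator-style query could, in principle, look like the minimal sketch below. The namespace, the example triples and the property names are invented for this example only; they are not part of any actual Time Machine API or dataset.

```python
# Illustrative only: a toy semantic graph queried the way a Navigator might.
# The tm: namespace and the triples are hypothetical, not real Time Machine data.
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import RDF, RDFS

TM = Namespace("https://example.org/timemachine/")  # hypothetical namespace

g = Graph()
palazzo = TM["place/palazzo_ducale"]
g.add((palazzo, RDF.type, TM.Place))
g.add((palazzo, RDFS.label, Literal("Palazzo Ducale")))
g.add((palazzo, TM.mentionedIn, TM["source/cadastre_1808"]))

# A Navigator could expose a query like this one behind a search box.
query = """
    SELECT ?label ?source WHERE {
        ?place a tm:Place ;
               rdfs:label ?label ;
               tm:mentionedIn ?source .
    }
"""
for label, source in g.query(query, initNs={"tm": TM, "rdfs": RDFS}):
    print(label, source)
```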
OpenStreetMap : A public project[^openstreetmap] to create a free and editable map of the world. It is commonly abbreviated *OSM*. Operator -: Functions in the **Code Library** that can be chained in a procedural way. +: Functions in the **Code Library** that can be chained in a procedural way. Operation Graph -: A formal representation of the past, ongoing and future operations of the partners in the **Time Machine Network**. +: A formal representation of the past, ongoing and future operations of the partners in the **Time Machine Network**. Place -: An identified geo-spatial container in which activities can occur. A **Place** corresponds to a particular extent in space, usually determined in reference to another place. A place can be contained in another **Place** and can contain other **Places**. **Places** are symbolic entities in which temporarily extended processes can occur. A **Place** may not be determined by a geographical coordinate. Some moving entities are places (e.g. a car, a plane, a ship). +: An identified geo-spatial container in which activities can occur. A **Place** corresponds to a particular extent in space, usually determined in reference to another place. A place can be contained in another **Place** and can contain other **Places**. **Places** are symbolic entities in which temporarily extended processes can occur. A **Place** may not be determined by a geographical coordinate. Some moving entities are places (e.g. a car, a plane, a ship). Project -: An operation, typically conducted by institutions or individuals that produces data for a particular or several **GeoEntities**. They may be new or redocumentation of ancient projects, can mine **Sources** and ingest their extracted data into the **Data Graph**. They are associated with a **Zone of Coverage** that links them with **Local Time Machines**, producing content for GeoEntities. They may also produce intermediary datasets that can be downloaded even if they are not yet integrated in the Data Graph. Projects can develop **Apps** that interact with the **4D Map** and the Data Graph and contribute to the **Code Library** by working on the GitHub repository of the Time Machine to produce new **Operators**. These different objectives are non-exclusive from one another. The **Project Development Space** is a space in the **Time Machine Website** that features the Apps relevant to the development of Projects. +: An operation, typically conducted by institutions or individuals that produces data for a particular or several **GeoEntities**. They may be new or redocumentation of ancient projects, can mine **Sources** and ingest their extracted data into the **Data Graph**. They are associated with a **Zone of Coverage** that links them with **Local Time Machines**, producing content for GeoEntities. They may also produce intermediary datasets that can be downloaded even if they are not yet integrated in the Data Graph. Projects can develop **Apps** that interact with the **4D Map** and the Data Graph and contribute to the **Code Library** by working on the GitHub repository of the Time Machine to produce new **Operators**. These different objectives are non-exclusive from one another. The **Project Development Space** is a space in the **Time Machine Website** that features the Apps relevant to the development of Projects. Project Repository -: The cental tool to monitor all active Time Machine projects. +: The central tool to monitor all active Time Machine projects. 
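To make the **Operator** entry above slightly more concrete, chaining functions "in a procedural way" could be as simple as composing small processing steps, as in the hedged sketch below. The operator names (normalise, segment_lines, transcribe) are invented examples, not actual Code Library functions.

```python
# Hypothetical sketch of chaining Code Library "Operators" into a pipeline.
from functools import reduce

def normalise(page):
    return {**page, "image": f"normalised({page['image']})"}

def segment_lines(page):
    return {**page, "lines": [f"line-{i}" for i in range(3)]}

def transcribe(page):
    return {**page, "text": [f"text of {line}" for line in page["lines"]]}

def chain(*operators):
    """Compose Operators left to right into a single callable pipeline."""
    return lambda doc: reduce(lambda acc, op: op(acc), operators, doc)

pipeline = chain(normalise, segment_lines, transcribe)
print(pipeline({"image": "scan_042.tif"}))
```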
Retrochronological Approach : A way to model the traversal of information available about an entity starting from the present day and moving backwards in time, retropropagating the data precision found in the present through **Transfer**. +RFC book +: An automatically created PDF document containing all released and draft RFC texts for simpler reading and sharing. + RFC Editorial Committee : The people responsible for managing the **Time Machine Request for Comments** process. It is fully defined in *RFC-0003*. @@ -161,16 +161,16 @@ Scouting Projects : **Projects** that operate beyond the **Frontier year** Segmenters -: **Operators** that can be used for segmenting objects and persons in images, parcels in Cadastral Maps, performing Document Layout analysis or 3D object segmentation. +: **Operators** that can be used for segmenting objects and persons in images, parcels in Cadastral Maps, performing Document Layout analysis or 3D object segmentation. Service to Local Time Machines : Services offered to **Local Time Machines**, appearing as active **Apps** for some Local Time Machines and using **Standard Contracts**. Services typically appear in the **Local Time Machine WebSpace**. Examples include **Digitisation Hubs**, **Time Machine Box** and **Long-term preservation Service**. Spatio-temporal Anchoring -: The objective of Time Machine is to localise cultural objects in space and time. For instance, an object in a museum can be associated with several spatio-temporal references: its place of production, the various places it was stored before entering the museum and the spatio-temporal entity it represents. One consequence of this is that a museum in Paris may contribute to many **Local Time Machines** in the world, having, for instance, paintings representing Venice, Madrid or Budapest. Inversely, in many cases, the information for reconstructing the past of a city or a site is scattered in collections all over the world. The granularity of such spatio-temporal repositioning can be more or less precise or more or less uncertain. To be ingested in the **Data Graph**, the museum will create a dedicated project whose goal is to produce the spatio-temporal anchoring of some of its collections in one way or another and to document the process by which this repositioning has been done. In many cases, this work has already been done by the curator or researchers in the museums and the goal is thus only to redocument this work in the Time Machine framework. In other cases, dedicated projects must be conducted. +: The objective of Time Machine is to localise cultural objects in space and time. For instance, an object in a museum can be associated with several spatio-temporal references: its place of production, the various places it was stored before entering the museum and the spatio-temporal entity it represents. One consequence of this is that a museum in Paris may contribute to many **Local Time Machines** in the world, having, for instance, paintings representing Venice, Madrid or Budapest. Inversely, in many cases, the information for reconstructing the past of a city or a site is scattered in collections all over the world. The granularity of such spatio-temporal repositioning can be more or less precise or more or less uncertain. To be ingested in the **Data Graph**, the museum will create a dedicated project whose goal is to produce the spatio-temporal anchoring of some of its collections in one way or another and to document the process by which this repositioning has been done. 
In many cases, this work has already been done by the curator or researchers in the museums and the goal is thus only to redocument this work in the Time Machine framework. In other cases, dedicated projects must be conducted. Sources -: Entities that contain information that can be extracted (mined). It is typical to distinguish primary sources (e.g. archival records) from secondary sources (e.g. academic articles). +: Entities that contain information that can be extracted (mined). It is typical to distinguish primary sources (e.g. archival records) from secondary sources (e.g. academic articles). Standard Contracts : Sets of standard contracts used to facilitate the interaction between Time Machine partners, for instance in the case of **Digitisation Hubs**. @@ -179,43 +179,43 @@ Standard Metrics : Metrics that help partners in the **Time Machine Network** to compare performance with one another and to set benchmarks. Technical Charter -: A document that defines the Time Machines Rules, Recommendations, Metrics and Official software. The document is periodically revised. +: A document that defines the Time Machines Rules, Recommendations, Metrics and Official software. The document is periodically revised. Time Machine API : An **API** that can be used by **Projects** to interact with the **Data Graph**. Time Machine Box -: A collection of hard- and software that allows partners to store their documents and metadata in a decentralised way, to easily integrate them into the **Time Machine Network** and to ensure they are appropriately documented in the **Data Graph**. The Time Machine Box is one of the **Time Machine Official Components**. +: A collection of hard- and software that allows partners to store their documents and metadata in a decentralised way, to easily integrate them into the **Time Machine Network** and to ensure they are appropriately documented in the **Data Graph**. The Time Machine Box is one of the **Time Machine Official Components**. Time Machine CSA -: A project with 33 partners supported by European Commission, taking place from 2019-2020, to produce a 10-year roadmap for Time Machine. All deliverables and roadmaps are available publically[^csa_info]. +: A project with 33 partners supported by European Commission, taking place from 2019-2020, to produce a 10-year roadmap for Time Machine. All deliverables and roadmaps are available publicly[^csa_info]. Time Machine Community : All users registered in the **Time Machine Website**. Each user's Time Machine ID can be used to login to third parties **Apps**, and, depending on their level of activity, users may reach different status types linked with particular privileges in terms of operations. These statuses are defined and organised taking inspiration from the Wikipedia systems of privilege (e.g. Administrators, Stewards, etc.). Time Machine Index -: A global system for indexing different types of objects: e.g. documents; iconography; 3D geometries. It gathers all information on documents and their contents and can be used as a basis for additional search engine infrastructure. +: A global system for indexing different types of objects: e.g. documents; iconography; 3D geometries. It gathers all information on documents and their contents and can be used as a basis for additional search engine infrastructure. Time Machine Infrastructure Alliance -: The coalition of the Time Machine partners consisting of in-kind donators of infrastructure components (server space and computing power). 
+: The coalition of the Time Machine partners consisting of in-kind donors of infrastructure components (server space and computing power). Time Machine Horizon : The asymptotic objective of the Time Machine. Time Machine Mirror World -: The **Time Machine Mirror World** is a digital twin of the real world, enabling 4D navigation. It is based an advanced stage of the **4D Map** associated with dedicated **Apps**. The **Time Machine Mirror World** results from the the processing of the Time Machine **Engines** producing a continuous representation model that can be accessed as an information stratum overlaying the real world. +: The **Time Machine Mirror World** is a digital twin of the real world, enabling 4D navigation. It is based on an advanced stage of the **4D Map** associated with dedicated **Apps**. The **Time Machine Mirror World** results from the processing of the Time Machine **Engines**, producing a continuous representation model that can be accessed as an information stratum overlaying the real world. Time Machine Network -: The **Time Machine Network** is the set of all the partners actually interacting in the Time Machine as part of **Projects**. +: The **Time Machine Network** is the set of all the partners actually interacting in the Time Machine as part of **Projects**. Time Machine Official Components -: Pieces of hard- and software (e.g. **Time Machine Box**) that help partners conform to and interact with the **Time Machine Tools** and **Time Machine API** to simplify interaction with the **Data Graph**. +: Pieces of hard- and software (e.g. **Time Machine Box**) that help partners conform to and interact with the **Time Machine Tools** and **Time Machine API** to simplify interaction with the **Data Graph**. Time Machine Organisation -: A non-profit Association under the Austrian Law responsible for Time Machine governance and operations. +: A non-profit Association under Austrian law responsible for Time Machine governance and operations. Time Machine Request for Comments -: A set of documents as well as a process of develoment and feedback for the progressive design of the Time Machine infrastructure, standards, recommendations and rules, inspired by the process which has been used for 50 years for the development of Internet Technology, today administrated by the Internet Engineering Task Force (IETF) as part of Internet Society (ISOC). Their functioning is described in *RFC-0000* and related RFCs. +: A set of documents as well as a process of development and feedback for the progressive design of the Time Machine infrastructure, standards, recommendations and rules, inspired by the process which has been used for 50 years for the development of Internet Technology, today administered by the Internet Engineering Task Force (IETF) as part of the Internet Society (ISOC). Their functioning is described in *RFC-0000* and related RFCs. Time Machine Tools : Data ingestion and curation services offered to **Projects** to enter their data in the **Data Graph** @@ -224,13 +224,13 @@ Time Machine Website : Main website of the Time Machine[^tm_website]". Includes the **Local Time Machine WebSpace** Transfer -: Transfer of a structure of a given year to the year before assuming no change has occurred. Linked with the principle of continuity. If the transfer of a given structure is not impossible, it means that an event occurred. This event must be modelled in order to push the information backward.
+: Transfer of a structure of a given year to the year before assuming no change has occurred. Linked with the principle of continuity. If the transfer of a given structure is not impossible, it means that an event occurred. This event must be modelled in order to push the information backward. Universal Representation Engine -: One of the Time Machine **Engines**. It manages a multidimensional representation of latent space resulting from the integration of the pattern of extremely diverse types of digital cultural artefacts (text, images, videos, 3D), and permitting new types of data generation based on transmodal pattern understanding. +: One of the Time Machine **Engines**. It manages a multidimensional representation of latent space resulting from the integration of the pattern of extremely diverse types of digital cultural artefacts (text, images, videos, 3D), and permitting new types of data generation based on transmodal pattern understanding. Zone of Coverage -: The geo-spatial zone of activity of a **Project**. +: The geo-spatial zone of activity of a **Project**. diff --git a/files/releases/RFC-0002/RFC-0002.md b/files/releases/RFC-0002/RFC-0002.md new file mode 100644 index 0000000..b1e9295 --- /dev/null +++ b/files/releases/RFC-0002/RFC-0002.md @@ -0,0 +1,1702 @@ +--- +# Don't change this header section +title: "RFC on RFC Tree" +subtitle: "Time Machine RFC-0002" +author: + - François Ballaud + - Frédéric Kaplan + - Isabella di Lenardo + - Kevin Baumer + - Daniel Jeller +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead[R]{} + - \fancyfoot[L]{-release-version-} +colorlinks: true +output: pdf_document +--- + + + +# Motivation + +The process for writing RFCs requires long-term planning instruments which will +enable writers to not only access constantly updated versions of current RFCs +but also to have a view on future planned RFCs. The **RFC Tree** as described in +this RFC will provide an up-to-date description of all planned RFCs, including a +short textual motivation for each and listing their dependencies with other +RFCs. + +# Definition + +**RFC Tree** is the metaphorical description and the hierarchical representation +of the **Time Machine Request for Comments** development plan that will be used +as a baseline scenario to help monitor the progress and achievements towards +completion of the **Time Machine Horizon** over the coming 10 years. + +# Behaviour + +While the **RFC Tree** itself results from the progressive completion of RFCs +over time, it also acts as a blueprint for defining the incremental steps used +to build the Time Machine Infrastructure. + +**RFC Tree** behaves as a body of law for Time Machine, containing all RFC +documentation and documenting the production process, as well as a progress +indicator showing the progress of upgrades to Time Machine components in real +time. + +**RFC Tree** is a macro-architecture plan that shapes dependencies between RFCs. +It could be divided into micro thematic or productive arcs, as each RFC belongs +to a specific sequence and is classified in one of the following categories: +Framework, Infrastructure, Data, Local Time Machines. + +Singular RFCs are the basic units of the **RFC Tree**. The 70+ initial RFCs are +listed at the end of this document. This initial set is an indicative path to +completion of the **Time Machine Horizon** and may be expanded/modified/edited +as needed. 
The tree-like structure and subsequent dependency chains which +connect singular RFC guarantee that the **Time Machine Horizon** will be +realised, as each RFC edit/removal/addition will cause dynamic adjustments and +reorganisation. + +This RFC provides the most up-to-date version of the **RFC Tree**. + +\newpage + +# RFC Tree + +\newpage + +## RFC-0000 - RFC on RFCs {#rfc-0000} + +Reaching consensus on the technology options to pursue in a programme as large +as Time Machine is a complex issue. To ensure open development and evaluation of +work, a process inspired by the Request for Comments (RFC) that was used for the +development of the Internet protocol is being adapted to the needs of Time +Machine. Time Machine Requests for Comments are freely accessible publications, +identified with a unique ID, that constitute the main process for establishing +rules, recommendations and core architectural choices for Time Machine +components. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +\newpage + +## RFC-0001 - RFC on TM Glossary {#rfc-0001} + +The Time Machine Glossary's function is to provide clear definitions of the +terms used in the RFCs and throughout related Time Machine documentation. It +should be updated regularly as new RFCs are introduced and published. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0000](#rfc-0000) + +\newpage + +## RFC-0002 - RFC on RFC Tree {#rfc-0002} + +The process for writing RFCs requires long-term planning instruments which will +enable writers to not only access constantly updated versions of current RFCs +but also to have a view on future planned RFCs. The RFC Tree as described in +this RFC will provide an up-to-date description of all planned RFCs, including a +short textual motivation for each and listing their dependencies with other +RFCs. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0000](#rfc-0000) + +\newpage + +## RFC-0003 - RFC on Publication Platform {#rfc-0003} + +This Request for Comments (RFC) describes the inner workings and technical +details of the RFC platform itself. It aims to provide the technical framework +for authorship, review, community contribution and publication of all future +Time Machine RFCs. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0000](#rfc-0000) +- [RFC-0001](#rfc-0001) +- [RFC-0004](#rfc-0004) + +\newpage + +## RFC-0004 - RFC on RFC Editorial Committee {#rfc-0004} + +This RFC outlines the basic policies and procedures related to the RFC Editorial +Committee, many of which have been inspired by the IEEE Signal Processing +Society. The main items that this document defines are: the organisation of the +RFC Editorial Committee, how members are appointed, procedure for applications, +length of term and main duties. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0000](#rfc-0000) +- [RFC-0001](#rfc-0001) +- [RFC-0003](#rfc-0003) + +\newpage + +## RFC-0005 - RFC on Local Time Machines {#rfc-0005} + +In order to build a planetary-scale Time Machine, it is necessary to define an +organic incremental strategy. To succeed, the Time Machine must be able to +progressively anchor itself in local territories, bringing added value to local +activities directly, spurring the creation of new projects to mine information +of the past contained in surviving objects and documents. 
Local Time Machines +(LTM) can be defined as zones of higher density of activities towards +reconstruction of the past. This RFC defines the dynamics that will: enable +bootstrapping of Local Time Machines, facilitate the onboarding of new projects, +valorise the data extracted, facilitate the involvement of the local population, +develop use cases for exploitation avenues, and eventually enable the +development of sustainable structures where Big Data of the Past are fruitfully +exploited thus leading to a constant increase of such activities. + +### Status + +- Release + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0000](#rfc-0000) +- [RFC-0001](#rfc-0001) +- [RFC-0006](#rfc-0006) +- [RFC-0007](#rfc-0007) + +\newpage + +## RFC-0006 - RFC on Technical Charter {#rfc-0006} + +The goal of the Technical Charter is to guarantee a first level of +standardisation for data and processes, in order to remain light and usable by +most, and the charter also encourages the use of universal and open interfaces +and references that do not require central coordination + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +\newpage + +## RFC-0007 - RFC on Vision Mission and Values Charter {#rfc-0007} + +In order to protect Time Machine's overarching purpose, fundamental values and +ethical principles, a common charter will be created. Its duty will be to +protect the core of TM and sustain its future. Becoming a TM network member +implies ratification of the Vision, Mission and Values Charter + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +\newpage + +## RFC-0008 - RFC on Intellectual Property Rights and Licenses {#rfc-0008} + +Defining licenses to preserve intellectual property rights (regulating data +acquisition, sharing and publishing) and to sustain the interoperability and +accessibility of the TM. The proposed solutions could be based on the Creative +Commons copyright licenses and should be further developed with the help of +cultural-heritage networks (e.g. Europeana) who are experienced with these +issues and are already proposing solutions. Means of monitoring the openness +process of data should also be taken into consideration. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0006](#rfc-0006) +- [RFC-0007](#rfc-0007) + +\newpage + +## RFC-0009 - RFC on Training {#rfc-0009} + +Complying with TM Rules and Recommendations, legal settings, using TM components +and understanding TM infrastructures will require specific training. A proper +set of documentation, tutorials, videos and online courses will be offered to +partners. This RFC will set the general rules for development of training +materials, and these principles will subsequently be used by other more specific +training RFCs. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0008](#rfc-0008) + +\newpage + +## RFC-0010 - RFC on LTM Value Scale {#rfc-0010} + +The proposed LTM Value Scale (Key concepts and global overview – Local Time +Machines), based on density criteria, will be outlined and its relevance and +organisation further developed. Value scales concerning Projects will be created +based on previously defined bricks and other criteria required in order to +foster the development of Projects (e.g. collaboration, cooperation metrics). As +some of the measures will relate to qualitative processes, a dedicated RFC on +collaboration indicators will focus on creating a suitable system of metrics. 
+The labelling system may require a third-party certification to assess its +efficiency and accuracy. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0011 - RFC on LTM Training {#rfc-0011} + +Specific RFC rooted in the general principles established by **RFC on Training** +but adapted to the framework developed in the **RFC on LTM**. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0005](#rfc-0005) +- [RFC-0009](#rfc-0009) + +\newpage + +## RFC-0012 - RFC on Definition of Typologies of Digitisation Interventions {#rfc-0012} + +A typology of digitisation interventions will be established, grouping: + +1. Collections that can be moved and processed in digital hubs (large, + non-fragile collections), collections or objects that need local intervention + (e.g. very fragile documents, statues, buildings) +2. Processes that can be performed by volunteers using mobile technology (e.g. + scanning campaign across cities, on-the-fly digitisation in reading rooms), + processes that can be performed using robots and drones, etc. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +\newpage + +## RFC-0013 - RFC on Standardisation and Homologation {#rfc-0013} + +Definition of the terms and contracts enabling digitisation partners to become +part of the Time Machine network. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0012](#rfc-0012) + +\newpage + +## RFC-0014 - RFC on Digitisation Priorities and Data Selection {#rfc-0014} + +A data selection model based on the identification of performance criteria will +help partners to focus on priority aspects and to take decisions accordingly. +For reference, the National Information Standards Organisation (NISO) has +already proposed a framework for guidance on Building Good Digital Collections. +Examples of performance criteria include: Significance of Content to Internal +Stakeholders (degree to which a collection, once digitised, supports both the +immediate and long-term research and teaching needs of the institution), +Significance of Content to External Stakeholders (a highly successful digital +collection is of interest to researchers and users outside of the university), +Uniqueness (many unique institutional resources such as original photographs, +archival materials, grey literature such as university technical reports and +conference proceedings have not yet been digitised), Exposure (degree to which a +digital collection garners positive recognition and press for an institution, and +the potential for digital availability of the collection to result in +grants and other funding). This digitisation recommendation will also be in line +with the LTM coordinated strategy for digitisation. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0012](#rfc-0012) + +\newpage + +## RFC-0015 - RFC on Open Hardware {#rfc-0015} + +Definition of the open hardware strategy for Time Machine including licensing +terms and catalogues. + +### Status + +- Proposal + +### Schedule + +- 2020 + +### Dependencies + +- [RFC-0012](#rfc-0012) + +\newpage + +## RFC-0016 - RFC on Data Lifecycle {#rfc-0016} + +The data lifecycle within the TM begins with the concept of documents and data +selection. The goal is to assist partners in selecting proper documents or +collections to be processed by the TM pipelines and then subsequently which data +within the documents.
The data selection is closely related to the LTM or +project perimeters (see LTM/Framework) and should be stated prior to any project +launch. Criteria such as intellectual property rights, obtaining copyright +permissions, digitisation, OCR processing or metadata creation costs must also +be taken into account. Extending the results of the **RFC on Digitisation +Priorities and Data Selection**, the RFC will also detail data acquisition, data +sharing and data publishing. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0005](#rfc-0005) +- [RFC-0014](#rfc-0014) + +\newpage + +## RFC-0017 - RFC on Operation Graph {#rfc-0017} + +Definition of the format of the operation graph describing the operations +currently pursued by the TM partners, as monitored by the **Time Machine +Organisation**. It includes both automatic processes and human interventions. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0016](#rfc-0016) + +\newpage + +## RFC-0018 - RFC on TM Data Graph {#rfc-0018} + +The Time Machine **Data Graph** contains all information modelled in the Time +Machine. The graph is constructed both manually and automatically through the +processing of the **Digital Content Processor**. The **Data Graph** is +intrinsically composed of two sub-parts: (1) The bright graph, which is composed +of information that has been manually mapped and integrated with other large +databases or used in a publication. This information is integrated with the +current sum of digital human knowledge. It can be considered actual. (2) The +dark graph, which is composed of information extracted automatically from +(massive) documentation which has to this point been used in a stand-alone +fashion as individual historic items. It can be considered virtual. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0016](#rfc-0016) + +\newpage + +## RFC-0019 - RFC for TM APIs {#rfc-0019} + +Algorithms and software integrated into the Time Machine must be able to +communicate with each other, thus a definition of joint APIs is required. It is +likely that TM Services will be built on top of REST interfaces. In order to +meet TM's needs, these will have to be adapted towards use in large-scale +machine learning. A probable addition is the option to provide the gradient +information of a specific module that is integrated using the API, for example. +This way, remote services can also be integrated into large-scale training +processes. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0017](#rfc-0017) + +\newpage + +## RFC-0020 - RFC for Classification and Planning of Languages to Address {#rfc-0020} + +Definition of Time Machine's multilingual strategy. This RFC impacts the +exploitation platforms, as the TM will handle documents in multiple European +languages and dialects. Some of these languages might be more complicated to +address than others due to pre-existing tools for modern variants or +availability of materials. A working plan for natural language processing (NLP) +tool development should be conceived by taking into consideration the materials, +the locations of the LTMs and the Digitisation Hubs, and the features of the +languages. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0018](#rfc-0018) + +\newpage + +## RFC-0021 - RFC on Annotation {#rfc-0021} + +Definition of the annotation protocols used for documenting the TM Data +Graph.
This RFC will be used by exploitation platforms. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0018](#rfc-0018) + +\newpage + +## RFC-0022 - RFC on Digital Content Processor Development and Testing {#rfc-0022} + +Digital Content Processors (DCP) are automatic processes for extracting +information from documents (images, video, sound, etc.) As such, the following +pipeline may be envisioned: (1) Development of a DCP in a dedicated “Sandbox” (a +place where trial and error tests can be undertaken without compromising the +entire functioning of the Time Machine architecture). Training will be done on +existing labelled documents. (2) Submission of the DCP to the Time Machine +Organisation's dedicated service. (3) After some benchmark and assessments of +performance, resulting in acceptance or rejection, the suitable DCP becomes an +Official TM Component. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0019](#rfc-0019) +- [RFC-0027](#rfc-0027) + +\newpage + +## RFC-0023 - RFC on Digital Content Processor (DCP) of Level 1 {#rfc-0023} + +DCP are automatic processes for extracting information from documents (images, +video, sound, etc.) DCP of Level 1 only label mentions of entities. Each +processing is fully traceable and reversible, and the results of the processing +make up the core dataset of the Big Data of the Past and are integrated into the +TM Data Graph. The document should define: + +1. The technical conditions for implementing DCP that can be inserted in the + Time Machine Operation Graph. +2. The requirements for hosting DCP in the TM Super Computing Infrastructure. + The process by which DCP are developed, tested, labelled, published and put + in operation. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0018](#rfc-0018) +- [RFC-0022](#rfc-0022) + +\newpage + +## RFC-0024 - RFC on Digital Content Processor (DCP) of Level 2 {#rfc-0024} + +DCP are automatic processes for extracting information from documents (images, +video, sound, etc.) DCP of Level 2 label relations between entities. Each +processing is fully traceable and reversible, and the results of the processing +make up the core dataset of the Big Data of the Past and are integrated into the +TM Data Graph. The document should define: + +1. The technical conditions for implementing DCP that can be inserted in the + Time Machine Operation Graph. +2. The requirements for hosting DCP in the TM Super Computing Infrastructure. + The process by which DCP are developed, tested, labelled, published and put + in operation. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0023](#rfc-0023) + +\newpage + +## RFC-0025 - RFC on Digital Content Processor (DCP) of Level 3 {#rfc-0025} + +DCP are automatic processes for extracting information from documents (images, +video, sound, etc.) DCP of Level 3 label rules. Each processing is fully +traceable and reversible, and the results of the processing constitute the core +dataset of the Big Data of the Past and are integrated in the TM Data Graph. The +document should define: + +1. The technical conditions for implementing DCP that can be inserted in the + Time Machine Operation Graph. +2. The requirements for hosting DCP in the TM Super Computing Infrastructure. + The process by which DCP are developed, tested, labelled, published and put + in operation. 
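As a hedged illustration of the three DCP levels described in RFC-0023 to RFC-0025 above (Level 1: entity mentions, Level 2: relations, Level 3: rules, each fully traceable to its source), the sketch below shows one possible shape for their outputs. The class and field names are assumptions made for this example only; the RFCs themselves will define the real formats.

```python
# Illustrative data shapes for DCP outputs; all field names are hypothetical.
from dataclasses import dataclass
from typing import Tuple

@dataclass
class EntityMention:            # Level 1: mentions of entities
    document_id: str            # traceability back to the source document
    span: Tuple[int, int]       # character offsets in the transcription
    label: str

@dataclass
class Relation:                 # Level 2: relations between entities
    document_id: str
    subject: EntityMention
    predicate: str
    obj: EntityMention

@dataclass
class Rule:                     # Level 3: rules extracted from documents
    document_id: str
    premise: str
    conclusion: str

mention = EntityMention("venice_cadastre_1808", (120, 134), "Person")
print(mention)
```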
+ +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0024](#rfc-0024) + +\newpage + +## RFC-0026 - RFC on Synergy and interaction in EU Research Infrastructure {#rfc-0026} + +The TM digitisation network will build upon existing EU Research Infrastructures +(DARIAH, CLARIN) and infrastructure providing access to Cultural Heritage +(Europeana, Archive Portal Europe, etc.). TM will introduce new processing +pipelines for transforming and integrating Cultural Heritage data into such +infrastructures. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +\newpage + +## RFC-0027 - RFC on General Standards for the Super Computing Architecture {#rfc-0027} + +This document will define the general rules that the TM Network partners must +follow to integrate their computing resources into the TM Super Computing +Architecture and the routing processes managing the data pipelines. In +particular, this document will define: + +1. The hardware and software standards that the computing resources will follow + across the entire distributed Super Computing Architecture. +2. The routing protocols of the TM Operation Graphs. +3. The processes for naming and renewing the administrators. +4. The role of the TMO in managing the infrastructure. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0017](#rfc-0017) +- [RFC-0018](#rfc-0018) +- [RFC-0026](#rfc-0026) + +\newpage + +## RFC-0028 - RFC on Time Machine Box {#rfc-0028} + +Also meant to cover storage needs for data, the TM Box should help partners +involved in a data acquisition, sharing or publishing process to conform to the +metadata specifications and delivery – harvesting protocols as stated by the +data model. One of its goals is to smooth and contribute to the +automation of the digitisation process (offering for instance a way of +monitoring the digitisation tasks). This hardware is part of the Time Machine +Official Components. The RFC will define how the production of the Time Machine +Box should be managed in the long term. + +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0017](#rfc-0017) + +\newpage + +## RFC-0029 - RFC on Digitisation Hubs {#rfc-0029} + +Defining the functioning and business model for the Digitisation Hubs. In order +for the Digitisation Hubs to be implemented, standards in terms of resolution, +file formats and metadata during acquisition need to be defined beforehand (RFC +on Technical Charter). These standards must be consensual and simple in order to +ensure easy implementation and fit into existing practices. The RFC must also +evaluate relevant technologies and recommend affordable technologies that do not +damage the objects, while also providing the best possible results at the same +time. Time Machine aims to distribute affordable technology on a large scale +using, for example, open design hardware. More costly and dedicated scanning +solutions, such as scan robots and tomographic methods, should be made available +in specialised centres spread across the European Union such that their services +are available to a maximum number of users. The objective of achieving +affordable and widespread digitisation should be a priority in this RFC.
+ +### Status + +- Proposal + +### Schedule + +- 2021 + +### Dependencies + +- [RFC-0015](#rfc-0015) +- [RFC-0028](#rfc-0028) + +\newpage + +## RFC-0030 - RFC for Named Entity Recognition {#rfc-0030} + +This RFC defines named entity recognition for older European languages and +variants. The results of the tagging of entities will feed the Dark Data Graph +with new information. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0021](#rfc-0021) +- [RFC-0023](#rfc-0023) + +\newpage + +## RFC-0031 - RFC on Text Recognition and Processing Pipeline {#rfc-0031} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0032 - RFC on Structured Document Pipeline {#rfc-0032} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0033 - RFC on Map and Cadaster Processing pipeline {#rfc-0033} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Draft + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0034 - RFC on Audio Processing Pipeline {#rfc-0034} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0035 - RFC on Video Processing Pipeline {#rfc-0035} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0036 - RFC on Music Score Pipeline {#rfc-0036} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0037 - RFC on Photographic Processing Pipeline {#rfc-0037} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0038 - RFC on Photogrammetric Pipeline {#rfc-0038} + +This RFC defines the general architecture for this particular kind of media. It +must be aligned with the LTM central services. + +### Status + +- Proposal + +### Schedule + +- 2022 + +### Dependencies + +- [RFC-0023](#rfc-0023) +- [RFC-0024](#rfc-0024) +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0039 - RFC on Enhancing Collaboration {#rfc-0039} + +Investigating how to support partnerships across Time Machine member networks +(e.g. 
Europeana, Icarus) or external cultural-heritage networks. The reflection +should focus on a number of different aspects: how to enhance collaboration +internally and externally, both at the level of the LTM and the TMO (e.g. with +other LTM or partner networks); how to enhance exchange of best practices; how +to share content and enhance collaboration between already existing cultural +heritage networks and associations. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0040 - RFC on Franchise System {#rfc-0040} + +A franchise model clarifying the financial relationship between LTMs and the +TMO’s services will be put in place, in accordance with the financial needs and +costs of Time Machine's technical and coordination infrastructures. The +franchise system must be scalable and adaptable, as the network will grow along +with its reputation in the public eye and the financial benefit generated for +LTMs. The franchise system is meant to be complementary to that which is +established for the TMO partners. To enter an LTM, an institution should at +minimum be required to become a member of the TMO. One of the subtasks of this +RFC is to also assess and to further design the role of the TMO as a “Financial, +Economic, Intelligence and Watch services office”. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0041 - RFC on Solidarity {#rfc-0041} + +How to select, align and finance the “redocumentation” project, for potential +projects compatible with TM goals but placed in a stand-by stage for a period of +time, will be the main task of this RFC. What would it take (training, +education) to ensure the project's scalability? + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0042 - RFC on Top-Down initiatives {#rfc-0042} + +Defining what local and national measures might contribute to the creation of +LTMs. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0043 - RFC on Distributed Storage {#rfc-0043} + +The TM distributed storage system will aim to offer an alternative solution +to current HTTP-based storage. While in a first phase, most documents and data +will be stored on specific servers accessible through standard protocols (e.g. +images on IIIF servers), the aim of the project is to develop a storage solution +that would have the following objectives: (1) Giving access to high volumes of +data with high efficiency, (2) Optimising storage to store more data, (3) +Implementing long-term preservation of data, preventing accidental or deliberate +deletion and keeping a fully versioned history of the data stored, (4) +Guaranteeing authenticity of the data stored and preventing the inclusion of +fake sources. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0017](#rfc-0017) +- [RFC-0018](#rfc-0018) +- [RFC-0026](#rfc-0026) +- [RFC-0028](#rfc-0028) + +\newpage + +## RFC-0044 - RFC on Distributed Storage System for Public Data {#rfc-0044} + +This document will define the infrastructure principles for a decentralised +solution for public datasets based on Creative Commons licences like CC-0, CC-BY, +CC-BY-NC and the Europeana rights declarations. Storage will be maintained for +the resources shared by the partners of the TM Infrastructure Alliance.
A +distributed system like IPFS (InterPlanetary File System) and the work done by +the IPFS Consortium for persistence of IPFS objects may be a good starting +point, as such types of file systems do not identify a resource by its location +but by a unique identification number. Routing algorithms optimised through P2P +algorithms are the most efficient ways to bring the data to the visualisation or +computing processes. This also speeds up the process when the host is a region +with low connectivity. It is critical that redundancy and long-term resilience +can be guaranteed, which means that the system must be designed to make any +public data content that it stores un-deletable in practice. In turn, this will +make it difficult to censor content, and for this reason it is especially well +adapted for public data associated with creative common licenses. Systems like +IPFS also provide the possibility for each node of the network to choose the +categories of data they accept to replicate. This provides some flexibility in +the negotiation of a common strategy by the Time Machine Infrastructure +Alliance. To ensure the authenticity of the data stored, a blockchain type +solution could be the solution. The interaction between the distributed file +system and the authentication solution will be defined by the RFC. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0043](#rfc-0043) + +\newpage + +## RFC-0045 - RFC on Distributed Storage System for Private Data {#rfc-0045} + +Private datasets could be stored in either: (1) A specific layer of the +distributed storage system, provided reliable cryptographic and authentication +systems are in place, (2) A “fenced” location offered by partners of the Time +Machine Network as storage solution. In both cases, the RFC should define how +such closed datasets could use Time Machine infrastructure services and under +which conditions. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0043](#rfc-0043) + +\newpage + +## RFC-0046 - RFC on On-demand Digitisation {#rfc-0046} + +This RFC defines the process for on-demand digitisation, enabling any user to +request the digitisation of a specific document. An alignment with the archival +description system will be necessary. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0017](#rfc-0017) +- [RFC-0018](#rfc-0018) + +\newpage + +## RFC-0047 - RFC on Global Optimization of Digitisation Process {#rfc-0047} + +Definition of the strategy for optimising the digitisation processes. The +objective of this RFC is, among others, to avoid digitisation of redundant +printed material via a synchronization of all digitisation initiatives. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0014](#rfc-0014) +- [RFC-0026](#rfc-0026) +- [RFC-0044](#rfc-0044) +- [RFC-0046](#rfc-0046) + +\newpage + +## RFC-0048 - RFC for Orthographic Normalisation {#rfc-0048} + +This RFC defines the orthographic normalisation of older European language +variants. The results will improve the search functionality of the databases. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0020](#rfc-0020) + +\newpage + +## RFC-0049 - RFC on Content Filtering {#rfc-0049} + +Content filtering may be necessary to control the exposure of users of Time +Machine services to unsolicited content. 
Finding the right technologies which +allow such controls without enabling the possibility of abusive censorship +operations will be the challenge of this RFC. + +### Status + +- Proposal + +### Schedule + +- 2023 + +### Dependencies + +- [RFC-0007](#rfc-0007) + +\newpage + +## RFC-0050 - RFC on Knowledge Transfer {#rfc-0050} + +Investigating how to support achievements and knowledge transfer inside the TM +network, ensuring a global research collaboration at a European scale. There are +only rare examples of large-scale research data management models, dealing with +similar complexity levels as the TM; however, some guidelines can be found such +as: “Guidance Document Presenting a Framework for Discipline-specific Research +Data Management” (Science Europe, January 2018), “Practical Guide to the +International Alignment of Research Data Management” (Science Europe, November +2018). + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0039](#rfc-0039) + +\newpage + +## RFC-0051 - RFC on Smart Cluster {#rfc-0051} + +Defining the rules to be followed by the future smart clusters (for instance +compliance with LTM rules and recommendations), ensuring means for the creation +of such a space for creativity, supporting inter-disciplinary exchanges, +political involvement and job creation, defining what types of relations could +be built between the participants of the smart clusters and the partners of the +LTM, and how to monitor, evaluate and revise or update the process. + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0005](#rfc-0005) + +\newpage + +## RFC-0052 - RFC on Collaboration Indicators {#rfc-0052} + +This RFC defines key performance indicators to evaluate the level of +collaboration among TM partners, both at a local and global level. It will also +develop "affinity maps" to suggest potential future collaborations. + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0039](#rfc-0039) + +\newpage + +## RFC-0053 - RFC on Large-Scale Inference Engine {#rfc-0053} + +The Large-Scale Inference Engine is capable of inferring the consequences of +chaining any information in the database. This enables Time Machine to induce +new logical consequences of existing data. The Large-Scale Inference Engine is +used to shape and to assess the coherence of the 4D simulations based on +human-understandable concepts and constraints. Its origin derives from more +traditional logic-based AI technology, slightly overlooked since the recent +success of the deep learning architecture, which can, nevertheless, play a key +role in an initiative like TM. This document will specify the various types of +rules that the Large-Scale Inference Engine can process, including: rules +extracted from documents by DCP, implicit rules made explicit, and rules +(statistical or not) induced from the data. The document should define: the +process by which rules are submitted, tested and integrated in the engine, and +the processes for managing conflicting rules or results from various rules. The +document will also motivate implementation of solutions in relation with +existing deployed systems like Wolfram Alpha or IBM Watson and standards like +OWL, SKOS.
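Purely as an illustration of what "chaining information" with rules can mean, here is a minimal forward-chaining sketch under invented facts and a single invented rule; the real engine would operate on the Data Graph at an entirely different scale and with proper conflict handling.

```python
# Toy forward chaining: repeatedly apply rules until no new facts appear.
# The facts and the rule below are invented examples, not Time Machine data.
facts = {("lives_in", "Marco", "Venice"), ("part_of", "Venice", "Veneto")}

def apply_rules(facts):
    """Derive new facts from pairs of existing facts (one hard-coded rule)."""
    derived = set(facts)
    for (p1, a, b) in facts:
        for (p2, c, d) in facts:
            if p1 == "lives_in" and p2 == "part_of" and b == c:
                derived.add(("lives_in", a, d))  # propagate across containment
    return derived

# Iterate to a fixpoint: stop when a pass adds nothing new.
while True:
    new_facts = apply_rules(facts)
    if new_facts == facts:
        break
    facts = new_facts

print(sorted(facts))
```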
+ +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0025](#rfc-0025) + +\newpage + +## RFC-0054 - RFC on the 4D Grid {#rfc-0054} + +Through a hierarchical division of space (3D) and time, the system organises a +multi-resolution 4D grid which serves as a general spatiotemporal index. Each +“cube” in the grid indexes all the information relevant for these particular +spatiotemporal elements. It offers an efficient perspective for organising large +datasets and performing collective curation through manual and automatic +processes. Each element of the grid will also potentially be labelled according +to other various multidimensional criteria, some of them being AI-based +descriptors (e.g. descriptors for architectural style detection in images). + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0018](#rfc-0018) + +\newpage + +## RFC-0055 - RFC on 4D Simulations {#rfc-0055} + +The 4D grid is sparse, as there are many places/times in the world that are not +directly associated with existing archival data. A central research challenge is +to develop AI systems capable of extending the information of the data grid in +space and time through continuous extrapolation and interpolation, and +developing new ways of visualising which parts of the content are anchored in +sourced data, simulated or unknown. Extensions of current deep-learning +generative methods, originally developed for 2D imaging, can be envisioned to +deal with the richness of the 4D datasets. Many 4D simulations can be associated +with the same 4D grid, and one central challenge is to manage this multiplicity +of worlds and their specific resolution levels for various services of the Time +Machine (e.g. entertainment, policy planning). + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0054](#rfc-0054) + +\newpage + +## RFC-0056 - RFC TM Tools for History Research {#rfc-0056} + +To engage researchers in social sciences and humanities is to productively use +the Big Data of the Past, and the TM can offer researchers a series of tools +that facilitate analyses. These tools will be enhanced by the Digital Content +Processor and the Simulation Engines, which will enable scholars to work with +historical data in an unprecedented way. + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0053](#rfc-0053) +- [RFC-0055](#rfc-0055) + +\newpage + +## RFC-0057 - RFC on New Scanning Technology {#rfc-0057} + +Cutting-edge technologies, such as automatic scanning machines with low human +supervision, scanning robots and solutions for scanning films and books without +the need to unroll/open them, should be considered and fostered by the TM. A +specific scheme to incentivise these technologies will be created. The goal is +to reach an appropriate mix of dedicated specialized scanning centres and aid +the development of mobile special use hardware, e.g. mobile CT scanners that are +mounted on trucks. + +### Status + +- Proposal + +### Schedule + +- 2024 + +### Dependencies + +- [RFC-0015](#rfc-0015) +- [RFC-0029](#rfc-0029) + +\newpage + +## RFC-0058 - RFC on 4D Simulator {#rfc-0058} + +The 4D Simulator manages a continuous spatiotemporal simulation of all possible +pasts and futures that are compatible with the data. 
The 4D Simulator includes a +multi-scale hierarchical architecture for dividing space and time into discrete +volumes with a unique identifier: a simulation engine for producing new datasets +based on the information stored. Each possible spatiotemporal multi-scale +simulation corresponds to a multidimensional representation in the 4D computing +infrastructure. When sufficient spatiotemporal density of data is reached, it +can produce a 3D representation of the place at a chosen moment in European +history. In navigating the representation space, one can also navigate in +alternative past and future simulations. Uncertainty and incoherence are managed +at each stage of the process and directly associated with the corresponding +reconstructions of the past and the future. The document should specify the +interaction of the 4D simulator with the rest of the architecture, answering +questions like: How can an entity of the TM Data Graph be associated with a +particular element of the 4D Grid?, How can 4D simulations be run and cached for +future use?, How can the system be used directly in exploitation platforms? + +### Status + +- Proposal + +### Schedule + +- 2025 + +### Dependencies + +- [RFC-0055](#rfc-0055) + +\newpage + +## RFC-0059 - RFC on Universal Representation Engine {#rfc-0059} + +The Universal Representation Engine manages a multidimensional representation of +a space resulting from the integration of a pattern of extremely diverse types +of digital cultural artefacts (text, images, videos, 3D and time) and permitting +new types of data generation based on trans-modal pattern understanding. In such +a space, the surface structure of any complex cultural artefact, landscape or +situation is seen as a point in a multidimensional vector space. On this basis, +it could generate a statue or a building or produce a piece of music or a +painting, based only on its description, geographical origins and age. The +document will specify the integration of the URE in the global architecture, +outlining for example how a given node in the TM Data Graph can be associated +with a parametric representation space. + +### Status + +- Proposal + +### Schedule + +- 2025 + +### Dependencies + +- [RFC-0019](#rfc-0019) + +\newpage + +## RFC-0060 - RFC for Machine Translation {#rfc-0060} + +Definition of the architecture for multilingual diachronic machine translation. +Existing algorithms for machine translation will be adapted to older language +variants of European languages. This will densify the Data Graph and provide +more input to the Large-Scale Inference Engine. + +### Status + +- Proposal + +### Schedule + +- 2025 + +### Dependencies + +- [RFC-0030](#rfc-0030) +- [RFC-0048](#rfc-0048) +- [RFC-0053](#rfc-0053) + +\newpage + +## RFC-0061 - RFC on Mirror World Prototyping {#rfc-0061} + +Definition of the implementation strategy for the first working prototype of +Mirror World using TM engines. This first prototype is likely to be developed on +the most advanced LTM. This RFC will define how to safely experiment with this +technology. + +### Status + +- Proposal + +### Schedule + +- 2025 + +### Dependencies + +- [RFC-0053](#rfc-0053) +- [RFC-0058](#rfc-0058) +- [RFC-0059](#rfc-0059) + +\newpage + +## RFC-0062 - RFC on Legal issues linked with Mirror World {#rfc-0062} + +Mirror Worlds are linked with specific legal issues which will need to be +addressed. This particularly concerns privacy and confidentiality. 
+ +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0061](#rfc-0061) + +\newpage + +## RFC-0063 - RFC on Mirror World Extension Strategy {#rfc-0063} + +The Mirror World extension strategy takes into account all the technical and +legal choices defined by the RFC on Scaling in order to address the extension of +the Mirror World to cover a larger, mostly European, scale. + +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0061](#rfc-0061) + +\newpage + +## RFC-0064 - RFC on Mirror World Technical Standards {#rfc-0064} + +This RFC defines the specific technical standards needed for the Mirror World +extension, based on the experience provided by the Mirror World prototype. + +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0061](#rfc-0061) + +\newpage + +## RFC-0065 - RFC on Virtual/Augmented Reality and Discovery {#rfc-0065} + +Definition the standards that should be adopted in order to enable the +development of Virtual/Augmented Reality services and the discovery module on +top of the TM Data Graph. + +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0061](#rfc-0061) + +\newpage + +## RFC-0066 - RFC on 4D Mirror World {#rfc-0066} + +This RFC defines how the Mirror World can be continuously synchronized with the +TM DataGraph, enabling direct access and manipulation of 4D data. + +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0061](#rfc-0061) + +\newpage + +## RFC-0067 - RFC for Improved Simulation using TM Simulation Engines {#rfc-0067} + +Researchers will be able to use the TM simulation engines to perform simulation +studies without having to rely on external models and tools. The simulation +engines have the capacity to improve the performance and the reach of +computational simulations for historical research. + +### Status + +- Proposal + +### Schedule + +- 2026 + +### Dependencies + +- [RFC-0058](#rfc-0058) + +\newpage + +## RFC-0068 - RFC on Large Scale Mirror World {#rfc-0068} + +This RFC consolidates the vision, strategy and framework conditions for the +development of a World-Wide Mirror World. + +### Status + +- Proposal + +### Schedule + +- 2027 + +### Dependencies + +- [RFC-0062](#rfc-0062) +- [RFC-0063](#rfc-0063) +- [RFC-0064](#rfc-0064) diff --git a/files/releases/RFC-0003/RFC-0003.md b/files/releases/RFC-0003/RFC-0003.md new file mode 100644 index 0000000..c52e2e9 --- /dev/null +++ b/files/releases/RFC-0003/RFC-0003.md @@ -0,0 +1,309 @@ +--- +# Don't change this header section +title: "RFC on RFC Platform" +subtitle: "Time Machine RFC-0003" +author: + - Daniel Jeller + - Frédéric Kaplan + - Kevin Baumer +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead[R]{} + - \fancyfoot[L]{-release-version-} +output: pdf_document +--- + +# Motivation + +This Request for Comments (RFC) describes the inner workings and technical +details of the RFC platform itself. It aims to provide the technical framework +for authorship, review, community contribution and publication of all future +Time Machine RFCs. + +# Introduction + +The Time Machine **Requests for Comments** (RFC) workflow is based on +Git[^git_website], a tool initially designed to track changes to source code by +multiple developers and GitHub[^github_about], currently the leading place to +host open-source projects and to create and collaborate on software and many +other kinds of projects. 
The contribution and review process used by RFCs builds
+on the basic _forking_ workflow that "is most often seen in public open source
+projects"[^bitbucket_forking]. This ensures that contributions will be tracked
+indefinitely, that review decisions are documented correctly and that it is
+possible to permanently access and reference older versions of the RFC drafts.
+
+The choice of this solution is motivated by the possibility of scaling the
+number of users and contributions over time. It is likely that releasing and
+updating RFCs will ultimately be akin to maintaining a large software codebase.
+
+# Definitions
+
+Before describing the workflow in detail, this section gives an overview of the
+most important design decisions and distinct parts of the platform.
+
+## RFC Editorial Committee and RFC Team
+
+All strategic aspects of the RFCs are managed by the **RFC Editorial
+Committee** appointed by the Time Machine Organisation board; the technical
+management, editorial work and support for RFC authors and other contributors
+are handled by the **RFC Team**.
+
+## Authorship
+
+An RFC author can be an individual person or a group of authors working
+together on a single RFC draft. _Note_: This document doesn't differentiate
+between individual authors and groups of authors when talking about _RFC
+authors_.
+
+## New RFC proposal
+
+RFCs can be proposed by the RFC Editorial Committee, by RFC authors appointed
+by the Time Machine Organisation or by any other interested public author. An
+up-to-date list of planned RFCs and their interconnection can be seen in
+RFC-0002.
+
+## Identifiers
+
+Any accepted RFC proposal will be assigned an identifier by the RFC Team.
+Identifiers are based on the pattern `RFC-[number]`. The numbers are padded
+with leading zeros to four digits. An example is this RFC itself, called
+`RFC-0003`, pronounced as _RFC-three_.
+
+## Document format
+
+RFCs are drafted in markdown[^orig_markdown_syntax] -- more specifically, in the
+extended syntax used by Pandoc[^pandoc_markdown], a tool to convert texts
+between different file formats.
+
+_Note_: A short introduction to the most important features of markdown can be
+found in the _RFC-template_ document[^template].
+
+## RFC content and contribution repository
+
+RFC documents are managed in Git and are hosted in a single repository on
+GitHub. The individual RFCs are stored in directories and files named after the
+RFC number, for example `RFC-0003/RFC-0003.md`. Accompanying files like images
+are stored alongside the main RFC documents. Public contributions will be
+possible via issues, comments and pull requests; the RFC Team is responsible
+for the immediate interaction with the community as well as the maintenance of
+the repository.
+
+Content contributions to an RFC currently being drafted can be made by creating
+a fork[^github_forking] of the official RFC repository and submitting all
+modifications in the form of a GitHub Pull-Request[^github_pullrequest]
+containing the relevant changes to the RFC repository.
+
+_Note_: An author-pull-request is only allowed to contain changes to a single
+RFC (main document and accompanying files), otherwise it will be rejected by
+the RFC Team. Changes to multiple RFCs need to be submitted in separate pull
+requests. Contributions outside an active drafting phase will also be rejected.
+
+## Document life cycle
+
+The life cycle phases for an RFC are `draft` and `release`.
Draft phases will be +open to the public for an arbitrary amount of time during which the RFC is being +prepared by the authors and open to comments by the public. Drafts are stored in +the `files/drafts` folder, and released RFCs in the `files/releases/` folder of +the RFC repository. + +## Publication + +In addition to the markdown-documents in the RFC repository, drafts and releases +will be converted into PDF files using the above-mentioned Pandoc. These PDF +files will be amended with the time of conversion as well as a unique release +number, then stored as build artifacts[^github_job_details] in the GitHub +repository. + +After significant releases and following a loose half-yearly schedule, all +drafts and releases will be published in a combined versioned and timestamped +PDF document called the **RFC-book**. This PDF document is built automatically +when a GitHub release[^github_release] is created and attached to the GitHub +release page. + +_Note_: It is to be expected that the automatic file preview for markdown files +on GitHub (and possibly in other tools as well) will differ from the final PDF +files, as Pandoc, the tool used to create these files, enables advanced features +like footnotes which are not necessarily used by GitHub and their preferred +markdown dialect.[^gfm] + +# Workflow phases + +This section describes the progression of the RFC from an initial idea to its +final release in the RFC book. + +## Phase 1: Conception + +![Conception flow](images/phase_1.png) + +Ideas for RFCs can come from both the RFC Editorial Committee and public +authors, both as individual or groups of authors working together. Potential +authors with an idea for an RFC should contact the RFC Editorial Committee to +coordinate the initial conception and drafting process. After accepting an RFC +idea, the RFC Editorial Committee will assign an identifier and the RFC Team +will prepare the draft file from a standardised template in the official RFC +repository in the `files/drafts/[RFC-id]/` folder. + +Following this step, the draft author can work on the document as they wish but +once they feel the RFC is in a submittable state they will create a pull request +with the draft text to the RFC repository or submit the text so the RFC Team can +do so on their behalf. + +The RFC Team will then conduct a brief internal review of the initial draft to +ensure the formal correctness of the document. After this is concluded and +necessary changes are made by the initial draft author in the scope of the +original submission, the pull request is merged in the master branch of the +official repository and the public drafting phase will start. + +## Phase 2: Public drafting and review + +![Drafting flow](images/phase_2.png) + +The drafting of an RFC is designed as a process that enables direct +contributions by public participants as well as the official RFC Editorial +Committee and Team in a similar manner. Over the course of a limited timespan +all contributors can work on improvements of the draft text or propose and +review changes. The fork-and-pull-request-workflow on GitHub makes sure that +each contribution is registered and stored in the official repository +indefinitely. + +The official drafting and review phase begins with the merging of the initial +RFC draft by the RFC Team and extends over an arbitrary period of time depending +on the topic and scope of the RFC being drafted. 
The main RFC markdown document +as well as accompanying files, for example images, are stored in the +`files/drafts/[RFC-id]` folder for the duration of the drafting phase. + +Persons interested in contributing to the text directly, either by additions or +changes to the existing content, can create a new fork (a full copy) of the +official repository or pull the current state of its master branch into their +own pre-existing fork. They can then either: use the inline editor on the GitHub +website to change the content of the RFC document in their own forked +repository, or clone their repository to their computer and use a markdown +editor / Git client of their choice to work with the files. + +Changes to the RFC draft can be submitted at any time during the drafting phase +in the form of pull requests from the forked repository to the so-called +upstream repository, that is the official RFC repository. This enables the RFC +Editorial Committee, RFC Team and other contributors to review the proposed +changes, suggest editions or point out problems directly next to the provided +version of the draft document. + +All pull requests to the official RFC repository during the drafting and review +phase can be reviewed and commented by any contributor but to be accepted and to +be merged the changes have to be accepted by the RFC Team and Editorial +Committee or reviewers appointed by the RFC Editorial Committee. After a pull +request with changes passes this review, it is accepted and will be merged by +the RFC Team into the master branch of the RFC repository. This makes the newly +merged version the new official version of the draft. Other open pull requests +will have to be updated by their creators to integrate the new changes into +their version if conflicts occur. + +Merged pull requests will trigger a _GitHub Action_[^github_action] that +compiles the current version of all open draft documents and attaches them as a +ZIP file accessible in workflow job details[^github_job_details]. These preview +files are available for 90 days. + +Another way for potential contributors not familiar with markdown writing and +the Git-based workflow to contribute are so-called GitHub +Issues[^github_issues]. These are commonly used to give users the possibility to +report software bugs to project developers and to track tasks to be done. In the +TM RFCs issues can be created by any person that wants to just report a problem +with the current version of the draft or to discuss specific topics or ideas +related to the RFC. New issues will be tagged by the RFC Team with the +identifier of the RFC they are directed at. It is the responsibility of the RFC +author to react to these issues and up to the RFC Team to moderate the issues as +necessary. + +## Phase 3: Release + +![Release flow](images/phase_3.png) + +If an RFC draft is considered fit for release by the community and the RFC +Editorial Committee and reviewers after the drafting and review phase, the draft +file is updated to include all contributors and to conform to the other +formalities and is then moved from the `files/drafts` folder to `files/releases` +by the RFC Team. After merging this change into the master branch of the +repository, a new GitHub release[^github_release] will be created by the RFC +Team. This will trigger the creation of a new timestamped version of the RFC +book which will be attached to the release. This marks the formal release of the +newly finished RFC document. 
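+
+The PDF conversion itself is handled by the repository's automated build, as
+described in the _Publication_ section. Authors who want to preview a draft
+locally before it is merged or released can run Pandoc themselves. The
+following sketch is only an example of such a local preview, assuming Pandoc
+and a LaTeX distribution are installed; it is not the command used by the
+official build, and the file path is a placeholder.
+
+```python
+import subprocess
+from pathlib import Path
+
+# Placeholder path: point this at the draft you are working on.
+draft = Path("files/drafts/RFC-0003/RFC-0003.md")
+output = draft.with_suffix(".pdf")
+
+# Invoke the locally installed Pandoc to convert the markdown draft to PDF.
+subprocess.run(
+    ["pandoc", str(draft), "--from", "markdown", "--output", str(output)],
+    check=True,
+)
+print(f"Preview written to {output}")
+```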
+ +# Updates at a later point + +Due to the fact that the RFCs describe aspects of the Time Machine, additions, +amendments or changes to existing RFCs may be necessary. If this is the case, +the RFC Team will move the RFC file to be amended from the `files/releases` +folder back to the `files/drafts` folder and a new drafting phase can start. It +will then again process through all the different phases as described above +resulting in a new release version. + +# Q&A + +## Question: Do I need a GitHub account to contribute to an RFC? + +Yes, you will need a GitHub account if you would like to contribute directly to +the text of an RFC, to discuss issues or to comment on pull requests. It serves +to make attributions of content and comments from individual persons possible +and to help to ensure scientific standards for the drafting and review process. + +## Question: I have an idea for an RFC. What do I do? + +Please contact the RFC Team at [rfc@timemachine.eu](mailto:rfc@timemachine.eu) +with your idea. They will assist you with the creation an initial draft version +of the idea, if it is deemed to be suitable and feasible. + +## Question: I would like to fix some errors in an RFC draft. How do I do that? + +The easiest way is for you to sign in to your GitHub account, create a fork of +the main RFC repository into your own account, fix the error in your forked +version of the document, commit it into your repository and open a pull request +outlining your changes to the main RFC repository. It will then be visible, can +be reviewed and then eventually merged into the main draft document if the +changes are accepted by the reviewers. If you need any help with this, feel free +to contact the RFC Team at [rfc@timemachine.eu](mailto:rfc@timemachine.eu). + +## Question: I have worked on a draft document myself and would like to preview the final PDF document to see how it would look. How can I do this? + +If you follow the official contribution process by forking the main RFC +repository, anything you push to the `master` branch in your own repository will +be automatically converted into a PDF document by triggering a GitHub action. +You can access the ZIP file with the compiled draft documents in the GitHub job +details page in the action section once you have enabled[^github_enable_actions] +the execution of actions in your repository. + +## Question: How do I see which drafts are currently available for contributions? + +You can see the current plan of work in the _README_[^github_repo_rfc_readme] +file of the main RFC repository. 
+ + + + + +[^bitbucket_forking]: + + +[^gfm]: GitHub Flavored markdown (_GFM_) +[^git_website]: +[^github_about]: +[^github_action]: +[^github_enable_actions]: + + +[^github_forking]: +[^github_issues]: +[^github_job_details]: + + +[^github_pullrequest]: + + +[^github_release]: + + +[^github_repo_rfc_readme]: + + +[^orig_markdown_syntax]: +[^pandoc_markdown]: +[^template]: + diff --git a/files/drafts/RFC-0003/images/phase_1.drawio b/files/releases/RFC-0003/images/phase_1.drawio similarity index 100% rename from files/drafts/RFC-0003/images/phase_1.drawio rename to files/releases/RFC-0003/images/phase_1.drawio diff --git a/files/drafts/RFC-0003/images/phase_1.png b/files/releases/RFC-0003/images/phase_1.png similarity index 100% rename from files/drafts/RFC-0003/images/phase_1.png rename to files/releases/RFC-0003/images/phase_1.png diff --git a/files/drafts/RFC-0003/images/phase_2.drawio b/files/releases/RFC-0003/images/phase_2.drawio similarity index 100% rename from files/drafts/RFC-0003/images/phase_2.drawio rename to files/releases/RFC-0003/images/phase_2.drawio diff --git a/files/drafts/RFC-0003/images/phase_2.png b/files/releases/RFC-0003/images/phase_2.png similarity index 100% rename from files/drafts/RFC-0003/images/phase_2.png rename to files/releases/RFC-0003/images/phase_2.png diff --git a/files/releases/RFC-0003/images/phase_3.drawio b/files/releases/RFC-0003/images/phase_3.drawio new file mode 100644 index 0000000..ea51504 --- /dev/null +++ b/files/releases/RFC-0003/images/phase_3.drawio @@ -0,0 +1 @@ +7VhNc5swEP01PibDh42do+M6ySGdydSd6TEjwxrUCJZKiz/66ytZ4sOh8aQHJ2ndgw28fSBpn54WMQhn+fZWsjL7jAmIQeAl20H4aRAEfhhE+mCQnUXGVxMLpJInjtQCC/4THOg5tOIJqAMiIQri5SEYY1FATAcYkxI3h7QVisNWS5ZCD1jETPTRbzyhzKKTkdfid8DTrG7Z91wkZzXZASpjCW46UDgfhDOJSPYs385AmOTVebH33bwQbTomoaDX3LCdjr2rbDqEH2olLyZlNF2Ki5HrG+3qAUOix+8uUVKGKRZMzFv0WmJVJGCe6umrlnOPWGrQ1+B3INo5MVlFqKGMcuGiKy7EDAXKfYsh+MkIxhpXJPEJOpGraByyqInUAujUXfdH7xKisJIxHBlyPYuYTIGO8ELLM/noNOByewuYA8mdJkgQjPj6cL4wN+3Shtcqo0+cOH8glHvumonKtfQgQQ+fF6ntAzAFxmcYV7nJie5DkZh5iGvL+XIz25unpStnCJC9WdBqbATbZJxgUbJ9Xjfa6G+o5xokwfaoAnW0dppbaobuctP6tqFkHc9G3ok0G56fuYJXmsv/UOYKeuayZvkKLO9pWCIvCOR8rZOjXNKbtd2IljCVNQp2lHlZgZc0E2wJ4gEVJ46FjsVgWtYBYwqui9T9M8ISiTDvEKaCpyZAZvZcY0WCF7oLda00PWSO0jxcD6Y0Y823qanpl7ha8RguKwVS2f9TOXj4zMCjnoGHUd+/o1P513+X6thMn/d3c/hKN48/lJvDnptnEnS7+yrI9O+W01217JbNf6D6BZMPVv0mZ26e6K80T9QzT+2W/2VwXwZ1/Vvro7o0RMnEY84KvQk079yPLvZG5dGPwp7FR/5v6uPwRHNlfGyhpcxsRwrY2CEqo0zg4aoJ2VesJeJTs1dhRCzO7P2c6t2KpZ/Fou1Hp1u19WX7sWEf63yyCee/AA== \ No newline at end of file diff --git a/files/releases/RFC-0003/images/phase_3.png b/files/releases/RFC-0003/images/phase_3.png new file mode 100644 index 0000000..f79da27 Binary files /dev/null and b/files/releases/RFC-0003/images/phase_3.png differ diff --git a/files/releases/RFC-0004/RFC-0004.md b/files/releases/RFC-0004/RFC-0004.md new file mode 100644 index 0000000..5134d72 --- /dev/null +++ b/files/releases/RFC-0004/RFC-0004.md @@ -0,0 +1,73 @@ +--- +# Don't change this header section +title: "RFC on the RFC Editorial Committee" +subtitle: "Time Machine RFC-0004" +author: + - Andreas Maier + - Daniel Jeller +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyfoot[L]{-release-version-} +output: pdf_document +--- + +# Motivation + +This RFC outlines the basic policies and procedures related to the RFC Editorial +Committee, many of which have been inspired by the IEEE Signal Processing +Society [^ieee_sps_policy]. 
The main items that this document defines are: the +organisation of the RFC Editorial Committee, how members are appointed, +procedure for applications, length of term and main duties. + +# Organisation + +The RFC Editorial Committee consists of four members. The four members share +equal rights and duties with regards to the editing process of the time machine +RFCs. For decisions of the RFC Committee to be passed, a majority of more than +60% is required, in practice meaning three of the four members must agree on a +decision. + +# Appointment + +Vacancies in the RFC Editorial Committee are communicated to the general public +by the Time Machine Organisation (TMO), and anybody is welcome to apply for a +position in the Committee. After a period of 14 days following the public +announcement, the TMO's Board reviews all applications and determines if the new +member of the RFC Committee is approved by a majority vote. In the case of +multiple applications that result in affirmative votes totalling less than 50% +of the voting members' support, the top two candidates move on to a second +ballot. The TMO Board may also remove RFC Editorial Committee Members with or +without cause by affirmative vote of 66% of the voting members. + +# Application Procedures + +In order to apply to a vacancy in the RFC Editorial Committee, applicants must +submit their application in response to the public call initiated by the TMO +Board. An application should consist of a letter of motivation, a curriculum +vitae, and a list of publications which will serve to provide the TMO Board with +a breadth and depth of information needed to make an appropriate decision. + +# Length of Terms + +An RFC Editorial Committee Member is appointed for a period of three years. The +post may be vacated earlier, should the RFC Editorial Committee Member wish to +do so. The appointment may also be terminated by the TMO Board. At the end of +their first term, members are permitted to run for a second term. In exceptional +cases, the TMO Board may allow additional terms. + +# Main Duties + +- The duties of the RFC Editorial Committee are to maintain the consistency of + the RFC System, to appoint RFC teams to organise new RFCs and to improve upon + existing RFCs. +- In particular, the RFC Editorial Committee reviews changes to the GitHub + version of the RFCs and comments on their technical soundness. +- The RFC Editorial Committee is also responsible for keeping track of RFC + versioning, the timely and regular publication of RFCs, and publicly + announcing new releases. 
+ + + +[^ieee_sps_policy]: + diff --git a/files/releases/RFC-0005/LTM_Map1.jpg b/files/releases/RFC-0005/LTM_Map1.jpg new file mode 100644 index 0000000..ad92f36 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Map1.jpg differ diff --git a/files/releases/RFC-0005/LTM_Map2.jpg b/files/releases/RFC-0005/LTM_Map2.jpg new file mode 100644 index 0000000..279480b Binary files /dev/null and b/files/releases/RFC-0005/LTM_Map2.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project.jpg b/files/releases/RFC-0005/LTM_Project.jpg new file mode 100644 index 0000000..7307707 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Data.jpg b/files/releases/RFC-0005/LTM_Project_Data.jpg new file mode 100644 index 0000000..0c646d5 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Data.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Description.jpg b/files/releases/RFC-0005/LTM_Project_Description.jpg new file mode 100644 index 0000000..f94a09e Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Description.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Description_Institution.jpg b/files/releases/RFC-0005/LTM_Project_Description_Institution.jpg new file mode 100644 index 0000000..1e92ea4 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Description_Institution.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Geographic_AddLocation.jpg b/files/releases/RFC-0005/LTM_Project_Geographic_AddLocation.jpg new file mode 100644 index 0000000..6e88682 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Geographic_AddLocation.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Geographic_LocationList.jpg b/files/releases/RFC-0005/LTM_Project_Geographic_LocationList.jpg new file mode 100644 index 0000000..e522de2 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Geographic_LocationList.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Geographic_ZoneofCoverage.jpg b/files/releases/RFC-0005/LTM_Project_Geographic_ZoneofCoverage.jpg new file mode 100644 index 0000000..100f78b Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Geographic_ZoneofCoverage.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Name_and_Period.jpg b/files/releases/RFC-0005/LTM_Project_Name_and_Period.jpg new file mode 100644 index 0000000..6d284b2 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Name_and_Period.jpg differ diff --git a/files/releases/RFC-0005/LTM_Project_Review.jpg b/files/releases/RFC-0005/LTM_Project_Review.jpg new file mode 100644 index 0000000..f990e19 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Project_Review.jpg differ diff --git a/files/releases/RFC-0005/LTM_Schema.jpg b/files/releases/RFC-0005/LTM_Schema.jpg new file mode 100644 index 0000000..0ba6540 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Schema.jpg differ diff --git a/files/releases/RFC-0005/LTM_Webspace.jpg b/files/releases/RFC-0005/LTM_Webspace.jpg new file mode 100644 index 0000000..903f941 Binary files /dev/null and b/files/releases/RFC-0005/LTM_Webspace.jpg differ diff --git a/files/releases/RFC-0005/RFC-0005.md b/files/releases/RFC-0005/RFC-0005.md new file mode 100644 index 0000000..c317519 --- /dev/null +++ b/files/releases/RFC-0005/RFC-0005.md @@ -0,0 +1,649 @@ +--- +# Don't change this header section +title: "RFC on LTM" +subtitle: "Time Machine RFC-0005" +author: + - Isabella di Lenardo + 
- François Ballaud
+  - Gael Paccard
+  - Frédéric Kaplan
+  - Daniel Jeller
+header-includes:
+  - \usepackage{fancyhdr}
+  - \pagestyle{fancy}
+  - \fancyfoot[L]{-release-version-}
+output: pdf_document
+---
+
+# Motivation
+
+In order to build a planetary-scale Time Machine, it is necessary to define an
+organic, incremental strategy. To succeed, the Time Machine must be able to
+progressively anchor itself in local territories, directly bringing higher
+value to local activities and favouring the creation of new projects that mine
+information about the past from surviving objects and documents. Local Time
+Machines can be defined as zones with a higher density of past-reconstruction
+activities. This RFC defines the dynamics that permit bootstrapping Local Time
+Machines, facilitating the onboarding of new projects, valorising the extracted
+data, facilitating the involvement of the local population, developing use
+cases for exploitation avenues and eventually reaching a sustainable regime
+where **Big Data of the Past** are fruitfully exploited, leading to a constant
+increase of such activities. This RFC defines an approach based on the
+standardisation of a core infrastructure and the independent development of
+Apps.
+
+# Approach
+
+## Openness By Design: Autonomy of Projects, Emergence of Local Time Machines
+
+The first principle for the development of Local Time Machines is
+**Openness-by-design**. The general idea is that **Local Time Machines** do not
+have a hierarchical structure managed by a coordinator or leader, but represent
+an aggregation of different projects, which arose in different contexts and
+which have their own independent structure and governance, specific purposes,
+development context and method of financing. Consequently, **Local Time
+Machines** are areas characterised by **Density of Operations**. There is no
+single coordination that aims to manage all the **Projects** that have arisen
+in a specific area. All those involved in the various projects and activities,
+through their organisation charts, can in any case converge towards a community
+which will therefore have an autonomous logic of structuring and functioning
+that emerges locally. The projects involved can include projects with national
+and international grants, institutional projects with internal funding,
+projects financed by local administrative institutions, projects held by
+companies working on cultural heritage that benefit from services and tools
+implemented by the **Time Machine Organisation** through the **Local Time
+Machines Infrastructure**, but also small-scale projects led by individuals.
+
+The project-based horizontal structure has key advantages:
+
+- Standard processes facilitate easy on-boarding of new projects and members.
+  They ensure openness by design.
+- Standard operations and libraries of standard operators guarantee by design
+  the desired level of compatibility between processes and datasets.
+- Centralised repositories for projects, operations and data sets enable a
+  constantly up-to-date map of activities in progress.
+
+## Scalability by Design: Core Component, Apps, Code Library and Local Time Machines
+
+The second development principle of the Local Time Machine is scalability by
+design. To maximise growth of the Time Machine environment, the right balance
+must be found between the part of the infrastructure under the control of the
+**Time Machine Organisation** and independently developed pieces of software.
+The Time Machine should be as distributed as possible but as centralised as
+necessary.
+
+In the structure defined in this RFC, the **Time Machine Organisation** is
+responsible for the development of the **Core Infrastructure**, which includes:
+
+- The definition of the structure of the **Data Graph** and the creation of the
+  interfaces to read and write it.
+- The curation of the **4D Map**, including both the activities of the projects
+  and the results of the reconstructions.
+- The curation of the **Projects Repository**, the list of officially
+  recognised projects that transform the **Data Graph** and the **4D Map** and
+  that build Tools to read and write them.
+- The curation of the **Code Library**, regrouping key functions for processing
+  Data in the Time Machine Environment.
+
+The **Apps** are pieces of software (in general built as part of official
+**Projects**, but not necessarily) that permit users to experience and edit the
+information in the **Data Graph** and the **4D Map**. They can be grouped into
+families of **Apps** like the **Navigators** or the **Annotators**.
+
+In this context, Local Time Machines, defined as zones with a higher density of
+activities, correspond to the parts of the **Data Graph** and **4D Map** where
+**Project** activities are more intense. The level of intensity corresponds to
+the different labels (**RFC on LTM Value Scale**, planned 2020) and to
+different modes of visualisation in the **Time Machine Website**.
+
+This principle of development permits a cost-effective strategy in which a
+variety of actors can build tools and services around the activities of
+**Local Time Machines**.
+
+## Project Zone of Coverage and Local Time Machine Municipalities
+
+**Local Time Machines** focus on **Municipalities**, i.e. zones of territory
+dividing the surface of the Earth into administratively unambiguous zones. The
+list of **Municipalities** (and therefore the list of the potential Local Time
+Machines) is fixed and predetermined. This determines a priori the granularity
+of the **Local Time Machines**.
+
+**Projects** focus on a **Zone of Coverage** corresponding to one or several
+**GeoEntities**. The list is based on existing geographical entities (e.g. the
+list of Places documented in OpenStreetMap (OSM)), defined as standard
+Geographical Information System objects (points, lines, polygons). Some
+**GeoEntities** can include **Municipalities** or be included in
+**Municipalities**. In both cases, the **Project** will be featured in the
+corresponding **Local Time Machines**.
+
+# Core Components related to Local Time Machines
+
+![70 % center](LTM_Schema.jpg)
+
+## Data Graph
+
+The **Data Graph** is the central component of the Time Machine, containing all
+the information modelled in the Time Machine. The graph is constructed both
+manually, using editing **Apps**, and automatically, through the processing of
+the **Digital Content Processor** (3 RFCs planned in 2021). The **Data Graph**
+is intrinsically composed of two subparts:
+
+- The bright (actual) graph, composed of information that has been manually
+  mapped and integrated with other large databases.
+- The dark (virtual) graph, composed of information extracted automatically
+  from (massive) documentation which has so far only been used as individual
+  historic items.
+
+**Apps** permit users to visualise and edit the **Data Graph**, thus performing
+Internal (e.g. inclusion of Nodes and Links) and External Operations (e.g.
+Visualisation). Like any **Component** of the Time Machine, the **Data Graph**
+is fully constructed in a procedural way, entirely defined by a sequence of
+operations.
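+
+As a purely illustrative sketch of this procedural construction (the actual
+model will be fixed by the **RFC on Time Machine Data Graph**), a graph can be
+thought of as nothing more than the log of the operations applied to it; the
+class and operation names below are invented for the example.
+
+```python
+class DataGraph:
+    """Toy graph whose state is entirely defined by a sequence of operations."""
+
+    def __init__(self):
+        self.operations = []  # the operation log is the authoritative record
+        self.nodes = set()
+        self.links = set()
+
+    def apply(self, op, *args):
+        self.operations.append((op, args))
+        if op == "add_node":
+            self.nodes.add(args[0])
+        elif op == "add_link":
+            source, relation, target = args
+            self.links.add((source, relation, target))
+
+    def replay(self):
+        """Rebuild an identical graph from the operation log alone."""
+        copy = DataGraph()
+        for op, args in self.operations:
+            copy.apply(op, *args)
+        return copy
+
+g = DataGraph()
+g.apply("add_node", "Venice")
+g.apply("add_node", "Rialto Bridge")
+g.apply("add_link", "Rialto Bridge", "located_in", "Venice")
+assert g.replay().links == g.links
+```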
+
+The first definition of the Data Graph is established by the **RFC on Time
+Machine Data Graph** (planned 2021, RFC2).
+
+## 4D Map
+
+The **4D Map** is a second central component of the Time Machine. It plots both
+the ongoing projects and the datasets of these projects. This means that the
+**4D Map** is both the map where activities can be followed and the map
+aggregating results. The density of the **4D Map** is not uniform. In
+particular, some zones may be modelled only in 3D, in 2D or even in 1D, as a
+list of included elements. The **4D Map** includes a layer of
+**Municipalities** on which **Local Time Machines** can be anchored. The **4D
+Map** can be navigated using several **4D Interfaces**.
+
+The Time Machine Website and **Apps** can perform specific internal operations
+such as:
+
+- Addition of a new Local Time Machine on a **Municipality** of the **4D Map**.
+- Registration of an **Event** on the **4D Map**.
+
+**Apps** can also feature 4D interfaces, application-based or web-based, that
+permit external operations such as:
+
+- Navigating in Space to see the activities of **Local Time Machines**.
+- Navigating in Space and Time to see the state of the 4D reconstruction.
+
+The **4D Map** has a series of standard Layers that are fully defined in a
+dedicated RFC (**not currently planned in RFC2**):
+
+1. The **Municipalities** segmentation, which defines the granularity of the
+   **Local Time Machines**.
+2. The **Points of View** Layer, which corresponds to the perspectives of
+   photographs or paintings.
+3. The **Parcel Layer**: 2D polygons with a temporal extension, typically
+   defined by an administrative source (e.g. parcels of the Napoleonic Cadaster
+   of 1808). Each parcel has a unique ID in the Time Machine system.
+4. The **Place names** Layer.
+5. The **Spherical Image** Layer.
+6. The **Cloud Point** Layer.
+7. The **4D Vectorial** Layer.
+8. The **Homologous Point Network**, connecting images with one another through
+   homologous points.
+
+In the meantime, a simplified version of the **4D Map** is used: the **Local
+Time Machines Map**.
+
+## Code Library
+
+The **Code Library** is a library accessible in Python (and possibly later in
+other languages) regrouping key **Operator** functions for processing Data in
+the Time Machine Environment.
+
+## Projects Repository
+
+The **Projects Repository** monitors all the active projects of the Time
+Machine. **Projects** are usually conducted by institutions but can also be
+launched by individuals. Projects may be new or may document past projects.
+
+- Projects can mine **Sources** and ingest their extracted data into the **Data
+  Graph**. These **Projects** are associated with a **Zone of Coverage** that
+  associates them with **Local Time Machines**, producing content for
+  **GeoEntities**. Projects may also produce intermediary datasets that can be
+  downloaded even if they are not yet integrated into the **Data Graph**.
+- Projects can also develop **Apps** that interact with the **4D Map** and the
+  **Data Graph**.
+- Projects can contribute to the **Code Library** by working on the GitHub
+  repository of the Time Machine to produce new **Operators**.
+
+These different objectives are not mutually exclusive.
+
+The **Project Development Space** is a space in the Time Machine Website that
+features the **Apps** relevant to developing the **Projects**.
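+
+To give a feel for what calling the **Code Library** described above from
+Python could look like, here is a hypothetical sketch of two **Operators**
+chained procedurally. The function names, signatures and return values are
+invented for illustration and do not describe an existing API.
+
+```python
+def segment_parcels(image_path):
+    """Hypothetical Operator: return parcel outlines found in a cadastral map image."""
+    return [{"parcel_id": 1, "polygon": [(0, 0), (10, 0), (10, 8)]}]
+
+def georeference(parcels, homologous_points):
+    """Hypothetical Operator: attach geographic anchors to segmented parcels."""
+    return [{**p, "crs": "EPSG:4326", "anchors": homologous_points} for p in parcels]
+
+# Operators are chained procedurally, for instance inside a Jupyter notebook.
+parcels = segment_parcels("cadaster_1808_sheet_12.jpg")
+located = georeference(parcels, homologous_points=[((10, 8), (45.4371, 12.3326))])
+print(located)
+```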
+ +## Time Machine Community + +The Time Machine Community is constituted of all users registered in the Time +Machine Website. Each user will get an ID that can be used to login in third +parties **Apps**. Depending on their level of activity, users may reach +different status linked with particular privileges in terms of operations. These +statuses are organised taking inspiration from the Wikipedia systems of +privileges. + +# Characteristics of Local Time Machines + +A **Local Time Machine** is defined as the activities and results of the +**Projects** concerning a **Municipality** + +- The **Local Time Webspace** features the information of all the **Projects** + linked with the **Municipality** corresponding to the **Local Time Machine** +- The **Local Time Machine Visual Plan** is an automatically generated + representation of a spatiotemporal focus of the different projects + participating to a **Local Time Machine** +- The **Local Time Machine Datasets** are extracted datasets that can be freely + downloaded, even if they are not yet integrated with the global **Data Graph** +- The **Local Time Machine Operations** are the list of basic operations + produced by **Projects** and concerning the **Municipality**. They appear as a + log of activities in the **Local Time Machines Webspace**. The **Local Time + Machine Density of Operations** characterises the number of activities going + on in the **Local Time Machine**. It characterises both the activities that + are going on inside **Projects** and for Citizen Scientist activities. + Standards values of activities define a scale to represent Local Time Machine + (**RFC on LTM Value Scale**, planned 2020) +- The **Local Time Machine Community** is the list of active users of a **Local + Time Machine** . These include users active in **Projects** and users active + in crowdsourcing activities (e.g. transcription, correction, training, etc.). + By default, the active users may get updated about announcement concerning + **Local Time Machine Activities** + +# First Phases of Development + +## Phase I : Development of the Project Environment and LTM Webspace + +The first release develops the following components, essentially meant for +introducing the basic structure of the LTM infrastructure functioning. At this +stage, the LTM Webspace essentially consists in the documentation of the +existing. + +### Local Time Machines Map + +A first interface to visualise and navigate in a simplified version of **4D +Map** as a 2D visualisation. The first planetary-scale representation could be +done using Open Street Map data. Each city which corresponds to a **Local Time +Machine** (i.e. each city which has a **Project** actively working on it) would +be highlighted. By clicking of corresponding city, the user access the **Local +Time Machine WebSpace**. The interface should give access to the General Menu +permitting to create a new project. + +Various versions of the **Local Time Machines Map** could be envisioned. The +following schemas are illustrating various possible visualisations. The +interface could be quite different in practice. + +Version 1 : A zoom features permits to focus on the most active **Local Time +Machines** + +![75 % center](LTM_Map1.jpg) + +Version 2 : Simple dots are showing the Local Time Machines on the map. + +![75 % center](LTM_Map2.jpg) + +### Local Time Machine Webspace + +The Local Time Machine Webspace features all the information available about a +given Local Time Machine + +1. 
The **Projects** covering the corresponding **GeoEntity** +2. The **Local Time Machine Visual Plan**, a synthetic summary of the activity + of the **Projects** +3. The **Local Time Machine Agenda** of past and forthcoming Events +4. The combined log of the operations of the **Projects** +5. The **Local Time Machine Partners**, i.e list of institutions participating + in at least one projects linked with the **Local Time Machine** + +Wireframes of Local Time Machine Webspace including **Local Time Machine Visual +Plan**, **Local Time Machine Datasets**, **Projects** associated with the +**Local Time Machine**, the **Local Time Machine Agenda**, the news section +covering the combined log of the **Projects** and the **Local Time Machine +Partners**. + +![50 % center](LTM_Webspace.jpg) + +### Project Environment + +The **Project Environment** includes basic tools for performing Internal +Operations on **Projects** + +1. Updating the Project Name and basic description +2. Updating the List of institutions involved +3. Updating the list **Municipalities** on which the **Project** operates (the + first version of this list is automatically generated by the selection on the + **Local Time Machines Map** using the **Zone of Coverage** ) +4. Updating of Project Spatiotemporal Data Focus. This information will be used + for positioning the project in the **Local Time Machine Visual Plan** +5. Announcing activities to be displayed in the corresponding Local Time Machine + Agendas. + +Each of the project operations, i.e. the project history, can be seen and edited +in the Project log. Projects operations include : Publication of Datasets, +Scientific Publications, Press Article, Addition of new Partner, etc. + +Wireframes of Project Page including link with external pages, datasets, contact +person. related events, related news and associated institutions + +![50 % center](LTM_Project.jpg) + +When a new project is created, the user follows in a script to enter all the +necessary information. + +In the first Phase, the **Project Environment** will only enable the creation of +projects whose primary goal is mining data from sources. The on-boarding process +includes the following 5 steps : Name and Period, Geographic Coverage, +Description, Public Data and Review/Publish + +Step 1: Entering Project Name and Period : + +![50 % center](LTM_Project_Name_and_Period.jpg) + +Step 2: Adding a Location from the list of **GeoEntities** . The **Zone of +Coverage** of the **GeoEntity** may correspond directly to a **Municipality**, +may include **Municipalities** or may be a sub-part of a **Municipality**. + +![50 % center](LTM_Project_Geographic_AddLocation.jpg) + +Step 2: Alternatively, one can draw a **Zone of Coverage** to select a list of +**Municipalities** : + +![50 % center](LTM_Project_Geographic_ZoneofCoverage.jpg) + +Step 2: Editing the list of **Municipalities** associated with the **Project** + +![50 % center](LTM_Project_Geographic_LocationList.jpg) + +Step 3 : Entering Description : + +![50 % center](LTM_Project_Description.jpg) + +Step 4: Entering Public Data : + +![50 % center](LTM_Project_Data.jpg) + +Step 4: Institution Picker : + +![50 % center](LTM_Project_Description_Institution.jpg) + +Step 5 :Project Review : + +![50 % center](LTM_Project_Review.jpg) + +All these parameters can also be edited afterwards from the **Project +Environment** Menus. + +### User Manager + +The **User Manager** includes basic tools : + +1. User Login system including Password recovery +2. 
User Profile manager including photos, link to other users' pages + +## Phase II : Development of the App and Code Library Environment + +In the Phase II creates the environment for the development of the first Apps +and the Code Library. + +The **Code Library** is a joint effort to create Operators (function) that can +transform data based on parameters and that can be chained in a procedural +manner. They can be called in Python and used for instance in a Jupyter +Notebook. + +The **Apps** are dedicated environment with user interfaces to interact with the +**Data Graph** and the **4D Map**. Some **Apps** may use Operators of the **Code +Library** + +The **Project Environment** is extended to include projects creating **Apps** +and operators for the **Code Library**. + +### Examples of Apps + +#### Navigators + +Navigators are the equivalent of Browsers on the World Wide Web. They interpret +the **Data Graph** and the **4D Map** to create different processes for +exploration. Various typologies of \*\*Navigators can be designed : + +1. Search Engines (including Visual Search Engines). A generic Search Engine can + search the **Data Graph** globally giving results for IIIF documents, images, + GeoEntities, Named Entities (Person, Place, Organisation, etc.), Conceptual + Class of Object, and potentially Time Machine Users. +2. Collection Browsers +3. 4D Browser (web-based or application-based for better performances) +4. Data Graph Inspector / Editor +5. Pattern finder / navigators +6. Genealogical Explorer + +#### Importers + +Importers permit to link existing datasets to the **Data Graph** + +1. IIIF Source Importer (Import an existing IIIF source). An imported source is + immediately searchable in the Search Engine. +2. Collection creator: Create a new Time Machine hosted IIIF source, includes + ingestion of a whole directory of images of limited size) +3. Geographical Layer Importer (e.g. Geojson importer) +4. Datatable importer (e.g. CSV) +5. Cadastral Parcels Importer +6. Cloud point Importer +7. Spherical Image Importer +8. 3D Model Importer + +#### Annotators + +Annotators are dedicated tools for annotating images and documents + +1. Image Annotator: Permit to annotate zone of images in IIIF collection by + defining zones and associate them with Time Machine or Wikidata ID). This + makes these elements immediately searchable. +2. Transcription tool: Permit to input structure data (from based) to the Data + Graph. For each document a **Data Mask** can be defined. +3. Parcel Annotator : Edit the Parcel Layer of the **4D Map** using a document + as input +4. Point of View Annotator. Input a Point of View associated with an image in + the **4D Map** +5. Place Name Annotator. Associated a segment of an image with a Place in Place + Name layer of the **4D Map** +6. Homologous Point Annotator : update the homologous point network +7. Spherical Image Annotator +8. Table Annotator +9. Parsers dedicated to specific document structure (Cadaster, Directories) + +#### Aligners + +1. Georeferencing tool : Permit to Georeference a map part of the IIIF source by + clicking on a number of **Homologous Points** +2. Photoalignment tools : Permits to connect a photo with a map and with other + photos based on **Homologous Points**. + +#### Conflict Detectors + +**Conflict Detectors** detect incoherence in the **Data Graph** and help users +solve them. 
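+
+As a purely illustrative sketch of such a check (the real detectors will operate
+on the **Data Graph** itself, and the assertion format below is invented), a
+detector could flag two sources that date the same entity to non-overlapping
+periods:
+
+```python
+def find_date_conflicts(assertions):
+    """Report pairs of sources giving the same entity non-overlapping date ranges.
+
+    Each assertion is a hypothetical (entity, start_year, end_year, source) tuple.
+    """
+    by_entity = {}
+    for entity, start, end, source in assertions:
+        by_entity.setdefault(entity, []).append((start, end, source))
+    conflicts = []
+    for entity, claims in by_entity.items():
+        for i in range(len(claims)):
+            for j in range(i + 1, len(claims)):
+                s1, e1, src1 = claims[i]
+                s2, e2, src2 = claims[j]
+                if e1 < s2 or e2 < s1:  # the two ranges do not overlap at all
+                    conflicts.append((entity, src1, src2))
+    return conflicts
+
+assertions = [
+    ("Rialto Bridge construction", 1588, 1591, "construction_record"),
+    ("Rialto Bridge construction", 1480, 1485, "misdated_chronicle"),
+]
+print(find_date_conflicts(assertions))
+```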
+ +#### Digitisation and Storage Services + +Apps can also offer specific services like digitisation or storage in direct +link with the **Data Graph** and the **4D Map** + +### Examples of Operators from the Code Library + +#### Segmenters + +**Segmenters** can be used for + +1. Objects and Persons in images +2. Parcels in Cadastral Maps +3. Document Layout analysis +4. 3D Object segmentation. Using the **homologous point network** inside the + **Data Graph**, some 2D to 3D segmenter uses 2D segmentation to segment 3D + objects + +#### Transcribers + +Transcribers perform state of the art HTR transcription in document. + +# Examples of Scenarios and Services + +## Museum creating a Project + +One objective of Time Machine is to localize cultural objects in Space and Time. +For instance, an object in a museum can be associated with several +spatio-temporal references: its place of production, the various places it was +stored before entering the Museum, the spatio-temporal entity it represents. One +consequence is that a museum in Paris may contribute to many LTMs in the world, +having for instance paintings representing Venice, Madrid or Budapest. +Inversely, in many cases, the evidence of reconstructing the past of a city or a +site is scattered in collections all over the world. + +To enter and curate data in the **Data Graph**, a museum will create a +**Project**, possibly jointly with other institutions, with a particular +spatio-temporal anchoring objectives. For simplicity of the explanation, let’s +assume the collection to be ingested is already digitised. The steps are the +following: + +1. In the **Project Environment**, the Museum defines the approximate **Zone of + Coverage** of the collection. This will define the **Municipalities** (and + therefore the **Local Time Machines**) for which the data will be relevant. + It can be updated later. +2. The Museum enters the repository where the **Source** collection is, + typically as a IIIF repository. If the Museum does not have an IIIF solution, + a physical or virtual **Time Machine Box** can be used. This is an example of + **App** adapted to the **Data Graph**. The data will remain in the chosen + repository unless the Museum wants to benefit from a long-term preservation + service. +3. Using dedicated **Apps**, like **Annotators**, the project partners + reposition the objects of the chosen collection in space and time (for + instance annotating content of paintings or documenting the steps in the + trajectory of the objects) +4. A **Conflict Detector** tool (another specific **App**) is capable of + informing of any conflict in the metadata inserted linked to other operations + of the Time Machine (for instance due to an inference or a non-compatible + entree). On this basis, the Museum may or may not update its metadata or + launch specific research initiatives to investigate further the conflicting + data elements. + +Being part of the Data Graph, the new inserted collection will benefit from all +the others innovations done in the Time Machine environment. + +This case is a simple one. More complex curation may occur. + +## Digitisation Hubs + +The **Digitisation Hubs** are an example of services that will be offered to +Local Time Machines for digitisation (e.g. scanning of documents, streets, 3D +scanning). + +The **Digitisation Hubs** will enable to seamlessly aggregated new document and +metadata into a **Data Graph**, with the appropriate standards in terms of +resolution, file formats, and metadata during acquisition. 
+Contractual aspects will be handled through the Time Machine Standard
+Contracts.
+
+The structure and detailed functioning of the Digitisation Hubs will be
+developed first in an RFC specifying their structure, then implemented as a
+Service operated by several operators. As laid out in the **RFC Tree**, the
+**RFC on Digitisation Hubs** is planned for 2021. It will be established
+directly with actors that may become Service operators.
+
+The following steps are anticipated:
+
+1. Creation of a **Project** for the digitisation Services on timemachine.eu.
+2. Specification of its **Zone of Coverage**. The **Zone of Coverage** will
+   determine the Local Time Machines in which the services will be publicised.
+3. Precise definition of digitisation offers using only **Standard Metrics**,
+   so that the exact price of the operations can be determined.
+4. Once the services are ready, creation of an **App** offering them.
+
+This way of functioning should make it possible to create a well-functioning
+single European market for digitisation services, connecting operators and
+customers through agreed standards.
+
+## Long-term Preservation
+
+Likewise, some partners may offer services for the long-term preservation of
+particular datasets (e.g. IIIF repositories). This can, for instance, include
+DNA storage.
+
+# Subsequent Development Phases
+
+The subsequent development phases are not detailed in this RFC but will be in
+future ones.
+
+## Phase III: Development of the Franchise Model
+
+In Phase III, the process for consolidating Local Time Machine activities in the
+**Municipality** itself is developed. This phase is described in the
+corresponding RFC:
+
+- **RFC on Franchise System** (planned 2023)
+
+## Phase IV: Development of the Mirror World Environment
+
+This phase will be developed in the corresponding RFCs:
+
+- **RFC on Mirror World Prototyping** (planned 2025)
+- **RFC on Mirror World Extension Strategy** (planned 2026)
+- **RFC on Mirror World Technical Standards** (planned 2026)
+- **RFC on Virtual/Augmented Reality and Discovery** (planned 2026)
+- **RFC on 4D Mirror World** (planned 2026)
+- **RFC on Large Scale Mirror World** (planned 2027)
+
+# Coordination, Training and Local Dynamics
+
+## Time Machine Academies
+
+The goal of the **Time Machine Academies** is to help with the global
+coordination of **App** development and with training in their use.
+
+Examples of Communities:
+
+- Developers (academia and SMEs)
+- GLAM professionals
+- Scholars
+- Stakeholders of specific Exploitation Avenues (e.g. Tourism, Creative
+  Industries)
+
+The details of the Time Machine Academy process are developed in a separate RFC.
+
+## Local Time Machine Events
+
+Processes for organising Local Time Machine events will also be discussed in a
+separate RFC.
+
+# Questions and Answers
+
+## Can I create a Local Time Machine?
+
+Not directly. But by creating a project that works on a particular
+**GeoEntity**, you can start the development of the corresponding **Local Time
+Machine**.
+
+## Can I edit a Local Time Machine?
+
+Not directly. But the **Local Time Machine** webspace is updated as information
+about projects is updated.
+
+## If my project studies a large object like Hadrian's Wall, in which Local Time Machine will it be included?
+
+It will be included in all the Local Time Machines focused on **Municipalities**
+that include a part of Hadrian's Wall.
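[Editorial note: as a minimal sketch of this inclusion rule, under the assumptions that the coordinates and names below are invented, that no data model is prescribed by this RFC, and that the Python `shapely` package is available, the decision reduces to intersecting the GeoEntity's geometry with each Municipality's boundary.]

```python
# Illustrative only: decide which Local Time Machines a large GeoEntity belongs
# to by intersecting its geometry with Municipality boundaries. Coordinates and
# names are invented for the example; requires the shapely package.
from shapely.geometry import LineString, Polygon

# A long linear GeoEntity (e.g. a wall), as a rough polyline.
geo_entity = LineString([(0.0, 0.0), (5.0, 0.5), (10.0, 0.0)])

# Simplified Municipality boundaries.
municipalities = {
    "Municipality A": Polygon([(-1, -1), (3, -1), (3, 2), (-1, 2)]),
    "Municipality B": Polygon([(3, -1), (7, -1), (7, 2), (3, 2)]),
    "Municipality C": Polygon([(20, 20), (22, 20), (22, 22), (20, 22)]),
}

# The GeoEntity is listed in every Local Time Machine whose Municipality
# contains a part of it, i.e. whose boundary intersects its geometry.
relevant_ltms = [name for name, boundary in municipalities.items()
                 if boundary.intersects(geo_entity)]
print(relevant_ltms)  # ['Municipality A', 'Municipality B']
```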
+
+## I am part of an important institution of a City. How can my institution take part in the City's Local Time Machine?
+
+By starting or re-documenting a project focusing on a **GeoEntity** that is part
+of the **Municipality**.
+
+## Can my project be called (Name of City) Time Machine?
+
+A priori, no. Only Local Time Machines are called Time Machines.
+
+## My institution would like to participate in a Local Time Machine but we are not located in the city. What can we do?
+
+This is not a problem. The institution must participate in a project focusing on
+a **GeoEntity** that is part of the **Municipality**.
+
+# Linked RFCs
+
+- **RFC on 4D Map** (to be planned)
+- **RFC on Apps** (to be planned)
+- **RFC on Operators** (to be planned)
+- **RFC on Time Machine Academy** and **RFC on Local Time Machine Events**
+  (maybe redundant with **RFC on Training**, planned 2020)
diff --git a/files/template/RFC-template.md b/files/template/RFC-template.md
index 322906a..59c6c1d 100644
--- a/files/template/RFC-template.md
+++ b/files/template/RFC-template.md
@@ -1,107 +1,129 @@
 ---
 # Don't change this header section
-title: '[RFC Title]'
-subtitle: 'Time Machine [RFC-id]'
+title: "[RFC Title]"
+subtitle: "Time Machine [RFC-id]"
 author:
-- Author 1
+  - Author 1
 header-includes:
-- \usepackage{fancyhdr}
-- \pagestyle{fancy}
-- \fancyhead[R]{}
-- \fancyfoot[L]{-release-version-}
+  - \usepackage{fancyhdr}
+  - \pagestyle{fancy}
+  - \fancyhead[R]{}
+  - \fancyfoot[L]{-release-version-}
 output: pdf_document
 ---
 
 # Motivation
 
-This file is the template to be used for new RFCs. It is intended to be both a starting point for new RFCs and an example of what can be achieved within the limits of a markdown file.
+This file is the template to be used for new RFCs. It is intended to be both a
+starting point for new RFCs and an example of what can be achieved within the
+limits of a markdown file.
 
# File structure
 
 ## YAML-Header
 
-To aid the generation of the final release-PDF-files, this Markdown file contains a header in the `YAML`-format[^yaml]. Apart from other things it states the RFC-title and subtitle as well as information about the authors of the RFC.
+To aid the generation of the final release-PDF-files, this markdown file
+contains a header in the `YAML`-format[^yaml]. Among other things, it states the
+RFC-title and subtitle as well as information about the authors of the RFC.
 
-Please keep the header exactly as-is, its contents will be modified by the RFC-Editorial Team.
+Please keep the header exactly as-is; its contents will be modified by the
+RFC-Editorial Team.
 
 ## Sections
 
-An RFC document should always contain the YAML header, a *Motivation* section directly below the header as the first section in the document, as many author-defined sections as necessary as the main content, a *questions and answers*-section (*Q&A*) and the footnotes at the bottom of the document. Writers can delete the pre-existing section structure (with the exception of the motivation and q&a sections) in this file and add new sections as they see fit.
+An RFC document should always contain the YAML header, a _Motivation_ section
+directly below the header as the first section in the document, as many
+author-defined sections as necessary as the main content, a _questions and
+answers_ section (_Q&A_), and the footnotes at the bottom of the document.
+Writers can delete the pre-existing section structure (with the exception of the
+Motivation and Q&A sections) in this file and add new sections as they see fit.
-The *Motivation* section is intended to show the general reason for the writing of the RFC. It should be concise.
+The _Motivation_ section is intended to show the general reason for the writing
+of the RFC. It should be concise.
 
-The *Q&A* section is intended to provide a view on the RFC from a different angle than the one of a traditional paper on a subject. It gives RFC writers the possibility to test their main structures, methods and drawbacks from the point of view of outside readers of the document. It should follow the general form outlined below in the q&a section of this template file.
+The _Q&A_ section is intended to provide a view on the RFC from a different
+angle than that of a traditional paper on a subject. It gives RFC writers the
+possibility to test their main structures, methods, and drawbacks from the point
+of view of outside readers of the document. It should follow the general form
+outlined below in the Q&A section of this template file.
 
 # TM Glossary
 
-Terms that are of special significance to the Time Machine must be written in **bold** on their first usage in an RFC document. Definitions and important terms are listed in *RFC-0001 on RFC Glossary*.
+Terms that are of special significance to the Time Machine must be written in
+**bold** on their first usage in an RFC document. Definitions and important
+terms are listed in _RFC-0001 on RFC Glossary_.
 
 # Markdown
 
-It is allowed to use the whole range of Markdown features as well as everything supported by Pandoc out of the box. A good overview and introduction can be found in the Markdown specification itself[^daring_markdown] and the Pandoc user documentation[^pandoc_markdown].
+The whole range of markdown features may be used, as well as everything
+supported by Pandoc out of the box. A good overview and introduction can be
+found in the markdown specification itself[^daring_markdown] and the Pandoc user
+documentation[^pandoc_markdown].
 
 ## Hyperlinks
 
-RFCs are released as PDF documents. While it is possible to set hyperlinks in Markdown that will also work in PDF documents it is advised to add the actual links in footnotes. This way they can be easily read. Footnotes (similar to the one in the previous paragraph can be created by adding `[^footnote_name]` where the footnote is to be placed and `[^footnote_name]: Content of the footnote` at the end of this file. Only alphanumeric characters and underscores are allowed. Hyperlinks in footnotes should be added in angled brackets: ``.
+RFCs are released as PDF documents. While it is possible to set hyperlinks in
+markdown that will also work in PDF documents, it is advised to add the actual
+links in footnotes. This way they can be read easily. Footnotes (similar to the
+ones in the previous paragraph) can be created by adding `[^footnote_name]`
+where the footnote is to be placed and `[^footnote_name]: Content of the footnote`
+at the end of this file. Only alphanumeric characters and underscores are
+allowed in footnote names. Hyperlinks in footnotes should be added in angle
+brackets: ``.
 
 ## Tables
 
-It is possible to use the different ways to create table using Pandoc markdown[^pandoc_tables]. The following example uses the simple syntax:
+It is possible to use the different ways of creating tables supported by Pandoc
+markdown[^pandoc_tables]. The following example uses the pipe syntax:
 
-  Right     Left     Center     Default
--------     ------ ----------   -------
-     12     12        12            12
-    123     123       123          123
-      1     1          1             1
+| Column 1 | Column 2 | Column 3 |
+| -------- | -------- | -------- |
+| Foo      | Bar      | Baz      |
+| Alpha    | Beta     | Gamma    |
 
-Table: Demonstration of simple table syntax.
+Table: Demonstration of pipe table syntax.
 
 ## Images
 
-It is possible to include images. They should be added in an `images` folder directly next to the markdown document. In the text they are referenced like this: `![Image caption](./images/image_name_including_file_ending.png)`. The path is relative to the location of the Markdown file.
+It is possible to include images. They should be added in an `images` folder
+directly next to the markdown document. In the text they are referenced like
+this: `![Image caption](./images/image_name_including_file_ending.png)`. The
+path is relative to the location of the markdown file.
 
-## Diagrams
-
-In addition to pre-created images, it is also possible to add so-called *Mermaid*[^mermaid] diagrams and flowcharts to the document. This are two examples:
-
-```mermaid
-graph LR
-    draft[RFC Draft] --> candidate[RFC Candidate] --> draft
-    candidate --> release{RFC release}
-    release -.updates on existing RFCs.-> draft
-```
-
-```mermaid
-pie
-    title Time spent on RFCs
-    "Preparation" : 10
-    "Draft" : 60
-    "Release Candidate" : 30
-```
 
 # Q&A
 
 ## Question: Am I allowed to modify the YAML header on top of the file?
 
-No, the header is aimed at the creation of the release PDF files and should only be changed by the RFC-Editorial Team.
+No, the header is aimed at the creation of the release PDF files and should only
+be changed by the RFC-Editorial Team.
 
 ## Question: Can I omit the Motivation or Q&A sections?
 
-No, both sections are important for reviewers and implementers to understand the direction of the RFC, it's motivation and possibly problematic points / implications for other parts of the Time Machine.
+No, both sections are important for reviewers and implementers to understand the
+direction of the RFC, its motivation, and possibly problematic points or
+implications for other parts of the Time Machine.
 
 ## Question: Why doesn't my file look the same in my editor, the GitHub repository and the PDF files?
 
-The PDF files are created by an automated process using *Pandoc*[^pandoc], a tool to convert between different text formats. It provides more extensive capabilities for text structuring and formatting (for instance footnotes). This additional parts are not understood by the GitHub Markdown parser and might be previewed differently by various markdown editors.
+The PDF files are created by an automated process using _Pandoc_[^pandoc], a
+tool to convert between different text formats. It provides more extensive
+capabilities for text structuring and formatting (for instance footnotes). These
+additional parts are not understood by the GitHub markdown parser and might be
+previewed differently by various markdown editors.
 
 ## Question: Where can I preview how my RFC document will look at the end?
 
-If you have started your document by forking the official GitHub RFC repository[^rfc_repo] you can see the current version of the RFC drafts in the *Action* section of your own GitHub RFC repository[^github_manage_action] after you pushed a change to it.
+
+If you have started your document by forking the official GitHub RFC
+repository[^rfc_repo], you can see the current version of the RFC drafts in the
+_Action_ section of your own GitHub RFC repository[^github_manage_action] after
+you have pushed a change to it.
 
 [^daring_markdown]:
-[^github_manage_action]:
-[^mermaid]:
+[^github_manage_action]:
+
+
 [^pandoc]:
 [^pandoc_markdown]:
 [^pandoc_tables]:
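[Editorial note: the template's Q&A mentions that the release PDFs are produced by Pandoc through the repository's Docker-based build. For a rough local preview only, assuming pandoc and a LaTeX engine are installed, something like the following renders the markdown to PDF; the exact options used by the official build in `build/` may differ.]

```python
# Rough local preview only; the official PDFs are built by the repository's
# Docker-based pandoc pipeline, which may pass additional options.
import subprocess

subprocess.run(
    ["pandoc", "RFC-template.md", "-o", "RFC-template-preview.pdf"],
    check=True,  # raise if pandoc (or its LaTeX engine) fails
)
```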