diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index a46f924..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: ci - -# Controls when the workflow will run -on: - - # Trigger the workflow on all pushes - push: - branches: - - '**' - tags: - - '**' - - # Trigger the workflow on all pull requests - pull_request: ~ - - # Trigger the workflow on release creation - release: - types: - - created - - # Allow workflow to be dispatched on demand - workflow_dispatch: ~ - -jobs: - - # Calls a reusable CI workflow to qa, test & deploy the current repository. - # We skip jobs that will result in duplicate jobs, since the code does not depend on the compiler. - # It will produce a code coverage report on success and upload it to the codecov service. - # If all checks were successful and a new release tag created, the package will be published on PyPI. - ci: - name: ci - uses: ecmwf-actions/reusable-workflows/.github/workflows/ci-python.yml@v1 - with: - # codecov_upload: true - skip_matrix_jobs: | - clang-12@ubuntu-20.04 - clang-9@ubuntu-18.04 - clang-12@macos-10.15 - build_package_inputs: | - self_build: false - secrets: - pypi_username: ${{ secrets.PYPI_USERNAME }} - pypi_password: ${{ secrets.PYPI_PASSWORD }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index b6e4761..c34d28b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,129 +1,2 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ +__pycache__ +.DS_Store \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/PACKAGE_NAME/__init__.py b/PACKAGE_NAME/__init__.py deleted file mode 100644 index 6e4a923..0000000 --- a/PACKAGE_NAME/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# (C) Copyright 1996- ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -from .__meta__ import * # noqa diff --git a/PACKAGE_NAME/__meta__.py b/PACKAGE_NAME/__meta__.py deleted file mode 100644 index ec9d8b5..0000000 --- a/PACKAGE_NAME/__meta__.py +++ /dev/null @@ -1,16 +0,0 @@ -# (C) Copyright 1996- ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -"""Container for project metadata.""" - -__name__ = "PACKAGE_NAME" -__version__ = "0.1.dev0" -__author__ = "European Centre for Medium-Range Weather Forecasts (ECMWF)" -__author_email__ = "software.support@ecmwf.int" -__license__ = "Apache License Version 2.0" -__description__ = "" diff --git a/PACKAGE_NAME/sample.py b/PACKAGE_NAME/sample.py deleted file mode 100644 index 20837dd..0000000 --- a/PACKAGE_NAME/sample.py +++ /dev/null @@ -1,34 +0,0 @@ -# (C) Copyright 1996- ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -""" -SAMPLE MODULE - demonstrates basic code style. -""" - -import numpy as np - - -def speed_direction_to_uv(speed, direction): - """ - Calculate wind u- and v-components from wind speed and direction. - - Parameters - ---------- - speed : array - Array containing value(s) of wind speed. - wind_v : array - Array containing values of wind (from) direction, in degrees. 
- - Returns - ------- - tuple - Tuple containing arrays of wind u- and v-components. - """ - wind_u = speed * np.cos(np.deg2rad(direction)) - wind_v = speed * np.sin(np.deg2rad(direction)) - return wind_u, wind_v diff --git a/README.md b/README.md new file mode 100644 index 0000000..0458b29 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# Catalog Server \ No newline at end of file diff --git a/README.rst b/README.rst deleted file mode 100644 index 5e21190..0000000 --- a/README.rst +++ /dev/null @@ -1,90 +0,0 @@ - -**************************** -python-package-template-repo -**************************** - -|license| |tag_release| |commits_since_release| |last_commit| - -A template repository for developing Python packages under `ecmwf-projects `_. - -**Quick start** - -Follow these steps to create a new repository from this template. - -#. Click the `Use this template `_ - button and create a new repository with your desired name, location and visibility. - -#. Clone the repository:: - - git clone git@github.com:ecmwf-projects/.git - cd - -#. Remove sample code:: - - rm PACKAGE_NAME/sample.py - rm tests/test_sample.py - -#. Replace ``PACKAGE_NAME`` with your chosen package name:: - - NEW_PACKAGE_NAME= - mv PACKAGE_NAME $NEW_PACKAGE_NAME - sed -i "" "s/PACKAGE_NAME/$NEW_PACKAGE_NAME/g" setup.py \ - docs/source/conf.py \ - docs/source/getting_started/installing.rst \ - docs/source/index.rst \ - $NEW_PACKAGE_NAME/__meta__.py - -#. Modify the contents of ``__meta__.py`` to reflect your repository. Note that there - is no need to update this same information in ``setup.py``, as it will be imported - directly from ``__meta__.py``. - -#. Modify the project url in ``setup.py`` to reflect your project's home in GitHub. - -#. Modify ``README.rst`` to reflect your repository. A number of `shield `_ - templates are included, and will need to be updated to match your repository if you want - to use them. - -**Usage tips** - -* Create an executable called ``qa`` containing the following:: - - black . - isort . - - Add this to your path, and run it from the top-level of your repository before - committing changes:: - - qa . - -.. |last_commit| image:: https://img.shields.io/github/last-commit/ecmwf-projects/thermofeel - :target: https://github.com/ecmwf-projects/thermofeel - -.. |commits_since_release| image:: https://img.shields.io/github/commits-since/ecmwf-projects/thermofeel/latest?sort=semver - :target: https://github.com/ecmwf-projects/thermofeel - -.. |license| image:: https://img.shields.io/github/license/ecmwf-projects/thermofeel - :target: https://www.apache.org/licenses/LICENSE-2.0.html - -.. |pypi_release| image:: https://img.shields.io/pypi/v/thermofeel?color=green - :target: https://pypi.org/project/thermofeel - -.. |pypi_status| image:: https://img.shields.io/pypi/status/thermofeel - :target: https://pypi.org/project/thermofeel - -.. |tag_release| image:: https://img.shields.io/github/v/release/ecmwf-projects/thermofeel?sort=semver - :target: https://github.com/ecmwf-projects/thermofeel - -.. |codecov| image:: https://codecov.io/gh/ecmwf-projects/thermofeel/branch/master/graph/badge.svg - :target: https://codecov.io/gh/ecmwf-projects/thermofeel - -.. |ci| image:: https://img.shields.io/github/workflow/status/ecmwf-projects/thermofeel/ci - :target: https://github.com/ecmwf-projects/thermofeel/actions - -.. |pypi_downloads| image:: https://img.shields.io/pypi/dm/thermofeel - :target: https://pypi.org/project/thermofeel - -.. 
|code_size| image:: https://img.shields.io/github/languages/code-size/ecmwf-projects/thermofeel?color=green - :target: https://github.com/ecmwf-projects/thermofeel - -.. |docs| image:: https://readthedocs.org/projects/thermofeel/badge/?version=latest - :target: https://thermofeel.readthedocs.io/en/latest/?badge=latest diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 0000000..48c763d --- /dev/null +++ b/backend/README.md @@ -0,0 +1,7 @@ +# stac-catalog + +``` +# Make and activate a python environment +pip install -r requirements.txt +./run.sh +``` \ No newline at end of file diff --git a/backend/dockerfile b/backend/dockerfile new file mode 100644 index 0000000..b889732 --- /dev/null +++ b/backend/dockerfile @@ -0,0 +1,9 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . . + +RUN pip install -r requirements.txt + +CMD ["fastapi", "run", "main.py"] \ No newline at end of file diff --git a/backend/fdb_schema/__init__.py b/backend/fdb_schema/__init__.py new file mode 100644 index 0000000..56160a5 --- /dev/null +++ b/backend/fdb_schema/__init__.py @@ -0,0 +1 @@ +from .fdb_schema_parser import FDBSchema, FDBSchemaFile, KeySpec, Key diff --git a/backend/fdb_schema/fdb_schema_parser.py b/backend/fdb_schema/fdb_schema_parser.py new file mode 100644 index 0000000..21c0beb --- /dev/null +++ b/backend/fdb_schema/fdb_schema_parser.py @@ -0,0 +1,371 @@ +import dataclasses +import json +from dataclasses import dataclass, field +from typing import Any + +import pe +from pe.actions import Pack +from pe.operators import Class, Star + +from .fdb_types import FDB_type_to_implementation, FDBType + + +@dataclass(frozen=True) +class KeySpec: + """ + Represents the specification of a single key in an FDB schema file. For example in + ``` + [ class, expver, stream=lwda, date, time, domain? + [ type=ofb/mfb/oai + [ obsgroup, reportype ]]] + ``` + class, expver, type=ofdb/mfb/oai etc are the KeySpecs + + These can have additional information such as: flags like `domain?`, allowed values like `type=ofb/mfb/oai` + or specify type information with `date: ClimateMonthly` + + """ + + key: str + type: FDBType = field(default_factory=FDBType) + flag: str | None = None + values: tuple = field(default_factory=tuple) + comment: str = "" + + def __repr__(self): + repr = self.key + if self.flag: + repr += self.flag + # if self.type: + # repr += f":{self.type}" + if self.values: + repr += "=" + "/".join(self.values) + return repr + + def matches(self, key, value): + # Sanity check! + if self.key != key: + return False + + # Some keys have a set of allowed values type=ofb/mfb/oai + if self.values: + if value not in self.values: + return False + + # Check the formatting of values like Time or Date + if self.type and not self.type.validate(value): + return False + + return True + + def is_optional(self): + if self.flag is None: + return False + return "?" 
in self.flag + + def is_allable(self): + if self.flag is None: + return False + return "*" in self.flag + + +@dataclass(frozen=True) +class Comment: + "Represents a comment node in the schema" + + value: str + + +@dataclass(frozen=True) +class FDBSchemaTypeDef: + "Mapping between FDB schema key names and FDB Schema Types, i.e expver is of type Expver" + + key: str + type: str + + +# This is the schema grammar written in PEG format +fdb_schema = pe.compile( + r""" + FDB < Line+ EOF + Line < Schema / Comment / TypeDef / empty + + # Comments + Comment <- "#" ~non_eol* + non_eol <- [\x09\x20-\x7F] / non_ascii + non_ascii <- [\x80-\uD7FF\uE000-\U0010FFFF] + + # Default Type Definitions + TypeDef < String ":" String ";" + + # Schemas are the main attraction + # They're a tree of KeySpecs. + Schema < "[" KeySpecs (","? Schema)* "]" + + # KeySpecs can be just a name i.e expver + # Can also have a type expver:int + # Or a flag expver? + # Or values expver=xxx + KeySpecs < KeySpec_ws ("," KeySpec_ws)* + KeySpec_ws < KeySpec + KeySpec <- key:String (flag:Flag)? (type:Type)? (values:Values)? ([ ]* comment:Comment)? + Flag <- ~("?" / "-" / "*") + Type <- ":" [ ]* String + Values <- "=" String ("/" String)* + + # Low level stuff + String <- ~([a-zA-Z0-9_]+) + EOF <- !. + empty <- "" + """, + actions={ + "Schema": Pack(tuple), + "KeySpec": KeySpec, + "Values": Pack(tuple), + "Comment": Comment, + "TypeDef": FDBSchemaTypeDef, + }, + ignore=Star(Class("\t\f\r\n ")), + # flags=pe.DEBUG, +) + + +def post_process(entries): + "Take the raw output from the PEG parser and split it into type definitions and schema entries." + typedefs = {} + schemas = [] + for entry in entries: + match entry: + case c if isinstance(c, Comment): + pass + case t if isinstance(t, FDBSchemaTypeDef): + typedefs[t.key] = t.type + case s if isinstance(s, tuple): + schemas.append(s) + case _: + raise ValueError + return typedefs, tuple(schemas) + + +def determine_types(types, node): + "Recursively walk a schema tree and insert the type information." + if isinstance(node, tuple): + return [determine_types(types, n) for n in node] + return dataclasses.replace(node, type=types.get(node.key, FDBType())) + + +@dataclass +class Key: + key: str + value: Any + key_spec: KeySpec + reason: str + + def __bool__(self): + return self.reason in {"Matches", "Skipped", "Select All"} + + def emoji(self): + return {"Matches": "✅", "Skipped": "⏭️", "Select All": "★"}.get( + self.reason, "❌" + ) + + def info(self): + return f"{self.emoji()} {self.key:<12}= {str(self.value):<12} ({self.key_spec}) {self.reason if not self else ''}" + + def __repr__(self): + return f"{self.key}={self.key_spec.type.format(self.value)}" + + def as_json(self): + return dict( + key=self.key, + value=self.as_string(), + reason=self.reason, + ) + + +class FDBSchema: + """ + Represents a parsed FDB Schema file. + Has methods to validate and convert request dictionaries to a mars request form with validation and type information. + """ + + def __init__(self, string, defaults: dict[str, str] = {}): + """ + 1. Use a PEG parser on a schema string, + 2. Separate the output into schemas and typedefs + 3. Insert any concrete implementations of types from fdb_types.py defaulting to generic string type + 4. Walk the schema tree and annotate it with type information. 
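+
+        A rough usage sketch (the schema string and request values here are
+        invented for illustration, not taken from a real FDB configuration):
+
+            schema = FDBSchema("[ class, expver, stream=oper [ date, time ]]")
+            matched, path = schema.match(
+                {"class": "od", "expver": "0001", "stream": "oper",
+                 "date": "20240101", "time": "1200"}
+            )
+            # `matched` is truthy when a branch matched all the way down;
+            # `path` is the list of Key objects recording how each request
+            # key was consumed along the deepest matching branch.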
+ """ + m = fdb_schema.match(string) + g = list(m.groups()) + self._str_types, schemas = post_process(g) + self.types = { + key: FDB_type_to_implementation[type] + for key, type in self._str_types.items() + } + self.schemas = determine_types(self.types, schemas) + self.defaults = defaults + + def __repr__(self): + return json.dumps( + dict(schemas=self.schemas, defaults=self.defaults), indent=4, default=repr + ) + + @classmethod + def consume_key( + cls, key_spec: KeySpec, request: dict[str, Any] + ) -> Key: + key = key_spec.key + try: + value = request[key] + except KeyError: + if key_spec.is_optional(): + return Key(key_spec.key, "", key_spec, "Skipped") + if key_spec.is_allable(): + return Key(key_spec.key, "", key_spec, "Select All") + else: + return Key( + key_spec.key, "", key_spec, "Key Missing" + ) + + if key_spec.matches(key, value): + return Key( + key_spec.key, + key_spec.type.parse(value), + key_spec, + "Matches", + ) + else: + return Key( + key_spec.key, value, key_spec, "Incorrect Value" + ) + + @classmethod + def _DFS_match( + cls, tree: list, request: dict[str, Any] + ) -> tuple[bool | list, list[Key]]: + """Do a DFS on the schema tree, returning the deepest matching path + At each stage return whether we matched on this path, and the path itself. + + When traversing the tree there are three cases to consider: + 1. base case [] + 2. one schema [k, k, k, [k, k, k]] + 3. list of schemas [[k,k,k], [k,k,k], [k,k,k]] + """ + # Case 1: Base Case + if not tree: + return True, [] + + # Case 2: [k, k, k, [k, k, k]] + if isinstance(tree[0], KeySpec): + node, *tree = tree + # Check if this node is in the request + match_result = cls.consume_key(node, request) + + # If if isn't then terminate this path here + if not match_result: + return False, [match_result,] # fmt: skip + + # Otherwise continue walking the tree and return the best result + matched, path = cls._DFS_match(tree, request) + + # Don't put the key in the path if it's optional and we're skipping it. + if match_result.reason != "Skipped": + path = [match_result,] + path # fmt: skip + + return matched, path + + # Case 3: [[k, k, k], [k, k, k]] + branches = [] + for branch in tree: + matched, branch_path = cls._DFS_match(branch, request) + + # If this branch matches, terminate the DFS and use this. + if matched: + return branch, branch_path + else: + branches.append(branch_path) + + # If no branch matches, return the one with the deepest match + return False, max(branches, key=len) + + @classmethod + def _DFS_match_all( + cls, tree: list, request: dict[str, Any] + ) -> list[list[Key]]: + """Do a DFS on the schema tree, returning all matching paths or partial matches. + At each stage return all matching paths and the deepest partial matches. + + When traversing the tree there are three cases to consider: + 1. base case [] + 2. one schema [k, k, k, [k, k, k]] + 3. 
list of schemas [[k,k,k], [k,k,k], [k,k,k]] + """ + # Case 1: Base Case + if not tree: + return [[]] + + # Case 2: [k, k, k, [k, k, k]] + if isinstance(tree[0], KeySpec): + node, *tree = tree + # Check if this node is in the request + request_values = request.get(node.key, None) + + if request_values is None: + # If the key is not in the request, return a partial match with Key Missing + return [[Key(node.key, "", node, "Key Missing")]] + + # If the request value is a list, try to match each value + if isinstance(request_values, list): + all_matches = [] + for value in request_values: + match_result = cls.consume_key(node, {node.key: value}) + + if match_result: + sub_matches = cls._DFS_match_all(tree, request) + for match in sub_matches: + if match_result.reason != "Skipped": + match.insert(0, match_result) + all_matches.append(match) + + return all_matches if all_matches else [[Key(node.key, "", node, "No Match Found")]] + else: + # Handle a single value + match_result = cls.consume_key(node, request) + + # If it isn't then return a partial match with Key Missing + if not match_result: + return [[Key(node.key, "", node, "Key Missing")]] + + # Continue walking the tree and get all matches + all_matches = cls._DFS_match_all(tree, request) + + # Prepend the current match to all further matches + for match in all_matches: + if match_result.reason != "Skipped": + match.insert(0, match_result) + + return all_matches + + # Case 3: [[k, k, k], [k, k, k]] + all_branch_matches = [] + for branch in tree: + branch_matches = cls._DFS_match_all(branch, request) + all_branch_matches.extend(branch_matches) + + # Return all of the deepest partial matches or complete matches + return all_branch_matches + + def match_all(self, request: dict[str, Any]): + request = request | self.defaults + return self._DFS_match_all(self.schemas, request) + + def match(self, request: dict[str, Any]): + request = request | self.defaults + return self._DFS_match(self.schemas, request) + + +class FDBSchemaFile(FDBSchema): + def __init__(self, path: str): + with open(path, "r") as f: + return super().__init__(f.read()) diff --git a/backend/fdb_schema/fdb_types.py b/backend/fdb_schema/fdb_types.py new file mode 100644 index 0000000..05093db --- /dev/null +++ b/backend/fdb_schema/fdb_types.py @@ -0,0 +1,83 @@ +from dataclasses import dataclass +from typing import Any +import re +from collections import defaultdict +from datetime import datetime, date, time + + +@dataclass(repr=False) +class FDBType: + """ + Holds information about how to format and validate a given FDB Schema type like Time or Expver + This base type represents a string and does no validation or formatting. It's the default type. 
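+
+    An illustrative sketch (values chosen arbitrarily): the concrete subclasses
+    defined below override `parse`/`format`, e.g. the Date type round-trips
+    ints, strings and datetimes to `datetime.date`:
+
+        Date_FDBType().parse(20240101)            # -> date(2024, 1, 1)
+        Date_FDBType().format(date(2024, 1, 1))   # -> "20240101"
+        FDBType().validate("anything")            # base type accepts any string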
+ """ + + name: str = "String" + + def __repr__(self) -> str: + return self.name + + def validate(self, s: Any) -> bool: + try: + self.parse(s) + return True + except (ValueError, AssertionError): + return False + + def format(self, s: Any) -> str: + return str(s).lower() + + def parse(self, s: str) -> Any: + return s + + +@dataclass(repr=False) +class Expver_FDBType(FDBType): + name: str = "Expver" + + def parse(self, s: str) -> str: + assert bool(re.match(".{4}", s)) + return s + + +@dataclass(repr=False) +class Time_FDBType(FDBType): + name: str = "Time" + time_format = "%H%M" + + def format(self, t: time) -> str: + return t.strftime(self.time_format) + + def parse(self, s: datetime | str | int) -> time: + if isinstance(s, str): + assert len(s) == 4 + return datetime.strptime(s, self.time_format).time() + if isinstance(s, datetime): + return s.time() + return self.parse(f"{s:04}") + + +@dataclass(repr=False) +class Date_FDBType(FDBType): + name: str = "Date" + date_format: str = "%Y%m%d" + + def format(self, d: Any) -> str: + if isinstance(d, date): + return d.strftime(self.date_format) + if isinstance(d, int): + return f"{d:08}" + else: + return d + + def parse(self, s: datetime | str | int) -> date: + if isinstance(s, str): + return datetime.strptime(s, self.date_format).date() + elif isinstance(s, datetime): + return s.date() + return self.parse(f"{s:08}") + + +FDB_type_to_implementation = defaultdict(lambda: FDBType()) | { + cls.name: cls() for cls in [Expver_FDBType, Time_FDBType, Date_FDBType] +} diff --git a/backend/language.yaml b/backend/language.yaml new file mode 100644 index 0000000..50d793f --- /dev/null +++ b/backend/language.yaml @@ -0,0 +1,1232 @@ +--- +_field: &_field + + foo: + description: A foo field + values: + - bar + - baz + + class: + description: Class selects the main category of data to be retrieved such as operational, research or AIFS + category: data + default: od + flatten: false + type: enum + values: + - [ai, operational aifs] + # - [at, austria] + # - [be, belgium] + # - [c3, c3s] + # - [ce, cems] + # - [ch, switzerland] + # - [ci, cerise] + # - [co, cosmo] + # - [cr, cams research] + # - [cs, ecsn] + # - [d1, destine] + # - [de, germany] + # - [dk, denmark] + # - [dm, demeter] + # - [dt, dts] + # - [e2, e20c] + # - [e4, reanalyse40] + # - [e6, era6] + # - [ea, era5, esat] + # - [ed, eerie] + # - [ef, efas] + # - [ei, era interim] + # - [el, eldas] + # - [em, e20cm] + # - [en, ensembles] + # - [ep, cera-20c, cera20c] + # - [er, reanalyse] + # - [es, spain] + # - [et, cera-sat, cerasat] + # - [fi, finland] + # - [fr, france] + # - [gf, glofas] + # - [gg, greenhouse gases] + # - [gr, greece] + # - [gw, global wildfire information system] + # - [hr, croatia] + # - [hu, hungary] + # - [ie, ireland] + # - [is, iceland] + # - [it, italy] + # - [j5, jra55] + # - [l5, era5l] + # - [l6, era6l] + # - [la, aladin-laef, laef, lace] + # - [lw, WMO lead centre wave forecast verification] + # - [ma, metaps] + # - [mc, macc] + # - [me, mersea] + # - [ml, machine learning] + # - [ms, member states] + # - [nl, netherlands] + # - ['no', norway] + # - [nr, ncep 20cr, 20cr] + # - [o6, 'ocean 6 reanalysis'] + - [od, operations] + # - [pe, permanent experiment] + # - [pt, portugal] + # - [pv, provost] + - [rd, research] + # - [rm, euro4m] + # - [rr, regional reanalysis] + # - [s2, s2s] + # - [se, sweden] + # - [si, slovenia] + # - [sr, sreps] + # - [te, test] + # - [to, tost] + # - [tr, turkey] + # - [uk, united kingdom] + # - [ul, ulysses] + # - [ur, uerra] + # - [yp, yopp] 
+ # - [yt, yotc] + + type: + category: data + default: an + flatten: false + type: enum + multiple: true + values: + # - [3g, 3d variational gradients] + # - [3v, 3d variational analysis] + # - [4g, 4d variational gradients] + # - [4i, 4d variational increments] + # - [4v, 4d variational analysis] + # - [ab, analysis bias] + # - [af, analysis feedback] + # - [ai, analysis input] + # - [an, analysis] + # - [as, adjoint singular vector] + # - [bf, bias-corrected forecast] + # - [cd, climate distribution] + # - [cf, control forecast] + # - [ci, clustering information] + # - [cl, climatology] + # - [cm, cluster means] + # - [cr, cluster representative] + # - [cs, cluster std deviations] + # - [cv, calibration validation forecast] + # - [ea, errors in analysis] + # - [ed, empirical distribution] + # - [ef, errors in first guess] + # - [efi, extreme forecast index] + # - [efic, extreme forecast index control] + # - [em, ensemble mean] + # - [eme, ensemble data assimilation model errors] + # - [emtm, ensemble mean of temporal mean] + # - [ep, event probability] + # - [es, ensemble standard deviation] + # - [est, ensemble statistics] + # - [estdtm, ensemble standard deviation of temporal mean] + # - [fa, forecast accumulation] + # - [fb, feedback] + - [fc, forecast] + # - [fcdfb, forecast departures feedback] + # - [fcmax, forecast maximum] + # - [fcmean, forecast mean] + # - [fcmin, forecast minimum] + # - [fcstdev, forecast standard deviation] + # - [ff, flux forcing realtime] + # - [fg, first guess] + # - [fp, forecast probability] + # - [fsoifb, forecast sensitivity to observations impact feedback] + # - [fu, fill-up] + # - [fx, flux forcing] + # - [ga, gfas analysis] + # - [gbf, bias-corrected gridbox] + # - [gai, gridded analysis input] + # - [go, gridded observations] + # - [gsd, gridded satellite data] + # - [gwt, weather type gridbox] + # - [hcmean, hindcast mean] + # - [ia, init. analysis] + # - [icp, initial condition perturbation] + # - [mpp, model physics perturbation] + # - [if, interim forecast] + # - [im, images] + # - [me, model errors] + # - [mfb, mondb feedback] + # - [oai, odb analysis input] + # - [ob, observations] + # - [of, ocean forward] + # - [ofb, odb feedback] + # - [oi, oi analysis] + # - [oldim, old format images] + # - [or, ocean reanalysis] + # - [pa, perturbed analysis] + # - [pb, probability boundary] + # - [pd, probability distribution] + - [pf, perturbed forecast] + # - [pfc, point values] + # - [ppm, point value metrics] + # - [s3, climate 30 days simulation] + # - [ses, scaled ensemble standard deviation] + # - [sf, sensitivity forecast] + # - [sfb, summary feedback] + # - [sfo, simulations with forcing] + # - [sg, sensitivity gradient] + # - [si, climate simulation] + # - [sim, simulated images] + # - [sot, shift of tails] + # - [ssd, simulated satellite data] + # - [sv, singular vector] + # - [svar, signal variance] + # - [taem, time average ensemble mean] + # - [taes, time average ensemble standard deviation] + # - [tpa, time processed analysis] + # - [tf, trajectory forecast] + # - [tu, tube] + # - [wem, weighted ensemble mean] + # - [wes, weighted ensemble standard deviation] + # - [wp, weather parameters] + + stream: + description: Stream selects the kind of data to be retrieved, for example the forecast model or the ensemble model. 
+ category: data + default: oper + flatten: false + type: enum + values: + # - [amap, analysis for multianalysis project] + # - [ammc, melbourne] + # - [cher, ch, chernobyl] + # - [clte, climate, Climate run output] + # - [clmn, climate-monthly, Climate run monthly means output] + # - [cnrm, meteo france climate centre] + # - [cwao, montreal] + # - [dacl, daily climatology] + # - [dacw, daily climatology wave] + # - [dahc, daily archive hindcast] + # - [dcda, atmospheric model (delayed cutoff)] + # - [dcwv, wave model (delayed cutoff)] + # - [edmm, ensemble data assimilation monthly means] + # - [edmo, ensemble data assimilation monthly means of daily means] + # - [edzw, offenbach] + # - [eefh, extended ensemble forecast hindcast] + # - [eefo, extended ensemble prediction system] + # - [eehs, extended ensemble forecast hindcast statistics] + # - [efas, european flood awareness system (efas)] + # - [efcl, european flood awareness system (efas) climatology] + # - [efhc, ensemble forecast hindcasts (obsolete)] + # - [efho, ensemble forecast hindcast overlap] + # - [efhs, ensemble forecast hindcast statistics] + # - [efov, ensemble forecast overlap] + # - [efrf, european flood awareness system (efas) reforecasts] + # - [efse, european flood awareness system (efas) seasonal forecasts] + # - [efsr, european flood awareness system (efas) seasonal reforecasts] + # - [egrr, exeter, bracknell] + # - [ehmm, combined multi-model hindcast monthly means] + # - [elda, ensemble long window data assimilation] + # - [enda, ensemble data assimilation] + # - [enfh, ensemble forecast hindcasts] + - [enfo, ef, ensemble prediction system] + # - [enwh, ensemble forecast wave hindcasts] + # - [esmm, combined multi-model monthly means] + # - [espd, ensemble supplementary data] + # - [ewda, ensemble wave data assimilation] + # - [ewhc, wave ensemble forecast hindcast (obsolete)] + # - [ewho, ensemble forecast wave hindcast overlap] + # - [ewla, ensemble wave long window data assimilation] + # - [ewmm, ensemble wave data assimilation monthly means] + # - [ewmo, ensemble wave data assimilation monthly means of daily means] + # - [fgge, fg] + # - [fsob, forecast sensitivity to observations] + # - [fsow, forecast sensitivity to observations wave] + # - [gfas, global fire assimilation system] + # - [gfra, global fire assimilation system reanalysis] + # - [kwbc, washington] + # - [lfpw, paris, toulouse] + # - [lwda, long window daily archive] + # - [lwwv, long window wave] + # - [ma, means archive] + # - [maed, multianalysis ensemble data] + # - [mawm, wave anomaly means] + # - [mawv, multianalysis wave data] + # - [mdfa, monthly means of daily forecast accumulations] + # - [mfam, anomaly means] + # - [mfaw, wave anomalies] + # - [mfhm, hindcast means] + # - [mfhw, monthly forecast hindcasts wave] + # - [mfwm, wave real-time means] + # - [mhwm, wave hindcast means] + # - [mmaf, multi-model multi-annual forecast] + # - [mmam, multi-model multi-annual forecast means] + # - [mmaw, multi-model multi-annual forecast wave] + # - [mmsa, multi-model seasonal forecast monthly anomalies] + # - [mmsf, multi-model seasonal forecast] + # - [mmwm, multi-model multi-annual forecast wave means] + # - [mnfa, anomalies] + # - [mnfc, real-time] + # - [mnfh, hindcasts] + # - [mnfm, real-time means] + # - [mnfw, wave real-time] + # - [mnth, mo, monthly, monthly means] + # - [mnvr, monthly variance and covariance data using g. 
boer's step function] + # - [moda, monthly means of daily means] + # - [mofc, monthly forecast] + # - [mofm, monthly forecast means] + # - [monr, monthly means using g. boer's step function] + # - [mpic, max plank institute] + # - [msda, monthly standard deviation and covariance of daily means] + # - [msdc, mv, monthly standard deviation and covariance] + # - [msmm, multi-model seasonal forecast atmospheric monthly means] + # - [mswm, multi-model seasonal forecast wave monthly means] + # - [ocda, ocean data assimilation] + # - [ocea, ocean] + # - [olda, ocean Long window data assimilation] + - [oper, da, daily archive, atmospheric model] + # - [rjtd, tokyo] + # - [scda, atmospheric model (short cutoff)] + # - [scwv, wave model (short cutoff)] + # - [seap, sensitive area prediction] + # - [seas, seasonal forecast] + # - [sens, sf, sensitivity forecast] + # - [sfmm, seasonal forecast atmospheric monthly means] + # - [smma, seasonal monthly means anomalies] + # - [supd, sd, deterministic supplementary data] + # - [swmm, seasonal forecast wave monthly means] + # - [toga, tg] + # - [ukmo, ukmo climate centre] + # - [waef, we, wave ensemble forecast] + # - [wamd, wave monthly means of daily means] + # - [wamf, wave monthly forecast] + # - [wamo, wave monthly means] + # - [wams, multi-model seasonal forecast wave] + # - [wasf, wave seasonal forecast] + # - [wave, wv, wave model] + # - [wavm, wave model (standalone)] + # - [weef, wave extended ensemble forecast] + # - [weeh, wave extended ensemble forecast hindcast] + # - [wees, wave extended ensemble forecast hindcast statistics] + # - [wehs, wave ensemble forecast hindcast statistics] + # - [weov, wave ensemble forecast overlap] + # - [wfas, global flood awareness system (glofas)] + # - [wfcl, global flood awareness system (glofas) climatology] + # - [wfrf, global flood awareness system (glofas) reforecasts] + # - [wfse, global flood awareness system (glofas) seasonal forecasts] + # - [wfsr, global flood awareness system (glofas) seasonal reforecasts] + # - [wmfm, wave monthly forecast means] + # - [wvhc, wave hindcast] + expver: + description: Experiment number, 0001 is operational data. 
+ category: data + default: '0001' + flatten: false + type: enum + values: + - ['0001', 'Operational Data'] + - ['xxxx', 'Experimental Data'] + + dataset: + # flatten: false + multiple: true + type: any + + model: + category: data + type: lowercase + + repres: + flatten: false + multiple: true + type: enum + values: + - gg + - sh + - ll + - np + - rl + + obsgroup: + category: data + multiple: true + type: enum + values: + # - [conventional] + - [sat, satellite] + - [ers1] + - [trmm] + - [qscat] + - [reo3] # reo3 needs to stay for compatibility + # previously in "obsgroups.def" + - [hirs, 1, HIRS ] + - [amsua, 2, AMSUA ] + - [amsub, 3, AMSUB ] + - [mhs, 4, MHS ] + - [geos, 5, GEOS ] + - [resat, 6, RESAT ] + - [meris, 7, MERIS ] + - [gpsro, 8, GPSRO ] + - [satob, 9, SATOB ] + - [scatt, 10, SCATT ] + - [ssmi_as, 11, SSMI ALL-SKY ] + - [iasi, 12, IASI ] + - [airs, 13, AIRS ] + - [ssmis_as, 14, SSMIS ALL-SKY ] + - [tmi_as, 15, TMI ALL-SKY ] + - [amsre_as, 16, AMSRE ALL-SKY ] + - [conv, 17, CONV ] + - [smos, 19, SMOS ] + - [windsat_as, 20, WINDSAT ALL-SKY ] + - [ssmi, 21, SSMI ] + - [amsua_as, 22, AMSUA ALL-SKY ] + - [amsre, 23, AMSRE ] + - [tmi, 24, TMI ] + - [ssmis, 25, SSMIS ] + - [gbrad, 26, GBRAD ] + - [mwhs, 27, MWHS ] + - [mwts, 28, MWTS ] + - [mwri_as, 29, MWRI ALL-SKY ] + - [iras, 30, IRAS ] + - [msu, 31, MSU ] + - [ssu, 32, SSU ] + - [vtpr1, 33, VTPR1 ] + - [vtpr2, 34, VTPR2 ] + - [atms, 35, ATMS ] + - [resat_ak, 36, RESAT AVERAGING KERNELS ] + - [cris, 37, CRIS ] + - [wave_ip, 38, WAVE INTEGRATED PARAMETERS ] + - [wave_sp, 39, WAVE SPECTRA ] + - [raingg, 40, RAINGG ] + - [sfc_ms, 41, SURFACE MULTISENSOR ] + - [amsr2_as, 42, AMSR-2 ALL-SKY ] + - [saphir_as, 43, SAPHIR ALL-SKY ] + - [amsub_as, 44, AMSUB ALL-SKY ] + - [mhs_as, 45, MHS ALL-SKY ] + - [dwl, 46, DOPPLER WIND LIDAR ] + - [iris, 47, IRIS ] + - [aatsr, 49, AATSR ] + - [atms_as, 50, ATMS ALL-SKY ] + - [gmi_as, 51, GMI ALL-SKY ] + - [godae_sst, 52, GODAE SEA SURFACE TEMPERATURES ] + - [atovs_ms, 53, ATOVS MULTISENSOR ] + - [atmospheric_composition, 54, ATMOSPHERIC COMPOSITION ] + - [non_sfc_ms, 55, NON-SURFACE MULTISENSOR ] + - [mwts2, 56, MWTS2 ] + - [ssmi_1d, 57, SSMI 1DVAR TCWV CLOUDY-SKY ] + - [mwhs2_as, 58, MWHS2 ALL-SKY ] + - [ssmt2, 59, SSMT2 ] + - [smap, 60, SMAP ] + - [tovs_ms, 61, TOVS MULTISENSOR ] + - [cloud_r, 62, CLOUD REFLECTIVITY ] + - [cloud_l, 63, CLOUD LIDAR ] + - [satellite_lightning, 64, SATELLITE LIGHTNING ] + - [geos_vis, 65, GEOS VIS ] + - [oconv, 66, OCONV ] + - [mwts3_as, 67, MWTS3 All-sky ] + - [giirs, 68, GIIRS ] + - [test, 99, TEST ] + + reportype: + category: data + type: any + multiple: true + + # rdbprefix + + levtype: + description: The Level Type, can be pressure levels, the surface, model levels etc. 
+ category: data + default: pl + flatten: false + type: enum + values: + # - [cat, category] + # - [dp, depth] + # - [layer] + # - [ml, model levels] + - [pl, pressure levels] + # - [hl, height levels] + # - [pt, potential temperature] + # - [pv, potential vorticity] + - [sfc, surface] + # - [sol, surface other (multi)levels] + # - [wv, ocean wave] + # - [o2d, ocean surface] + # - [o3d, ocean model levels] + never: + - type: ssd + + levelist: + category: data + multiple: true + by: 1 + default: + - 1000 + - 850 + - 700 + - 500 + - 400 + - 300 + never: + - levtype: [sfc, o2d] + - type: ssd + type: to-by-list-float + + param: + category: data + default: 129 + multiple: true + type: param + never: + - type: [tf, ob] + +################################################################# + + # year + # decade + # month + + date: + category: data + default: 0 + type: date + multiple: true + + year: + category: data + type: to-by-list + multiple: true + by: 1 + + month: + category: data + flatten: true + type: enum + multiple: true + values: + - [1, jan, January] + - [2, feb, February] + - [3, mar, March] + - [4, apr, April] + - [5, may, May] + - [6, jun, June] + - [7, jul, July] + - [8, aug, August] + - [9, sep, September] + - [10, oct, October] + - [11, nov, November] + - [12, dec, December] + + # verify + # refdate + + hdate: + category: data + multiple: true + only: + - stream: + - enfh + - enwh + - efho + - ehmm + - ewho + - eefh + - weeh + type: integer + + offsetdate: + category: data + multiple: true + type: date + + fcmonth: + category: data + multiple: true + by: 1 + type: to-by-list + + fcperiod: + category: data + multiple: true + type: integer + + time: + category: data + default: '1200' + multiple: true + type: time + + offsettime: + category: data + multiple: true + type: time + + # leadtime + # opttime + # range + + step: + description: Specify which forecast we want in hours past the date/time. + category: data + multiple: true + by: 12 + default: 0 + type: range + never: + - dataset: + - climate-dt + - stream: + - msmm + - mmsa + - swmm + + anoffset: + category: data + multiple: true + type: integer + + reference: + category: data + multiple: true + type: integer + +################################################################# + + # cluster + # probability + + number: + description: Selects a subset of ensemble members + category: data + multiple: true + aliases: + - ensemble + by: 1 + only: + - type: [pf, cr, cm, fcmean, fcmin, fcmax, fcstdev, sot, fc, wp, 4i, 4v] + never: + # This is to prevent number with type=fc and stream=oper + - stream: [oper, wave] + type: to-by-list + + quantile: + category: data + multiple: true + only: + - type: + - pd + - pb + - taem + - cd + # - sot + type: to-by-list-quantile + denominators: [2,3,4,5,10,100,1000] + by: 1 + domain: + description: The large scale geographic region. 
+ category: data + default: g + flatten: false + type: enum + never: + - dataset: + - climate-dt + values: + # - [a, north west europe] + # - [b, north east europe, baltic and black sea] + - [c, south west europe] + - [d, south east europe] + - [e, europe] + # - [f, fastex] + - [g, globe, general european area] + # - [h] + # - [i] + # - [j] + # - [k] + # - [l] + # - [m, mediterranean] + # - ['n', northern hemisphere] + # - [o] + # - [p] + # - [q] + # - [r] + # - [s, southern hemisphere] + # - [t, tropics] + # - [u, tropics 2] + # - [v] + # - [w, western atlantic] + # - [x] + # - ['y'] + # - [z] + + frequency: + category: data + multiple: true + by: 1 + only: + - param: + - '140251' + type: to-by-list + + direction: + category: data + multiple: true + by: 1 + only: + - param: + - '140251' + type: to-by-list + + diagnostic: + category: data + type: integer + multiple: true + + iteration: + category: data + type: integer + multiple: true + + channel: + category: data + only: + - type: ssd + type: integer + multiple: true + + ident: + category: data + only: + - type: ssd + type: integer + multiple: true + + instrument: + category: data + only: + - type: ssd + type: integer + multiple: true + + method: + category: data + type: integer + + origin: + category: data + multiple: true + type: enum + values: + - [ammc, 1, melbourne] + - [babj, 38, beijing] + - [cmcc] + - [cnmc, 80] + - [consensus, 255] + - [crfc, 239, cerfacs] + - [cwao, 54, montreal] + - [ecmf, 98, ecmwf] + - [edzw, dwd, 78, offenbach] + - [egrr, 74, exeter, bracknell] + - [enmi, 88, oslo] + - [fnmo, fnmoc, 58, fleet numerical] + - [hadc, 247, hadley centre] + - [ifmk, 246] + - [ingv, 235] + - [knmi, 245] + - [kwbc, 7, washington] + - [lemm, 214, madrid] + - [lfpw, 84, 85, paris, toulouse] + - [rjtd, 34, tokyo] + - [rksl, 40, seoul] + - [sbsj, 46, cptec] + - [vuwien, 244, university of vienna] + + system: + category: data + type: integer + +####################################################################### +# DestinE ClimateDT related keywords + + activity: + category: data + type: lowercase + + experiment: + category: data + type: lowercase + + generation: + category: data + type: integer + + realization: + category: data + type: integer + + resolution: + category: data + type: lowercase + +####################################################################### + +_observation: &_observation + + obstype: + category: data + type: any + multiple: true + + obsgroup: + category: data + type: any + multiple: true + +####################################################################### + +_postproc: &_postproc + + accuracy: + category: postproc + flatten: false + type: [enum, integer] + values: + - [av] + - ['off', normal, auto] + + bitmap: + category: postproc + flatten: false + type: any + + format: + category: postproc + flatten: false + type: enum + values: + - - grib + - grib1 + - gb + - - grib2 + - - bufr + - bf + - - grid + - gd + - odb + - ascii + + frame: + category: postproc + type: integer + + gaussian: + category: postproc + type: enum + values: + - reduced + - regular + + area: + category: postproc + flatten: false + multiple: true + type: [float, enum] + values: + - ['off', g, global] + - [e, europe] + + grid: + category: postproc + flatten: false + multiple: true + type: [enum, float, regex] + values: + - auto + - N16 + - N24 + - N32 + - N48 + - N64 + - N80 + - N96 + - N128 + - N160 + - N200 + - N256 + - N320 + - N400 + - N512 + - N576 + - N640 + - N800 + - N1024 + - N1280 + - N1600 + - N2000 + - N4000 + - N8000 + 
regex: + - '^[oOfF][1-9][0-9]+$' + uppercase: true + + interpolation: + category: postproc + flatten: false + type: enum + values: + - - linear + - - nearest-lsm + - nearest lsm + - - 'off' + - default + - any + + packing: + category: postproc + flatten: false + type: enum + values: + - - so + - second order + - ['off', av] + - [co, complex] + - simple + - ccsds + + resol: + category: postproc + flatten: false + aliases: + - tra + type: [enum, integer] + values: + - - 'off' + - av + - reduced gaussian 160 + + rotation: + category: postproc + flatten: false + multiple: true + type: float + + intgrid: + category: postproc + flatten: false + type: [enum, regex] + values: + - 'off' + - auto + - N32 + - N48 + - N64 + - N80 + - N96 + - N128 + - N160 + - N192 + - N200 + - N256 + - N320 + - N400 + - N512 + - N640 + - N800 + - N912 + - N1024 + - N1280 + regex: + - '^[oOfF][1-9][0-9]+$' + + truncation: + category: postproc + flatten: false + type: [enum, integer] + values: + - auto + - 'off' +####################################################################### + +_obspproc: &_obspproc + + filter: + type: any + category: postproc + + ident: + type: any + category: postproc + + +####################################################################### + +disseminate: + <<: *_field + <<: *_postproc + + requirements: + type: any + + use: + category: data + flatten: false + multiple: true + type: enum + values: + - bc + - monday + - tuesday + - wednesday + - thursday + - friday + - saturday + - sunday + + option: + default: normal + flatten: false + multiple: true + type: enum + values: + - normal + - delay + - asap + - gts + - opendata + + compatibility: + category: postproc + flatten: false + multiple: true + type: enum + values: + - 'off' + - 'no-local-extension' + + priority: + flatten: false + type: integer + + target: + flatten: false + type: any + +############################################################## + +archive: + <<: *_field + <<: *_observation + + + database: + flatten: false + multiple: true + type: any + + source: + flatten: false + multiple: true + type: any + + expect: + flatten: false + multiple: false + type: integer + +############################################################## + +retrieve: + + <<: *_field + <<: *_observation + <<: *_postproc + <<: *_obspproc + + target: + flatten: false + multiple: true + type: any + + expect: + flatten: false + multiple: false + type: integer + + fieldset: + flatten: false + multiple: false + type: any + + database: + flatten: false + multiple: true + type: any + + optimise: + type: enum + values: + - true + - false + default: + "off" + + padding: + flatten: false + type: enum + values: + - none + - auto + +############################################################## + +read: + source: + flatten: false + multiple: true + type: any + + <<: *_field + <<: *_observation + <<: *_postproc + <<: *_obspproc + + target: + flatten: false + multiple: true + type: any + + fieldset: + flatten: false + multiple: false + type: any + + _defaults: + class: null + date: null + domain: null + expver: null + levelist: null + levtype: null + param: null + step: null + stream: null + time: null + type: null + + _options: + param: + # expand_with: # In case not type/stream/levtype is provided + # type: an + # stream: oper + # levtype: pl + first_rule: true +############################################################## + +get: + + tape: + flatten: false + multiple: false + type: any + + database: + flatten: false + multiple: true + type: any + + target: + 
flatten: false + multiple: true + type: any + +############################################################## + +list: + + <<: *_field + <<: *_observation + + database: + flatten: false + multiple: true + type: any + + target: + flatten: false + multiple: true + type: any + + _defaults: + # class: null + date: null + domain: null + expver: null + levelist: null + levtype: null + param: null + step: null + stream: null + time: null + type: null + +############################################################## + +compute: + formula: + flatten: false + multiple: false + type: any + + fieldset: + flatten: false + multiple: false + type: any + +############################################################## + +write: + + fieldset: + flatten: false + multiple: false + type: any + + target: + flatten: false + multiple: true + type: any + +############################################################## + +pointdb: + lat: + multiple: false + type: float + + lon: + multiple: false + type: float + + <<: *_field + + _defaults: + class: null + date: null + domain: null + expver: null + levelist: null + levtype: null + param: null + step: null + stream: null + time: null + type: null + + _options: + param: + # expand_with: # In case not type/stream/levtype is provided + # type: an + # stream: oper + # levtype: pl + first_rule: true + +end: {} diff --git a/backend/main.py b/backend/main.py new file mode 100644 index 0000000..7f31a7a --- /dev/null +++ b/backend/main.py @@ -0,0 +1,130 @@ +from collections import defaultdict +from typing import Any, Dict + +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from fastapi.staticfiles import StaticFiles +from fdb_schema import FDBSchemaFile + +app = FastAPI() +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +app.mount("/app", StaticFiles(directory="../webapp"), name="static") + + +language_yaml = "./language.yaml" +import yaml + +with open(language_yaml, "r") as f: + mars_language = yaml.safe_load(f)["_field"] + +###### Load FDB Schema +schema = FDBSchemaFile("./standard_fdb_schema") +# schema = FDBSchemaFile("./test_schema") + +def request_to_dict(request: Request) -> Dict[str, Any]: + # Convert query parameters to dictionary format + request_dict = dict(request.query_params) + for key, value in request_dict.items(): + # Convert comma-separated values into lists + if "," in value: + request_dict[key] = value.split(",") + return request_dict + +@app.get("/simple") +async def get_tree(request: Request): + request_dict = request_to_dict(request) + print(request_dict) + target = next((k for k,v in request_dict.items() if v == "????"), None) + if not target: + return {"error": "No target found in request, there must be one key with value '????'"} + + current_query_params = "&".join(f"{k}={v}" for k, v in request_dict.items() if k != target) + if len(current_query_params) > 1: + current_query_params += "&" + + stac_collection = { + "type": "Collection", + "stac_version": "1.0.0", + "id": target, + "title" : target.capitalize(), + "key_type": mars_language.get(target, {}).get("type", ""), + "description": mars_language.get(target, {}).get("description", ""), + "values": mars_language.get(target, {}).get("values", ""), + "links": [ + { + "title": str(value[-1] if isinstance(value, list) else value), + "href": f"/tree?{current_query_params}{target}={value[0] if isinstance(value, list) else value}", + "rel": "child", + "type": 
"application/json", + + } + + for value in mars_language.get(target, {}).get("values", []) + ] + } + + return stac_collection + + +@app.get("/tree") +async def get_tree(request: Request): + # Convert query parameters to dictionary format + request_dict = request_to_dict(request) + + # Run the schema matching logic + matches = schema.match_all(request_dict) + + # Only take the longest matches + max_len = max(len(m) for m in matches) + matches = [m for m in matches if len(m) == max_len] + + # Take the ends of all partial matches, ignore those that are full matches + # Full matches are indicated by the last key having boolean value True + key_frontier = defaultdict(list) + for match in matches: + if not match[-1]: + key_frontier[match[-1].key].append([m for m in match[:-1]]) + + + + def make_link(key_name, paths): + """Take a MARS Key and information about which paths matched up to this point and use it to make a STAC Link""" + first_path = [str(p) for p in paths[0]] + href = f"/simple?{'&'.join(first_path)}{'&' if first_path else ''}{key_name}=????" + optional = [p[-1].key_spec.is_optional() for p in paths if len(p) > 0] + optional_str = "Yes" if all(optional) and len(optional) > 0 else ("Sometimes" if any(optional) else "No") + + return { + "title": key_name, + "optional": optional_str, + # "optional_by_path": optional, + "href": href, + "rel": "child", + "type": "application/json", + "paths": set(tuple(f"{m.key}={m.value}" for m in p) for p in paths), + # "description": mars_language.get(key_name, {}).get("description", ""), + # "values": mars_language.get(key_name, {}).get("values", "") + + } + + + # Format the response as a STAC collection + stac_collection = { + "type": "Collection", + "stac_version": "1.0.0", + "id": "partial-matches", + "description": "STAC collection representing potential children of this request", + "links": [ + make_link(key_name, paths) + for key_name, paths in key_frontier.items() + ] + } + + return stac_collection \ No newline at end of file diff --git a/backend/requirements.txt b/backend/requirements.txt new file mode 100644 index 0000000..33bd9ae --- /dev/null +++ b/backend/requirements.txt @@ -0,0 +1,2 @@ +fastapi[standard] +pe \ No newline at end of file diff --git a/backend/standard_fdb_schema b/backend/standard_fdb_schema new file mode 100644 index 0000000..394223f --- /dev/null +++ b/backend/standard_fdb_schema @@ -0,0 +1,590 @@ + +# * Format of the rules is: + +# [a1, a2, a3 ...[b1, b2, b3... [c1, c2, c3...]]] + +# - The first level (a) defines which attributes are used to name the top level directory +# - The second level (b) defines which attributes are used to name the data files +# - The third level (c) defines which attributes are used as index keys + +# * Rules can be grouped + +# [a1, a2, a3 ... +# [b1, b2, b3... [c1, c2, c3...]] +# [B1, B2, B3... [C1, C2, C3...]] +# ] + +# * A list of values can be given for an attribute +# [ ..., stream=enfo/efov, ... ] +# This will be used when matching rules. + +# * Attributes can be typed +# Globally, at the begining of this file: + +# refdate: Date; + +# or in the context of a rule: +# [type=cl, ... [date:ClimateMonth, ...]] + +# Typing attributes is done when the user's requests or the GRIB values need to be modified before directories, files and indexes are created. For example, ClimateMonth will transform 2010-04-01 to 'may' internally. + +# * Attributes can be optional +# [ step, levelist?, param ] +# They will be replaced internally by an empty value. 
It is also posiible to provide a default subtitution value: e.g. [domain?g] will consider the domain to be 'g' if missing. + +# * Attributes can be removed: +# [grid-] +# This is useful to remove attributes present in the GRIB that should not be ignored + +# * Rules are matched: + +# - If the attributes are present in the GRIB/Request, or marked optional or ignored +# - If a list of possible value is provided, one of them must match, for example +# [ class, expver, stream=enfo/efov, date, time, domain ] +# will match either stream=enfo or stream=efov, all other attributes will be matched if they exist in the GRIB or user's request + +# * On archive: +# - Attributes are extracted from the GRIB (namespace 'mars'), possibly modified by the attribute type +# - Only the first rule is used, so order is important +# - All GRIB attributes must be used by the rules, otherwise an error is raised + +# * On retrieve: +# - Attributes are extracted from the user's request, possibly modified by the attribute type (e.g. for handling of U/V) +# - All the matching rules are considered +# - Only attributes listed in the rules are used to extract values from the user's request + + +# Default types + +param: Param; +step: Step; +date: Date; +hdate: Date; +refdate: Date; +latitude: Double; +longitude: Double; +levelist: Double; +grid: Grid; +expver: Expver; + +time: Time; +fcmonth: Integer; + +number: Integer; +frequency: Integer; +direction: Integer; +channel: Integer; + +instrument: Integer; +ident: Integer; + +diagnostic: Integer; +iteration: Integer; +system: Integer; +method: Integer; + +# ??????? + +# reference: Integer; +# fcperiod: Integer; + +# opttime: Integer; +# leadtime: Integer; + +# quantile: ?????? +# range: ?????? + +# band: Integer; + + +######################################################## +# These rules must be first, otherwise fields of These +# classes will be index with the default rule for oper +[ class=ti/s2, expver, stream, date, time, model + [ origin, type, levtype, hdate? + [ step, number?, levelist?, param ]] +] + +[ class=ms, expver, stream, date, time, country=de + [ domain, type, levtype, dbase, rki, rty, ty + [ step, levelist?, param ]] +] + +[ class=ms, expver, stream, date, time, country=it + [ domain, type, levtype, model, bcmodel, icmodel:First3 + [ step, levelist?, param ] + ] +] + +[ class=el, expver, stream, date, time, domain + [ origin, type, levtype + [ step, levelist?, param ]] +] + +######################################################## +# The are the rules matching most of the fields +# oper/dcda +[ class, expver, stream=oper/dcda/scda, date, time, domain? + + [ type=im/sim + [ step?, ident, instrument, channel ]] + + [ type=ssd + [ step, param, ident, instrument, channel ]] + + [ type=4i, levtype + [ step, iteration, levelist, param ]] + + [ type=me, levtype + [ step, number, levelist?, param ]] + + [ type=ef, levtype + [ step, levelist?, param, channel? ]] + + [ type=ofb/mfb + [ obsgroup, reportype ]] + + [ type, levtype + [ step, levelist?, param ]] + +] + +# dcwv/scwv/wave +[ class, expver, stream=dcwv/scwv/wave, date, time, domain + [ type, levtype + [ step, param, frequency?, direction? ]]] + +# enfo +[ class, expver, stream=enfo/efov, date, time, domain + + [ type, levtype=dp, product?, section? 
+ [ step, number?, levelist?, latitude?, longitude?, range?, param ]] + + [ type=tu, levtype, reference + [ step, number, levelist?, param ]] + + [ type, levtype + [ step, quantile?, number?, levelist?, param ]] + +] + +# waef/weov +[ class, expver, stream=waef/weov, date, time, domain + [ type, levtype + [ step, number?, param, frequency?, direction? ]] +] + +######################################################## +# enda +[ class, expver, stream=enda, date, time, domain + + [ type=ef/em/es/ses, levtype + [ step, number?, levelist?, param, channel? ]] + + [ type=ssd + [ step, number, param, ident, instrument, channel ]] + + + [ type, levtype + [ step, number?, levelist?, param ]] +] + +# ewda +[ class, expver, stream=ewda, date, time, domain + [ type, levtype + [ step, number?, param, frequency?, direction? ]] +] + + +######################################################## +# elda +[ class, expver, stream=elda, date, time, domain? + + [ type=ofb/mfb + [ obsgroup, reportype ]] + + [ type, levtype, anoffset + [ step, number?, levelist?, iteration?, param, channel? ]] +] + +# ewda +[ class, expver, stream=ewla, date, time, domain + [ type, levtype, anoffset + [ step, number?, param, frequency?, direction? ]] +] + +######################################################## +# elda +[ class, expver, stream=lwda, date, time, domain? + + [ type=ssd, anoffset + [ step, param, ident, instrument, channel ]] + + [type=me, levtype, anoffset + [ number, step, levelist?, param]] + + [ type=4i, levtype, anoffset + [ step, iteration, levelist, param ]] + + [ type=ofb/mfb + [ obsgroup, reportype ]] + + [ type, levtype, anoffset + [ step, levelist?, param]] +] + +# ewda +[ class, expver, stream=lwwv, date, time, domain + [ type, levtype, anoffset + [ step, param, frequency?, direction? ]] +] +######################################################## +# amap +[ class, expver, stream=amap, date, time, domain + [ type, levtype, origin + [ step, levelist?, param ]]] + +# maed +[ class, expver, stream=maed, date, time, domain + [ type, levtype, origin + [ step, levelist?, param ]]] + +# mawv +[ class, expver, stream=mawv, date, time, domain + [ type, levtype, origin + [ step, param, frequency?, direction? ]]] + +# cher +[ class, expver, stream=cher, date, time, domain + [ type, levtype + [ step, levelist, param ]]] + + +# efhc +[ class, expver, stream=efhc, refdate, time, domain + [ type, levtype, date + [ step, number?, levelist?, param ]]] + +# efho +[ class, expver, stream=efho, date, time, domain + [ type, levtype, hdate + [ step, number?, levelist?, param ]]] + + +# efhs +[ class, expver, stream=efhs, date, time, domain + [ type, levtype + [ step, quantile?, number?, levelist?, param ]]] + +# wehs +[ class, expver, stream=wehs, date, time, domain + [ type, levtype + [ step, quantile?, number?, levelist?, param ]]] + +# kwbc +[ class, expver, stream=kwbc, date, time, domain + [ type, levtype + [ step, number?, levelist?, param ]]] + +# ehmm +[ class, expver, stream=ehmm, date, time, domain + [ type, levtype, hdate + [ fcmonth, levelist?, param ]]] + + +# ammc/cwao/edzw/egrr/lfpw/rjtd/toga +[ class, expver, stream=ammc/cwao/edzw/egrr/lfpw/rjtd/toga/fgge, date, time, domain + [ type, levtype + [ step, levelist?, param ]]] + +######################################################################## + +# enfh +[ class, expver, stream=enfh, date, time, domain + + [ type, levtype=dp, hdate, product?, section? 
+ [ step, number?, levelist?, latitude?, longitude?, range?, param ]] + + [ type, levtype, hdate + [ step, number?, levelist?, param ]] +] + +# enwh +[ class, expver, stream=enwh, date, time, domain + [ type, levtype, hdate + [ step, number?, param, frequency?, direction? ]] +] + +######################################################################## +# sens +[ class, expver, stream=sens, date, time, domain + [ type, levtype + [ step, diagnostic, iteration, levelist?, param ]]] + +######################################################################## +# esmm +[ class, expver, stream=esmm, date, time, domain + [ type, levtype + [ fcmonth, levelist?, param ]]] +# ewhc +[ class, expver, stream=ewhc, refdate, time, domain + [ type, levtype, date + [ step, number?, param, frequency?, direction? ]]] + +######################################################################## +# ewho +[ class, expver, stream=ewho, date, time, domain + [ type, levtype, hdate + [ step, number?, param, frequency?, direction? ]]] + +# mfam +[ class, expver, stream=mfam, date, time, domain + + [ type=pb/pd, levtype, origin, system?, method + [ fcperiod, quantile, levelist?, param ]] + + [ type, levtype, origin, system?, method + [ fcperiod, number?, levelist?, param ]] + +] + +# mfhm +[ class, expver, stream=mfhm, refdate, time, domain + [ type, levtype, origin, system?, method, date? + [ fcperiod, number?, levelist?, param ]]] +# mfhw +[ class, expver, stream=mfhw, refdate, time, domain + [ type, levtype, origin, system?, method, date + [ step, number?, param ]]] +# mfwm +[ class, expver, stream=mfwm, date, time, domain + [ type, levtype, origin, system?, method + [ fcperiod, number, param ]]] +# mhwm +[ class, expver, stream=mhwm, refdate, time, domain + [ type, levtype, origin, system?, method, date + [ fcperiod, number, param ]]] + +# mmsf +[ class, expver, stream=mmsf, date, time, domain + + [ type, levtype=dp, origin, product, section, system?, method + [ step, number, levelist?, latitude?, longitude?, range?, param ]] + + [ type, levtype, origin, system?, method + [ step, number, levelist?, param ]] +] + +# mnfc +[ class, expver, stream=mnfc, date, time, domain + + [ type, levtype=dp, origin, product, section, system?, method + [ step, number?, levelist?, latitude?, longitude?, range?, param ]] + + [ type, levtype, origin, system?, method + [ step, number?, levelist?, param ]] +] + +# mnfh +[ class, expver, stream=mnfh, refdate, time, domain + [ type, levtype=dp, origin, product, section, system?, method, date + [ step, number?, levelist?, latitude?, longitude?, range?, param ]] + [ type, levtype, origin, system?, method, date? 
+ [ step, number?, levelist?, param ]] +] + +# mnfm +[ class, expver, stream=mnfm, date, time, domain + [ type, levtype, origin, system?, method + [ fcperiod, number?, levelist?, param ]]] + +# mnfw +[ class, expver, stream=mnfw, date, time, domain + [ type, levtype, origin, system?, method + [ step, number?, param ]]] + +# ea/mnth +[ class=ea, expver, stream=mnth, date, domain + [ type, levtype + [ time, step?, levelist?, param ]]] + +# mnth +[ class, expver, stream=mnth, domain + [ type=cl, levtype + [ date: ClimateMonthly, time, levelist?, param ]] + [ type, levtype + [ date , time, step?, levelist?, param ]]] + +# mofc +[ class, expver, stream=mofc, date, time, domain + [ type, levtype=dp, product, section, system?, method + [ step, number?, levelist?, latitude?, longitude?, range?, param ]] + [ type, levtype, system?, method + [ step, number?, levelist?, param ]] +] + +# mofm +[ class, expver, stream=mofm, date, time, domain + [ type, levtype, system?, method + [ fcperiod, number, levelist?, param ]]] + +# mmsa/msmm +[ class, expver, stream=mmsa, date, time, domain + [ type, levtype, origin, system?, method + [ fcmonth, number?, levelist?, param ]]] + +[ class, expver, stream=msmm, date, time, domain + [ type, levtype, origin, system?, method + [ fcmonth, number?, levelist?, param ]]] + +# ocea +[ class, expver, stream=ocea, date, time, domain + [ type, levtype, product, section, system?, method + [ step, number, levelist?, latitude?, longitude?, range?, param ]] +] + +#=# seas +[ class, expver, stream=seas, date, time, domain + + [ type, levtype=dp, product, section, system?, method + [ step, number, levelist?, latitude?, longitude?, range?, param ]] + + [ type, levtype, system?, method + [ step, number, levelist?, param ]] +] + +# sfmm/smma +[ class, expver, stream=sfmm/smma, date, time, domain + [ type, levtype, system?, method + [ fcmonth, number?, levelist?, param ]]] + +# supd +[ class=od, expver, stream=supd, date, time, domain + [ type, levtype, origin?, grid + [ step, levelist?, param ]]] + +# For era +[ class, expver, stream=supd, date, time, domain + [ type, levtype, grid- # The minus sign is here to consume 'grid', but don't index it + [ step, levelist?, param ]]] + +# swmm +[ class, expver, stream=swmm, date, time, domain + [ type, levtype, system?, method + [ fcmonth, number, param ]]] + +# wamf +[ class, expver, stream=wamf, date, time, domain + [ type, levtype, system?, method + [ step, number?, param ]]] + +# ea/wamo +[ class=ea, expver, stream=wamo, date, domain + [ type, levtype + [ time, step?, param ]]] + +# wamo +[ class, expver, stream=wamo, domain + [ type=cl, levtype + [ date: ClimateMonthly, time, param ]] + [ type, levtype + [ date, time, step?, param ]]] + +# wamd +[ class, expver, stream=wamd, date, domain + [ type, levtype + [ param ]]] + +# wasf +[ class, expver, stream=wasf, date, time, domain + [ type, levtype, system?, method + [ step, number, param ]]] +# wmfm +[ class, expver, stream=wmfm, date, time, domain + [ type, levtype, system?, method + [ fcperiod, number, param ]]] + +# moda +[ class, expver, stream=moda, date, domain + [ type, levtype + [ levelist?, param ]]] + +# msdc/mdfa/msda +[ class, expver, stream=msdc/mdfa/msda, domain + [ type, levtype + [ date, time?, step?, levelist?, param ]]] + + + +# seap +[ class, expver, stream=seap, date, time, domain + [ type=sv/svar, levtype, origin, method? + [ step, leadtime, opttime, number, levelist?, param ]] + + [ type=ef, levtype, origin + [ step, levelist?, param, channel? 
]] + + [ type, levtype, origin + [ step, levelist?, param ]] + + ] + +[ class, expver, stream=mmaf, date, time, domain + [ type, levtype, origin, system?, method + [ step, number, levelist?, param ]] +] + +[ class, expver, stream=mmam, date, time, domain + [ type, levtype, origin, system?, method + [ fcmonth, number, levelist?, param ]] +] + + +[ class, expver, stream=dacl, domain + [ type=pb, levtype + [ date: ClimateDaily, time, step, quantile, levelist?, param ]] + [ type, levtype + [ date: ClimateDaily, time, step, levelist?, param ]] + +] + +[ class, expver, stream=dacw, domain + [ type=pb, levtype + [ date: ClimateDaily, time, step, quantile, param ]] + [ type, levtype + [ date: ClimateDaily, time, step, param ]] + +] + +[ class, expver, stream=edmm/ewmm, date, time, domain + [ type=ssd + [ step, number, param, ident, instrument, channel ]] + [ type, levtype + [ step, number, levelist?, param ]] +] + +[ class, expver, stream=edmo/ewmo, date, domain + [ type, levtype + [ number, levelist?, param ]] +] + +# stream gfas +[ class=mc/rd, expver, stream=gfas, date, time, domain + [ type=ga, levtype + [ step, param ]] + + [ type=gsd + [ param, ident, instrument ]] + +] + +# class is e2 +[ class, expver, stream=espd, date, time, domain + [ type, levtype, origin, grid + [ step, number, levelist?, param ]]] + +[ class=cs, expver, stream, date:Default, time, domain + [ type, levtype + [ step, levelist?, param ]]] + + diff --git a/backend/test_schema b/backend/test_schema new file mode 100644 index 0000000..a0a684d --- /dev/null +++ b/backend/test_schema @@ -0,0 +1,11 @@ +[ class=od, stream, date, time + [ domain, type, levtype, dbase, rki, rty, ty + [ step, levelist?, param ]] +] + +[ class=ensemble, number, stream, date, time, + [ domain, type, levtype, dbase, rki, rty, ty + [ step, levelist?, param ]] +] + +[ class, foo] \ No newline at end of file diff --git a/backend/tests.ipynb b/backend/tests.ipynb new file mode 100644 index 0000000..ca0de70 --- /dev/null +++ b/backend/tests.ipynb @@ -0,0 +1,156 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "id": "2f01a012-002a-465c-9b09-681bdb3fc26d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "class\n", + "type\n", + "stream\n", + "expver\n", + "dataset\n", + "model\n", + "repres\n", + "obsgroup\n", + "reportype\n", + "levtype\n", + "levelist\n", + "param\n", + "date\n", + "year\n", + "month\n", + "hdate\n", + "offsetdate\n", + "fcmonth\n", + "fcperiod\n", + "time\n", + "offsettime\n", + "step\n", + "anoffset\n", + "reference\n", + "number\n", + "quantile\n", + "domain\n", + "frequency\n", + "direction\n", + "diagnostic\n", + "iteration\n", + "channel\n", + "ident\n", + "instrument\n", + "method\n", + "origin\n", + "system\n", + "activity\n", + "experiment\n", + "generation\n", + "realization\n", + "resolution\n" + ] + } + ], + "source": [ + "language_yaml = \"./language.yaml\"\n", + "import yaml\n", + "\n", + "with open(language_yaml, \"r\") as f:\n", + " mars_language = yaml.safe_load(f)[\"_field\"]\n", + "\n", + "for k in mars_language.keys(): print(k)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "be9074a8-a56f-4fd0-a466-de8904faaa1c", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "9dd26fe4-5da5-48a5-9e43-83ac1085f7e6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "([Key(key='class', value='od', 
key_spec=class=od, reason='Matches'),\n", + " Key(key='stream', value=5, key_spec=stream, reason='Matches'),\n", + " Key(key='date', value='', key_spec=date, reason='Key Missing')],\n", + " [Key(key='class', value='ensemble', key_spec=class=ensemble, reason='Matches'),\n", + " Key(key='number', value='2', key_spec=number, reason='Matches'),\n", + " Key(key='stream', value=5, key_spec=stream, reason='Matches'),\n", + " Key(key='date', value='', key_spec=date, reason='Key Missing')])" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from fdb_schema import FDBSchemaFile\n", + "schema = FDBSchemaFile(\"./test_schema\")\n", + "\n", + "r = {\n", + " \"class\" : [\"ensemble\", \"od\"],\n", + " \"number\" : \"2\",\n", + " \"stream\" : 5,\n", + "}\n", + "\n", + "a, b = schema.match_all(r)\n", + "a, b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f46268e3-e197-47b9-bb6e-94f06e0bf648", + "metadata": {}, + "outputs": [], + "source": [ + "([],\n", + " [[Key(key='class', value='od', key_spec=class=od, reason='Matches'),\n", + " Key(key='stream', value=5, key_spec=stream, reason='Matches'),\n", + " Key(key='date', value='', key_spec=date, reason='Key Missing')],\n", + " \n", + " [Key(key='class', value='ensemble', key_spec=class=ensemble, reason='Matches'),\n", + " Key(key='number', value='2', key_spec=number, reason='Matches'),\n", + " Key(key='stream', value=5, key_spec=stream, reason='Matches'),\n", + " Key(key='date', value='', key_spec=date, reason='Key Missing')]])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:micromamba-ionbeam]", + "language": "python", + "name": "conda-env-micromamba-ionbeam-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 0000000..d6272ff --- /dev/null +++ b/compose.yaml @@ -0,0 +1,20 @@ +services: + backend: + build: + context: ./backend + dockerfile: Dockerfile + ports: + - "8000:8000" + volumes: + - ./backend:/app + + web: + image: nginx + ports: + - "8123:80" + environment: + - NGINX_HOST=localhost + - NGINX_PORT=80 + volumes: + - ./webapp:/usr/share/nginx/html + \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 269cadc..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/README.rst b/docs/README.rst deleted file mode 100644 index 9ff848b..0000000 --- a/docs/README.rst +++ /dev/null @@ -1,44 +0,0 @@ - -**************************** -python-package-template-repo -**************************** - -A template documentation tree for creating readthedocs compatible documentation. - -**Quick start** - -Follow these steps to prepare your documentation for readthedocs. - -#. Install requirements - - pip3 install -r requirements.txt - -#. Create the supporting rst files and add them to the source directory, you should remove any -existing rst files to ensure a clean build: - - rm source/reference/*rst - sphinx-apidoc -o source/reference -H "API reference" --tocfile api -f ../PACKAGE_NAME/ - -#. Update the source/index.rst to reference the rst files create above. Depending on the contents of your package and the -rst files produced you will need to add something like the following to the Reference section: - - * :doc:`modules` - - .. toctree:: - :maxdepth: 1 - :hidden: - :caption: Reference - - modules - - -These steps will allow readthedocs to construct your documentation pages. It is possible to build the html pages locally -for testing. From the `docs/source` directory execute the following: - - make html - open build/html/index.html # To open with your default html application - - - - - diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index 5394189..0000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 88ea917..0000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -# Requirements for readthedocs -sphinx -ipykernel -nbsphinx -sphinx_rtd_theme \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 245ddef..0000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,57 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-# -import os -import sys - -# sys.path.insert(0, os.path.abspath('.')) -# sys.path.insert(0, os.path.abspath('..')) -sys.path.insert(0, os.path.abspath("../..")) - - -# -- Project information ----------------------------------------------------- - -project = "PACKAGE_NAME" -copyright = "2022, ECMWF" -author = "ECMWF" - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.napoleon", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "alabaster" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] diff --git a/docs/source/getting_started/installing.rst b/docs/source/getting_started/installing.rst deleted file mode 100644 index b63e95c..0000000 --- a/docs/source/getting_started/installing.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. _installing: - -Installing -========== - - -Pip install ------------ - -To install PACKAGE_NAME, just run the following command: - -.. code-block:: bash - - pip install PACKAGE_NAME - -The PACKAGE_NAME ``pip`` package has been tested successfully with the latest versions of -its dependencies (`build logs `_). - -Conda install -------------- - -No conda package has been created yet. -``pip install PACKAGE_NAME`` can be used in a conda environment. - -.. note:: - - Mixing ``pip`` and ``conda`` could create some dependencies issues, - we recommend installing as many dependencies as possible with conda, - then install PACKAGE_NAME with ``pip``, `as recommended by the anaconda team - `_. - - -Troubleshooting ---------------- - -Python 3.7 or above is required -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - PACKAGE_NAME requires Python 3.7 or above. Depending on your installation, - you may need to substitute ``pip`` to ``pip3`` in the examples below. - - diff --git a/docs/source/getting_started/overview.rst b/docs/source/getting_started/overview.rst deleted file mode 100644 index 34cb17f..0000000 --- a/docs/source/getting_started/overview.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _overview: - -Overview -======== - - -This package is a template - diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index e7de7eb..0000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. PACKAGE_NAME documentation master file, created by - sphinx-quickstart on Wed Mar 30 16:04:07 2022. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to PACKAGE_NAME's documentation! -======================================== - -Documentation -_____________ - -**Getting Started** - -* :doc:`getting_started/overview` -* :doc:`getting_started/installing` - -.. 
toctree:: - :maxdepth: 1 - :hidden: - :caption: Getting Started - - getting_started/overview - getting_started/installing - -**Reference** - -* :doc:`reference/api` - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: Reference - - reference/api - -License -------- - -*PACKAGE_NAME* is available under the open source `Apache License`__. - -__ http://www.apache.org/licenses/LICENSE-2.0.html - - diff --git a/docs/source/reference/PACKAGE_NAME.rst b/docs/source/reference/PACKAGE_NAME.rst deleted file mode 100644 index cb04b8c..0000000 --- a/docs/source/reference/PACKAGE_NAME.rst +++ /dev/null @@ -1,21 +0,0 @@ -PACKAGE\_NAME package -===================== - -Submodules ----------- - -PACKAGE\_NAME.sample module ---------------------------- - -.. automodule:: PACKAGE_NAME.sample - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: PACKAGE_NAME - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/reference/api.rst b/docs/source/reference/api.rst deleted file mode 100644 index 23bab81..0000000 --- a/docs/source/reference/api.rst +++ /dev/null @@ -1,7 +0,0 @@ -API reference -============= - -.. toctree:: - :maxdepth: 4 - - PACKAGE_NAME diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index ccc7fe5..0000000 --- a/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -[pytest] -addopts=-s --verbose -testpaths = tests \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index cb12bd2..0000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -numpy -pytest[tests] \ No newline at end of file diff --git a/run.sh b/run.sh new file mode 100755 index 0000000..2ee8219 --- /dev/null +++ b/run.sh @@ -0,0 +1,2 @@ +cd backend +fastapi dev main.py \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 9979838..0000000 --- a/setup.py +++ /dev/null @@ -1,58 +0,0 @@ -# (C) Copyright 1996- ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
- -import io -import os - -import setuptools - - -def read(fname): - file_path = os.path.join(os.path.dirname(__file__), fname) - return io.open(file_path, encoding="utf-8").read() - - -install_requires = ["numpy"] -tests_require = ["pytest"] - -meta = {} -exec(read("PACKAGE_NAME/__meta__.py"), meta) - - -setuptools.setup( - # Essential details on the package and its dependencies - name=meta["__name__"], - version=meta["__version__"], - description=meta.get("__description__", ""), - long_description=read("README.rst"), - author=meta.get( - "__author__", "European Centre for Medium-Range Weather Forecasts (ECMWF)" - ), - author_email=meta.get("__author_email__", "software.support@ecmwf.int"), - license="Apache License Version 2.0", - url="https://github.com/ecmwf-projects/python-package-template-repo", - packages=setuptools.find_packages(), - include_package_data=True, - install_requires=install_requires, - zip_safe=True, - classifiers=[ - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Operating System :: OS Independent", - ], - tests_require=tests_require, - test_suite="tests", -) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/test_sample.py b/tests/test_sample.py deleted file mode 100644 index 05cb27f..0000000 --- a/tests/test_sample.py +++ /dev/null @@ -1,37 +0,0 @@ -# (C) Copyright 1996- ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -""" -Demo tests for raw template. 
-""" - -import unittest - -from PACKAGE_NAME.sample import speed_direction_to_uv - - -class TestSpeedDirectionToUV(unittest.TestCase): - def test_zero_speed(self): - # Test that a wind speed of 0 results in u and v values of 0 - self.assertEqual(speed_direction_to_uv(0, 10), (0, 0)) - - def test_zero_direction(self): - # Test that a wind direction of 0 results u==speed and v==0 - self.assertEqual(speed_direction_to_uv(10, 0), (10, 0)) - - def test_180_direction(self): - # Test that a wind direction of 180 results u==-speed and v==0 - wind_u, wind_v = speed_direction_to_uv(10, 180) - self.assertEqual(wind_u, -10) - self.assertAlmostEqual(wind_v, 0) - - def test_90_direction(self): - # Test that a wind direction of 90 results u==0 and v==speed - wind_u, wind_v = speed_direction_to_uv(10, 90) - self.assertAlmostEqual(wind_u, 0) - self.assertEqual(wind_v, 10) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 9fe751b..0000000 --- a/tox.ini +++ /dev/null @@ -1,8 +0,0 @@ -[flake8] -; ignore = E226,E302,E41 -max-line-length = 120 -; exclude = tests/* -; See https://black.readthedocs.io/en/stable/the_black_code_style.html -extend-ignore = E203 -[isort] -profile=black \ No newline at end of file diff --git a/webapp/app.js b/webapp/app.js new file mode 100644 index 0000000..247778f --- /dev/null +++ b/webapp/app.js @@ -0,0 +1,315 @@ +// app.js + +// const API_BASE_URL = "http://127.0.0.1:8000/tree"; + +// Take the query string and stick it on the API URL +function getSTACUrlFromQuery() { + const params = new URLSearchParams(window.location.search); + + // get current window url and remove path part + let api_url = new URL(window.location.href); + api_url.pathname = "/tree"; + + for (const [key, value] of params.entries()) { + api_url.searchParams.set(key, value); + } + + console.log(api_url.toString()); + return api_url.toString(); +} + +function get_request_from_url() { + // Extract the query params in order and split any with a , delimiter + // request is an ordered array of [key, [value1, value2, value3, ...]] + const url = new URL(window.location.href); + const params = new URLSearchParams(url.search); + const request = []; + for (const [key, value] of params.entries()) { + request.push([key, value.split(",")]); + } + return request; +} + +function make_url_from_request(request) { + const url = new URL(window.location.href); + url.search = ""; // Clear existing params + const params = new URLSearchParams(); + + for (const [key, values] of request) { + params.set(key, values.join(",")); + } + url.search = params.toString(); + + return url.toString().replace(/%2C/g, ","); +} + +function goToPreviousUrl() { + let request = get_request_from_url(); + request.pop(); + console.log("Request:", request); + const url = make_url_from_request(request); + console.log("URL:", url); + window.location.href = make_url_from_request(request); +} + +// Function to generate a new STAC URL based on current selection +function goToNextUrl() { + const request = get_request_from_url(); + + // Get the currently selected key = value,value2,value3 pairs + const items = Array.from(document.querySelectorAll("div#items > div")); + + let any_new_keys = false; + const new_keys = items.map((item) => { + const key = item.dataset.key; + const key_type = item.dataset.keyType; + let values = []; + + if (key === "date") { + const datePicker = item.querySelector("input[type='date']"); + //format date as YYYYMMDD + values.push(datePicker.value.replace(/-/g, "")); + } else if (key === "time") { + const timePicker = 
item.querySelector("input[type='time']"); + //format time as HHMM + console.log("replace", timePicker.value.replace(":", "")); + values.push(timePicker.value.replace(":", "")); + } else if (key_type === "enum") { + values.push( + ...Array.from( + item.querySelectorAll("input[type='checkbox']:checked") + ).map((checkbox) => checkbox.value) + ); + } else { + const any = item.querySelector("input[type='text']"); + if (any.value !== "") { + values.push(any.value); + } + } + + // Keep track of whether any new keys are selected + if (values.length > 0) { + any_new_keys = true; + } + + return { key, values }; + }); + + // if not new keys are selected, do nothing + if (!any_new_keys) { + return; + } + + // Update the request with the new keys + for (const { key, values } of new_keys) { + // Find the index of the existing key in the request array + const existingIndex = request.findIndex( + ([existingKey, existingValues]) => existingKey === key + ); + + if (existingIndex !== -1) { + // If the key already exists, append the values + request[existingIndex][1] = [...request[existingIndex][1], ...values]; + } else { + // If the key doesn't exist, add a new entry + request.push([key, values]); + } + } + + const url = make_url_from_request(request); + window.location.href = url; +} + +async function createCatalogItem(link, itemsContainer) { + const itemDiv = document.createElement("div"); + itemDiv.className = "item loading"; + itemDiv.textContent = "Loading..."; + itemsContainer.appendChild(itemDiv); + + try { + // Fetch details for each item/collection asynchronously + let base_url = new URL(window.location.href); + base_url.pathname = "/tree"; + let url = new URL(link.href, base_url); + console.log("Fetching item details:", url); + const response = await fetch(url); + const itemData = await response.json(); + + // Update the item div with real content + itemDiv.classList.remove("loading"); + itemDiv.innerHTML = ""; // Clear "Loading..." text + + // add data-key attribute to the itemDiv + itemDiv.dataset.key = itemData.id; + itemDiv.dataset.keyType = itemData.key_type; + + const title = document.createElement("h3"); + title.className = "item-title"; + title.textContent = itemData.title || "No title available"; + itemDiv.appendChild(title); + + const key_type = document.createElement("p"); + key_type.className = "item-type"; + key_type.textContent = `Key Type: ${itemData.key_type || "Unknown"}`; + itemDiv.appendChild(key_type); + + const optional = document.createElement("p"); + optional.className = "item-type"; + optional.textContent = `Optional: ${link.optional || "Unknown"}`; + itemDiv.appendChild(optional); + + // const id = document.createElement("p"); + // id.className = "item-id"; + // id.textContent = `ID: ${itemData.id || link.href.split("/").pop()}`; + // itemDiv.appendChild(id); + + const description = document.createElement("p"); + description.className = "item-description"; + const descText = itemData.description + ? 
itemData.description.slice(0, 100) + : "No description available"; + description.textContent = `${descText}...`; + itemDiv.appendChild(description); + + if (itemData.key_type === "date" || itemData.key_type === "time") { + // Render a date picker for the "date" key + const picker = ``; + //convert picker to HTML node + const pickerNode = document + .createRange() + .createContextualFragment(picker); + itemDiv.appendChild(pickerNode); + } + // Otherwise create a scrollable list with checkboxes for values if available + else if ( + itemData.key_type === "enum" && + itemData.values && + itemData.values.length > 0 + ) { + const listContainer = renderCheckboxList(itemData); + itemDiv.appendChild(listContainer); + } else { + const any = ``; + const anyNode = document.createRange().createContextualFragment(any); + itemDiv.appendChild(anyNode); + } + } catch (error) { + console.error("Error loading item data:", error); + + // In case of an error, display an error message + itemDiv.innerHTML = "

Error loading item details

"; + } +} + +// Render catalog items in the sidebar +function renderCatalogItems(links) { + const itemsContainer = document.getElementById("items"); + itemsContainer.innerHTML = ""; // Clear previous items + + console.log("Number of Links:", links); + const children = links.filter( + (link) => link.rel === "child" || link.rel === "items" + ); + console.log("Number of Children:", children.length); + + children.forEach((link) => { + createCatalogItem(link, itemsContainer); + }); +} + +// Fetch and display item details +async function loadItemDetails(url) { + try { + const resolved_url = new URL(url, API_BASE_URL); + const response = await fetch(resolved_url); + const item = await response.json(); + + // Show details in the 'details' panel + const itemDetails = document.getElementById("item-details"); + itemDetails.textContent = JSON.stringify(item, null, 2); + } catch (error) { + console.error("Error loading item details:", error); + } +} + +function show_resp_in_sidebar(catalog) { + const itemDetails = document.getElementById("item-details"); + itemDetails.textContent = JSON.stringify(catalog, null, 2); +} + +// Fetch STAC catalog and display items +async function fetchCatalog(stacUrl) { + try { + const response = await fetch(stacUrl); + const catalog = await response.json(); + // Always load the most recently clicked item on the right-hand side + show_resp_in_sidebar(catalog); + + // Render the items from the catalog + if (catalog.links) { + console.log("Fetched STAC catalog:", stacUrl, catalog.links); + renderCatalogItems(catalog.links); + } + } catch (error) { + console.error("Error fetching STAC catalog:", error); + } +} + +// Initialize the viewer by fetching the STAC catalog +function initializeViewer() { + const stacUrl = getSTACUrlFromQuery(); + + if (stacUrl) { + console.log("Fetching STAC catalog from query string URL:", stacUrl); + fetchCatalog(stacUrl); + } else { + console.error("No STAC URL provided in the query string."); + } + + // Add event listener for the "Generate STAC URL" button + const generateUrlBtn = document.getElementById("next-btn"); + generateUrlBtn.addEventListener("click", goToNextUrl); + + const previousUrlBtn = document.getElementById("previous-btn"); + previousUrlBtn.addEventListener("click", goToPreviousUrl); + + // Add event listener for the "Raw STAC" button + const stacAnchor = document.getElementById("stac-anchor"); + stacAnchor.href = getSTACUrlFromQuery(); +} + +// Call initializeViewer on page load +initializeViewer(); + +function renderCheckboxList(itemData) { + const listContainer = document.createElement("div"); + listContainer.className = "item-list-container"; + + const listLabel = document.createElement("label"); + listLabel.textContent = "Select values:"; + listLabel.className = "list-label"; + + const scrollableList = document.createElement("div"); + scrollableList.className = "scrollable-list"; + + const checkboxesHtml = itemData.values + .map((valueArray) => { + const value = Array.isArray(valueArray) ? valueArray[0] : valueArray; + const labelText = Array.isArray(valueArray) + ? valueArray.join(" - ") + : valueArray; + return ` +
+ [checkbox markup lost in extraction; per styles.css each value renders a div.checkbox-container containing an input.item-checkbox and a label.checkbox-label]
+ `; + }) + .join(""); + + scrollableList.innerHTML = checkboxesHtml; + + listContainer.appendChild(listLabel); + listContainer.appendChild(scrollableList); + return listContainer; +} diff --git a/webapp/index.html b/webapp/index.html new file mode 100644 index 0000000..c54f520 --- /dev/null +++ b/webapp/index.html @@ -0,0 +1,33 @@ + + + + + + STAC Viewer + + + +
+ [index.html body markup was lost in extraction; the surviving text is a "STAC Items" sidebar heading, the hint "Select one or more items and then click next to iteratively build up a full request.", a "Raw STAC" link and an empty item-details panel; app.js expects elements with ids catalog-list, items, previous-btn, next-btn, stac-anchor and item-details.]
+ + + + \ No newline at end of file diff --git a/webapp/run.sh b/webapp/run.sh new file mode 100644 index 0000000..9ae1030 --- /dev/null +++ b/webapp/run.sh @@ -0,0 +1,2 @@ +# https://github.com/cytb/simple-autoreload-server +autoreload-server . 8123 \ No newline at end of file diff --git a/webapp/styles.css b/webapp/styles.css new file mode 100644 index 0000000..a9dd18f --- /dev/null +++ b/webapp/styles.css @@ -0,0 +1,153 @@ +body { + font-family: Arial, sans-serif; + margin: 0; + padding: 0; +} +#viewer { + display: flex; + flex-direction: row; + height: 100vh; +} +#catalog-list { + width: 30%; + padding: 10px; + overflow-y: scroll; + background-color: #f4f4f4; + border-right: 1px solid #ddd; +} + +#catalog-list h2 { + margin-top: 0; +} +#details { + width: 70%; + padding: 10px; +} + +.sidebar-header { + display: flex; + justify-content: space-between; /* Center buttons horizontally */ + margin-bottom: 10px; /* Space below header */ + height: 3em; +} + +.sidebar-header button { + width: 10em; +} + +canvas { + width: 100%; + height: 300px; + border: 1px solid #ccc; + margin-top: 20px; +} + +/* Updated CSS for the item elements in the catalog list */ +.item { + background-color: white; + border: 1px solid #ddd; + padding: 10px; + margin-bottom: 10px; + border-radius: 5px; + transition: background-color 0.2s ease; +} + +.item-title { + font-size: 18px; + margin: 0; + color: #333; +} + +.item-type { + font-size: 14px; + margin: 5px 0; + color: #666; +} + +.item-id, .item-key-type { + font-size: 12px; + color: #999; +} + +.item-description { + font-size: 13px; + margin: 5px 0; + color: #444; + font-style: italic; +} + +#items { + padding: 10px; +} + +.item.selected { + background-color: #d4e9ff; /* Lighter blue for selection */ + border-color: #003399; /* Keep the original ECMWF blue for the border */ +} + +#item-details { + white-space: pre-wrap; + background-color: #f9f9f9; + padding: 10px; + border: 1px solid #ccc; +} + + +/* Button styles */ +button { + height: 3em; + padding: 10px 20px; /* Padding around button text */ + margin: 0 5px; /* Margin between buttons */ + background-color: #003399; /* ECMWF blue */ + color: white; /* White text color */ + border: none; /* Remove default button border */ + cursor: pointer; /* Pointer cursor on hover */ + border-radius: 5px; /* Rounded corners */ + transition: background-color 0.3s ease; /* Smooth background color transition */ +} + +button:hover { + background-color: #001f66; /* Darker shade of ECMWF blue on hover */ +} + +.item-list-container { + margin-top: 20px; + margin-bottom: 20px; +} + +.scrollable-list { + max-height: 200px; + overflow-y: auto; + padding: 10px; + border: 1px solid #ccc; + border-radius: 4px; + background-color: #fff; + box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1); +} + +.checkbox-container { + display: flex; + align-items: center; + margin-bottom: 10px; +} + +.item-checkbox { + margin-right: 10px; + cursor: pointer; +} + +.checkbox-label { + font-size: 16px; + color: #333; +} + +.checkbox-container:hover .checkbox-label { + color: #003399; +} + +.list-label { + font-weight: bold; + margin-bottom: 5px; + display: block; + color: #003399; +} \ No newline at end of file
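
For reference, here is a minimal sketch of the rule semantics described in the header comments of backend/standard_fdb_schema. It is illustrative only and deliberately much simpler than the real matcher in fdb_schema.FDBSchemaFile: a rule is a nested list of key specs, a spec may constrain values (stream=enfo/efov) or be optional (levelist?), and a request satisfies a level of a rule when every non-optional key is present with an allowed value.

    def parse_key(spec):
        # "stream=enfo/efov" -> ("stream", {"enfo", "efov"}, False); "levelist?" -> ("levelist", None, True)
        optional = spec.endswith("?")
        spec = spec.rstrip("?")
        if "=" in spec:
            name, values = spec.split("=", 1)
            return name, set(values.split("/")), optional
        return spec, None, optional

    def matches(level, request):
        # Every key spec must be either optional and absent, or present with an allowed value.
        for spec in level:
            name, allowed, optional = parse_key(spec)
            if name not in request:
                if not optional:
                    return False
            elif allowed is not None and str(request[name]) not in allowed:
                return False
        return True

    # A hypothetical rule flattened into its three levels (directory / file / index keys).
    rule = (
        ["class", "expver", "stream=enfo/efov", "date", "time", "domain"],
        ["type", "levtype"],
        ["step", "quantile?", "number?", "levelist?", "param"],
    )

    request = {"class": "od", "expver": "0001", "stream": "enfo", "date": "20240101",
               "time": "0000", "domain": "g", "type": "pf", "levtype": "pl",
               "step": "24", "number": "1", "levelist": "500", "param": "t"}

    print(all(matches(level, request) for level in rule))  # prints True

The API added in backend/main.py builds on the same idea: the key to expand is marked with the value "????", so (assuming the FastAPI dev server defaults of 127.0.0.1:8000) GET /simple?class=od&stream=???? returns a STAC Collection whose child links each fix one value of stream and point back at /tree, which is how the webapp iteratively builds up a full request.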