diff --git a/Dockerfile b/Dockerfile
index b7d00c864..e5df99152 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,7 +22,7 @@ RUN mvn -Pstandalone clean install
# ==============================
-FROM atomgraph/letsencrypt-tomcat:10.1.34
+FROM atomgraph/letsencrypt-tomcat:10.1.46
LABEL maintainer="martynas@atomgraph.com"
@@ -72,14 +72,12 @@ ENV OWNER_CERT_ALIAS=root-owner
ENV OWNER_KEYSTORE=/var/linkeddatahub/ssl/owner/keystore.p12
ENV OWNER_CERT=/var/linkeddatahub/ssl/owner/cert.pem
ENV OWNER_PUBLIC_KEY=/var/linkeddatahub/ssl/owner/public.pem
-ENV OWNER_PRIVATE_KEY=/var/linkeddatahub/ssl/owner/private.key
ENV SECRETARY_COMMON_NAME=LinkedDataHub
ENV SECRETARY_CERT_ALIAS=root-secretary
ENV SECRETARY_KEYSTORE=/var/linkeddatahub/ssl/secretary/keystore.p12
ENV SECRETARY_CERT=/var/linkeddatahub/ssl/secretary/cert.pem
ENV SECRETARY_PUBLIC_KEY=/var/linkeddatahub/ssl/secretary/public.pem
-ENV SECRETARY_PRIVATE_KEY=/var/linkeddatahub/ssl/secretary/private.key
ENV CLIENT_KEYSTORE_MOUNT=/var/linkeddatahub/ssl/secretary/keystore.p12
ENV CLIENT_KEYSTORE="$CATALINA_HOME/webapps/ROOT/WEB-INF/keystore.p12"
@@ -147,12 +145,22 @@ COPY platform/import-letsencrypt-stg-roots.sh import-letsencrypt-stg-roots.sh
COPY platform/select-root-services.rq select-root-services.rq
-# copy the metadata of the built-in secretary agent
+COPY platform/select-agent-metadata.rq select-agent-metadata.rq
+
+# copy the metadata of built-in agents
COPY platform/root-secretary.trig.template root-secretary.trig.template
COPY platform/root-owner.trig.template root-owner.trig.template
+COPY platform/root-secretary-authorization.trig.template root-secretary-authorization.trig.template
+
+COPY platform/root-owner-authorization.trig.template root-owner-authorization.trig.template
+
+# copy the metadata of the namespace ontology
+
+COPY platform/namespace-ontology.trig.template namespace-ontology.trig.template
+
# copy default datasets
COPY platform/datasets/admin.trig /var/linkeddatahub/datasets/admin.trig
@@ -197,7 +205,7 @@ RUN useradd --no-log-init -U ldh && \
RUN ./import-letsencrypt-stg-roots.sh
HEALTHCHECK --start-period=80s --retries=5 \
- CMD curl -f -I "http://localhost:${HTTP_PORT}/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document
+ CMD curl -f -I "http://localhost:7070/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document
USER ldh
diff --git a/bin/add-generic-service.sh b/bin/add-generic-service.sh
index 645c33998..0dd84cdb5 100755
--- a/bin/add-generic-service.sh
+++ b/bin/add-generic-service.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/add-result-set-chart.sh b/bin/add-result-set-chart.sh
index b4a0c7d7e..f93e8fe9b 100755
--- a/bin/add-result-set-chart.sh
+++ b/bin/add-result-set-chart.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/add-select.sh b/bin/add-select.sh
index 0d48ab075..fc54ffb6f 100755
--- a/bin/add-select.sh
+++ b/bin/add-select.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/add-view.sh b/bin/add-view.sh
index 24827c982..826ed6cda 100755
--- a/bin/add-view.sh
+++ b/bin/add-view.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/acl/add-agent-to-group.sh b/bin/admin/acl/add-agent-to-group.sh
index 0cc212b19..b7e8abb3a 100755
--- a/bin/admin/acl/add-agent-to-group.sh
+++ b/bin/admin/acl/add-agent-to-group.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
@@ -79,4 +80,4 @@ sparql+="}\n"
# PATCH SPARQL to the named graph
-echo -e "$sparql" | curl -X PATCH --data-binary @- -s -k -E "$cert_pem_file":"$cert_password" "$target" -H "Content-Type: application/sparql-update"
\ No newline at end of file
+echo -e "$sparql" | curl -f -X PATCH --data-binary @- -s -k -E "$cert_pem_file":"$cert_password" "$target" -H "Content-Type: application/sparql-update"
\ No newline at end of file
diff --git a/bin/admin/acl/create-authorization.sh b/bin/admin/acl/create-authorization.sh
index 82e5cd909..b2a1b6d68 100755
--- a/bin/admin/acl/create-authorization.sh
+++ b/bin/admin/acl/create-authorization.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/acl/create-group.sh b/bin/admin/acl/create-group.sh
index 12972ddf6..dc33cdb19 100755
--- a/bin/admin/acl/create-group.sh
+++ b/bin/admin/acl/create-group.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/acl/make-public.sh b/bin/admin/acl/make-public.sh
index 95b1ee1ec..4cc906c01 100755
--- a/bin/admin/acl/make-public.sh
+++ b/bin/admin/acl/make-public.sh
@@ -58,7 +58,13 @@ if [ -z "$base" ] ; then
exit 1
fi
-target="${base}admin/acl/authorizations/public/"
+admin_uri() {
+ local uri="$1"
+ echo "$uri" | sed 's|://|://admin.|'
+}
+
+admin_base=$(admin_uri "$base")
+target="${admin_base}acl/authorizations/public/"
if [ -n "$proxy" ]; then
# rewrite target hostname to proxy hostname
@@ -73,7 +79,7 @@ curl -X PATCH \
-H "Content-Type: application/sparql-update" \
"$target" \
--data-binary @- <<EOF
+BASE <${admin_base}>
PREFIX acl: <http://www.w3.org/ns/auth/acl#>
PREFIX def: <https://w3id.org/atomgraph/linkeddatahub/default#>
@@ -84,10 +90,10 @@ PREFIX foaf:
INSERT
{
acl:accessToClass def:Root, dh:Container, dh:Item, nfo:FileDataObject ;
- acl:accessTo <../sparql> .
+ acl:accessTo <${base}sparql> .
a acl:Authorization ;
- acl:accessTo <../sparql> ;
+ acl:accessTo <${base}sparql> ;
acl:mode acl:Append ;
acl:agentClass foaf:Agent, acl:AuthenticatedAgent . # hacky way to allow queries over POST
}
diff --git a/bin/admin/add-ontology-import.sh b/bin/admin/add-ontology-import.sh
index 3333c9ab4..df5ce800d 100755
--- a/bin/admin/add-ontology-import.sh
+++ b/bin/admin/add-ontology-import.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
@@ -80,4 +81,4 @@ sparql+="}\n"
# PATCH SPARQL to the named graph
-echo -e "$sparql" | curl -X PATCH --data-binary @- -v -k -E "$cert_pem_file":"$cert_password" "$target" -H "Content-Type: application/sparql-update"
\ No newline at end of file
+echo -e "$sparql" | curl -f -X PATCH --data-binary @- -v -k -E "$cert_pem_file":"$cert_password" "$target" -H "Content-Type: application/sparql-update"
\ No newline at end of file
diff --git a/bin/admin/model/add-class.sh b/bin/admin/model/add-class.sh
index 9d8abd639..f1784ad56 100755
--- a/bin/admin/model/add-class.sh
+++ b/bin/admin/model/add-class.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/model/add-construct.sh b/bin/admin/model/add-construct.sh
index b3a00cd1c..675188a5b 100755
--- a/bin/admin/model/add-construct.sh
+++ b/bin/admin/model/add-construct.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/model/add-property-constraint.sh b/bin/admin/model/add-property-constraint.sh
index 05787dc11..0fe5a00d0 100755
--- a/bin/admin/model/add-property-constraint.sh
+++ b/bin/admin/model/add-property-constraint.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/model/add-restriction.sh b/bin/admin/model/add-restriction.sh
index 6284baae9..f5101a9b4 100755
--- a/bin/admin/model/add-restriction.sh
+++ b/bin/admin/model/add-restriction.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/model/add-select.sh b/bin/admin/model/add-select.sh
index db82da765..02cc8d921 100755
--- a/bin/admin/model/add-select.sh
+++ b/bin/admin/model/add-select.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/admin/model/create-ontology.sh b/bin/admin/model/create-ontology.sh
index d691b286d..62cd31b53 100755
--- a/bin/admin/model/create-ontology.sh
+++ b/bin/admin/model/create-ontology.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/content/add-object-block.sh b/bin/content/add-object-block.sh
index 16b64532d..92fd89705 100755
--- a/bin/content/add-object-block.sh
+++ b/bin/content/add-object-block.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/content/add-xhtml-block.sh b/bin/content/add-xhtml-block.sh
index 3af9cf508..c72fac1a2 100755
--- a/bin/content/add-xhtml-block.sh
+++ b/bin/content/add-xhtml-block.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/create-container.sh b/bin/create-container.sh
index 29ef0bdde..fb9d4328d 100755
--- a/bin/create-container.sh
+++ b/bin/create-container.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/create-item.sh b/bin/create-item.sh
index e66398cb7..63d3d19ce 100755
--- a/bin/create-item.sh
+++ b/bin/create-item.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/get.sh b/bin/get.sh
index 50f43f3c9..09ebb262f 100755
--- a/bin/get.sh
+++ b/bin/get.sh
@@ -84,7 +84,7 @@ fi
# GET RDF document
if [ -n "$head" ] ; then
- curl -v -k -E "$cert_pem_file":"$cert_password" -H "Accept: ${accept}" "$target" --head
+ curl -f -v -k -E "$cert_pem_file":"$cert_password" -H "Accept: ${accept}" "$target" --head
else
- curl -v -k -E "$cert_pem_file":"$cert_password" -H "Accept: ${accept}" "$target"
+ curl -f -v -k -E "$cert_pem_file":"$cert_password" -H "Accept: ${accept}" "$target"
fi
\ No newline at end of file
diff --git a/bin/imports/create-csv-import.sh b/bin/imports/create-csv-import.sh
index f77e41a3b..ffd745575 100755
--- a/bin/imports/create-csv-import.sh
+++ b/bin/imports/create-csv-import.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/imports/create-file.sh b/bin/imports/create-file.sh
index bbb21670d..36413d34c 100755
--- a/bin/imports/create-file.sh
+++ b/bin/imports/create-file.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
@@ -176,7 +177,7 @@ if [ -n "$proxy" ]; then
fi
# POST RDF/POST multipart form and capture the effective URL
-effective_url=$(echo -e "$rdf_post" | curl -w '%{url_effective}' -v -s -k -X PUT -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target")
+effective_url=$(echo -e "$rdf_post" | curl -w '%{url_effective}' -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target")
# If using proxy, rewrite the effective URL back to original hostname
if [ -n "$proxy" ]; then
diff --git a/bin/imports/create-query.sh b/bin/imports/create-query.sh
index 990edf959..ff9a8eab2 100755
--- a/bin/imports/create-query.sh
+++ b/bin/imports/create-query.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/imports/create-rdf-import.sh b/bin/imports/create-rdf-import.sh
index ccbb24c4a..b51113a96 100755
--- a/bin/imports/create-rdf-import.sh
+++ b/bin/imports/create-rdf-import.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+set -eo pipefail
print_usage()
{
diff --git a/bin/patch.sh b/bin/patch.sh
index 93c821c08..e4fa67b83 100755
--- a/bin/patch.sh
+++ b/bin/patch.sh
@@ -70,4 +70,4 @@ fi
# resolve SPARQL update from stdin against base URL and PATCH it to the server
# uparse currently does not support --base: https://github.com/apache/jena/issues/3296
-cat - | curl -v -k -E "$cert_pem_file":"$cert_password" --data-binary @- -H "Content-Type: application/sparql-update" -X PATCH -o /dev/null "$final_url"
+cat - | curl -f -v -k -E "$cert_pem_file":"$cert_password" --data-binary @- -H "Content-Type: application/sparql-update" -X PATCH -o /dev/null "$final_url"
diff --git a/bin/post.sh b/bin/post.sh
index a820065d7..54e49eafe 100755
--- a/bin/post.sh
+++ b/bin/post.sh
@@ -80,7 +80,7 @@ else
fi
# resolve RDF document from stdin against base URL and POST to the server and print request URL
-effective_url=$(cat - | turtle --base="$url" | curl -w '%{url_effective}' -v -k -E "$cert_pem_file":"$cert_password" -d @- -H "Content-Type: ${content_type}" -H "Accept: text/turtle" -o /dev/null "$final_url")
+effective_url=$(cat - | turtle --base="$url" | curl -w '%{url_effective}' -f -v -k -E "$cert_pem_file":"$cert_password" -d @- -H "Content-Type: ${content_type}" -H "Accept: text/turtle" -o /dev/null "$final_url") || exit $?
# If using proxy, rewrite the effective URL back to original hostname
if [ -n "$proxy" ]; then
diff --git a/bin/put.sh b/bin/put.sh
index 3f890a369..799d81d2b 100755
--- a/bin/put.sh
+++ b/bin/put.sh
@@ -80,7 +80,7 @@ else
fi
# resolve RDF document from stdin against base URL and PUT to the server and print request URL
-effective_url=$(cat - | turtle --base="$url" | curl -w '%{url_effective}' -v -k -E "$cert_pem_file":"$cert_password" -d @- -X PUT -H "Content-Type: ${content_type}" -H "Accept: text/turtle" -o /dev/null "$final_url")
+effective_url=$(cat - | turtle --base="$url" | curl -w '%{url_effective}' -f -v -k -E "$cert_pem_file":"$cert_password" -d @- -X PUT -H "Content-Type: ${content_type}" -H "Accept: text/turtle" -o /dev/null "$final_url") || exit $?
# If using proxy, rewrite the effective URL back to original hostname
if [ -n "$proxy" ]; then
diff --git a/bin/webid-keygen-pem.sh b/bin/webid-keygen-pem.sh
index cc7d8c2ee..d5b93ac82 100755
--- a/bin/webid-keygen-pem.sh
+++ b/bin/webid-keygen-pem.sh
@@ -4,7 +4,7 @@
if [ "$#" -ne 6 ]; then
echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2
- echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2
+ echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2
exit 1
fi
diff --git a/bin/webid-keygen.sh b/bin/webid-keygen.sh
index 7d7fc8594..787180f22 100755
--- a/bin/webid-keygen.sh
+++ b/bin/webid-keygen.sh
@@ -4,7 +4,7 @@
if [ "$#" -ne 6 ]; then
echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2
- echo "Example: $0 martynas martynas.localhost.p12 Password Password https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2
+ echo "Example: $0 martynas martynas.localhost.p12 Password Password https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2
exit 1
fi
diff --git a/config/system.trig b/config/system.trig
index 2fdf7c99c..647f582c7 100644
--- a/config/system.trig
+++ b/config/system.trig
@@ -1,4 +1,5 @@
@prefix lapp: <https://w3id.org/atomgraph/linkeddatahub/apps#> .
+@prefix ldh: <https://w3id.org/atomgraph/linkeddatahub#> .
@prefix a: <https://w3id.org/atomgraph/core#> .
@prefix ac: <https://w3id.org/atomgraph/client#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@@ -16,7 +17,8 @@
a lapp:Application, lapp:AdminApplication ;
dct:title "LinkedDataHub admin" ;
- ldt:base ;
+ # ldt:base ;
+ ldh:origin ;
ldt:ontology ;
ldt:service ;
ac:stylesheet ;
@@ -35,8 +37,9 @@
a lapp:Application, lapp:EndUserApplication ;
dct:title "LinkedDataHub" ;
- ldt:base <> ;
- ldt:ontology ;
+ # ldt:base ;
+ ldh:origin ;
+ ldt:ontology ;
ldt:service ;
lapp:adminApplication ;
lapp:frontendProxy ;
diff --git a/docker-compose.yml b/docker-compose.yml
index 0e6d3ce14..f4f3d8dad 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -56,6 +56,11 @@ services:
- MAIL_SMTP_HOST=email-server
- MAIL_SMTP_PORT=25
- MAIL_USER=linkeddatahub@localhost
+ - REMOTE_IP_VALVE=true
+ - REMOTE_IP_VALVE_PROTOCOL_HEADER=X-Forwarded-Proto
+ - REMOTE_IP_VALVE_PORT_HEADER=X-Forwarded-Port
+ - REMOTE_IP_VALVE_REMOTE_IP_HEADER=X-Forwarded-For
+ - REMOTE_IP_VALVE_HOST_HEADER=X-Forwarded-Host
- OWNER_MBOX=${OWNER_MBOX}
#- OWNER_URI=${OWNER_URI}
- OWNER_GIVEN_NAME=${OWNER_GIVEN_NAME}
@@ -163,7 +168,7 @@ configs:
# server with optional client cert authentication
server {
listen 8443 ssl;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
ssl_certificate /etc/nginx/ssl/server.crt;
ssl_certificate_key /etc/nginx/ssl/server.key;
ssl_session_cache shared:SSL:1m;
@@ -175,6 +180,11 @@ configs:
#proxy_cache backcache;
limit_req zone=linked_data burst=30 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
@@ -185,6 +195,11 @@ configs:
proxy_pass http://linkeddatahub;
limit_req zone=static_files burst=20 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
@@ -202,7 +217,7 @@ configs:
# server with client cert authentication on
server {
listen 9443 ssl;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
ssl_certificate /etc/nginx/ssl/server.crt;
ssl_certificate_key /etc/nginx/ssl/server.key;
ssl_session_cache shared:SSL:1m;
@@ -214,6 +229,11 @@ configs:
#proxy_cache backcache;
limit_req zone=linked_data burst=30 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
}
@@ -226,7 +246,7 @@ configs:
server {
listen 8080;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
location / {
return 301 https://$$server_name:${HTTPS_PORT}$$request_uri;
diff --git a/http-tests/access/group-authorization.sh b/http-tests/access/group-authorization.sh
index eb91aa837..69e5378c2 100755
--- a/http-tests/access/group-authorization.sh
+++ b/http-tests/access/group-authorization.sh
@@ -19,7 +19,7 @@ ntriples=$(curl -k -s -G \
"${ADMIN_BASE_URL}access"
)
-if echo "$ntriples" | grep -q ' <https://localhost:4443/admin/acl/groups/writers/#this>'; then
+if echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then
exit 1
fi
@@ -50,6 +50,6 @@ ntriples=$(curl -k -s -G \
"${ADMIN_BASE_URL}access"
)
-if ! echo "$ntriples" | grep -q ' <https://localhost:4443/admin/acl/groups/writers/#this>'; then
+if ! echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then
exit 1
fi
diff --git a/http-tests/admin/acl/add-delete-authorization.sh b/http-tests/admin/acl/add-delete-authorization.sh
index 4e4cf1b19..0692735f7 100755
--- a/http-tests/admin/acl/add-delete-authorization.sh
+++ b/http-tests/admin/acl/add-delete-authorization.sh
@@ -28,7 +28,27 @@ container=$(create-container.sh \
--slug "$slug" \
--parent "$END_USER_BASE_URL")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake DELETE authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to "$container" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ -X DELETE \
+ "$container" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -39,7 +59,7 @@ create-authorization.sh \
--to "$container" \
--write
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-delete-class-authorization.sh b/http-tests/admin/acl/add-delete-class-authorization.sh
index a814147e0..b763c5c5b 100755
--- a/http-tests/admin/acl/add-delete-class-authorization.sh
+++ b/http-tests/admin/acl/add-delete-class-authorization.sh
@@ -28,7 +28,27 @@ container=$(create-container.sh \
--slug "$slug" \
--parent "$END_USER_BASE_URL")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake DELETE class authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to-all-in "https://www.w3.org/ns/ldt/document-hierarchy#Container" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ -X DELETE \
+ "$container" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -39,7 +59,7 @@ create-authorization.sh \
--to-all-in "https://www.w3.org/ns/ldt/document-hierarchy#Container" \
--write
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-delete-group-authorization.sh b/http-tests/admin/acl/add-delete-group-authorization.sh
index ae55921ca..c6fe39bff 100755
--- a/http-tests/admin/acl/add-delete-group-authorization.sh
+++ b/http-tests/admin/acl/add-delete-group-authorization.sh
@@ -44,7 +44,27 @@ container=$(create-container.sh \
--slug "$slug" \
--parent "$END_USER_BASE_URL")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake DELETE group authorization from test.localhost" \
+ --agent-group "$group" \
+ --to "$container" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ -X DELETE \
+ "$container" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -55,7 +75,7 @@ create-authorization.sh \
--to "$container" \
--write
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-get-authorization.sh b/http-tests/admin/acl/add-get-authorization.sh
index 9273104e9..5f9b0c701 100755
--- a/http-tests/admin/acl/add-get-authorization.sh
+++ b/http-tests/admin/acl/add-get-authorization.sh
@@ -15,7 +15,26 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \
"$END_USER_BASE_URL" \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake GET authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to "$END_USER_BASE_URL" \
+ --read
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ "$END_USER_BASE_URL" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -26,7 +45,7 @@ create-authorization.sh \
--to "$END_USER_BASE_URL" \
--read
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-get-class-authorization.sh b/http-tests/admin/acl/add-get-class-authorization.sh
index 0f2b099c1..2d975c739 100755
--- a/http-tests/admin/acl/add-get-class-authorization.sh
+++ b/http-tests/admin/acl/add-get-class-authorization.sh
@@ -15,7 +15,26 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \
"$END_USER_BASE_URL" \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake GET Container authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to-all-in "https://w3id.org/atomgraph/linkeddatahub/default#Root" \
+ --read
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ "$END_USER_BASE_URL" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -26,7 +45,7 @@ create-authorization.sh \
--to-all-in "https://w3id.org/atomgraph/linkeddatahub/default#Root" \
--read
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-get-group-authorization.sh b/http-tests/admin/acl/add-get-group-authorization.sh
index 8e99c9e8c..6c890a6ea 100755
--- a/http-tests/admin/acl/add-get-group-authorization.sh
+++ b/http-tests/admin/acl/add-get-group-authorization.sh
@@ -31,7 +31,26 @@ group=$(curl -s -k \
| cat \
| sed -rn "s/<${group_doc//\//\\/}> <(.*)> \./\1/p")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake GET group authorization from test.localhost" \
+ --agent-group "$group" \
+ --to "$END_USER_BASE_URL" \
+ --read
+
+# access is still denied (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Accept: application/n-triples" \
+ "$END_USER_BASE_URL" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -42,7 +61,7 @@ create-authorization.sh \
--to "$END_USER_BASE_URL" \
--read
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -s \
-E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
diff --git a/http-tests/admin/acl/add-post-authorization.sh b/http-tests/admin/acl/add-post-authorization.sh
index d6abdcf62..c07bcf864 100755
--- a/http-tests/admin/acl/add-post-authorization.sh
+++ b/http-tests/admin/acl/add-post-authorization.sh
@@ -22,7 +22,33 @@ EOF
) \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake POST authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to "$END_USER_BASE_URL" \
+ --append
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X POST \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -33,7 +59,7 @@ create-authorization.sh \
--to "$END_USER_BASE_URL" \
--append
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
(
curl -k -w "%{http_code}\n" -o /dev/null -s \
diff --git a/http-tests/admin/acl/add-post-class-authorization.sh b/http-tests/admin/acl/add-post-class-authorization.sh
index 50f4f304e..f09d3102c 100755
--- a/http-tests/admin/acl/add-post-class-authorization.sh
+++ b/http-tests/admin/acl/add-post-class-authorization.sh
@@ -22,7 +22,33 @@ EOF
) \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake POST class authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to-all-in "https://w3id.org/atomgraph/linkeddatahub/default#Root" \
+ --append
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X POST \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -33,7 +59,7 @@ create-authorization.sh \
--to-all-in "https://w3id.org/atomgraph/linkeddatahub/default#Root" \
--append
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
(
curl -k -w "%{http_code}\n" -o /dev/null -s \
diff --git a/http-tests/admin/acl/add-post-group-authorization.sh b/http-tests/admin/acl/add-post-group-authorization.sh
index e3e05ad9e..a6d048f7c 100755
--- a/http-tests/admin/acl/add-post-group-authorization.sh
+++ b/http-tests/admin/acl/add-post-group-authorization.sh
@@ -38,7 +38,33 @@ group=$(curl -s -k \
| cat \
| sed -rn "s/<${group_doc//\//\\/}> <(.*)> \./\1/p")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake POST group authorization from test.localhost" \
+ --agent-group "$group" \
+ --to "$END_USER_BASE_URL" \
+ --append
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X POST \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -49,7 +75,7 @@ create-authorization.sh \
--to "$END_USER_BASE_URL" \
--append
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
(
curl -k -w "%{http_code}\n" -o /dev/null -s \
diff --git a/http-tests/admin/acl/add-put-authorization.sh b/http-tests/admin/acl/add-put-authorization.sh
index 60340973a..f35bbc4b4 100755
--- a/http-tests/admin/acl/add-put-authorization.sh
+++ b/http-tests/admin/acl/add-put-authorization.sh
@@ -22,7 +22,33 @@ EOF
) \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake PUT authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to "$END_USER_BASE_URL" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X PUT \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -41,7 +67,7 @@ root_ntriples=$(get.sh \
--accept 'application/n-triples' \
"$END_USER_BASE_URL")
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
# request body with document instance is required
echo "$root_ntriples" \
diff --git a/http-tests/admin/acl/add-put-class-authorization.sh b/http-tests/admin/acl/add-put-class-authorization.sh
index 86a782969..a23c4cb75 100755
--- a/http-tests/admin/acl/add-put-class-authorization.sh
+++ b/http-tests/admin/acl/add-put-class-authorization.sh
@@ -22,7 +22,33 @@ EOF
) \
| grep -q "$STATUS_FORBIDDEN"
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake PUT class authorization from test.localhost" \
+ --agent "$AGENT_URI" \
+ --to-all-in "https://w3id.org/atomgraph/linkeddatahub/default#Root" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X PUT \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -41,7 +67,7 @@ root_ntriples=$(get.sh \
--accept 'application/n-triples' \
"$END_USER_BASE_URL")
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
# request body with document instance is required
echo "$root_ntriples" \
diff --git a/http-tests/admin/acl/add-put-group-authorization.sh b/http-tests/admin/acl/add-put-group-authorization.sh
index d97d92caf..1d5ccf9d3 100755
--- a/http-tests/admin/acl/add-put-group-authorization.sh
+++ b/http-tests/admin/acl/add-put-group-authorization.sh
@@ -38,7 +38,33 @@ group=$(curl -s -k \
| cat \
| sed -rn "s/<${group_doc//\//\\/}> <(.*)> \./\1/p")
-# create authorization
+# create fake test.localhost authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake PUT group authorization from test.localhost" \
+ --agent-group "$group" \
+ --to "$END_USER_BASE_URL" \
+ --write
+
+# access is still denied (fake authorization filtered out)
+
+(
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \
+ -H "Content-Type: application/n-triples" \
+ -H "Accept: application/n-triples" \
+ -X PUT \
+ --data-binary @- \
+ "$END_USER_BASE_URL" < .
+EOF
+) \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -57,7 +83,7 @@ root_ntriples=$(get.sh \
--accept 'application/n-triples' \
"$END_USER_BASE_URL")
-# access is allowed after authorization is created
+# access is allowed after real authorization is created
# request body with document instance is required
echo "$root_ntriples" \
diff --git a/http-tests/admin/acl/make-public.sh b/http-tests/admin/acl/make-public.sh
index b9398e029..a3900b107 100755
--- a/http-tests/admin/acl/make-public.sh
+++ b/http-tests/admin/acl/make-public.sh
@@ -14,7 +14,25 @@ curl -k -w "%{http_code}\n" -o /dev/null -v \
"$END_USER_BASE_URL" \
| grep -q "$STATUS_FORBIDDEN"
-# create public authorization
+# create fake test.localhost public authorization (should be filtered out)
+
+create-authorization.sh \
+ -f "$OWNER_CERT_FILE" \
+ -p "$OWNER_CERT_PWD" \
+ -b "https://admin.test.localhost:4443/" \
+ --label "Fake public access from test.localhost" \
+ --agent-class 'http://xmlns.com/foaf/0.1/Agent' \
+ --to "$END_USER_BASE_URL" \
+ --read
+
+# public access is still forbidden (fake authorization filtered out)
+
+curl -k -w "%{http_code}\n" -o /dev/null -v \
+ -H "Accept: application/n-triples" \
+ "$END_USER_BASE_URL" \
+| grep -q "$STATUS_FORBIDDEN"
+
+# create real localhost public authorization
create-authorization.sh \
-f "$OWNER_CERT_FILE" \
@@ -25,7 +43,7 @@ create-authorization.sh \
--to "$END_USER_BASE_URL" \
--read
-# public access is allowed after authorization is created
+# public access is allowed after real authorization is created
curl -k -w "%{http_code}\n" -o /dev/null -f -v \
-H "Accept: application/n-triples" \
diff --git a/http-tests/admin/model/add-property-constraint.sh b/http-tests/admin/model/add-property-constraint.sh
index 7571a14fb..c5e179841 100755
--- a/http-tests/admin/model/add-property-constraint.sh
+++ b/http-tests/admin/model/add-property-constraint.sh
@@ -58,12 +58,16 @@ turtle+="_:item a <${namespace_doc}#ConstrainedClass> .\n"
turtle+="_:item dct:title \"Failure\" .\n"
turtle+="_:item sioc:has_container <${END_USER_BASE_URL}> .\n"
+# Using direct curl instead of put.sh because put.sh uses -f flag which exits on 4xx errors,
+# but this test expects to capture the 422 response
response=$(echo -e "$turtle" \
| turtle --base="$END_USER_BASE_URL" \
-| put.sh \
- -f "$OWNER_CERT_FILE" \
- -p "$OWNER_CERT_PWD" \
- --content-type "text/turtle" \
+| curl -k -v \
+ -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \
+ -d @- \
+ -X PUT \
+ -H "Content-Type: text/turtle" \
+ -H "Accept: text/turtle" \
"$END_USER_BASE_URL" \
2>&1) # redirect output from stderr to stdout
diff --git a/http-tests/config/system.trig b/http-tests/config/system.trig
new file mode 100644
index 000000000..47ed5c76a
--- /dev/null
+++ b/http-tests/config/system.trig
@@ -0,0 +1,92 @@
+@prefix lapp: <https://w3id.org/atomgraph/linkeddatahub/apps#> .
+@prefix ldh:  <https://w3id.org/atomgraph/linkeddatahub#> .
+@prefix a:    <https://w3id.org/atomgraph/core#> .
+@prefix ac:   <https://w3id.org/atomgraph/client#> .
+@prefix rdf:  <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
+@prefix xsd:  <http://www.w3.org/2001/XMLSchema#> .
+@prefix ldt:  <https://www.w3.org/ns/ldt#> .
+@prefix sd:   <http://www.w3.org/ns/sparql-service-description#> .
+@prefix dct:  <http://purl.org/dc/terms/> .
+@prefix foaf: <http://xmlns.com/foaf/0.1/> .
+
+### do not use blank nodes to identify resources! ###
+### urn: URI scheme is used because applications/services are not accessible in their own dataspace (under $BASE_URI) ###
+
+# root admin
+
+ a lapp:Application, lapp:AdminApplication ;
+ dct:title "LinkedDataHub admin" ;
+ # ldt:base ;
+ ldh:origin ;
+ ldt:ontology ;
+ ldt:service ;
+ ac:stylesheet ;
+ lapp:endUserApplication ;
+ lapp:frontendProxy .
+
+ a sd:Service ;
+ dct:title "LinkedDataHub admin service" ;
+ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ;
+ sd:endpoint ;
+ a:graphStore ;
+ a:quadStore ;
+ lapp:backendProxy .
+
+# root end-user
+
+ a lapp:Application, lapp:EndUserApplication ;
+ dct:title "LinkedDataHub" ;
+ # ldt:base ;
+ ldh:origin ;
+ ldt:ontology ;
+ ldt:service ;
+ lapp:adminApplication ;
+ lapp:frontendProxy ;
+ lapp:public true .
+
+ a sd:Service ;
+ dct:title "LinkedDataHub service" ;
+ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ;
+ sd:endpoint ;
+ a:graphStore ;
+ a:quadStore ;
+ lapp:backendProxy .
+
+# test admin
+
+ a lapp:Application, lapp:AdminApplication ;
+ dct:title "Test admin" ;
+ ldh:origin ;
+ ldt:ontology ;
+ ldt:service ;
+ ac:stylesheet ;
+ lapp:endUserApplication ;
+ lapp:frontendProxy .
+
+ a sd:Service ;
+ dct:title "Test admin service" ;
+ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ;
+ sd:endpoint ;
+ a:graphStore ;
+ a:quadStore ;
+ lapp:backendProxy .
+
+# test end-user
+
+ a lapp:Application, lapp:EndUserApplication ;
+ dct:title "Test" ;
+ ldh:origin ;
+ ldt:ontology ;
+ ldt:service ;
+ lapp:adminApplication ;
+ lapp:frontendProxy ;
+ lapp:public true .
+
+ a sd:Service ;
+ dct:title "Test service" ;
+ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ;
+ sd:endpoint ;
+ a:graphStore ;
+ a:quadStore ;
+ lapp:backendProxy .
diff --git a/http-tests/dataspaces/non-existent-dataspace.sh b/http-tests/dataspaces/non-existent-dataspace.sh
new file mode 100755
index 000000000..ae443f7d3
--- /dev/null
+++ b/http-tests/dataspaces/non-existent-dataspace.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Test that accessing a non-configured dataspace returns 404, not 500
+
+# Try to access admin on the non-existent non-existing.localhost dataspace
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -H "Accept: application/n-triples" \
+ "https://admin.non-existing.localhost:4443/" \
+| grep -q "$STATUS_NOT_FOUND"
+
+# Try to access end-user on the non-existent non-existing.localhost dataspace
+curl -k -w "%{http_code}\n" -o /dev/null -s \
+ -H "Accept: application/n-triples" \
+ "https://non-existing.localhost:4443/" \
+| grep -q "$STATUS_NOT_FOUND"
diff --git a/http-tests/docker-compose.http-tests.yml b/http-tests/docker-compose.http-tests.yml
index 0d8e28d3a..158c2e29c 100644
--- a/http-tests/docker-compose.http-tests.yml
+++ b/http-tests/docker-compose.http-tests.yml
@@ -11,8 +11,10 @@ services:
environment:
- JPDA_ADDRESS=*:8000 # debugger host - performance hit when enabled
volumes:
- - ./http-tests/datasets/owner:/var/linkeddatahub/datasets/owner
- - ./http-tests/datasets/secretary:/var/linkeddatahub/datasets/secretary
+ - ./http-tests/config/system.trig:/var/linkeddatahub/datasets/system.trig:ro
+ - ./http-tests/root-owner.trig.template:/var/linkeddatahub/root-owner.trig.template:ro
+ - ./datasets/owner:/var/linkeddatahub/datasets/owner
+ - ./datasets/secretary:/var/linkeddatahub/datasets/secretary
- ./http-tests/uploads:/var/www/linkeddatahub/uploads
- ./http-tests/ssl/server:/var/linkeddatahub/ssl/server
- ./http-tests/ssl/owner:/var/linkeddatahub/ssl/owner
diff --git a/http-tests/root-owner.trig.template b/http-tests/root-owner.trig.template
new file mode 100644
index 000000000..1b78aad03
--- /dev/null
+++ b/http-tests/root-owner.trig.template
@@ -0,0 +1,88 @@
+@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
+@prefix xsd:  <http://www.w3.org/2001/XMLSchema#> .
+@prefix acl:  <http://www.w3.org/ns/auth/acl#> .
+@prefix cert: <http://www.w3.org/ns/auth/cert#> .
+@prefix dh:   <https://www.w3.org/ns/ldt/document-hierarchy#> .
+@prefix sioc: <http://rdfs.org/sioc/ns#> .
+@prefix foaf: <http://xmlns.com/foaf/0.1/> .
+@prefix dct:  <http://purl.org/dc/terms/> .
+
+# AGENT
+
+<${OWNER_DOC_URI}>
+{
+
+ <${OWNER_DOC_URI}> a dh:Item ;
+ foaf:primaryTopic <${OWNER_URI}> ;
+ sioc:has_container ;
+ dct:title "${OWNER_COMMON_NAME}" .
+
+ <${OWNER_URI}> a foaf:Agent ;
+ foaf:name "${OWNER_COMMON_NAME}" ;
+ foaf:mbox ;
+ cert:key .
+
+ # secretary delegates the owner agent
+
+ <${SECRETARY_URI}> acl:delegates <${OWNER_URI}> .
+
+}
+
+# PUBLIC KEY
+
+
+{
+
+ a dh:Item ;
+ foaf:primaryTopic ;
+ sioc:has_container ;
+ dct:title "${OWNER_COMMON_NAME}" .
+
+ a cert:PublicKey ;
+ rdfs:label "${OWNER_COMMON_NAME}" ;
+ cert:modulus "${OWNER_PUBLIC_KEY_MODULUS}"^^xsd:hexBinary;
+ cert:exponent 65537 .
+
+}
+
+# AUTHORIZATIONS
+
+# root owner is a member of the owners group
+
+
+{
+
+ foaf:member <${OWNER_URI}> .
+}
+
+ # TO-DO: use $OWNER_AUTH_UUID
+{
+
+ a dh:Item ;
+ foaf:primaryTopic ;
+ sioc:has_container ;
+ dct:title "Public owner's WebID" .
+
+ a acl:Authorization ;
+ acl:accessTo <${OWNER_DOC_URI}>, ;
+ acl:mode acl:Read ;
+ acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
+
+}
+
+# test.localhost owner authorization (for HTTP tests)
+
+
+{
+
+ a dh:Item ;
+ foaf:primaryTopic ;
+ dct:title "Test owner Control authorization" .
+
+ a acl:Authorization ;
+ acl:accessTo ;
+ acl:accessToClass ;
+ acl:mode acl:Control ;
+ acl:agent <${OWNER_URI}> .
+
+}
diff --git a/http-tests/run.sh b/http-tests/run.sh
index 6360c315d..49e8ca193 100755
--- a/http-tests/run.sh
+++ b/http-tests/run.sh
@@ -110,7 +110,7 @@ export HTTP_TEST_ROOT="$PWD"
export END_USER_ENDPOINT_URL="http://localhost:3031/ds/"
export ADMIN_ENDPOINT_URL="http://localhost:3030/ds/"
export END_USER_BASE_URL="https://localhost:4443/"
-export ADMIN_BASE_URL="https://localhost:4443/admin/"
+export ADMIN_BASE_URL="https://admin.localhost:4443/"
export END_USER_VARNISH_SERVICE="varnish-end-user"
export ADMIN_VARNISH_SERVICE="varnish-admin"
export FRONTEND_VARNISH_SERVICE="varnish-frontend"
@@ -142,6 +142,8 @@ run_tests $(find ./add/ -type f -name '*.sh')
(( error_count += $? ))
run_tests $(find ./admin/ -type f -name '*.sh')
(( error_count += $? ))
+run_tests $(find ./dataspaces/ -type f -name '*.sh')
+(( error_count += $? ))
run_tests $(find ./access/ -type f -name '*.sh')
(( error_count += $? ))
run_tests $(find ./imports/ -type f -name '*.sh')
diff --git a/platform/datasets/admin.trig b/platform/datasets/admin.trig
index 720af6949..fc59849c2 100644
--- a/platform/datasets/admin.trig
+++ b/platform/datasets/admin.trig
@@ -391,7 +391,6 @@ WHERE
### ADMIN-SPECIFIC
-@prefix ns: <../ns#> .
@prefix lacl: .
@prefix adm: .
@prefix rdfs: .
@@ -619,62 +618,6 @@ WHERE
# AUTHORIZATIONS
-# public
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Public access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Public access" ;
- rdfs:comment "Allows non-authenticated access" ;
- acl:mode acl:Read ;
- acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
-
-}
-
-# public namespace
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Public namespace access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Public namespace access" ;
- rdfs:comment "Allows non-authenticated access" ;
- acl:accessTo <../ns> ; # end-user ontologies are public
- acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
- acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
-
-}
-
-# SPARQL endpoint
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "SPARQL endpoint access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "SPARQL endpoint access" ;
- rdfs:comment "Allows only authenticated access" ;
- acl:accessTo <../sparql> ;
- acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
- acl:agentClass acl:AuthenticatedAgent .
-
-}
-
# access endpoint
@@ -804,52 +747,12 @@ WHERE
rdfs:label "Full control" ;
rdfs:comment "Allows full read/write access to all application resources" ;
acl:accessToClass dh:Item, dh:Container, def:Root ;
- acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns>, , ;
+ acl:accessTo , ;
acl:mode acl:Read, acl:Append, acl:Write, acl:Control ;
acl:agentGroup .
}
-# write/append access
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Write/append access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Write/append access" ;
- rdfs:comment "Allows write access to all documents and containers" ;
- acl:accessToClass dh:Item, dh:Container, def:Root ;
- acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns> ;
- acl:mode acl:Write, acl:Append ;
- acl:agentGroup , .
-
-}
-
-# read access
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Read access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Read access" ;
- rdfs:comment "Allows read access to all resources" ;
- acl:accessToClass dh:Item, dh:Container, def:Root, ;
- acl:accessTo <../sparql> ;
- acl:mode acl:Read ;
- acl:agentGroup , , .
-
-}
-
# GROUPS
# owners
@@ -917,24 +820,3 @@ WHERE
rdf:value ldh:ChildrenView .
}
-
-# ONTOLOGIES
-
-# namespace
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Namespace" ;
- foaf:primaryTopic ns: .
-
- ns: a owl:Ontology ;
- rdfs:label "Namespace" ;
- rdfs:comment "Namespace of the application" ;
- foaf:isPrimaryTopicOf <../ns> ;
- owl:imports ;
- owl:versionInfo "1.0-SNAPSHOT" .
-
-}
\ No newline at end of file
diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh
index d6a50090d..b00a5075f 100755
--- a/platform/entrypoint.sh
+++ b/platform/entrypoint.sh
@@ -13,40 +13,62 @@ fi
# change server configuration
if [ -n "$HTTP" ]; then
- HTTP_PARAM="--stringparam http $HTTP "
+ HTTP_PARAM="--stringparam Connector.http $HTTP "
fi
if [ -n "$HTTP_SCHEME" ]; then
- HTTP_SCHEME_PARAM="--stringparam http.scheme $HTTP_SCHEME "
+ HTTP_SCHEME_PARAM="--stringparam Connector.scheme.http $HTTP_SCHEME "
fi
if [ -n "$HTTP_PORT" ]; then
- HTTP_PORT_PARAM="--stringparam http.port $HTTP_PORT "
+ HTTP_PORT_PARAM="--stringparam Connector.port.http $HTTP_PORT "
fi
if [ -n "$HTTP_PROXY_NAME" ]; then
lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
- HTTP_PROXY_NAME_PARAM="--stringparam http.proxyName $lc_proxy_name "
+ HTTP_PROXY_NAME_PARAM="--stringparam Connector.proxyName.http $lc_proxy_name "
fi
if [ -n "$HTTP_PROXY_PORT" ]; then
- HTTP_PROXY_PORT_PARAM="--stringparam http.proxyPort $HTTP_PROXY_PORT "
+ HTTP_PROXY_PORT_PARAM="--stringparam Connector.proxyPort.http $HTTP_PROXY_PORT "
fi
if [ -n "$HTTP_REDIRECT_PORT" ]; then
- HTTP_REDIRECT_PORT_PARAM="--stringparam http.redirectPort $HTTP_REDIRECT_PORT "
+ HTTP_REDIRECT_PORT_PARAM="--stringparam Connector.redirectPort.http $HTTP_REDIRECT_PORT "
fi
if [ -n "$HTTP_CONNECTION_TIMEOUT" ]; then
- HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam http.connectionTimeout $HTTP_CONNECTION_TIMEOUT "
+ HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam Connector.connectionTimeout.http $HTTP_CONNECTION_TIMEOUT "
fi
if [ -n "$HTTP_COMPRESSION" ]; then
- HTTP_COMPRESSION_PARAM="--stringparam http.compression $HTTP_COMPRESSION "
+ HTTP_COMPRESSION_PARAM="--stringparam Connector.compression.http $HTTP_COMPRESSION "
fi
if [ -n "$HTTPS" ]; then
- HTTPS_PARAM="--stringparam https $HTTPS "
+ HTTPS_PARAM="--stringparam Connector.https $HTTPS "
+fi
+
+# RemoteIpValve configuration takes precedence over Connector proxy settings
+
+if [ -n "$REMOTE_IP_VALVE" ]; then
+ REMOTE_IP_VALVE_PARAM="--stringparam RemoteIpValve $REMOTE_IP_VALVE "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_PROTOCOL_HEADER" ]; then
+ REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM="--stringparam RemoteIpValve.protocolHeader $REMOTE_IP_VALVE_PROTOCOL_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_PORT_HEADER" ]; then
+ REMOTE_IP_VALVE_PORT_HEADER_PARAM="--stringparam RemoteIpValve.portHeader $REMOTE_IP_VALVE_PORT_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_REMOTE_IP_HEADER" ]; then
+ REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM="--stringparam RemoteIpValve.remoteIpHeader $REMOTE_IP_VALVE_REMOTE_IP_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_HOST_HEADER" ]; then
+ REMOTE_IP_VALVE_HOST_HEADER_PARAM="--stringparam RemoteIpValve.hostHeader $REMOTE_IP_VALVE_HOST_HEADER "
fi
transform="xsltproc \
@@ -60,6 +82,11 @@ transform="xsltproc \
$HTTP_CONNECTION_TIMEOUT_PARAM \
$HTTP_COMPRESSION_PARAM \
$HTTPS_PARAM \
+ $REMOTE_IP_VALVE_PARAM \
+ $REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM \
+ $REMOTE_IP_VALVE_PORT_HEADER_PARAM \
+ $REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM \
+ $REMOTE_IP_VALVE_HOST_HEADER_PARAM \
conf/letsencrypt-tomcat.xsl \
conf/server.xml"
@@ -184,25 +211,35 @@ if [ -z "$MAIL_USER" ]; then
exit 1
fi
-# construct base URI (ignore default HTTP and HTTPS ports)
+# construct base URI and origins (ignore default HTTP and HTTPS ports for URI, but always include port for origins)
if [ "$PROTOCOL" = "https" ]; then
if [ "$HTTPS_PROXY_PORT" = 443 ]; then
export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}"
else
export BASE_URI="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
fi
+ export ORIGIN="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}"
else
if [ "$HTTP_PROXY_PORT" = 80 ]; then
export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}"
else
export BASE_URI="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}"
fi
+ export ORIGIN="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}"
fi
BASE_URI=$(echo "$BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
+ADMIN_BASE_URI=$(echo "$ADMIN_BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
+ORIGIN=$(echo "$ORIGIN" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
printf "\n### Base URI: %s\n" "$BASE_URI"
+printf "\n### Admin Base URI: %s\n" "$ADMIN_BASE_URI"
+printf "\n### Origin: %s\n" "$ORIGIN"
# functions that wait for other services to start
@@ -308,7 +345,6 @@ generate_cert()
local keystore_password="${11}"
local cert_output="${12}"
local public_key_output="${13}"
- local private_key_output="${14}"
# Build the Distinguished Name (DN) string, only including components if they're non-empty
dname="CN=${common_name}"
@@ -358,11 +394,11 @@ get_modulus()
}
OWNER_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
-OWNER_URI="${OWNER_URI:-${BASE_URI}admin/acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external!
+OWNER_URI="${OWNER_URI:-${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external!
OWNER_COMMON_NAME="$OWNER_GIVEN_NAME $OWNER_FAMILY_NAME" # those are required
SECRETARY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
-SECRETARY_URI="${SECRETARY_URI:-${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external!
+SECRETARY_URI="${SECRETARY_URI:-${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external!
OWNER_DATASET_PATH="/var/linkeddatahub/datasets/owner/${OWNER_CERT_ALIAS}.trig"
@@ -385,19 +421,21 @@ if [ ! -f "$OWNER_PUBLIC_KEY" ]; then
"$OWNER_ORG_UNIT" "$OWNER_ORGANIZATION" \
"$OWNER_LOCALITY" "$OWNER_STATE_OR_PROVINCE" "$OWNER_COUNTRY_NAME" \
"$CERT_VALIDITY" "$OWNER_KEYSTORE" "$OWNER_CERT_PASSWORD" \
- "$OWNER_CERT" "$OWNER_PUBLIC_KEY" "$OWNER_PRIVATE_KEY"
+ "$OWNER_CERT" "$OWNER_PUBLIC_KEY"
# write owner's metadata to a file
mkdir -p "$(dirname "$OWNER_DATASET_PATH")"
- OWNER_DOC_URI="${BASE_URI}admin/acl/agents/${OWNER_UUID}/"
+ OWNER_DOC_URI="${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/"
OWNER_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
+ OWNER_KEY_DOC_URI="${ADMIN_BASE_URI}acl/public-keys/${OWNER_KEY_UUID}/"
+ OWNER_KEY_URI="${OWNER_KEY_DOC_URI}#this"
OWNER_PUBLIC_KEY_MODULUS=$(get_modulus "$OWNER_PUBLIC_KEY")
printf "\n### Root owner WebID public key modulus: %s\n" "$OWNER_PUBLIC_KEY_MODULUS"
- export OWNER_COMMON_NAME OWNER_URI OWNER_DOC_URI OWNER_PUBLIC_KEY_MODULUS OWNER_KEY_UUID SECRETARY_URI
+ export OWNER_COMMON_NAME OWNER_URI OWNER_DOC_URI OWNER_KEY_DOC_URI OWNER_KEY_URI OWNER_PUBLIC_KEY_MODULUS SECRETARY_URI
envsubst < root-owner.trig.template > "$OWNER_DATASET_PATH"
fi
@@ -422,29 +460,62 @@ if [ ! -f "$SECRETARY_PUBLIC_KEY" ]; then
"" "" \
"" "" "" \
"$CERT_VALIDITY" "$SECRETARY_KEYSTORE" "$SECRETARY_CERT_PASSWORD" \
- "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY" "$SECRETARY_PRIVATE_KEY"
+ "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY"
# write secretary's metadata to a file
mkdir -p "$(dirname "$SECRETARY_DATASET_PATH")"
- SECRETARY_DOC_URI="${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/"
+ SECRETARY_DOC_URI="${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/"
SECRETARY_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
+ SECRETARY_KEY_DOC_URI="${ADMIN_BASE_URI}acl/public-keys/${SECRETARY_KEY_UUID}/"
+ SECRETARY_KEY_URI="${SECRETARY_KEY_DOC_URI}#this"
SECRETARY_PUBLIC_KEY_MODULUS=$(get_modulus "$SECRETARY_PUBLIC_KEY")
printf "\n### Secretary WebID public key modulus: %s\n" "$SECRETARY_PUBLIC_KEY_MODULUS"
- export SECRETARY_URI SECRETARY_DOC_URI SECRETARY_PUBLIC_KEY_MODULUS SECRETARY_KEY_UUID
+ export SECRETARY_URI SECRETARY_DOC_URI SECRETARY_KEY_DOC_URI SECRETARY_KEY_URI SECRETARY_PUBLIC_KEY_MODULUS
envsubst < root-secretary.trig.template > "$SECRETARY_DATASET_PATH"
fi
-if [ -z "$LOAD_DATASETS" ]; then
- if [ ! -d /var/linkeddatahub/based-datasets ]; then
- LOAD_DATASETS=true
- else
- LOAD_DATASETS=false
- fi
-fi
+mkdir -p /var/linkeddatahub/based-datasets
+
+# If certs already exist, extract metadata from existing .trig files using SPARQL and create .nq files
+printf "\n### Reading owner metadata from existing file: %s\n" /var/linkeddatahub/based-datasets/root-owner.nq
+
+trig --base="$ADMIN_BASE_URI" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq
+
+owner_metadata=$(sparql --data=/var/linkeddatahub/based-datasets/root-owner.nq --query=select-agent-metadata.rq --results=XML)
+
+OWNER_URI=$(echo "$owner_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='agent']/srx:uri")
+OWNER_DOC_URI=$(echo "$owner_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='doc']/srx:uri")
+OWNER_KEY_URI=$(echo "$owner_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='key']/srx:uri")
+OWNER_KEY_DOC_URI=$(echo "$OWNER_KEY_URI" | sed 's|#this$||')
+OWNER_KEY_URI="${OWNER_KEY_DOC_URI}#this"
+
+printf "\n### Extracted OWNER_URI: %s\n" "$OWNER_URI"
+printf "\n### Extracted OWNER_DOC_URI: %s\n" "$OWNER_DOC_URI"
+printf "\n### Extracted OWNER_KEY_URI: %s\n" "$OWNER_KEY_URI"
+printf "\n### Extracted OWNER_KEY_DOC_URI: %s\n" "$OWNER_KEY_DOC_URI"
+
+printf "\n### Reading secretary metadata from existing file: %s\n" /var/linkeddatahub/based-datasets/root-secretary.nq
+
+trig --base="$ADMIN_BASE_URI" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq
+
+secretary_metadata=$(sparql --data=/var/linkeddatahub/based-datasets/root-secretary.nq --query=select-agent-metadata.rq --results=XML)
+
+SECRETARY_URI=$(echo "$secretary_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='agent']/srx:uri")
+SECRETARY_DOC_URI=$(echo "$secretary_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='doc']/srx:uri")
+SECRETARY_KEY_URI=$(echo "$secretary_metadata" | xmlstarlet sel -N srx="http://www.w3.org/2005/sparql-results#" -T -t -v "/srx:sparql/srx:results/srx:result/srx:binding[@name='key']/srx:uri")
+SECRETARY_KEY_DOC_URI=$(echo "$SECRETARY_KEY_URI" | sed 's|#this$||')
+SECRETARY_KEY_URI="${SECRETARY_KEY_DOC_URI}#this"
+
+printf "\n### Extracted SECRETARY_URI: %s\n" "$SECRETARY_URI"
+printf "\n### Extracted SECRETARY_DOC_URI: %s\n" "$SECRETARY_DOC_URI"
+printf "\n### Extracted SECRETARY_KEY_URI: %s\n" "$SECRETARY_KEY_URI"
+printf "\n### Extracted SECRETARY_KEY_DOC_URI: %s\n" "$SECRETARY_KEY_DOC_URI"
+
+# Note: LOAD_DATASETS check is now done per-app inside the loop
# base the $CONTEXT_DATASET
@@ -476,7 +547,7 @@ readarray apps < <(xmlstarlet sel -B \
-o "\"" \
-v "srx:binding[@name = 'endUserApp']" \
-o "\" \"" \
- -v "srx:binding[@name = 'endUserBase']" \
+ -v "srx:binding[@name = 'endUserOrigin']" \
-o "\" \"" \
-v "srx:binding[@name = 'endUserQuadStore']" \
-o "\" \"" \
@@ -490,7 +561,7 @@ readarray apps < <(xmlstarlet sel -B \
-o "\" \"" \
-v "srx:binding[@name = 'adminApp']" \
-o "\" \"" \
- -v "srx:binding[@name = 'adminBase']" \
+ -v "srx:binding[@name = 'adminOrigin']" \
-o "\" \"" \
-v "srx:binding[@name = 'adminQuadStore']" \
-o "\" \"" \
@@ -508,21 +579,21 @@ readarray apps < <(xmlstarlet sel -B \
for app in "${apps[@]}"; do
app_array=(${app})
end_user_app="${app_array[0]//\"/}"
- end_user_base_uri="${app_array[1]//\"/}"
+ end_user_origin="${app_array[1]//\"/}"
end_user_quad_store_url="${app_array[2]//\"/}"
end_user_endpoint_url="${app_array[3]//\"/}"
end_user_service_auth_user="${app_array[4]//\"/}"
end_user_service_auth_pwd="${app_array[5]//\"/}"
end_user_owner="${app_array[6]//\"/}"
admin_app="${app_array[7]//\"/}"
- admin_base_uri="${app_array[8]//\"/}"
+ admin_origin="${app_array[8]//\"/}"
admin_quad_store_url="${app_array[9]//\"/}"
admin_endpoint_url="${app_array[10]//\"/}"
admin_service_auth_user="${app_array[11]//\"/}"
admin_service_auth_pwd="${app_array[12]//\"/}"
admin_owner="${app_array[13]//\"/}"
- printf "\n### Processing dataspace. End-user app: %s Admin app: %s\n" "$end_user_app" "$admin_app"
+ printf "\n### Processing dataspace. End-user app: %s (origin: %s) Admin app: %s (origin: %s)\n" "$end_user_app" "$end_user_origin" "$admin_app" "$admin_origin"
if [ -z "$end_user_app" ]; then
printf "\nEnd-user app URI could not be extracted from %s. Exiting...\n" "$CONTEXT_DATASET"
@@ -536,8 +607,8 @@ for app in "${apps[@]}"; do
printf "\nAdmin app URI could not be extracted for the <%s> app. Exiting...\n" "$end_user_app"
exit 1
fi
- if [ -z "$admin_base_uri" ]; then
- printf "\nAdmin base URI extracted for the <%s> app. Exiting...\n" "$end_user_app"
+ if [ -z "$admin_origin" ]; then
+ printf "\nAdmin origin could not be extracted for the <%s> app. Exiting...\n" "$end_user_app"
exit 1
fi
if [ -z "$admin_quad_store_url" ]; then
@@ -545,13 +616,15 @@ for app in "${apps[@]}"; do
exit 1
fi
- # check if this app is the root app
- if [ "$end_user_base_uri" = "$BASE_URI" ]; then
+ # check if this app is the root app by comparing origins
+ if [ "$end_user_origin" = "$ORIGIN" ]; then
root_end_user_app="$end_user_app"
+ #root_end_user_origin="$end_user_origin"
root_end_user_quad_store_url="$end_user_quad_store_url"
root_end_user_service_auth_user="$end_user_service_auth_user"
root_end_user_service_auth_pwd="$end_user_service_auth_pwd"
root_admin_app="$admin_app"
+ #root_admin_origin="$admin_origin"
root_admin_quad_store_url="$admin_quad_store_url"
root_admin_service_auth_user="$admin_service_auth_user"
root_admin_service_auth_pwd="$admin_service_auth_pwd"
@@ -569,9 +642,23 @@ for app in "${apps[@]}"; do
printf "\n### Quad store URL of the root end-user service: %s\n" "$end_user_quad_store_url"
printf "\n### Quad store URL of the root admin service: %s\n" "$admin_quad_store_url"
- # load default admin/end-user datasets if we haven't yet created a folder with re-based versions of them (and then create it)
- if [ "$LOAD_DATASETS" = "true" ]; then
- mkdir -p /var/linkeddatahub/based-datasets
+ # Create app-specific subfolder based on end-user origin
+ app_folder=$(echo "$end_user_origin" | sed 's|https://||' | sed 's|http://||' | sed 's|[:/]|-|g')
+
+ # Determine whether to load datasets for this app
+ load_datasets_for_app="$LOAD_DATASETS"
+ if [ -z "$load_datasets_for_app" ]; then
+ if [ ! -d "/var/linkeddatahub/based-datasets/${app_folder}" ]; then
+ load_datasets_for_app=true
+ else
+ load_datasets_for_app=false
+ fi
+ fi
+
+ # Check if this specific app's datasets should be loaded
+ if [ "$load_datasets_for_app" = true ]; then
+ printf "\n### Loading datasets for app: %s\n" "$app_folder"
+ mkdir -p "/var/linkeddatahub/based-datasets/${app_folder}"
# create query file by injecting environmental variables into the template
@@ -580,7 +667,7 @@ for app in "${apps[@]}"; do
END_USER_DATASET=$(echo "$END_USER_DATASET_URL" | cut -c 8-) # strip leading file://
printf "\n### Reading end-user dataset from a local file: %s\n" "$END_USER_DATASET" ;;
- *)
+ *)
END_USER_DATASET=$(mktemp)
printf "\n### Downloading end-user dataset from a URL: %s\n" "$END_USER_DATASET_URL"
@@ -593,7 +680,7 @@ for app in "${apps[@]}"; do
ADMIN_DATASET=$(echo "$ADMIN_DATASET_URL" | cut -c 8-) # strip leading file://
printf "\n### Reading admin dataset from a local file: %s\n" "$ADMIN_DATASET" ;;
- *)
+ *)
ADMIN_DATASET=$(mktemp)
printf "\n### Downloading admin dataset from a URL: %s\n" "$ADMIN_DATASET_URL"
@@ -601,42 +688,83 @@ for app in "${apps[@]}"; do
curl "$ADMIN_DATASET_URL" > "$ADMIN_DATASET" ;;
esac
- trig --base="$end_user_base_uri" "$END_USER_DATASET" > /var/linkeddatahub/based-datasets/end-user.nq
+ trig --base="${end_user_origin}/" "$END_USER_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq"
printf "\n### Waiting for %s...\n" "$end_user_quad_store_url"
wait_for_url "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" "$TIMEOUT" "application/n-quads"
printf "\n### Loading end-user dataset into the triplestore...\n"
- append_quads "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" /var/linkeddatahub/based-datasets/end-user.nq "application/n-quads"
+ append_quads "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq" "application/n-quads"
- trig --base="$admin_base_uri" "$ADMIN_DATASET" > /var/linkeddatahub/based-datasets/admin.nq
+ trig --base="${admin_origin}/" "$ADMIN_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq"
printf "\n### Waiting for %s...\n" "$admin_quad_store_url"
wait_for_url "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "$TIMEOUT" "application/n-quads"
printf "\n### Loading admin dataset into the triplestore...\n"
- append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/admin.nq "application/n-quads"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq" "application/n-quads"
+
+ namespace_ontology_dataset_path="/var/linkeddatahub/datasets/${app_folder}/namespace-ontology.trig"
+ mkdir -p "$(dirname "$namespace_ontology_dataset_path")"
+ export end_user_origin admin_origin
+ envsubst < namespace-ontology.trig.template > "$namespace_ontology_dataset_path"
+
+ trig --base="${admin_origin}/" --output=nq "$namespace_ontology_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq"
+
+ printf "\n### Loading namespace ontology into the admin triplestore...\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq" "application/n-quads"
+
+ # Load full owner/secretary metadata (agent + key) only for root app
+ if [ "$end_user_origin" = "$ORIGIN" ]; then
+ printf "\n### Uploading the metadata of the owner agent...\n\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "application/n-quads"
+
+ printf "\n### Uploading the metadata of the secretary agent...\n\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "application/n-quads"
+ fi
+
+ # Load owner/secretary authorizations for this app (with app-specific UUIDs)
+ # Note: OWNER_URI and SECRETARY_URI reference the root admin URIs
+ owner_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/owner-authorization.trig"
+ mkdir -p "$(dirname "$owner_auth_dataset_path")"
+
+ OWNER_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]')
+ OWNER_AUTH_DOC_URI="${admin_origin}/acl/authorizations/${OWNER_AUTH_UUID}/"
+ OWNER_AUTH_URI="${OWNER_AUTH_DOC_URI}#auth"
+
+ export OWNER_URI OWNER_DOC_URI OWNER_KEY_DOC_URI OWNER_AUTH_DOC_URI OWNER_AUTH_URI
+ envsubst < root-owner-authorization.trig.template > "$owner_auth_dataset_path"
+
+ trig --base="${admin_origin}/" --output=nq "$owner_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq"
+
+ printf "\n### Uploading owner authorizations for this app...\n\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq" "application/n-quads"
+
+ secretary_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/secretary-authorization.trig"
+ mkdir -p "$(dirname "$secretary_auth_dataset_path")"
- trig --base="$admin_base_uri" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq
+ SECRETARY_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]')
+ SECRETARY_AUTH_DOC_URI="${admin_origin}/acl/authorizations/${SECRETARY_AUTH_UUID}/"
+ SECRETARY_AUTH_URI="${SECRETARY_AUTH_DOC_URI}#auth"
- printf "\n### Uploading the metadata of the owner agent...\n\n"
- append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "application/n-quads"
+ export SECRETARY_URI SECRETARY_DOC_URI SECRETARY_KEY_DOC_URI SECRETARY_AUTH_DOC_URI SECRETARY_AUTH_URI
+ envsubst < root-secretary-authorization.trig.template > "$secretary_auth_dataset_path"
- trig --base="$admin_base_uri" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq
+ trig --base="${admin_origin}/" --output=nq "$secretary_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq"
- printf "\n### Uploading the metadata of the secretary agent...\n\n"
- append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "application/n-quads"
+ printf "\n### Uploading secretary authorizations for this app...\n\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq" "application/n-quads"
fi
done
rm -f root_service_metadata.xml
if [ -z "$root_end_user_app" ]; then
- printf "\nRoot end-user app with base URI <%s> not found. Exiting...\n" "$BASE_URI"
+ printf "\nRoot end-user app with origin <%s> not found. Exiting...\n" "$ORIGIN"
exit 1
fi
if [ -z "$root_admin_app" ]; then
- printf "\nRoot admin app (for end-user app with base URI <%s>) not found. Exiting...\n" "$BASE_URI"
+ printf "\nRoot admin app (for end-user app with origin <%s>) not found. Exiting...\n" "$ORIGIN"
exit 1
fi
diff --git a/platform/namespace-ontology.trig.template b/platform/namespace-ontology.trig.template
new file mode 100644
index 000000000..c62472479
--- /dev/null
+++ b/platform/namespace-ontology.trig.template
@@ -0,0 +1,134 @@
+@prefix def: .
+@prefix ldh: .
+@prefix ac: .
+@prefix rdf: .
+@prefix xsd: .
+@prefix dh: .
+@prefix sd: .
+@prefix sp: .
+@prefix sioc: .
+@prefix foaf: .
+@prefix dct: .
+@prefix spin: .
+@prefix lacl: .
+@prefix adm: .
+@prefix rdfs: .
+@prefix owl: .
+@prefix acl: .
+@prefix cert: .
+@prefix spin: .
+
+# namespace ontology
+
+<${admin_origin}/ontologies/namespace/>
+{
+ <${admin_origin}/ontologies/namespace/> a dh:Item ;
+ sioc:has_container <${admin_origin}/ontologies/> ;
+ dct:title "Namespace" ;
+ foaf:primaryTopic <${end_user_origin}/ns#> .
+
+ <${end_user_origin}/ns#> a owl:Ontology ;
+ rdfs:label "Namespace" ;
+ rdfs:comment "Namespace of the application" ;
+ foaf:isPrimaryTopicOf <${end_user_origin}/ns> ;
+ owl:imports ;
+ owl:versionInfo "1.0-SNAPSHOT" .
+}
+
+# public namespace authorization
+
+<${admin_origin}/acl/authorizations/public-namespace/>
+{
+
+ <${admin_origin}/acl/authorizations/public-namespace/> a dh:Item ;
+ sioc:has_container <${admin_origin}/acl/authorizations/> ;
+ dct:title "Public namespace access" ;
+ foaf:primaryTopic <${admin_origin}/acl/authorizations/public-namespace/#this> .
+
+ <${admin_origin}/acl/authorizations/public-namespace/#this> a acl:Authorization ;
+ rdfs:label "Public namespace access" ;
+ rdfs:comment "Allows non-authenticated access" ;
+ acl:accessTo <${end_user_origin}/ns> ; # end-user ontologies are public
+ acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
+ acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
+
+}
+
+# SPARQL endpoint authorization
+
+<${admin_origin}/acl/authorizations/sparql-endpoint/>
+{
+
+ <${admin_origin}/acl/authorizations/sparql-endpoint/> a dh:Item ;
+ sioc:has_container <${admin_origin}/acl/authorizations/> ;
+ dct:title "SPARQL endpoint access" ;
+ foaf:primaryTopic <${admin_origin}/acl/authorizations/sparql-endpoint/#this> .
+
+ <${admin_origin}/acl/authorizations/sparql-endpoint/#this> a acl:Authorization ;
+ rdfs:label "SPARQL endpoint access" ;
+ rdfs:comment "Allows only authenticated access" ;
+ acl:accessTo <${end_user_origin}/sparql> ;
+ acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
+ acl:agentClass acl:AuthenticatedAgent .
+
+}
+
+# write/append authorization
+
+<${admin_origin}/acl/authorizations/write-append/>
+{
+
+ <${admin_origin}/acl/authorizations/write-append/> a dh:Item ;
+ sioc:has_container <${admin_origin}/acl/authorizations/> ;
+ dct:title "Write/append access" ;
+ foaf:primaryTopic <${admin_origin}/acl/authorizations/write-append/#this> .
+
+ <${admin_origin}/acl/authorizations/write-append/#this> a acl:Authorization ;
+ rdfs:label "Write/append access" ;
+ rdfs:comment "Allows write access to all documents and containers" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root ;
+ acl:accessTo <${end_user_origin}/sparql>, <${end_user_origin}/importer>, <${end_user_origin}/add>, <${end_user_origin}/generate>, <${end_user_origin}/ns> ;
+ acl:mode acl:Write, acl:Append ;
+ acl:agentGroup <${admin_origin}/acl/groups/owners/#this>, <${admin_origin}/acl/groups/writers/#this> .
+
+}
+
+# full access authorization
+
+<${admin_origin}/acl/authorizations/full-control/>
+{
+
+ <${admin_origin}/acl/authorizations/full-control/> a dh:Item ;
+ sioc:has_container <${admin_origin}/acl/authorizations/> ;
+ dct:title "Full control" ;
+ foaf:primaryTopic <${admin_origin}/acl/authorizations/full-control/#this> .
+
+ <${admin_origin}/acl/authorizations/full-control/#this> a acl:Authorization ;
+ rdfs:label "Full control" ;
+ rdfs:comment "Allows full read/write access to all application resources" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root ;
+ acl:accessTo <${end_user_origin}/sparql>, <${end_user_origin}/importer>, <${end_user_origin}/add>, <${end_user_origin}/generate>, <${end_user_origin}/ns> ;
+ acl:mode acl:Read, acl:Append, acl:Write, acl:Control ;
+ acl:agentGroup <${admin_origin}/acl/groups/owners/#this> .
+
+}
+
+# read access
+
+<${admin_origin}/acl/authorizations/read/>
+{
+
+ <${admin_origin}/acl/authorizations/read/> a dh:Item ;
+ sioc:has_container <${admin_origin}/acl/authorizations/> ;
+ dct:title "Read access" ;
+ foaf:primaryTopic <${admin_origin}/acl/authorizations/read/#this> .
+
+ <${admin_origin}/acl/authorizations/read/#this> a acl:Authorization ;
+ rdfs:label "Read access" ;
+ rdfs:comment "Allows read access to all resources" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root, ;
+ acl:accessTo <${end_user_origin}/sparql> ;
+ acl:mode acl:Read ;
+ acl:agentGroup <${admin_origin}/acl/groups/owners/#this>, <${admin_origin}/acl/groups/writers/#this>, <${admin_origin}/acl/groups/readers/#this> .
+
+}
diff --git a/platform/root-owner-authorization.trig.template b/platform/root-owner-authorization.trig.template
new file mode 100644
index 000000000..35357cd70
--- /dev/null
+++ b/platform/root-owner-authorization.trig.template
@@ -0,0 +1,33 @@
+@prefix rdfs: .
+@prefix xsd: .
+@prefix acl: .
+@prefix cert: .
+@prefix dh: .
+@prefix sioc: .
+@prefix foaf: .
+@prefix dct: .
+
+# AUTHORIZATIONS
+
+# root owner is a member of the owners group
+
+
+{
+
+ foaf:member <${OWNER_URI}> .
+}
+
+<${OWNER_AUTH_DOC_URI}>
+{
+
+ <${OWNER_AUTH_DOC_URI}> a dh:Item ;
+ foaf:primaryTopic <${OWNER_AUTH_URI}> ;
+ sioc:has_container ;
+ dct:title "Public owner's WebID" .
+
+ <${OWNER_AUTH_URI}> a acl:Authorization ;
+ acl:accessTo <${OWNER_DOC_URI}>, <${OWNER_KEY_DOC_URI}> ;
+ acl:mode acl:Read ;
+ acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
+
+}
diff --git a/platform/root-owner.trig.template b/platform/root-owner.trig.template
index 5a0196568..64567bd60 100644
--- a/platform/root-owner.trig.template
+++ b/platform/root-owner.trig.template
@@ -20,7 +20,7 @@
<${OWNER_URI}> a foaf:Agent ;
foaf:name "${OWNER_COMMON_NAME}" ;
foaf:mbox ;
- cert:key .
+ cert:key <${OWNER_KEY_URI}> .
# secretary delegates the owner agent
@@ -30,42 +30,17 @@
# PUBLIC KEY
-
+<${OWNER_KEY_DOC_URI}>
{
- a dh:Item ;
- foaf:primaryTopic ;
+ <${OWNER_KEY_DOC_URI}> a dh:Item ;
+ foaf:primaryTopic <${OWNER_KEY_URI}> ;
sioc:has_container ;
dct:title "${OWNER_COMMON_NAME}" .
- a cert:PublicKey ;
+ <${OWNER_KEY_URI}> a cert:PublicKey ;
rdfs:label "${OWNER_COMMON_NAME}" ;
cert:modulus "${OWNER_PUBLIC_KEY_MODULUS}"^^xsd:hexBinary;
cert:exponent 65537 .
-}
-
-# AUTHORIZATIONS
-
-# root owner is a member of the owners group
-
-
-{
-
- foaf:member <${OWNER_URI}> .
-}
-
- # TO-DO: use $OWNER_AUTH_UUID
-{
-
- a dh:Item ;
- foaf:primaryTopic ;
- sioc:has_container ;
- dct:title "Public owner's WebID" .
-
- a acl:Authorization ;
- acl:accessTo <${OWNER_DOC_URI}>, ;
- acl:mode acl:Read ;
- acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
-
}
\ No newline at end of file
diff --git a/platform/root-secretary-authorization.trig.template b/platform/root-secretary-authorization.trig.template
new file mode 100644
index 000000000..4bedeb5cb
--- /dev/null
+++ b/platform/root-secretary-authorization.trig.template
@@ -0,0 +1,34 @@
+@prefix rdfs: .
+@prefix xsd: .
+@prefix acl: .
+@prefix cert: .
+@prefix dh: .
+@prefix sioc: .
+@prefix foaf: .
+@prefix dct: .
+
+# AUTHORIZATION
+
+# secretary is a member of the writers group
+
+
+{
+
+ foaf:member <${SECRETARY_URI}> .
+
+}
+
+<${SECRETARY_AUTH_DOC_URI}>
+{
+
+ <${SECRETARY_AUTH_DOC_URI}> a dh:Item ;
+ foaf:primaryTopic <${SECRETARY_AUTH_URI}> ;
+ sioc:has_container ;
+ dct:title "Public secretary's WebID" .
+
+ <${SECRETARY_AUTH_URI}> a acl:Authorization ;
+ acl:accessTo <${SECRETARY_DOC_URI}>, <${SECRETARY_KEY_DOC_URI}> ;
+ acl:mode acl:Read ;
+ acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
+
+}
diff --git a/platform/root-secretary.trig.template b/platform/root-secretary.trig.template
index a6579251c..4aa9a333b 100644
--- a/platform/root-secretary.trig.template
+++ b/platform/root-secretary.trig.template
@@ -19,49 +19,23 @@
<${SECRETARY_URI}> a foaf:Agent ;
foaf:name "LinkedDataHub" ;
- cert:key .
+ cert:key <${SECRETARY_KEY_URI}> .
}
# PUBLIC KEY
-
+<${SECRETARY_KEY_DOC_URI}>
{
- a dh:Item ;
- foaf:primaryTopic ;
+ <${SECRETARY_KEY_DOC_URI}> a dh:Item ;
+ foaf:primaryTopic <${SECRETARY_KEY_URI}> ;
sioc:has_container ;
dct:title "LinkedDataHub" .
- a cert:PublicKey ;
+ <${SECRETARY_KEY_URI}> a cert:PublicKey ;
rdfs:label "LinkedDataHub" ;
cert:modulus "${SECRETARY_PUBLIC_KEY_MODULUS}"^^xsd:hexBinary;
cert:exponent 65537 .
-}
-
-# AUTHORIZATION
-
-# secretary is a member of the writers group
-
-
-{
-
- foaf:member <${SECRETARY_URI}> .
-
-}
-
- # TO-DO: use $SECRETARY_AUTH_UUID
-{
-
- a dh:Item ;
- foaf:primaryTopic ;
- sioc:has_container ;
- dct:title "Public secretary's WebID" .
-
- a acl:Authorization ;
- acl:accessTo <${SECRETARY_DOC_URI}>, ;
- acl:mode acl:Read ;
- acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
-
}
\ No newline at end of file
diff --git a/platform/select-agent-metadata.rq b/platform/select-agent-metadata.rq
new file mode 100644
index 000000000..bb01ebe55
--- /dev/null
+++ b/platform/select-agent-metadata.rq
@@ -0,0 +1,13 @@
+PREFIX foaf:
+PREFIX cert:
+SELECT ?agent ?doc ?key
+WHERE {
+GRAPH ?g1 {
+ ?agent a foaf:Agent .
+ ?agent cert:key ?key .
+}
+GRAPH ?g2 {
+ ?doc foaf:primaryTopic ?agent .
+}
+}
+LIMIT 1
diff --git a/platform/select-root-services.rq b/platform/select-root-services.rq
index 658fa4d61..2a307e4e1 100644
--- a/platform/select-root-services.rq
+++ b/platform/select-root-services.rq
@@ -2,15 +2,16 @@ PREFIX ldt:
PREFIX sd:
PREFIX a:
PREFIX lapp:
+PREFIX ldh:
PREFIX foaf:
-SELECT ?endUserApp ?endUserBase ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminBase ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker
+SELECT ?endUserApp ?endUserOrigin ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminOrigin ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker
{
- ?endUserApp ldt:base ?endUserBase ;
+ ?endUserApp ldh:origin ?endUserOrigin ;
ldt:service ?endUserService ;
lapp:adminApplication ?adminApp .
?adminApp ldt:service ?adminService ;
- ldt:base ?adminBase .
+ ldh:origin ?adminOrigin .
?endUserService a:quadStore ?endUserQuadStore ;
sd:endpoint ?endUserEndpoint .
?adminService a:quadStore ?adminQuadStore ;
diff --git a/pom.xml b/pom.xml
index 0f9b5d06e..b918c2e42 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
com.atomgraph
linkeddatahub
- 5.0.23
+ 5.0.24-SNAPSHOT
${packaging.type}
AtomGraph LinkedDataHub
@@ -46,7 +46,7 @@
https://github.com/AtomGraph/LinkedDataHub
scm:git:git://github.com/AtomGraph/LinkedDataHub.git
scm:git:git@github.com:AtomGraph/LinkedDataHub.git
- linkeddatahub-5.0.23
+ linkeddatahub-2.1.1
diff --git a/src/main/java/com/atomgraph/linkeddatahub/Application.java b/src/main/java/com/atomgraph/linkeddatahub/Application.java
index 49192395b..c554be6ac 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/Application.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/Application.java
@@ -123,7 +123,6 @@
import com.atomgraph.linkeddatahub.writer.factory.ModeFactory;
import com.atomgraph.linkeddatahub.writer.function.DecodeURI;
import com.atomgraph.server.mapper.NotAcceptableExceptionMapper;
-import com.atomgraph.server.vocabulary.LDT;
import com.atomgraph.server.mapper.OntologyExceptionMapper;
import com.atomgraph.server.mapper.jena.DatatypeFormatExceptionMapper;
import com.atomgraph.server.mapper.jena.QueryParseExceptionMapper;
@@ -664,7 +663,7 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType
if (proxyHostname != null)
{
- ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(baseURI, proxyScheme, proxyHostname, proxyPort); // proxyPort can be null
+ ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(proxyScheme, proxyHostname, proxyPort); // proxyPort can be null
client.register(rewriteFilter);
externalClient.register(rewriteFilter);
@@ -902,7 +901,16 @@ protected void configure()
@Override
protected void configure()
{
- bindFactory(ApplicationFactory.class).to(com.atomgraph.linkeddatahub.apps.model.Application.class).
+ bindFactory(ApplicationFactory.class).to(new TypeLiteral>() {}).
+ in(RequestScoped.class);
+ }
+ });
+ register(new AbstractBinder()
+ {
+ @Override
+ protected void configure()
+ {
+ bindFactory(com.atomgraph.linkeddatahub.server.factory.UnwrappedApplicationFactory.class).to(com.atomgraph.linkeddatahub.apps.model.Application.class).
in(RequestScoped.class);
}
});
@@ -1172,23 +1180,9 @@ public void handleAuthorizationCreated(AuthorizationCreated event) throws Messag
* @param absolutePath request URL without the query string
* @return app resource or null, if none matched
*/
- public Resource matchApp(Resource type, URI absolutePath)
- {
- return matchApp(getContextModel(), type, absolutePath); // make sure we return an immutable model
- }
-
- /**
- * Matches application by type and request URL in a given application model.
- * It finds the apps where request URL is relative to the app base URI, and returns the one with the longest match.
- *
- * @param appModel application model
- * @param type application type
- * @param absolutePath request URL without the query string
- * @return app resource or null, if none matched
- */
- public Resource matchApp(Model appModel, Resource type, URI absolutePath)
+ public Resource matchApp(URI absolutePath)
{
- return getLongestURIResource(getLengthMap(getRelativeBaseApps(appModel, type, absolutePath)));
+ return getAppByOrigin(getContextModel(), LAPP.Application, absolutePath); // make sure we return an immutable model
}
/**
@@ -1207,35 +1201,63 @@ public Resource getLongestURIResource(Map lengthMap)
}
/**
- * Builds a base URI to application resource map from the application model.
+ * Normalizes a URI origin by adding explicit default ports (80 for HTTP, 443 for HTTPS).
+ * An origin consists of scheme, hostname, and port.
+ * This allows comparing origins with implicit and explicit default ports.
+ *
+ * @param uri the URI to normalize
+ * @return normalized origin string in format "scheme://host:port"
+ * @see Origin - MDN Web Docs
+ */
+ public static String normalizeOrigin(URI uri)
+ {
+ if (uri == null) throw new IllegalArgumentException("URI cannot be null");
+
+ String scheme = uri.getScheme();
+ String host = uri.getHost();
+ int port = uri.getPort();
+
+ if (port == -1)
+ {
+ if ("https".equals(scheme)) port = 443;
+ else if ("http".equals(scheme)) port = 80;
+ }
+
+ return scheme + "://" + host + ":" + port;
+ }
+
+ /**
+ * Finds application by origin matching from the application model.
* Applications are filtered by type first.
- *
+ *
* @param model application model
* @param type application type
* @param absolutePath request URL (without the query string)
- * @return URI to app map
+ * @return app resource or null if no match found
*/
- public Map getRelativeBaseApps(Model model, Resource type, URI absolutePath)
+ public Resource getAppByOrigin(Model model, Resource type, URI absolutePath)
{
if (model == null) throw new IllegalArgumentException("Model cannot be null");
if (type == null) throw new IllegalArgumentException("Resource cannot be null");
if (absolutePath == null) throw new IllegalArgumentException("URI cannot be null");
- Map apps = new HashMap<>();
-
+ String requestOrigin = normalizeOrigin(absolutePath);
+
ResIterator it = model.listSubjectsWithProperty(RDF.type, type);
try
{
while (it.hasNext())
{
Resource app = it.next();
-
- if (!app.hasProperty(LDT.base))
- throw new InternalServerErrorException(new IllegalStateException("Application resource <" + app.getURI() + "> has no ldt:base value"));
-
- URI base = URI.create(app.getPropertyResourceValue(LDT.base).getURI());
- URI relative = base.relativize(absolutePath);
- if (!relative.isAbsolute()) apps.put(base, app);
+
+ // Use origin-based matching - return immediately on match since origins are unique
+ if (app.hasProperty(LDH.origin))
+ {
+ URI appOriginURI = URI.create(app.getPropertyResourceValue(LDH.origin).getURI());
+ String normalizedAppOrigin = normalizeOrigin(appOriginURI);
+
+ if (requestOrigin.equals(normalizedAppOrigin)) return app;
+ }
}
}
finally
@@ -1243,7 +1265,7 @@ public Map getRelativeBaseApps(Model model, Resource type, URI ab
it.close();
}
- return apps;
+ return null;
}
/**
diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
index eeb505f5d..dcb914c46 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
@@ -57,14 +57,28 @@ public interface Application extends Resource, com.atomgraph.core.model.Applicat
/**
* Returns the application's base URI.
- *
+ *
* @return URI of the base resource
*/
URI getBaseURI();
-
+
+ /**
+ * Returns the application's origin resource.
+ *
+ * @return origin resource
+ */
+ Resource getOrigin();
+
+ /**
+ * Returns the application's origin URI.
+ *
+ * @return URI of the origin resource
+ */
+ URI getOriginURI();
+
/**
* Returns applications service.
- *
+ *
* @return service resource
*/
@Override
diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
index 7c2bbfc66..649291121 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
@@ -21,6 +21,7 @@
import com.atomgraph.linkeddatahub.model.Service;
import com.atomgraph.linkeddatahub.vocabulary.FOAF;
import com.atomgraph.linkeddatahub.vocabulary.LAPP;
+import com.atomgraph.linkeddatahub.vocabulary.LDH;
import com.atomgraph.server.vocabulary.LDT;
import org.apache.jena.enhanced.EnhGraph;
import org.apache.jena.graph.Node;
@@ -55,14 +56,26 @@ public ApplicationImpl(Node n, EnhGraph g)
@Override
public Resource getBase()
{
- return getPropertyResourceValue(LDT.base);
+ return getModel().createResource(getOriginURI().resolve("/").toString());
}
@Override
public URI getBaseURI()
{
- if (getBase() != null) return URI.create(getBase().getURI());
-
+ return getOriginURI().resolve("/");
+ }
+
+ @Override
+ public Resource getOrigin()
+ {
+ return getPropertyResourceValue(LDH.origin);
+ }
+
+ @Override
+ public URI getOriginURI()
+ {
+ if (getOrigin() != null) return URI.create(getOrigin().getURI());
+
return null;
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
index db62d4dea..ec5fe4e97 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
@@ -20,12 +20,13 @@
import java.net.URI;
import jakarta.ws.rs.client.ClientRequestContext;
import jakarta.ws.rs.client.ClientRequestFilter;
+import jakarta.ws.rs.core.HttpHeaders;
import jakarta.ws.rs.core.UriBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Client request filter that rewrites the target URL using a proxy URL.
+ * Client request filter that rewrites the target localhost URLs to internal proxy URLs.
*
* @author {@literal Martynas Jusevičius }
*/
@@ -34,21 +35,18 @@ public class ClientUriRewriteFilter implements ClientRequestFilter
private static final Logger log = LoggerFactory.getLogger(ClientUriRewriteFilter.class);
- private final URI baseURI;
private final String scheme, hostname;
private final Integer port;
/**
* Constructs filter from URI components.
*
- * @param baseURI base URI
* @param scheme new scheme
* @param hostname new hostname
* @param port new port number
*/
- public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integer port)
+ public ClientUriRewriteFilter(String scheme, String hostname, Integer port)
{
- this.baseURI = baseURI;
this.scheme = scheme;
this.hostname = hostname;
this.port = port;
@@ -57,7 +55,12 @@ public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integ
@Override
public void filter(ClientRequestContext cr) throws IOException
{
- if (getBaseURI().relativize(cr.getUri()).isAbsolute()) return; // don't rewrite URIs that are not relative to the base URI (e.g. SPARQL Protocol URLs)
+ if (!cr.getUri().getHost().equals("localhost") && !cr.getUri().getHost().endsWith(".localhost")) return;
+
+ // Preserve original host for nginx routing
+ String originalHost = cr.getUri().getHost();
+ if (cr.getUri().getPort() != -1) originalHost += ":" + cr.getUri().getPort();
+ cr.getHeaders().putSingle(HttpHeaders.HOST, originalHost);
String newScheme = cr.getUri().getScheme();
if (getScheme() != null) newScheme = getScheme();
@@ -68,17 +71,7 @@ public void filter(ClientRequestContext cr) throws IOException
if (log.isDebugEnabled()) log.debug("Rewriting client request URI from '{}' to '{}'", cr.getUri(), newUri);
cr.setUri(newUri);
}
-
- /**
- * Base URI of the application
- *
- * @return base URI
- */
- public URI getBaseURI()
- {
- return baseURI;
- }
-
+
/**
* Scheme component of the new (rewritten) URI.
*
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
index 1efa29e00..7f86014e9 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
@@ -76,7 +76,7 @@ public class Namespace extends com.atomgraph.core.model.impl.SPARQLEndpointImpl
/**
* Constructs endpoint from the in-memory ontology model.
- *
+ *
* @param request current request
* @param uriInfo current request's URI info
* @param application current end-user application
@@ -86,7 +86,7 @@ public class Namespace extends com.atomgraph.core.model.impl.SPARQLEndpointImpl
* @param system system application
*/
@Inject
- public Namespace(@Context Request request, @Context UriInfo uriInfo,
+ public Namespace(@Context Request request, @Context UriInfo uriInfo,
Application application, Optional ontology, MediaTypes mediaTypes,
@Context SecurityContext securityContext, com.atomgraph.linkeddatahub.Application system)
{
@@ -98,6 +98,22 @@ public Namespace(@Context Request request, @Context UriInfo uriInfo,
this.system = system;
}
+ /**
+ * If SPARQL query is provided, returns its result over the in-memory namespace ontology graph.
+ * If query is not provided
+ *
+ * - returns constructed instance if forClass URL param value (ontology class URI) is provided
+ * - otherwise, returns the namespace ontology graph (which is standalone, i.e. not the full ontology imports closure)
+ *
+ *
+ * @param query SPARQL query string (optional)
+ * @param defaultGraphUris default graph URI (ignored)
+ * @param namedGraphUris named graph URIs (ignored)
+ *
+ * {@link com.atomgraph.linkeddatahub.server.model.impl.Dispatcher#getNamespace()}
+ *
+ * @return response
+ */
@Override
@GET
public Response get(@QueryParam(QUERY) Query query,
@@ -122,11 +138,11 @@ public Response get(@QueryParam(QUERY) Query query,
if (getApplication().canAs(EndUserApplication.class))
{
- String ontologyURI = getURI().toString() + "#"; // TO-DO: hard-coding "#" is not great. Replace with RDF property lookup.
+ // the application ontology MUST use a URI! This is the URI this ontology endpoint is deployed on by the Dispatcher class
+ String ontologyURI = getApplication().getOntology().getURI();
if (log.isDebugEnabled()) log.debug("Returning namespace ontology from OntDocumentManager: {}", ontologyURI);
// not returning the injected in-memory ontology because it has inferences applied to it
- OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class),
- getSystem().getOntModelSpec(), getSystem().getOntologyQuery(), getSystem().getClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem().getOntModelSpec(), getSystem().getOntologyQuery());
return getResponseBuilder(modelGetter.getModel(ontologyURI)).build();
}
else throw new BadRequestException("SPARQL query string not provided");
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
index f72a85376..a72180fc4 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
@@ -107,12 +107,7 @@ public Response get(@QueryParam(QUERY) Query unused,
@QueryParam(DEFAULT_GRAPH_URI) List defaultGraphUris, @QueryParam(NAMED_GRAPH_URI) List namedGraphUris)
{
final Agent agent = getAgentContext().map(AgentContext::getAgent).orElse(null);
-// final Agent agent = ModelFactory.createDefaultModel().
-// createResource(getUriInfo().getQueryParameters().getFirst("agent")).
-// addProperty(RDF.type, FOAF.Agent).
-// as(Agent.class);
-
- //final ParameterizedSparqlString pss = getApplication().canAs(EndUserApplication.class) ? getACLQuery() : getOwnerACLQuery();
+
try
{
if (!getUriInfo().getQueryParameters().containsKey(SPIN.THIS_VAR_NAME)) throw new BadRequestException("?this query param is not provided");
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
index afe779b8e..fa539312a 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
@@ -101,8 +101,7 @@ public Response post(@FormParam("uri") String ontologyURI, @HeaderParam("Referer
// !!! we need to reload the ontology model before returning a response, to make sure the next request already gets the new version !!!
// same logic as in OntologyFilter. TO-DO: encapsulate?
- OntologyModelGetter modelGetter = new OntologyModelGetter(app,
- ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(app, ontModelSpec, getSystem().getOntologyQuery());
ontModelSpec.setImportModelGetter(modelGetter);
if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", ontologyURI);
Model baseModel = modelGetter.getModel(ontologyURI);
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/factory/ApplicationFactory.java b/src/main/java/com/atomgraph/linkeddatahub/server/factory/ApplicationFactory.java
index cc9dd4fea..3f112a08e 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/factory/ApplicationFactory.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/factory/ApplicationFactory.java
@@ -20,6 +20,7 @@
import jakarta.ws.rs.container.ContainerRequestContext;
import jakarta.ws.rs.core.Context;
import jakarta.ws.rs.ext.Provider;
+import java.util.Optional;
import org.glassfish.hk2.api.Factory;
import org.glassfish.hk2.api.ServiceLocator;
import org.slf4j.Logger;
@@ -32,32 +33,32 @@
* @see com.atomgraph.linkeddatahub.server.model.impl.Dispatcher
*/
@Provider
-public class ApplicationFactory implements Factory<com.atomgraph.linkeddatahub.apps.model.Application>
+public class ApplicationFactory implements Factory<Optional<com.atomgraph.linkeddatahub.apps.model.Application>>
{
private static final Logger log = LoggerFactory.getLogger(ApplicationFactory.class);
-
+
@Context private ServiceLocator serviceLocator;
-
+
@Override
- public com.atomgraph.linkeddatahub.apps.model.Application provide()
+ public Optional<com.atomgraph.linkeddatahub.apps.model.Application> provide()
{
return getApplication();
}
@Override
- public void dispose(com.atomgraph.linkeddatahub.apps.model.Application t)
+ public void dispose(Optional<com.atomgraph.linkeddatahub.apps.model.Application> t)
{
}
/**
* Retrieves application from the request context.
- *
- * @return application resource
+ *
+ * @return optional application resource
*/
- public com.atomgraph.linkeddatahub.apps.model.Application getApplication()
+ public Optional<com.atomgraph.linkeddatahub.apps.model.Application> getApplication()
{
- return (com.atomgraph.linkeddatahub.apps.model.Application)getContainerRequestContext().getProperty(LAPP.Application.getURI());
+ return (Optional<com.atomgraph.linkeddatahub.apps.model.Application>)getContainerRequestContext().getProperty(LAPP.Application.getURI());
}
/**
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/factory/ServiceFactory.java b/src/main/java/com/atomgraph/linkeddatahub/server/factory/ServiceFactory.java
index 52da8c31b..d3e510bb6 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/factory/ServiceFactory.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/factory/ServiceFactory.java
@@ -54,13 +54,16 @@ public void dispose(Optional t)
/**
* Retrieves (optional) service from container request context.
- *
+ *
* @return optional service
*/
public Optional getService()
{
- Application app = (Application)getContainerRequestContext().getProperty(LAPP.Application.getURI());
- Service service = app.getService();
+ Optional<Application> appOpt = (Optional<Application>)getContainerRequestContext().getProperty(LAPP.Application.getURI());
+
+ if (!appOpt.isPresent()) return Optional.empty();
+
+ Service service = appOpt.get().getService();
return Optional.of(service);
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/factory/UnwrappedApplicationFactory.java b/src/main/java/com/atomgraph/linkeddatahub/server/factory/UnwrappedApplicationFactory.java
new file mode 100644
index 000000000..179550fc7
--- /dev/null
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/factory/UnwrappedApplicationFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2025 Martynas Jusevičius
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.atomgraph.linkeddatahub.server.factory;
+
+import com.atomgraph.linkeddatahub.apps.model.Application;
+import jakarta.inject.Inject;
+import jakarta.ws.rs.ext.Provider;
+import java.util.Optional;
+import org.glassfish.hk2.api.Factory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * JAX-RS factory that unwraps Optional<Application> for direct injection.
+ * This allows resource constructors to inject Application directly while
+ * filters and providers can inject Optional<Application>.
+ *
+ * @author Martynas Jusevičius {@literal <martynas@atomgraph.com>}
+ * @see ApplicationFactory
+ */
+@Provider
+public class UnwrappedApplicationFactory implements Factory<Application>
+{
+
+ private static final Logger log = LoggerFactory.getLogger(UnwrappedApplicationFactory.class);
+
+ @Inject jakarta.inject.Provider<Optional<Application>> optionalApp;
+
+ @Override
+ public Application provide()
+ {
+ Optional<Application> appOpt = optionalApp.get();
+
+ if (!appOpt.isPresent())
+ {
+ if (log.isErrorEnabled()) log.error("Application not present when unwrapping in UnwrappedApplicationFactory");
+ return null; // This should only happen if ApplicationFilter threw NotFoundException
+ }
+
+ return appOpt.get();
+ }
+
+ @Override
+ public void dispose(Application t)
+ {
+ }
+
+}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
index 8bd3f2737..358e1491d 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
@@ -27,6 +27,7 @@
import jakarta.annotation.Priority;
import jakarta.inject.Inject;
import jakarta.ws.rs.BadRequestException;
+import jakarta.ws.rs.NotFoundException;
import jakarta.ws.rs.container.ContainerRequestContext;
import jakarta.ws.rs.container.ContainerRequestFilter;
import jakarta.ws.rs.container.PreMatching;
@@ -58,9 +59,23 @@ public class ApplicationFilter implements ContainerRequestFilter
@Override
public void filter(ContainerRequestContext request) throws IOException
{
+ // used by ModeFactory and ModelXSLTWriterBase - set early so it's available even if app matching fails
+ if (request.getUriInfo().getQueryParameters().containsKey(AC.mode.getLocalName()))
+ {
+ List<String> modeUris = request.getUriInfo().getQueryParameters().get(AC.mode.getLocalName());
+ List<Mode> modes = modeUris.stream().map(Mode::new).collect(Collectors.toList());
+ request.setProperty(AC.mode.getURI(), modes);
+ }
+ else request.setProperty(AC.mode.getURI(), Collections.emptyList());
+
// there always have to be an app
- Resource appResource = getSystem().matchApp(LAPP.Application, request.getUriInfo().getAbsolutePath());
- if (appResource == null) throw new IllegalStateException("Request URI '" + request.getUriInfo().getAbsolutePath() + "' has not matched any lapp:Application");
+ Resource appResource = getSystem().matchApp(request.getUriInfo().getAbsolutePath());
+ if (appResource == null)
+ {
+ // Set empty Optional so response filters can safely check
+ request.setProperty(LAPP.Application.getURI(), Optional.empty());
+ throw new NotFoundException("Request URI '" + request.getUriInfo().getAbsolutePath() + "' has not matched any lapp:Application");
+ }
// instead of InfModel, do faster explicit checks for subclasses and add rdf:type
if (!appResource.canAs(com.atomgraph.linkeddatahub.apps.model.Application.class) &&
@@ -69,7 +84,7 @@ public void filter(ContainerRequestContext request) throws IOException
throw new IllegalStateException("Resource <" + appResource + "> cannot be cast to lapp:Application");
com.atomgraph.linkeddatahub.apps.model.Application app = appResource.as(com.atomgraph.linkeddatahub.apps.model.Application.class);
- request.setProperty(LAPP.Application.getURI(), app); // wrap into a helper class so it doesn't interfere with injection of Application
+ request.setProperty(LAPP.Application.getURI(), Optional.of(app)); // wrap in Optional so response filters can handle missing applications
// use the ?uri URL parameter to override the effective request URI if its URI value is relative to the app's base URI
final URI requestURI;
@@ -107,15 +122,6 @@ public void filter(ContainerRequestContext request) throws IOException
if (request.getUriInfo().getQueryParameters().containsKey(AC.accept.getLocalName()))
request.getHeaders().putSingle(HttpHeaders.ACCEPT, request.getUriInfo().getQueryParameters().getFirst(AC.accept.getLocalName()));
- // used by ModeFactory and ModelXSLTWriterBase
- if (request.getUriInfo().getQueryParameters().containsKey(AC.mode.getLocalName()))
- {
- List<String> modeUris = request.getUriInfo().getQueryParameters().get(AC.mode.getLocalName());
- List<Mode> modes = modeUris.stream().map(Mode::new).collect(Collectors.toList());
- request.setProperty(AC.mode.getURI(), modes);
- }
- else request.setProperty(AC.mode.getURI(), Collections.emptyList());
-
// TO-DO: move Dataset logic to a separate ContainerRequestFilter?
Resource datasetResource = getSystem().matchDataset(LAPP.Dataset, request.getUriInfo().getAbsolutePath());
if (datasetResource != null)
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java
index eeac51513..a556d2a07 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java
@@ -54,7 +54,7 @@ public abstract class AuthenticationFilter implements ContainerRequestFilter
public static final String ON_BEHALF_OF = "On-Behalf-Of";
@Inject com.atomgraph.linkeddatahub.Application system;
- @Inject jakarta.inject.Provider<Application> app;
+ @Inject jakarta.inject.Provider<Optional<Application>> app;
@Inject jakarta.inject.Provider<Optional<Dataset>> dataset;
/**
@@ -111,14 +111,14 @@ public void filter(ContainerRequestContext request) throws IOException
/**
* Returns the SPARQL service for agent data.
- *
+ *
* @return service resource
*/
protected Service getAgentService()
{
- return getApplication().canAs(EndUserApplication.class) ?
- getApplication().as(EndUserApplication.class).getAdminApplication().getService() :
- getApplication().getService();
+ return getApplication().get().canAs(EndUserApplication.class) ?
+ getApplication().get().as(EndUserApplication.class).getAdminApplication().getService() :
+ getApplication().get().getService();
}
/**
@@ -183,10 +183,10 @@ protected Resource getResourceByPropertyValue(Model model, Property property, RD
/**
* Returns currently matched application.
- *
- * @return application resource
+ *
+ * @return optional application resource
*/
- public Application getApplication()
+ public Optional<Application> getApplication()
{
return app.get();
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java
index 430887a45..2ddbda545 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java
@@ -83,7 +83,7 @@ public class AuthorizationFilter implements ContainerRequestFilter
);
@Inject com.atomgraph.linkeddatahub.Application system;
- @Inject jakarta.inject.Provider<com.atomgraph.linkeddatahub.apps.model.Application> app;
+ @Inject jakarta.inject.Provider<Optional<com.atomgraph.linkeddatahub.apps.model.Application>> app;
@Inject jakarta.inject.Provider<Optional<Dataset>> dataset;
private ParameterizedSparqlString documentTypeQuery, documentOwnerQuery, aclQuery, ownerAclQuery;
@@ -120,8 +120,8 @@ public void filter(ContainerRequestContext request) throws IOException
if (log.isWarnEnabled()) log.warn("Skipping authentication/authorization, request method not recognized: {}", request.getMethod());
return;
}
-
- if (getApplication().isReadAllowed())
+
+ if (getApplication().isPresent() && getApplication().get().isReadAllowed())
{
if (request.getMethod().equals(HttpMethod.GET) || request.getMethod().equals(HttpMethod.HEAD)) // allow read-only methods
{
@@ -169,7 +169,7 @@ public Model authorize(ContainerRequestContext request, Resource agent, Resource
createOwnerAuthorization(authorizations, accessTo, agent);
}
- ResultSetRewindable docTypesResult = loadResultSet(getApplication().getService(), getDocumentTypeQuery(), thisQsm);
+ ResultSetRewindable docTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), thisQsm);
try
{
if (!docTypesResult.hasNext()) // if the document resource has no types, we assume the document does not exist
@@ -185,7 +185,7 @@ public Model authorize(ContainerRequestContext request, Resource agent, Resource
thisQsm.add(SPIN.THIS_VAR_NAME, accessTo);
docTypesResult.close();
- docTypesResult = loadResultSet(getApplication().getService(), getDocumentTypeQuery(), thisQsm);
+ docTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), thisQsm);
Set parentTypes = new HashSet<>();
docTypesResult.forEachRemaining(qs -> parentTypes.add(qs.getResource("Type")));
@@ -205,13 +205,13 @@ public Model authorize(ContainerRequestContext request, Resource agent, Resource
else return null;
}
- ParameterizedSparqlString pss = getApplication().canAs(EndUserApplication.class) ? getACLQuery() : getOwnerACLQuery();
+ ParameterizedSparqlString pss = getApplication().get().canAs(EndUserApplication.class) ? getACLQuery() : getOwnerACLQuery();
Query query = new SetResultSetValues().apply(pss.asQuery(), docTypesResult);
pss = new ParameterizedSparqlString(query.toString()); // make sure VALUES are now part of the query string
assert pss.toString().contains("VALUES");
// note we're not setting the $mode value on the ACL queries as we want to provide the AuthorizationContext with all of the agent's authorizations
- authorizations.add(loadModel(getAdminService(), pss, new AuthorizationParams(getApplication().getBase(), accessTo, agent).get()));
+ authorizations.add(loadModel(getAdminService(), pss, new AuthorizationParams(getAdminBase(), accessTo, agent).get()));
// access denied if the agent has no authorization to the requested document with the requested ACL mode
if (getAuthorizationByMode(authorizations, accessMode) == null) return null;
@@ -256,7 +256,7 @@ protected boolean isOwner(Resource accessTo, Resource agent)
ParameterizedSparqlString pss = getDocumentOwnerQuery();
pss.setParams(qsm);
- ResultSetRewindable ownerResult = loadResultSet(getApplication().getService(), getDocumentOwnerQuery(), qsm); // could use ASK query in principle
+ ResultSetRewindable ownerResult = loadResultSet(getApplication().get().getService(), getDocumentOwnerQuery(), qsm); // could use ASK query in principle
try
{
return ownerResult.hasNext() && agent.equals(ownerResult.next().getResource("owner"));
@@ -356,22 +356,35 @@ public Resource createOwnerAuthorization(Model model, Resource accessTo, Resourc
/**
* Returns the SPARQL service for agent data.
- *
+ *
* @return service resource
*/
protected Service getAdminService()
{
- return getApplication().canAs(EndUserApplication.class) ?
- getApplication().as(EndUserApplication.class).getAdminApplication().getService() :
- getApplication().getService();
+ return getApplication().get().canAs(EndUserApplication.class) ?
+ getApplication().get().as(EndUserApplication.class).getAdminApplication().getService() :
+ getApplication().get().getService();
+ }
+
+ /**
+ * Returns the base URI of the admin application.
+ * Authorization data is always stored in the admin application's dataspace.
+ *
+ * @return admin application's base URI
+ */
+ protected Resource getAdminBase()
+ {
+ return getApplication().get().canAs(EndUserApplication.class) ?
+ getApplication().get().as(EndUserApplication.class).getAdminApplication().getBase() :
+ getApplication().get().getBase();
}
/**
* Returns currently matched application.
- *
- * @return application resource
+ *
+ * @return optional application resource
*/
- public com.atomgraph.linkeddatahub.apps.model.Application getApplication()
+ public Optional<com.atomgraph.linkeddatahub.apps.model.Application> getApplication()
{
return app.get();
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
index cf002de2a..c996d5214 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
@@ -65,17 +65,19 @@ public void filter(ContainerRequestContext crc) throws IOException
/**
* Retrieves (optional) ontology from the container request context.
- *
+ *
* @param crc request context
* @return optional ontology
*/
public Optional getOntology(ContainerRequestContext crc)
{
- Application app = getApplication(crc);
-
+ Optional<Application> appOpt = getApplication(crc);
+
+ if (!appOpt.isPresent()) return Optional.empty();
+
try
{
- return Optional.ofNullable(getOntology(app));
+ return Optional.ofNullable(getOntology(appOpt.get()));
}
catch (OntologyException ex)
{
@@ -115,8 +117,7 @@ public Ontology getOntology(Application app, String uri)
// only create InfModel if ontology is not already cached
if (!ontModelSpec.getDocumentManager().getFileManager().hasCachedModel(uri))
{
- OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class),
- ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class), ontModelSpec, getSystem().getOntologyQuery());
ontModelSpec.setImportModelGetter(modelGetter);
if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", uri);
Model baseModel = modelGetter.getModel(uri);
@@ -185,13 +186,13 @@ public static void addDocumentModel(OntDocumentManager odm, String importURI)
/**
* Retrieves application from the container request context.
- *
+ *
* @param crc request context
- * @return application resource
+ * @return optional application resource
*/
- public Application getApplication(ContainerRequestContext crc)
+ public Optional<Application> getApplication(ContainerRequestContext crc)
{
- return ((Application)crc.getProperty(LAPP.Application.getURI()));
+ return ((Optional<Application>)crc.getProperty(LAPP.Application.getURI()));
}
/**
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/auth/IDTokenFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/auth/IDTokenFilter.java
index 078c6f7dd..045f18fb9 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/auth/IDTokenFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/auth/IDTokenFilter.java
@@ -107,12 +107,13 @@ public String getScheme()
public void filter(ContainerRequestContext request) throws IOException
{
if (request.getSecurityContext().getUserPrincipal() != null) return; // skip filter if agent already authorized
- if (!getApplication().canAs(EndUserApplication.class) && !getApplication().canAs(AdminApplication.class)) return; // skip "primitive" apps
+ if (!getApplication().isPresent()) return; // skip if no application matched
+ if (!getApplication().get().canAs(EndUserApplication.class) && !getApplication().get().canAs(AdminApplication.class)) return; // skip "primitive" apps
// do not verify token for auth endpoints as that will lead to redirect loops
if (request.getUriInfo().getAbsolutePath().equals(getLoginURL())) return;
if (request.getUriInfo().getAbsolutePath().equals(getAuthorizeGoogleURL())) return;
-
+
super.filter(request);
}
@@ -299,15 +300,15 @@ public URI getAuthorizeGoogleURL()
/**
* Returns the admin application of the current dataspace.
- *
+ *
* @return admin application resource
*/
public AdminApplication getAdminApplication()
{
- if (getApplication().canAs(EndUserApplication.class))
- return getApplication().as(EndUserApplication.class).getAdminApplication();
+ if (getApplication().get().canAs(EndUserApplication.class))
+ return getApplication().get().as(EndUserApplication.class).getAdminApplication();
else
- return getApplication().as(AdminApplication.class);
+ return getApplication().get().as(AdminApplication.class);
}
/**
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/BackendInvalidationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/BackendInvalidationFilter.java
index 4d95fda1e..702f924f8 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/BackendInvalidationFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/BackendInvalidationFilter.java
@@ -31,6 +31,7 @@
import jakarta.ws.rs.container.ContainerResponseFilter;
import jakarta.ws.rs.core.HttpHeaders;
import jakarta.ws.rs.core.Response;
+import java.util.Optional;
import org.apache.jena.rdf.model.Resource;
import org.glassfish.jersey.uri.UriComponent;
@@ -50,29 +51,32 @@ public class BackendInvalidationFilter implements ContainerResponseFilter
public static final String HEADER_NAME = "X-Escaped-Request-URI";
@Inject com.atomgraph.linkeddatahub.Application system;
- @Inject jakarta.inject.Provider<com.atomgraph.linkeddatahub.apps.model.Application> app;
-
+ @Inject jakarta.inject.Provider<Optional<com.atomgraph.linkeddatahub.apps.model.Application>> app;
+
@Override
public void filter(ContainerRequestContext req, ContainerResponseContext resp) throws IOException
{
+ // If no application was matched (e.g., non-existent dataspace), skip cache invalidation
+ if (!getApplication().isPresent()) return;
+
if (getAdminApplication().getService().getBackendProxy() == null) return;
if (req.getMethod().equals(HttpMethod.POST) && resp.getHeaderString(HttpHeaders.LOCATION) != null)
{
URI location = (URI)resp.getHeaders().get(HttpHeaders.LOCATION).get(0);
URI parentURI = location.resolve("..").normalize();
-
- ban(getApplication().getService().getBackendProxy(), location.toString()).close();
+
+ ban(getApplication().get().getService().getBackendProxy(), location.toString()).close();
// ban URI from authorization query results
ban(getAdminApplication().getService().getBackendProxy(), location.toString()).close();
// ban parent resource URI in order to avoid stale children data in containers
- ban(getApplication().getService().getBackendProxy(), parentURI.toString()).close();
- ban(getApplication().getService().getBackendProxy(), getApplication().getBaseURI().relativize(parentURI).toString()).close(); // URIs can be relative in queries
+ ban(getApplication().get().getService().getBackendProxy(), parentURI.toString()).close();
+ ban(getApplication().get().getService().getBackendProxy(), getApplication().get().getBaseURI().relativize(parentURI).toString()).close(); // URIs can be relative in queries
// ban all results of queries that use forClass type
if (req.getUriInfo().getQueryParameters().containsKey(AC.forClass.getLocalName()))
{
String forClass = req.getUriInfo().getQueryParameters().getFirst(AC.forClass.getLocalName());
- ban(getApplication().getService().getBackendProxy(), forClass).close();
+ ban(getApplication().get().getService().getBackendProxy(), forClass).close();
}
}
@@ -82,25 +86,23 @@ public void filter(ContainerRequestContext req, ContainerResponseContext resp) t
if (!getAdminApplication().getBaseURI().relativize(req.getUriInfo().getAbsolutePath()).isAbsolute()) // URL is relative to the admin app's base URI
{
ban(getAdminApplication().getService().getBackendProxy(), getAdminApplication().getBaseURI().toString()).close();
-// ban(getAdminApplication().getService().getBackendProxy(), FOAF.Agent.getURI()).close();
ban(getAdminApplication().getService().getBackendProxy(), "foaf:Agent").close(); // queries use prefixed names instead of absolute URIs
-// ban(getAdminApplication().getService().getBackendProxy(), ACL.AuthenticatedAgent.getURI()).close();
ban(getAdminApplication().getService().getBackendProxy(), "acl:AuthenticatedAgent").close();
}
if (req.getUriInfo().getAbsolutePath().toString().endsWith("/"))
{
- ban(getApplication().getService().getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()).close();
+ ban(getApplication().get().getService().getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()).close();
// ban URI from authorization query results
ban(getAdminApplication().getService().getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()).close();
-
+
// ban parent document URIs (those that have a trailing slash) in order to avoid stale children data in containers
- if (!req.getUriInfo().getAbsolutePath().equals(getApplication().getBaseURI()))
+ if (!req.getUriInfo().getAbsolutePath().equals(getApplication().get().getBaseURI()))
{
URI parentURI = req.getUriInfo().getAbsolutePath().resolve("..").normalize();
- ban(getApplication().getService().getBackendProxy(), parentURI.toString()).close();
- ban(getApplication().getService().getBackendProxy(), getApplication().getBaseURI().relativize(parentURI).toString()).close(); // URIs can be relative in queries
+ ban(getApplication().get().getService().getBackendProxy(), parentURI.toString()).close();
+ ban(getApplication().get().getService().getBackendProxy(), getApplication().get().getBaseURI().relativize(parentURI).toString()).close(); // URIs can be relative in queries
}
}
}
@@ -125,23 +127,24 @@ public Response ban(Resource proxy, String url)
/**
* Returns admin application of the current dataspace.
- *
+ *
* @return admin application resource
*/
public AdminApplication getAdminApplication()
{
- if (getApplication().canAs(EndUserApplication.class))
- return getApplication().as(EndUserApplication.class).getAdminApplication();
+ com.atomgraph.linkeddatahub.apps.model.Application application = getApplication().get();
+ if (application.canAs(EndUserApplication.class))
+ return application.as(EndUserApplication.class).getAdminApplication();
else
- return getApplication().as(AdminApplication.class);
+ return application.as(AdminApplication.class);
}
/**
* Returns the current application.
- *
- * @return application resource
+ *
+ * @return optional application resource
*/
- public com.atomgraph.linkeddatahub.apps.model.Application getApplication()
+ public Optional<com.atomgraph.linkeddatahub.apps.model.Application> getApplication()
{
return app.get();
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ResponseHeadersFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ResponseHeadersFilter.java
index 5d954a016..f444ae697 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ResponseHeadersFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ResponseHeadersFilter.java
@@ -56,7 +56,7 @@ public class ResponseHeadersFilter implements ContainerResponseFilter
private static final Logger log = LoggerFactory.getLogger(ResponseHeadersFilter.class);
private static final Pattern LINK_SPLITTER = Pattern.compile(",(?=\\s*<)"); // split on commas before next '<'
- @Inject jakarta.inject.Provider<Application> app;
+ @Inject jakarta.inject.Provider<Optional<Application>> app;
@Inject jakarta.inject.Provider<Optional<Dataset>> dataset;
@Inject jakarta.inject.Provider<Optional<AuthorizationContext>> authorizationContext;
@@ -65,31 +65,36 @@ public void filter(ContainerRequestContext request, ContainerResponseContext res
{
if (response.getStatusInfo().equals(Response.Status.NO_CONTENT))
response.getHeaders().remove(HttpHeaders.CONTENT_TYPE); // needs to be explicitly unset for some reason
-
+
if (request.getSecurityContext().getUserPrincipal() instanceof Agent)
{
Agent agent = ((Agent)(request.getSecurityContext().getUserPrincipal()));
response.getHeaders().add(HttpHeaders.LINK, new Link(URI.create(agent.getURI()), ACL.agent.getURI(), null));
}
-
+
if (getAuthorizationContext().isPresent())
getAuthorizationContext().get().getModeURIs().forEach(mode -> response.getHeaders().add(HttpHeaders.LINK, new Link(mode, ACL.mode.getURI(), null)));
-
+
List