Skip to content

Commit

Permalink
Merge pull request #2205 from bakdata/feature/api-keys
Browse files Browse the repository at this point in the history
api keys reopened
  • Loading branch information
thoniTUB authored Nov 16, 2021
2 parents 6ee0b73 + 9124d23 commit 9a70c31
Show file tree
Hide file tree
Showing 112 changed files with 2,165 additions and 836 deletions.
4 changes: 2 additions & 2 deletions autodoc/src/main/java/com/bakdata/conquery/Constants.java
Original file line number Diff line number Diff line change
Expand Up @@ -69,11 +69,11 @@
import com.bakdata.conquery.models.preproc.TableImportDescriptor;
import com.bakdata.conquery.models.preproc.TableInputDescriptor;
import com.bakdata.conquery.models.preproc.outputs.OutputDescription;
import com.bakdata.conquery.resources.api.APIResource;
import com.bakdata.conquery.resources.api.ConceptResource;
import com.bakdata.conquery.resources.api.ConceptsProcessor;
import com.bakdata.conquery.resources.api.ConfigResource;
import com.bakdata.conquery.resources.api.DatasetResource;
import com.bakdata.conquery.resources.api.DatasetsResource;
import com.bakdata.conquery.resources.api.FilterResource;
import com.bakdata.conquery.resources.api.QueryResource;
import com.bakdata.conquery.resources.api.ResultCsvResource;
Expand Down Expand Up @@ -138,7 +138,7 @@ public class Constants {
.build(),
Group.builder().name("REST API JSONs")
.resource(ConfigResource.class)
.resource(APIResource.class)
.resource(DatasetsResource.class)
.resource(DatasetResource.class)
.resource(ConceptResource.class)
.resource(FilterResource.class)
Expand Down
16 changes: 4 additions & 12 deletions backend/src/main/java/com/bakdata/conquery/apiv1/ApiV1.java
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
package com.bakdata.conquery.apiv1;

import java.time.Duration;

import com.bakdata.conquery.commands.ManagerNode;
import com.bakdata.conquery.io.cps.CPSType;
import com.bakdata.conquery.io.jackson.IdRefPathParamConverterProvider;
Expand All @@ -15,18 +13,12 @@
import com.bakdata.conquery.models.forms.frontendconfiguration.FormConfigProcessor;
import com.bakdata.conquery.models.worker.DatasetRegistry;
import com.bakdata.conquery.resources.ResourcesProvider;
import com.bakdata.conquery.resources.api.APIResource;
import com.bakdata.conquery.resources.api.ConceptResource;
import com.bakdata.conquery.resources.api.ConceptsProcessor;
import com.bakdata.conquery.resources.api.ConfigResource;
import com.bakdata.conquery.resources.api.DatasetResource;
import com.bakdata.conquery.resources.api.FilterResource;
import com.bakdata.conquery.resources.api.FormConfigResource;
import com.bakdata.conquery.resources.api.MeResource;
import com.bakdata.conquery.resources.api.QueryResource;
import com.bakdata.conquery.resources.api.*;
import io.dropwizard.jersey.setup.JerseyEnvironment;
import org.glassfish.hk2.utilities.binding.AbstractBinder;

import java.time.Duration;

@CPSType(base = ResourcesProvider.class, id = "ApiV1")
public class ApiV1 implements ResourcesProvider {

Expand Down Expand Up @@ -72,7 +64,7 @@ protected void configure() {
environment.register(new ConfigResource(manager.getConfig()));
environment.register(FormConfigResource.class);

environment.register(APIResource.class);
environment.register(DatasetsResource.class);
environment.register(ConceptResource.class);
environment.register(DatasetResource.class);
environment.register(FilterResource.class);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import java.util.function.Consumer;

import com.bakdata.conquery.io.storage.MetaStorage;
import com.bakdata.conquery.models.auth.entities.User;
import com.bakdata.conquery.models.auth.entities.Subject;
import com.bakdata.conquery.models.auth.permissions.Ability;
import com.bakdata.conquery.models.forms.configs.FormConfig;
import com.bakdata.conquery.util.QueryUtils;
Expand All @@ -23,15 +23,15 @@
public class FormConfigPatch extends MetaDataPatch {
private JsonNode values;

public void applyTo(FormConfig instance, MetaStorage storage, User user){
chain(QueryUtils.getNoOpEntryPoint(), storage, user, instance)
public void applyTo(FormConfig instance, MetaStorage storage, Subject subject){
chain(QueryUtils.getNoOpEntryPoint(), storage, subject, instance)
.accept(this);
}

protected Consumer<FormConfigPatch> chain(Consumer<FormConfigPatch> patchConsumerChain, MetaStorage storage, User user, FormConfig instance) {
patchConsumerChain = super.buildChain(patchConsumerChain, storage, user, instance);
protected Consumer<FormConfigPatch> chain(Consumer<FormConfigPatch> patchConsumerChain, MetaStorage storage, Subject subject, FormConfig instance) {
patchConsumerChain = super.buildChain(patchConsumerChain, storage, subject, instance);

if(getValues() != null && user.isPermitted(instance,Ability.MODIFY)) {
if(getValues() != null && subject.isPermitted(instance,Ability.MODIFY)) {
patchConsumerChain = patchConsumerChain.andThen(instance.valueSetter());
}
return patchConsumerChain;
Expand Down
20 changes: 10 additions & 10 deletions backend/src/main/java/com/bakdata/conquery/apiv1/MetaDataPatch.java
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

import com.bakdata.conquery.io.storage.MetaStorage;
import com.bakdata.conquery.models.auth.entities.Group;
import com.bakdata.conquery.models.auth.entities.User;
import com.bakdata.conquery.models.auth.entities.Subject;
import com.bakdata.conquery.models.auth.permissions.Ability;
import com.bakdata.conquery.models.auth.permissions.Authorized;
import com.bakdata.conquery.models.auth.permissions.ConqueryPermission;
Expand Down Expand Up @@ -37,33 +37,33 @@ public class MetaDataPatch implements Taggable, Labelable, ShareInformation {
private List<GroupId> groups;

/**
* Patches the given {@link Identifiable} by checking if the user holds the necessary Permission for that operation.
* Patches the given {@link Identifiable} by checking if the subject holds the necessary Permission for that operation.
* Hence the patched instance must have a corresponding {@link Permission}-type.
* Tagging and Labeling only alters the state of the instance while sharing also alters the state of {@link Group}s.
*
* @param instance The instance to patch
* @param storage Storage that persists the instance and also auth information.
* @param user The user on whose behalf the patch is executed
* @param subject The subject on whose behalf the patch is executed
* @param <INST> Type of the instance that is patched
*/
public <T extends MetaDataPatch, ID extends IId<?>, INST extends Taggable & Shareable & Labelable & Identifiable<? extends ID> & Owned & Authorized> void applyTo(INST instance, MetaStorage storage, User user) {
public <T extends MetaDataPatch, ID extends IId<?>, INST extends Taggable & Shareable & Labelable & Identifiable<? extends ID> & Owned & Authorized> void applyTo(INST instance, MetaStorage storage, Subject subject) {
buildChain(QueryUtils.getNoOpEntryPoint(),
storage,
user,
subject,
instance
)
.accept(this);
}

protected <T extends MetaDataPatch, ID extends IId<?>, INST extends Taggable & Shareable & Labelable & Identifiable<? extends ID> & Owned & Authorized> Consumer<T> buildChain(Consumer<T> patchConsumerChain, MetaStorage storage, User user, INST instance) {
if (getTags() != null && user.isPermitted(instance, Ability.TAG)) {
protected <T extends MetaDataPatch, ID extends IId<?>, INST extends Taggable & Shareable & Labelable & Identifiable<? extends ID> & Owned & Authorized> Consumer<T> buildChain(Consumer<T> patchConsumerChain, MetaStorage storage, Subject subject, INST instance) {
if (getTags() != null && subject.isPermitted(instance, Ability.TAG)) {
patchConsumerChain = patchConsumerChain.andThen(instance.tagger());
}
if (getLabel() != null && user.isPermitted(instance, Ability.LABEL)) {
if (getLabel() != null && subject.isPermitted(instance, Ability.LABEL)) {
patchConsumerChain = patchConsumerChain.andThen(instance.labeler());
}
if (getGroups() != null && user.isPermitted(instance, Ability.SHARE)) {
patchConsumerChain = patchConsumerChain.andThen(instance.sharer(storage, user));
if (getGroups() != null && subject.isPermitted(instance, Ability.SHARE)) {
patchConsumerChain = patchConsumerChain.andThen(instance.sharer(storage, subject));
}
return patchConsumerChain;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
import com.bakdata.conquery.models.auth.AuthorizationHelper;
import com.bakdata.conquery.models.auth.entities.Group;
import com.bakdata.conquery.models.auth.entities.User;
import com.bakdata.conquery.models.auth.entities.Subject;
import com.bakdata.conquery.models.auth.permissions.Ability;
import com.bakdata.conquery.models.config.ConqueryConfig;
import com.bakdata.conquery.models.datasets.Dataset;
Expand Down Expand Up @@ -68,9 +69,9 @@ public class QueryProcessor {
* Creates a query for all datasets, then submits it for execution on the
* intended dataset.
*/
public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, User user) {
public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Subject subject) {

log.info("Query posted on Dataset[{}] by User[{{}].", dataset.getId(), user.getId());
log.info("Query posted on Dataset[{}] by User[{{}].", dataset.getId(), subject.getId());

// This maps works as long as we have query visitors that are not configured in anyway.
// So adding a visitor twice would replace the previous one but both would have yielded the same result.
Expand All @@ -82,7 +83,7 @@ public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Us
visitors.putInstance(QueryUtils.OnlyReusingChecker.class, new QueryUtils.OnlyReusingChecker());
visitors.putInstance(NamespacedIdentifiableCollector.class, new NamespacedIdentifiableCollector());

final String primaryGroupName = AuthorizationHelper.getPrimaryGroup(user, storage).map(Group::getName).orElse("none");
final String primaryGroupName = AuthorizationHelper.getPrimaryGroup(subject, storage).map(Group::getName).orElse("none");

visitors.putInstance(ExecutionMetrics.QueryMetricsReporter.class, new ExecutionMetrics.QueryMetricsReporter(primaryGroupName));

Expand All @@ -97,7 +98,8 @@ public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Us
query.visit(consumerChain);


query.authorize(user, dataset, visitors);
query.authorize(subject, dataset, visitors);
// After all authorization checks we can now use the actual subject to invoke the query and do not to bubble down the Userish in methods

ExecutionMetrics.reportNamespacedIds(visitors.getInstance(NamespacedIdentifiableCollector.class).getIdentifiables(), primaryGroupName);

Expand All @@ -111,15 +113,15 @@ public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Us
{
final Optional<ManagedExecutionId> executionId = visitors.getInstance(QueryUtils.OnlyReusingChecker.class).getOnlyReused();

final Optional<ManagedExecution<?>> execution = executionId.map(id -> tryReuse(query, id, datasetRegistry, config, executionManager, user));
final Optional<ManagedExecution<?>> execution = executionId.map(id -> tryReuse(query, id, datasetRegistry, config, executionManager, subject.getUser()));

if (execution.isPresent()) {
return execution.get();
}
}

// Execute the query
return executionManager.runQuery(datasetRegistry, query, user, dataset, config);
return executionManager.runQuery(datasetRegistry, query, subject.getUser(), dataset, config);
}

/**
Expand Down Expand Up @@ -173,14 +175,14 @@ private ManagedExecution<?> tryReuse(QueryDescription query, ManagedExecutionId
}


public Stream<ExecutionStatus> getAllQueries(Dataset dataset, HttpServletRequest req, User user, boolean allProviders) {
public Stream<ExecutionStatus> getAllQueries(Dataset dataset, HttpServletRequest req, Subject subject, boolean allProviders) {
Collection<ManagedExecution<?>> allQueries = storage.getAllExecutions();

return getQueriesFiltered(dataset, RequestAwareUriBuilder.fromRequest(req), user, allQueries, allProviders);
return getQueriesFiltered(dataset, RequestAwareUriBuilder.fromRequest(req), subject, allQueries, allProviders);
}

public Stream<ExecutionStatus> getQueriesFiltered(Dataset datasetId, UriBuilder uriBuilder, User user, Collection<ManagedExecution<?>> allQueries, boolean allProviders) {
Map<DatasetId, Set<Ability>> datasetAbilities = buildDatasetAbilityMap(user, datasetRegistry);
public Stream<ExecutionStatus> getQueriesFiltered(Dataset datasetId, UriBuilder uriBuilder, Subject subject, Collection<ManagedExecution<?>> allQueries, boolean allProviders) {
Map<DatasetId, Set<Ability>> datasetAbilities = buildDatasetAbilityMap(subject, datasetRegistry);

return allQueries.stream()
// The following only checks the dataset, under which the query was submitted, but a query can target more that
Expand All @@ -189,13 +191,11 @@ public Stream<ExecutionStatus> getQueriesFiltered(Dataset datasetId, UriBuilder
// to exclude subtypes from somewhere else
.filter(QueryProcessor::canFrontendRender)
.filter(q -> q.getState().equals(ExecutionState.DONE) || q.getState().equals(ExecutionState.NEW))
// We decide, that if a user owns an execution it is permitted to see it, which saves us a lot of permissions
// However, for other executions we check because those are probably shared.
.filter(q -> user.isPermitted(q, Ability.READ))
.filter(q -> subject.isPermitted(q, Ability.READ))
.map(mq -> {
OverviewExecutionStatus status = mq.buildStatusOverview(
uriBuilder.clone(),
user
subject
);
if (mq.isReadyToDownload(datasetAbilities)) {
setDownloadUrls(status, config.getResultProviders(), mq, uriBuilder, allProviders);
Expand Down Expand Up @@ -262,14 +262,14 @@ public static boolean isFrontendStructure(CQElement root) {
/**
* Cancel a running query: Sending cancellation to shards, which will cause them to stop executing them, results are not sent back, and incoming results will be discarded.
*/
public void cancel(User user, Dataset dataset, ManagedExecution<?> query) {
public void cancel(Subject subject, Dataset dataset, ManagedExecution<?> query) {

// Does not make sense to cancel a query that isn't running.
if (!query.getState().equals(ExecutionState.RUNNING)) {
return;
}

log.info("{} cancelled Query[{}]", user, query.getId());
log.info("User[{}] cancelled Query[{}]", subject.getId(), query.getId());

final Namespace namespace = getDatasetRegistry().get(dataset.getId());

Expand All @@ -278,13 +278,14 @@ public void cancel(User user, Dataset dataset, ManagedExecution<?> query) {
namespace.sendToAll(new CancelQuery(query.getId()));
}

public void patchQuery(User user, ManagedExecution<?> execution, MetaDataPatch patch) {
public void patchQuery(Subject subject, ManagedExecution<?> execution, MetaDataPatch patch) {

log.info("Patching {} ({}) with patch: {}", execution.getClass().getSimpleName(), execution, patch);

patch.applyTo(execution, storage, user);
patch.applyTo(execution, storage, subject);
storage.updateExecution(execution);

// TODO remove this, since we don't translate anymore
// Patch this query in other datasets
List<Dataset> remainingDatasets = datasetRegistry.getAllDatasets();
remainingDatasets.remove(execution.getDataset());
Expand All @@ -296,13 +297,13 @@ public void patchQuery(User user, ManagedExecution<?> execution, MetaDataPatch p
continue;
}
log.trace("Patching {} ({}) with patch: {}", execution.getClass().getSimpleName(), id, patch);
patch.applyTo(otherExecution, storage, user);
patch.applyTo(otherExecution, storage, subject);
storage.updateExecution(execution);
}
}

public void reexecute(User user, ManagedExecution<?> query) {
log.info("User[{}] reexecuted Query[{}]", user, query);
public void reexecute(Subject subject, ManagedExecution<?> query) {
log.info("User[{}] reexecuted Query[{}]", subject.getId(), query);

if (!query.getState().equals(ExecutionState.RUNNING)) {
datasetRegistry.get(query.getDataset().getId())
Expand All @@ -312,8 +313,8 @@ public void reexecute(User user, ManagedExecution<?> query) {
}


public void deleteQuery(User user, ManagedExecution<?> execution) {
log.info("User[{}] deleted Query[{}]", user.getId(), execution.getId());
public void deleteQuery(Subject subject, ManagedExecution<?> execution) {
log.info("User[{}] deleted Query[{}]", subject.getId(), execution.getId());

datasetRegistry.get(execution.getDataset().getId())
.getExecutionManager() // Don't go over execution#getExecutionManager() as that's only set when query is initialized
Expand All @@ -322,12 +323,12 @@ public void deleteQuery(User user, ManagedExecution<?> execution) {
storage.removeExecution(execution.getId());
}

public FullExecutionStatus getQueryFullStatus(ManagedExecution<?> query, User user, UriBuilder url, Boolean allProviders) {
public FullExecutionStatus getQueryFullStatus(ManagedExecution<?> query, Subject subject, UriBuilder url, Boolean allProviders) {

query.initExecutable(datasetRegistry, config);

Map<DatasetId, Set<Ability>> datasetAbilities = buildDatasetAbilityMap(user, datasetRegistry);
final FullExecutionStatus status = query.buildStatusFull(storage, user, datasetRegistry, config);
Map<DatasetId, Set<Ability>> datasetAbilities = buildDatasetAbilityMap(subject, datasetRegistry);
final FullExecutionStatus status = query.buildStatusFull(storage, subject, datasetRegistry, config);

if (query.isReadyToDownload(datasetAbilities)) {
setDownloadUrls(status, config.getResultProviders(), query, url, allProviders);
Expand All @@ -336,9 +337,9 @@ public FullExecutionStatus getQueryFullStatus(ManagedExecution<?> query, User us
}

/**
* Try to resolve the external upload, if successful, create query for the user and return id and statistics for that.
* Try to resolve the external upload, if successful, create query for the subject and return id and statistics for that.
*/
public ExternalUploadResult uploadEntities(User user, Dataset dataset, ExternalUpload upload) {
public ExternalUploadResult uploadEntities(Subject subject, Dataset dataset, ExternalUpload upload) {

final CQExternal.ResolveStatistic statistic =
CQExternal.resolveEntities(upload.getValues(), upload.getFormat(),
Expand All @@ -359,7 +360,7 @@ public ExternalUploadResult uploadEntities(User user, Dataset dataset, ExternalU
// We only create the Query, really no need to execute it as it's only useful for composition.
final ManagedQuery execution =
((ManagedQuery) datasetRegistry.get(dataset.getId()).getExecutionManager()
.createExecution(datasetRegistry, query, user, dataset));
.createExecution(datasetRegistry, query, subject.getUser(), dataset));

execution.setLastResultCount((long) statistic.getResolved().size());

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
package com.bakdata.conquery.apiv1.auth;

import com.bakdata.conquery.io.storage.MetaStorage;
import com.bakdata.conquery.models.auth.apitoken.ApiTokenData;
import com.bakdata.conquery.models.auth.apitoken.ApiTokenRealm;
import com.bakdata.conquery.models.auth.apitoken.Scopes;
import com.bakdata.conquery.models.auth.entities.User;
import com.fasterxml.jackson.annotation.JsonIgnore;
import io.dropwizard.validation.ValidationMethod;
import lombok.*;
import lombok.experimental.Accessors;

import javax.validation.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import java.time.LocalDate;
import java.util.Set;
import java.util.UUID;

/**
 * Container class for how tokens are represented through the API.
 * This is necessary so that the actual token and its hash are not leaked (except for the token on creation).
 *
 * @implNote We don't use fluent accessors here, because that does not work well with Jackson
 */
@Data
@NoArgsConstructor(access = AccessLevel.PROTECTED)
public abstract class ApiTokenDataRepresentation {

	// Human-readable name of the token
	@NotNull
	protected String name;
	// Date up to which the token is considered valid (inclusive, see isNotExpired())
	@NotNull
	protected LocalDate expirationDate;
	// Scopes the token grants; validation requires at least one entry
	@NotEmpty
	protected Set<Scopes> scopes;

	/**
	 * Bean-validation check: rejects a representation whose expiration date lies in the past.
	 * A token expiring today is still accepted, i.e. the expiration date is inclusive.
	 * Marked {@link JsonIgnore} so the derived "notExpired" property is not serialized.
	 */
	@ValidationMethod
	@JsonIgnore
	boolean isNotExpired() {
		final LocalDate now = LocalDate.now();
		return expirationDate.isAfter(now) || expirationDate.isEqual(now);
	}

	/**
	 * Container that is sent with an incoming request to create a token.
	 */
	@Data
	@EqualsAndHashCode(callSuper = true)
	public static class Request extends ApiTokenDataRepresentation {
		// Intentionally left blank
	}

	/**
	 * Container that is sent with an outgoing response to give information about created tokens.
	 */
	@Data
	@EqualsAndHashCode(callSuper = true)
	public static class Response extends ApiTokenDataRepresentation {

		// Server-assigned identifier of the token
		private UUID id;
		// Date the token was last used, presumably null when never used — TODO confirm against the realm implementation
		private LocalDate lastUsed;
		// Date the token was created
		private LocalDate creationDate;
		// Whether the token has already expired at the time the response was built
		private boolean isExpired;

	}
}
Loading

0 comments on commit 9a70c31

Please sign in to comment.