diff --git a/authorizations/authorization-ranger/build.gradle.kts b/authorizations/authorization-ranger/build.gradle.kts
index 13f4cc7536..66341d9b00 100644
--- a/authorizations/authorization-ranger/build.gradle.kts
+++ b/authorizations/authorization-ranger/build.gradle.kts
@@ -125,6 +125,9 @@ tasks {
 }
 
 tasks.test {
+  doFirst {
+    environment("HADOOP_USER_NAME", "test")
+  }
   dependsOn(":catalogs:catalog-hive:jar", ":catalogs:catalog-hive:runtimeJars")
 
   val skipITs = project.hasProperty("skipITs")
diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
index 342038c2e3..12072d7f68 100644
--- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
+++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
@@ -243,6 +243,10 @@ public List translatePrivilege(SecurableObject securableO
         .forEach(
             gravitinoPrivilege -> {
               Set<RangerPrivilege> rangerPrivileges = new HashSet<>();
+              // Ignore unsupported privileges
+              if (!privilegesMappingRule().containsKey(gravitinoPrivilege.name())) {
+                return;
+              }
               privilegesMappingRule().get(gravitinoPrivilege.name()).stream()
                   .forEach(
                       rangerPrivilege ->
diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
index 5019d72ac2..c967fc008c 100644
--- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
+++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
@@ -1088,15 +1088,6 @@ public boolean validAuthorizationOperation(List securableObject
       securableObject.privileges().stream()
           .forEach(
               privilege -> {
-                if (!allowPrivilegesRule().contains(privilege.name())) {
-                  LOG.error(
-                      "Authorization to ignore privilege({}) on metadata object({})!",
-                      privilege.name(),
-                      securableObject.fullName());
-                  match.set(false);
-                  return;
-                }
-
                 if (!privilege.canBindTo(securableObject.type())) {
                   LOG.error(
                       "The privilege({}) is not supported for the metadata object({})!",
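Note: the two hunks above work together. translatePrivilege() in the Hive plugin now silently skips Gravitino privileges that have no Hive/Ranger mapping, while validAuthorizationOperation() no longer rejects them up front via allowPrivilegesRule(). A minimal, self-contained sketch of the guard's effect; the mapping contents below are illustrative stand-ins, not the plugin's real privilegesMappingRule():

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.ImmutableMap;
    import com.google.common.collect.ImmutableSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class PrivilegeFilterSketch {
      // Illustrative stand-in for privilegesMappingRule():
      // Gravitino privilege name -> Ranger privilege names.
      private static final Map<String, Set<String>> MAPPING_RULE =
          ImmutableMap.of(
              "SELECT_TABLE", ImmutableSet.of("select"),
              "MODIFY_TABLE", ImmutableSet.of("update", "alter"));

      public static void main(String[] args) {
        List<String> requested = ImmutableList.of("SELECT_TABLE", "READ_FILESET");
        for (String privilege : requested) {
          // Ignore unsupported privileges instead of failing the whole translation
          if (!MAPPING_RULE.containsKey(privilege)) {
            continue;
          }
          MAPPING_RULE
              .get(privilege)
              .forEach(ranger -> System.out.println(privilege + " -> " + ranger));
        }
      }
    }

This prints the Ranger mapping for SELECT_TABLE and silently drops READ_FILESET, mirroring how an unmapped fileset privilege no longer aborts translation.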
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
index 04956a5353..97f2b90352 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
@@ -332,11 +332,11 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema"),
             MetadataObject.Type.SCHEMA,
             Lists.newArrayList(Privileges.ReadFileset.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInSchema)));
 
     // Ignore the Topic operation
@@ -360,13 +360,13 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema.fileset"),
             MetadataObject.Type.FILESET,
             Lists.newArrayList(Privileges.WriteFileset.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInScheam)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFileset)));
 
     // Ignore the Fileset operation
@@ -390,14 +390,13 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema.table"),
             MetadataObject.Type.FILESET,
             Lists.newArrayList(Privileges.ReadFileset.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInSchema)));
-    Assertions.assertFalse(
-        rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFileset)));
+    Assertions.assertTrue(rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFileset)));
 
     // Ignore the Topic operation
     SecurableObject createTopicInMetalake =
@@ -415,11 +414,11 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema"),
             MetadataObject.Type.SCHEMA,
             Lists.newArrayList(Privileges.CreateTopic.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInSchema)));
 
     SecurableObject produceTopicInMetalake =
@@ -442,13 +441,13 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema.fileset"),
             MetadataObject.Type.TOPIC,
             Lists.newArrayList(Privileges.ProduceTopic.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInSchema)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopic)));
 
     SecurableObject consumeTopicInMetalake =
@@ -471,13 +470,13 @@ public void testValidAuthorizationOperation() {
             String.format("catalog.schema.topic"),
             MetadataObject.Type.TOPIC,
             Lists.newArrayList(Privileges.ConsumeTopic.allow()));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInMetalake)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInCatalog)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInSchema)));
-    Assertions.assertFalse(
+    Assertions.assertTrue(
         rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopic)));
   }
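Note: these assertions flip from assertFalse to assertTrue because the allowPrivilegesRule() check was removed from validAuthorizationOperation() in the hunk above; validation now only rejects a privilege that cannot bind to the target object type. A sketch of the resulting expectation, reusing the Gravitino API already imported by this test (the printed value is what the flipped assertions imply, not something verified here):

    import org.apache.gravitino.MetadataObject;
    import org.apache.gravitino.authorization.Privileges;

    public class CanBindToSketch {
      public static void main(String[] args) {
        // A fileset privilege may legally bind to a schema, so validation succeeds
        // even though the Hive plugin presumably drops the privilege at translation time.
        System.out.println(Privileges.ReadFileset.allow().canBindTo(MetadataObject.Type.SCHEMA));
      }
    }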
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
index 91597ce8e5..91d58bd187 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
@@ -33,7 +33,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.time.Instant;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -42,7 +41,9 @@
 import org.apache.gravitino.Catalog;
 import org.apache.gravitino.Configs;
 import org.apache.gravitino.MetadataObject;
-import org.apache.gravitino.Schema;
+import org.apache.gravitino.MetadataObjects;
+import org.apache.gravitino.NameIdentifier;
+import org.apache.gravitino.auth.AuthConstants;
 import org.apache.gravitino.auth.AuthenticatorType;
 import org.apache.gravitino.authorization.Privileges;
 import org.apache.gravitino.authorization.SecurableObject;
@@ -54,9 +55,9 @@
 import org.apache.gravitino.integration.test.container.RangerContainer;
 import org.apache.gravitino.integration.test.util.BaseIT;
 import org.apache.gravitino.integration.test.util.GravitinoITUtils;
-import org.apache.gravitino.meta.AuditInfo;
-import org.apache.gravitino.meta.RoleEntity;
-import org.apache.gravitino.meta.UserEntity;
+import org.apache.kyuubi.plugin.spark.authz.AccessControlException;
+import org.apache.spark.SparkUnsupportedOperationException;
+import org.apache.spark.sql.AnalysisException;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
@@ -73,11 +74,11 @@ public class RangerHiveE2EIT extends BaseIT {
   private static final Logger LOG = LoggerFactory.getLogger(RangerHiveE2EIT.class);
 
   public static final String metalakeName =
-      GravitinoITUtils.genRandomName("RangerHiveE2EIT_metalake").toLowerCase();
-  public static final String catalogName =
-      GravitinoITUtils.genRandomName("RangerHiveE2EIT_catalog").toLowerCase();
-  public static final String schemaName =
-      GravitinoITUtils.genRandomName("RangerHiveE2EIT_schema").toLowerCase();
+      GravitinoITUtils.genRandomName("metalake").toLowerCase();
+  public static final String catalogName = GravitinoITUtils.genRandomName("catalog").toLowerCase();
+  public static final String schemaName = GravitinoITUtils.genRandomName("schema").toLowerCase();
+
+  public static final String tableName = GravitinoITUtils.genRandomName("table").toLowerCase();
 
   private static GravitinoMetalake metalake;
   private static Catalog catalog;
@@ -85,14 +86,34 @@ public class RangerHiveE2EIT extends BaseIT {
   private static String HIVE_METASTORE_URIS;
 
   private static SparkSession sparkSession = null;
-  private final AuditInfo auditInfo =
-      AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build();
 
   private static final String HADOOP_USER_NAME = "HADOOP_USER_NAME";
-  private static final String TEST_USER_NAME = "e2e_it_user";
 
   private static final String SQL_SHOW_DATABASES =
       String.format("SHOW DATABASES like '%s'", schemaName);
 
+  private static final String SQL_CREATE_SCHEMA = String.format("CREATE DATABASE %s", schemaName);
+
+  private static final String SQL_USE_SCHEMA = String.format("USE SCHEMA %s", schemaName);
+
+  private static final String SQL_CREATE_TABLE =
+      String.format("CREATE TABLE %s (a int, b string, c string)", tableName);
+
+  private static final String SQL_INSERT_TABLE =
+      String.format("INSERT INTO %s (a, b, c) VALUES (1, 'a', 'b')", tableName);
+
+  private static final String SQL_SELECT_TABLE = String.format("SELECT * FROM %s", tableName);
+
+  private static final String SQL_UPDATE_TABLE =
+      String.format("UPDATE %s SET b = 'b', c = 'c' WHERE a = 1", tableName);
+
+  private static final String SQL_DELETE_TABLE =
+      String.format("DELETE FROM %s WHERE a = 1", tableName);
+
+  private static final String SQL_ALTER_TABLE =
+      String.format("ALTER TABLE %s ADD COLUMN d string", tableName);
+
+  private static final String SQL_DROP_TABLE = String.format("DROP TABLE %s", tableName);
+
   private static String RANGER_ADMIN_URL = null;
 
   @BeforeAll
@@ -102,7 +123,7 @@ public void startIntegrationTest() throws Exception {
     configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true));
     configs.put(Configs.SERVICE_ADMINS.getKey(), RangerITEnv.HADOOP_USER_NAME);
     configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase());
-    configs.put("SimpleAuthUserName", TEST_USER_NAME);
+    configs.put("SimpleAuthUserName", AuthConstants.ANONYMOUS_USER);
     registerCustomConfigs(configs);
 
     super.startIntegrationTest();
@@ -143,6 +164,8 @@ public void startIntegrationTest() throws Exception {
 
     createMetalake();
     createCatalog();
+
+    metalake.addUser("test");
   }
 
   private static void generateRangerSparkSecurityXML() throws IOException {
@@ -204,52 +227,347 @@ public void stop() {
   }
 
   @Test
-  void testAllowUseSchemaPrivilege() throws InterruptedException {
-    // First, create a schema use Gravitino client
-    createSchema();
+  void testCreateSchema() throws InterruptedException {
+    // First, fail to create the schema
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA));
 
-    // Use Spark to show this databases(schema)
-    Dataset<Row> dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
-    dataset1.show();
-    List<Row> rows1 = dataset1.collectAsList();
-    // The schema should not be shown, because the user does not have the permission
-    Assertions.assertEquals(
-        0, rows1.stream().filter(row -> row.getString(0).equals(schemaName)).count());
+    // Second, grant a role that carries the `CREATE_SCHEMA` privilege
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // Third, succeed to create the schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Clean up
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(roleName);
+  }
+
+  @Test
+  void testCreateTable() throws InterruptedException {
+    // First, create a role for creating a schema and grant the role to the user
+    String createSchemaRole = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(Privileges.UseSchema.allow(), Privileges.CreateSchema.allow()));
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.createRole(
+        createSchemaRole, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(createSchemaRole), userName1);
+    waitForUpdatingPolicies();
+    // Second, create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Third, fail to create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE));
+
+    // Fourth, create a role for creating a table and grant it to the user
+    String createTableRole = currentFunName() + "2";
+    securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName, Lists.newArrayList(Privileges.CreateTable.allow()));
+    metalake.createRole(
+        createTableRole, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(createTableRole), userName1);
+    waitForUpdatingPolicies();
+
+    // Fifth, succeed to create a table
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // Sixth, fail to read and write a table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(createTableRole);
+    metalake.deleteRole(createSchemaRole);
+  }
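Note: the E2E tests above and below repeat the same assertThrows(AccessControlException.class, ...) shape for every statement Ranger should deny. A small shared helper along these lines (hypothetical, not part of this PR, using only classes RangerHiveE2EIT already imports) would keep that intent readable:

    import org.apache.kyuubi.plugin.spark.authz.AccessControlException;
    import org.apache.spark.sql.SparkSession;
    import org.junit.jupiter.api.Assertions;

    final class SqlAccessAssertions {
      private SqlAccessAssertions() {}

      // Run a statement and assert the Ranger Spark plugin denies it.
      static void assertDenied(SparkSession spark, String sql) {
        Assertions.assertThrows(AccessControlException.class, () -> spark.sql(sql).collectAsList());
      }

      // Run a statement and assert it succeeds end to end.
      static void assertAllowed(SparkSession spark, String sql) {
        Assertions.assertDoesNotThrow(() -> spark.sql(sql).collectAsList());
      }
    }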
+
+  @Test
+  void testReadWriteTable() throws InterruptedException {
+    // First, create a role with read-write table privileges and grant the role to the user
+    String readWriteRole = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(
+                Privileges.UseSchema.allow(),
+                Privileges.CreateSchema.allow(),
+                Privileges.CreateTable.allow(),
+                Privileges.SelectTable.allow(),
+                Privileges.ModifyTable.allow()));
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.createRole(readWriteRole, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(readWriteRole), userName1);
+    waitForUpdatingPolicies();
+    // Second, create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Third, create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // case 1: Succeed to insert data into the table
+    sparkSession.sql(SQL_INSERT_TABLE);
+
+    // case 2: Succeed to select data from the table
+    sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+    // case 3: Fail to update data in the table, because Hive doesn't support UPDATE
+    Assertions.assertThrows(
+        SparkUnsupportedOperationException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't support DELETE
+    Assertions.assertThrows(AnalysisException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+
+    // case 5: Succeed to alter the table
+    sparkSession.sql(SQL_ALTER_TABLE);
+
+    // case 6: Fail to drop the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(readWriteRole);
+  }
+
+  @Test
+  void testReadOnlyTable() throws InterruptedException {
+    // First, create a role with read-only table privileges and grant the role to the user
+    String readOnlyRole = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(
+                Privileges.UseSchema.allow(),
+                Privileges.CreateSchema.allow(),
+                Privileges.CreateTable.allow(),
+                Privileges.SelectTable.allow()));
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.createRole(readOnlyRole, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(readOnlyRole), userName1);
+    waitForUpdatingPolicies();
+    // Second, create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Third, create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // case 1: Fail to insert data into the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
+
+    // case 2: Succeed to select data from the table
+    sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+    // case 3: Fail to update data in the table, because Hive doesn't support UPDATE
+    Assertions.assertThrows(
+        SparkUnsupportedOperationException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't support DELETE
+    Assertions.assertThrows(AnalysisException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+
+    // case 5: Fail to alter the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_ALTER_TABLE));
+
+    // case 6: Fail to drop the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(readOnlyRole);
+  }
+
+  @Test
+  void testWriteOnlyTable() throws InterruptedException {
+    // First, create a role with write-only table privileges and grant the role to the user
+    String writeOnlyRole = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(
+                Privileges.UseSchema.allow(),
+                Privileges.CreateSchema.allow(),
+                Privileges.CreateTable.allow(),
+                Privileges.ModifyTable.allow()));
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.createRole(writeOnlyRole, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(writeOnlyRole), userName1);
+    waitForUpdatingPolicies();
+    // Second, create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Third, create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // case 1: Succeed to insert data into the table
+    sparkSession.sql(SQL_INSERT_TABLE);
+
+    // case 2: Fail to select data from the table
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+
+    // case 3: Fail to update data in the table, because Hive doesn't support UPDATE
+    Assertions.assertThrows(
+        SparkUnsupportedOperationException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't support DELETE
+    Assertions.assertThrows(AnalysisException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+
+    // case 5: Succeed to alter the table
+    sparkSession.sql(SQL_ALTER_TABLE);
+
+    // case 6: Fail to drop the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(writeOnlyRole);
+  }
+
+  @Test
+  void testCreateAllPrivilegesRole() throws InterruptedException {
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(
+                Privileges.CreateCatalog.allow(),
+                Privileges.UseCatalog.allow(),
+                Privileges.UseSchema.allow(),
+                Privileges.CreateSchema.allow(),
+                Privileges.CreateFileset.allow(),
+                Privileges.ReadFileset.allow(),
+                Privileges.WriteFileset.allow(),
+                Privileges.CreateTopic.allow(),
+                Privileges.ConsumeTopic.allow(),
+                Privileges.ProduceTopic.allow(),
+                Privileges.CreateTable.allow(),
+                Privileges.SelectTable.allow(),
+                Privileges.ModifyTable.allow(),
+                Privileges.ManageUsers.allow(),
+                Privileges.ManageGroups.allow(),
+                Privileges.CreateRole.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+
+    // Grant this role to the Spark execution user `HADOOP_USER_NAME`
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+
+    waitForUpdatingPolicies();
+
+    // Test to create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Test to create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(roleName);
+  }
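Note: every table test above starts with the same preamble: build a metalake-level SecurableObject, create a role, grant it to the Spark execution user, then wait for the Ranger plugin to refresh. A shared fixture like the following (hypothetical, not in this PR; it only combines API calls that already appear above) would shrink each test by several lines:

    import com.google.common.collect.Lists;
    import java.util.Collections;
    import org.apache.gravitino.authorization.Privilege;
    import org.apache.gravitino.authorization.SecurableObject;
    import org.apache.gravitino.authorization.SecurableObjects;
    import org.apache.gravitino.client.GravitinoMetalake;

    final class RoleFixtures {
      private RoleFixtures() {}

      // Create a metalake-scoped role carrying the given privileges and grant it to the user.
      static void grantMetalakeRole(
          GravitinoMetalake metalake,
          String metalakeName,
          String roleName,
          String userName,
          Privilege... privileges) {
        SecurableObject object =
            SecurableObjects.ofMetalake(metalakeName, Lists.newArrayList(privileges));
        metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(object));
        metalake.grantRolesToUser(Lists.newArrayList(roleName), userName);
        // Callers still need waitForUpdatingPolicies() before issuing Spark SQL.
      }
    }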
+
+  @Test
+  void testDeleteAndRecreateRole() throws InterruptedException {
+    // Create a role with the CREATE_SCHEMA privilege
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.parse(
+            String.format("%s", catalogName),
+            MetadataObject.Type.CATALOG,
+            Lists.newArrayList(Privileges.UseCatalog.allow(), Privileges.CreateSchema.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+
+    // Grant this role to the Spark execution user `HADOOP_USER_NAME`
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // Succeed to create the schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+    catalog.asSchemas().dropSchema(schemaName, true);
+
+    // Delete the role
+    metalake.deleteRole(roleName);
+    waitForUpdatingPolicies();
+
+    // Fail to create the schema
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA));
+
+    // Create the role again
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+
+    // Grant the role again
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // Succeed to create the schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Clean up
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.deleteRole(roleName);
+  }
+
+  @Test
+  void testAllowUseSchemaPrivilege() throws InterruptedException {
     // Create a role with CREATE_SCHEMA privilege
-    SecurableObject securableObject1 =
+    String roleName = currentFunName();
+    SecurableObject securableObject =
         SecurableObjects.parse(
             String.format("%s", catalogName),
             MetadataObject.Type.CATALOG,
             Lists.newArrayList(Privileges.CreateSchema.allow()));
-    RoleEntity role =
-        RoleEntity.builder()
-            .withId(1L)
-            .withName(currentFunName())
-            .withAuditInfo(auditInfo)
-            .withSecurableObjects(Lists.newArrayList(securableObject1))
-            .build();
-    RangerITEnv.rangerAuthHivePlugin.onRoleCreated(role);
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
 
     // Granted this role to the spark execution user `HADOOP_USER_NAME`
     String userName1 = System.getenv(HADOOP_USER_NAME);
-    UserEntity userEntity1 =
-        UserEntity.builder()
-            .withId(1L)
-            .withName(userName1)
-            .withRoleNames(Collections.emptyList())
-            .withRoleIds(Collections.emptyList())
-            .withAuditInfo(auditInfo)
-            .build();
-    Assertions.assertTrue(
-        RangerITEnv.rangerAuthHivePlugin.onGrantedRolesToUser(
-            Lists.newArrayList(role), userEntity1));
-
-    // After Ranger Authorization, Must wait a period of time for the Ranger Spark plugin to update
-    // the policy Sleep time must be greater than the policy update interval
-    // (ranger.plugin.spark.policy.pollIntervalMs) in the
-    // `resources/ranger-spark-security.xml.template`
-    Thread.sleep(1000L);
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // Create the schema with Spark SQL
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Revoke the privilege of creating schemas
+    MetadataObject catalogObject =
+        MetadataObjects.of(null, catalogName, MetadataObject.Type.CATALOG);
+    metalake.revokePrivilegesFromRole(
+        roleName, catalogObject, Lists.newArrayList(Privileges.CreateSchema.allow()));
+    waitForUpdatingPolicies();
+
+    // Use Spark to show the database (schema)
+    Dataset<Row> dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
+    dataset1.show();
+    List<Row> rows1 = dataset1.collectAsList();
+    // The schema should not be shown, because the user does not have the permission
+    Assertions.assertEquals(
+        0, rows1.stream().filter(row -> row.getString(0).equals(schemaName)).count());
+
+    // Grant the privilege of using the schema
+    MetadataObject schemaObject =
+        MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA);
+    metalake.grantPrivilegesToRole(
+        roleName, schemaObject, Lists.newArrayList(Privileges.UseSchema.allow()));
+    waitForUpdatingPolicies();
 
     // Use Spark to show this databases(schema) again
     Dataset<Row> dataset2 = sparkSession.sql(SQL_SHOW_DATABASES);
@@ -262,6 +580,12 @@ void testAllowUseSchemaPrivilege() throws InterruptedException {
     // The schema should be shown, because the user has the permission
     Assertions.assertEquals(
         1, rows2.stream().filter(row -> row.getString(0).equals(schemaName)).count());
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+    metalake.revokeRolesFromUser(Lists.newArrayList(roleName), userName1);
+    metalake.deleteRole(roleName);
   }
 
   private void createMetalake() {
@@ -300,21 +624,11 @@ private static void createCatalog() {
     LOG.info("Catalog created: {}", catalog);
   }
 
-  private static void createSchema() {
-    Map<String, String> properties = Maps.newHashMap();
-    properties.put("key1", "val1");
-    properties.put("key2", "val2");
-    properties.put(
-        "location",
-        String.format(
-            "hdfs://%s:%d/user/hive/warehouse/%s.db",
-            containerSuite.getHiveRangerContainer().getContainerIpAddress(),
-            HiveContainer.HDFS_DEFAULTFS_PORT,
-            schemaName.toLowerCase()));
-    String comment = "comment";
-
-    catalog.asSchemas().createSchema(schemaName, comment, properties);
-    Schema loadSchema = catalog.asSchemas().loadSchema(schemaName);
-    Assertions.assertEquals(schemaName.toLowerCase(), loadSchema.name());
+  private static void waitForUpdatingPolicies() throws InterruptedException {
+    // After a Ranger authorization change, we must wait for the Ranger Spark plugin to pull the
+    // updated policies. The sleep time must be greater than the policy refresh interval
+    // (ranger.plugin.spark.policy.pollIntervalMs) configured in
+    // `resources/ranger-spark-security.xml.template`.
+    Thread.sleep(1000L);
   }
 }
diff --git a/build.gradle.kts b/build.gradle.kts
index c848449060..6b272101e0 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -163,7 +163,11 @@ allprojects {
       // Default use MiniGravitino to run integration tests
      param.environment("GRAVITINO_ROOT_DIR", project.rootDir.path)
       param.environment("IT_PROJECT_DIR", project.buildDir.path)
-      param.environment("HADOOP_USER_NAME", "anonymous")
+      // If the environment variable `HADOOP_USER_NAME` is not customized in a submodule,
+      // then default it to "anonymous"
+      if (param.environment["HADOOP_USER_NAME"] == null) {
+        param.environment("HADOOP_USER_NAME", "anonymous")
+      }
       param.environment("HADOOP_HOME", "/tmp")
       param.environment("PROJECT_VERSION", project.version)
diff --git a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
index 147d66eef4..42dd9f830d 100644
--- a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
+++ b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
@@ -149,9 +149,11 @@ public static void callAuthorizationPluginForSecurableObjects(
     CatalogManager catalogManager = GravitinoEnv.getInstance().catalogManager();
     for (SecurableObject securableObject : securableObjects) {
       if (needApplyAuthorizationPluginAllCatalogs(securableObject)) {
-        Catalog[] catalogs = catalogManager.listCatalogsInfo(Namespace.of(metalake));
-        for (Catalog catalog : catalogs) {
-          callAuthorizationPluginImpl(consumer, catalog);
+        NameIdentifier[] catalogs = catalogManager.listCatalogs(Namespace.of(metalake));
+        // listCatalogsInfo returns `CatalogInfo` rather than `BaseCatalog`, and we need a
+        // `BaseCatalog` to call the authorization plugin methods, so load each catalog by name.
+        for (NameIdentifier catalog : catalogs) {
+          callAuthorizationPluginImpl(consumer, catalogManager.loadCatalog(catalog));
         }
       } else if (needApplyAuthorization(securableObject.type())) {