Skip to content

Respect IP Pool when allocating ephemeral addresses for guests #1496

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 29, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 42 additions & 2 deletions nexus/src/db/datastore/instance_external_ip.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use crate::db::error::ErrorHandler;
use crate::db::model::IncompleteInstanceExternalIp;
use crate::db::model::InstanceExternalIp;
use crate::db::model::IpKind;
use crate::db::model::IpPool;
use crate::db::model::Name;
use crate::db::queries::external_ip::NextExternalIp;
use crate::db::update_and_check::UpdateAndCheck;
Expand All @@ -22,6 +23,8 @@ use diesel::prelude::*;
use omicron_common::api::external::CreateResult;
use omicron_common::api::external::Error;
use omicron_common::api::external::LookupResult;
use omicron_common::api::external::LookupType;
use omicron_common::api::external::ResourceType;
use uuid::Uuid;

impl DataStore {
Expand Down Expand Up @@ -50,13 +53,50 @@ impl DataStore {
ip_id: Uuid,
project_id: Uuid,
instance_id: Uuid,
_pool_name: Option<Name>,
pool_name: Option<Name>,
) -> CreateResult<InstanceExternalIp> {
let pool_id = if let Some(ref name) = pool_name {
// We'd like to add authz checks here, and use the `LookupPath`
// methods on the project-scoped view of this resource. It's not
// entirely clear how that'll work in the API, so see RFD 288 and
// https://github.com/oxidecomputer/omicron/issues/1470 for more
// details.
//
// For now, we just ensure that the pool is either unreserved, or
// reserved for the instance's project.
use db::schema::ip_pool::dsl;
Some(
dsl::ip_pool
.filter(dsl::name.eq(name.clone()))
.filter(dsl::time_deleted.is_null())
.filter(
dsl::project_id
.is_null()
.or(dsl::project_id.eq(Some(project_id))),
)
.select(IpPool::as_select())
.first_async::<IpPool>(self.pool_authorized(opctx).await?)
.await
.map_err(|e| {
public_error_from_diesel_pool(
e,
ErrorHandler::NotFoundByLookup(
ResourceType::IpPool,
LookupType::ByName(name.to_string()),
),
)
})?
.identity
.id,
)
} else {
None
};
let data = IncompleteInstanceExternalIp::for_ephemeral(
ip_id,
project_id,
instance_id,
/* pool_id = */ None,
pool_id,
);
self.allocate_instance_external_ip(opctx, data).await
}
Expand Down
167 changes: 156 additions & 11 deletions nexus/src/db/queries/external_ip.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,7 @@ const MAX_PORT: i32 = u16::MAX as _;
/// FROM
/// ip_pool_range
/// WHERE
/// project_id = <project_id> OR
/// project_id IS NULL AND
/// <pool restriction clause> AND
/// time_deleted IS NULL
/// )
/// CROSS JOIN
Expand Down Expand Up @@ -183,9 +182,36 @@ const MAX_PORT: i32 = u16::MAX as _;
/// (even though we send the start/stop to the database as `i32`s). We need to
/// cast it on the way out of the database, so that Diesel can correctly
/// deserialize it into an `i32`.
///
/// Pool restriction
/// ----------------
///
/// Clients may optionally request an external address from a specific IP Pool.
/// If they don't provide a pool, the query is restricted to all IP Pools
/// available to their project (not reserved for a different project). In that
/// case, the pool restriction looks like:
///
/// ```sql
/// (project_id = <project_id> OR project_id IS NULL)
/// ```
///
/// That is, we search for all pools that are either unreserved, or reserved
/// for _this instance's_ project.
///
/// If the client does provide a pool, an additional filtering clause is added,
/// so that the whole filter looks like:
///
/// ```sql
/// pool_id = <pool_id> AND
/// (project_id = <project_id> OR project_id IS NULL)
/// ```
///
/// That filter on `project_id` is a bit redundant, as the caller should be
/// looking up the pool's ID by name, among those available to the instance's
/// project. However, it helps provide safety in the case that the caller
/// violates that expectation.
#[derive(Debug, Clone)]
pub struct NextExternalIp {
// TODO-completeness: Add `ip_pool_id`, and restrict search to it.
ip: IncompleteInstanceExternalIp,
// Number of ports reserved per IP address. Only applicable if the IP kind
// is snat.
Expand Down Expand Up @@ -390,7 +416,7 @@ impl NextExternalIp {
// FROM
// ip_pool_range
// WHERE
// (project_id = <project_id> OR project_id IS NULL) AND
// <pool_restriction> AND
// time_deleted IS NULL
// ```
fn push_address_sequence_subquery<'a>(
Expand All @@ -410,13 +436,21 @@ impl NextExternalIp {
out.push_identifier(dsl::first_address::NAME)?;
out.push_sql(") AS candidate_ip FROM ");
IP_POOL_RANGE_FROM_CLAUSE.walk_ast(out.reborrow())?;
out.push_sql(" WHERE (");
out.push_identifier(dsl::project_id::NAME)?;
out.push_sql(" = ");
out.push_bind_param::<sql_types::Uuid, Uuid>(self.ip.project_id())?;
out.push_sql(" OR ");
out.push_identifier(dsl::project_id::NAME)?;
out.push_sql(" IS NULL) AND ");
out.push_sql(" WHERE ");
if let Some(ref pool_id) = self.ip.pool_id() {
out.push_identifier(dsl::ip_pool_id::NAME)?;
out.push_sql(" = ");
out.push_bind_param::<sql_types::Uuid, Uuid>(pool_id)?;
} else {
out.push_sql("(");
out.push_identifier(dsl::project_id::NAME)?;
out.push_sql(" = ");
out.push_bind_param::<sql_types::Uuid, Uuid>(self.ip.project_id())?;
out.push_sql(" OR ");
out.push_identifier(dsl::project_id::NAME)?;
out.push_sql(" IS NULL)");
}
out.push_sql(" AND ");
out.push_identifier(dsl::time_deleted::NAME)?;
out.push_sql(" IS NULL");
Ok(())
Expand Down Expand Up @@ -581,6 +615,7 @@ mod tests {
use crate::db::model::IpKind;
use crate::db::model::IpPool;
use crate::db::model::IpPoolRange;
use crate::db::model::Name;
use crate::external_api::shared::IpRange;
use async_bb8_diesel::AsyncRunQueryDsl;
use dropshot::test_util::LogContext;
Expand All @@ -589,6 +624,7 @@ mod tests {
use omicron_common::api::external::IdentityMetadataCreateParams;
use omicron_test_utils::dev;
use omicron_test_utils::dev::db::CockroachInstance;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::sync::Arc;
use uuid::Uuid;
Expand Down Expand Up @@ -999,4 +1035,113 @@ mod tests {

context.success().await;
}

#[tokio::test]
async fn test_next_external_ip_is_restricted_to_pools() {
    let context =
        TestContext::new("test_next_external_ip_is_restricted_to_pools")
            .await;

    // Set up a pair of unreserved pools with disjoint address ranges.
    let range0 = IpRange::try_from((
        Ipv4Addr::new(10, 0, 0, 1),
        Ipv4Addr::new(10, 0, 0, 3),
    ))
    .unwrap();
    let range1 = IpRange::try_from((
        Ipv4Addr::new(10, 0, 0, 4),
        Ipv4Addr::new(10, 0, 0, 6),
    ))
    .unwrap();
    context.create_ip_pool("p0", range0, None).await;
    context.create_ip_pool("p1", range1, None).await;

    // Request an address explicitly from the second pool. The allocator
    // must honor the requested pool, even though the first pool still has
    // addresses available.
    let instance_id = Uuid::new_v4();
    let project_id = Uuid::new_v4();
    let ip_id = Uuid::new_v4();
    let requested_pool = Some(Name("p1".parse().unwrap()));

    let ip = context
        .db_datastore
        .allocate_instance_ephemeral_ip(
            &context.opctx,
            ip_id,
            project_id,
            instance_id,
            requested_pool,
        )
        .await
        .expect("Failed to allocate instance ephemeral IP address");

    // The address must be the first one in the *second* pool's range.
    assert_eq!(ip.kind, IpKind::Ephemeral);
    assert_eq!(ip.ip.ip(), range1.first_address());
    assert_eq!(ip.first_port.0, 0);
    assert_eq!(ip.last_port.0, u16::MAX);
    assert_eq!(ip.project_id, project_id);

    context.success().await;
}

// Regression-style test: exhausting one named pool must produce an
// allocation failure rather than silently falling back to another pool.
#[tokio::test]
async fn test_ensure_pool_exhaustion_does_not_use_other_pool() {
    let context = TestContext::new(
        "test_ensure_pool_exhaustion_does_not_use_other_pool",
    )
    .await;

    // Create two pools, neither project-restricted.
    let first_range = IpRange::try_from((
        Ipv4Addr::new(10, 0, 0, 1),
        Ipv4Addr::new(10, 0, 0, 3),
    ))
    .unwrap();
    context.create_ip_pool("p0", first_range, None).await;
    let first_address = Ipv4Addr::new(10, 0, 0, 4);
    let last_address = Ipv4Addr::new(10, 0, 0, 6);
    let second_range =
        IpRange::try_from((first_address, last_address)).unwrap();
    context.create_ip_pool("p1", second_range, None).await;

    // Allocate all available addresses in the second pool, verifying that
    // each comes out of that pool's range, in order.
    let instance_id = Uuid::new_v4();
    let project_id = Uuid::new_v4();
    let pool_name = Some(Name("p1".parse().unwrap()));
    let first_octet = first_address.octets()[3];
    let last_octet = last_address.octets()[3];
    for octet in first_octet..=last_octet {
        let ip = context
            .db_datastore
            .allocate_instance_ephemeral_ip(
                &context.opctx,
                Uuid::new_v4(),
                project_id,
                instance_id,
                pool_name.clone(),
            )
            .await
            .expect("Failed to allocate instance ephemeral IP address");
        // The ranges above are IPv4, so every allocation should be too.
        if let IpAddr::V4(addr) = ip.ip.ip() {
            assert_eq!(addr.octets()[3], octet);
        } else {
            panic!("Expected an IPv4 address");
        }
    }

    // Allocating another address should _fail_, and not use the first pool.
    context
        .db_datastore
        .allocate_instance_ephemeral_ip(
            &context.opctx,
            Uuid::new_v4(),
            project_id,
            instance_id,
            pool_name,
        )
        .await
        .expect_err("Should not use IP addresses from a different pool");

    context.success().await;
}
}