From 42fc9ef863498e0ddbcc586aed150d0dad4bde64 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Tue, 9 May 2023 23:52:04 +0200 Subject: [PATCH] Apply compat changes from latest Pekko --- .../connectors/amqp/javadsl/AmqpFlow.scala | 11 +-- .../amqp/javadsl/AmqpFlowWithContext.scala | 7 +- .../connectors/amqp/javadsl/AmqpRpcFlow.scala | 11 +-- .../connectors/amqp/javadsl/AmqpSink.scala | 9 +- .../amqp/javadsl/CommittableReadResult.scala | 7 +- .../scaladsl/EventBridgePublisher.scala | 4 +- .../awslambda/scaladsl/AwsLambdaFlow.scala | 4 +- .../storagequeue/javadsl/AzureQueueSink.scala | 4 +- .../azure/storagequeue/settings.scala | 5 +- .../cassandra/CassandraSessionSettings.scala | 9 +- .../cassandra/CqlSessionProvider.scala | 7 +- .../PekkoDiscoverySessionProvider.scala | 4 +- .../cassandra/javadsl/CassandraSession.scala | 32 +++--- .../javadsl/CassandraSessionRegistry.scala | 5 +- .../cassandra/scaladsl/CassandraSession.scala | 12 +-- .../javadsl/CassandraSessionSpec.scala | 14 +-- .../scaladsl/CassandraLifecycle.scala | 4 +- contributor-advice.md | 2 +- .../couchbase/CouchbaseSessionRegistry.scala | 4 +- .../impl/CouchbaseSessionJavaAdapter.scala | 42 ++++---- .../couchbase/javadsl/CouchbaseSession.scala | 8 +- .../stream/connectors/couchbase/model.scala | 4 +- .../couchbase/scaladsl/DiscoverySupport.scala | 6 +- .../connectors/dynamodb/DynamoDbOp.scala | 4 +- .../ElasticsearchConnectionSettings.scala | 7 +- .../connectors/elasticsearch/ReadResult.scala | 5 +- .../elasticsearch/WriteMessage.scala | 7 +- .../testkit/MessageFactory.scala | 7 +- .../file/javadsl/LogRotatorSink.scala | 9 +- .../connectors/ftp/javadsl/FtpApi.scala | 69 +++++++------ .../connectors/geode/javadsl/Geode.java | 5 +- .../javadsl/GeodeWithPoolSubscription.java | 5 +- .../javadsl/BigQueryArrowStorage.scala | 10 +- .../storage/javadsl/BigQueryAvroStorage.scala | 10 +- .../storage/javadsl/BigQueryStorage.scala | 6 +- .../bigquery/javadsl/BigQuery.scala | 50 +++++----- .../bigquery/model/DatasetJsonProtocol.scala | 36 +++---- .../model/ErrorProtoJsonProtocol.scala | 19 ++-- .../bigquery/model/JobJsonProtocol.scala | 82 ++++++++-------- .../bigquery/model/QueryJsonProtocol.scala | 98 +++++++++---------- .../model/TableDataJsonProtocol.scala | 48 ++++----- .../bigquery/model/TableJsonProtocol.scala | 60 ++++++------ .../pubsub/javadsl/GooglePubSub.scala | 4 +- .../googlecloud/storage/Owner.scala | 6 +- .../googlecloud/storage/StorageObject.scala | 68 ++++++------- .../storage/javadsl/GCStorage.scala | 37 ++++--- .../connectors/google/GoogleSettings.scala | 16 +-- .../connectors/google/javadsl/Google.scala | 6 +- .../connectors/google/javadsl/Paginated.scala | 7 +- .../google/firebase/fcm/FcmSettings.scala | 6 +- .../connectors/hbase/HTableSettings.scala | 5 +- .../hbase/javadsl/HTableStage.scala | 5 +- .../connectors/hdfs/javadsl/HdfsSource.scala | 11 +-- .../ironmq/javadsl/IronMqProducer.scala | 5 +- .../connectors/ironmq/javadsl/package.scala | 10 +- .../stream/connectors/ironmq/UnitTest.java | 6 +- .../stream/connectors/jms/Destinations.scala | 2 +- .../stream/connectors/jms/JmsMessages.scala | 4 +- .../connectors/jms/javadsl/JmsProducer.scala | 15 ++- .../kinesis/impl/KinesisSourceStage.scala | 7 +- .../javadsl/KinesisSchedulerSource.scala | 4 +- .../kinesis/scaladsl/KinesisFlow.scala | 4 +- .../scaladsl/KinesisFirehoseFlow.scala | 5 +- .../connectors/kudu/KuduTableSettings.scala | 2 +- .../connectors/mqtt/streaming/model.scala | 8 +- .../connectors/mqtt/javadsl/MqttFlow.scala | 9 +- 
.../mqtt/javadsl/MqttMessageWithAck.scala | 5 +- .../connectors/mqtt/javadsl/MqttSource.scala | 7 +- .../mqtt/scaladsl/MqttMessageWithAck.scala | 4 +- .../connectors/pravega/javadsl/Pravega.java | 4 +- .../pravega/javadsl/PravegaTable.java | 7 +- .../connectors/pravega/impl/PravegaFlow.scala | 4 +- .../pravega/impl/PravegaTableReadFlow.scala | 4 +- .../pravega/impl/PravegaTableWriteFlow.scala | 4 +- project/Dependencies.scala | 6 +- project/plugins.sbt | 2 +- .../reference/javadsl/Reference.scala | 4 +- .../stream/connectors/reference/model.scala | 6 +- .../connectors/reference/settings.scala | 7 +- .../stream/connectors/s3/javadsl/S3.scala | 49 +++++----- .../pekko/stream/connectors/s3/model.scala | 42 ++++---- .../pekko/stream/connectors/s3/settings.scala | 12 +-- .../connectors/s3/impl/auth/SignerSpec.scala | 5 +- .../connectors/slick/javadsl/Slick.scala | 8 +- .../sns/scaladsl/SnsPublisher.scala | 5 +- .../connectors/sqs/javadsl/SqsAckSink.scala | 7 +- .../sqs/javadsl/SqsPublishSink.scala | 17 ++-- .../connectors/sqs/scaladsl/SqsAckFlow.scala | 10 +- .../sqs/scaladsl/SqsPublishFlow.scala | 7 +- .../connectors/sqs/scaladsl/SqsSource.scala | 7 +- .../sqs/scaladsl/SqsSourceMockSpec.scala | 6 +- .../connectors/sse/javadsl/EventSource.scala | 13 ++- .../stream/connectors/udp/javadsl/Udp.scala | 11 +-- .../javadsl/UnixDomainSocket.scala | 16 +-- .../pekko/stream/connectors/xml/model.scala | 32 +++--- 95 files changed, 645 insertions(+), 670 deletions(-) rename cassandra/src/test/scala/{docs => org/apache/pekko/stream/connectors/cassandra}/javadsl/CassandraSessionSpec.scala (94%) diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlow.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlow.scala index 1b80d18ef..3537f8ddd 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlow.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlow.scala @@ -20,8 +20,7 @@ import pekko.Done import pekko.japi.Pair import pekko.stream.connectors.amqp._ import pekko.stream.scaladsl.Keep - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ object AmqpFlow { @@ -38,7 +37,7 @@ object AmqpFlow { */ def create( settings: AmqpWriteSettings): pekko.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = - pekko.stream.connectors.amqp.scaladsl.AmqpFlow(settings).mapMaterializedValue(f => f.toJava).asJava + pekko.stream.connectors.amqp.scaladsl.AmqpFlow(settings).mapMaterializedValue(f => f.asJava).asJava /** * Creates an `AmqpFlow` that accepts `WriteMessage` elements and emits `WriteResult`. 
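The pattern above — exposing a scaladsl materialized `Future[Done]` to Java callers as a `CompletionStage[Done]` — now goes through `pekko.util.FutureConverters` instead of `scala.compat.java8.FutureConverters`. A minimal sketch of that conversion, assuming a generic scaladsl flow (`scalaFlow` and `asJavaFlow` are placeholders, not part of this patch):

    import java.util.concurrent.CompletionStage
    import org.apache.pekko
    import pekko.Done
    import pekko.stream.{ javadsl, scaladsl }
    import pekko.util.FutureConverters._
    import scala.concurrent.Future

    // Convert a scaladsl flow that materializes a Future[Done] into its javadsl counterpart.
    def asJavaFlow[In, Out](
        scalaFlow: scaladsl.Flow[In, Out, Future[Done]]): javadsl.Flow[In, Out, CompletionStage[Done]] =
      scalaFlow.mapMaterializedValue(_.asJava).asJava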
@@ -62,7 +61,7 @@ object AmqpFlow { settings: AmqpWriteSettings): pekko.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpFlow .withConfirm(settings = settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -87,7 +86,7 @@ object AmqpFlow { settings: AmqpWriteSettings): pekko.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpFlow .withConfirmUnordered(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -109,6 +108,6 @@ object AmqpFlow { pekko.stream.connectors.amqp.scaladsl.AmqpFlow .withConfirmAndPassThroughUnordered[T](settings = settings))(Keep.right) .map { case (writeResult, passThrough) => Pair(writeResult, passThrough) } - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlowWithContext.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlowWithContext.scala index 50aa0d673..5a09a5ca4 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlowWithContext.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpFlowWithContext.scala @@ -18,8 +18,7 @@ import java.util.concurrent.CompletionStage import org.apache.pekko import pekko.Done import pekko.stream.connectors.amqp._ - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ object AmqpFlowWithContext { @@ -33,7 +32,7 @@ object AmqpFlowWithContext { : pekko.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpFlowWithContext .apply(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -50,6 +49,6 @@ object AmqpFlowWithContext { : pekko.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpFlowWithContext .withConfirm(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpRpcFlow.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpRpcFlow.scala index 7b9a39c5c..98355e885 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpRpcFlow.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpRpcFlow.scala @@ -19,8 +19,7 @@ import org.apache.pekko import pekko.stream.connectors.amqp._ import pekko.stream.javadsl.Flow import pekko.util.ByteString - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ object AmqpRpcFlow { @@ -37,7 +36,7 @@ object AmqpRpcFlow { repliesPerMessage: Int): Flow[ByteString, ByteString, CompletionStage[String]] = pekko.stream.connectors.amqp.scaladsl.AmqpRpcFlow .simple(settings, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -49,7 +48,7 @@ object AmqpRpcFlow { bufferSize: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] = pekko.stream.connectors.amqp.scaladsl.AmqpRpcFlow .atMostOnceFlow(settings, bufferSize) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -62,7 +61,7 @@ object AmqpRpcFlow { repliesPerMessage: Int): Flow[WriteMessage, ReadResult, 
CompletionStage[String]] = pekko.stream.connectors.amqp.scaladsl.AmqpRpcFlow .atMostOnceFlow(settings, bufferSize, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -82,7 +81,7 @@ object AmqpRpcFlow { repliesPerMessage: Int = 1): Flow[WriteMessage, CommittableReadResult, CompletionStage[String]] = pekko.stream.connectors.amqp.scaladsl.AmqpRpcFlow .committableFlow(settings, bufferSize, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .map(cm => new CommittableReadResult(cm)) .asJava diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpSink.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpSink.scala index bf1bbf124..3eb6c6922 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpSink.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/AmqpSink.scala @@ -19,8 +19,7 @@ import org.apache.pekko import pekko.Done import pekko.stream.connectors.amqp._ import pekko.util.ByteString - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ object AmqpSink { @@ -31,7 +30,7 @@ object AmqpSink { * either normally or because of an amqp failure. */ def create(settings: AmqpWriteSettings): pekko.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] = - pekko.stream.connectors.amqp.scaladsl.AmqpSink(settings).mapMaterializedValue(f => f.toJava).asJava + pekko.stream.connectors.amqp.scaladsl.AmqpSink(settings).mapMaterializedValue(f => f.asJava).asJava /** * Creates an `AmqpSink` that accepts `ByteString` elements. @@ -42,7 +41,7 @@ object AmqpSink { def createSimple( settings: AmqpWriteSettings): pekko.stream.javadsl.Sink[ByteString, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpSink.simple(settings).mapMaterializedValue(f => - f.toJava).asJava + f.asJava).asJava /** * Connects to an AMQP server upon materialization and sends incoming messages to the server. 
@@ -55,6 +54,6 @@ object AmqpSink { def createReplyTo( settings: AmqpReplyToSinkSettings): pekko.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] = pekko.stream.connectors.amqp.scaladsl.AmqpSink.replyTo(settings).mapMaterializedValue(f => - f.toJava).asJava + f.asJava).asJava } diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/CommittableReadResult.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/CommittableReadResult.scala index b8a66c5f1..69484cc1e 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/CommittableReadResult.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/javadsl/CommittableReadResult.scala @@ -19,16 +19,15 @@ import org.apache.pekko import pekko.Done import pekko.stream.connectors.amqp.ReadResult import pekko.stream.connectors.amqp.scaladsl - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ final class CommittableReadResult(cm: scaladsl.CommittableReadResult) { val message: ReadResult = cm.message def ack(): CompletionStage[Done] = ack(false) - def ack(multiple: Boolean): CompletionStage[Done] = cm.ack(multiple).toJava + def ack(multiple: Boolean): CompletionStage[Done] = cm.ack(multiple).asJava def nack(): CompletionStage[Done] = nack(false, true) def nack(multiple: Boolean, requeue: Boolean): CompletionStage[Done] = - cm.nack(multiple, requeue).toJava + cm.nack(multiple, requeue).asJava } diff --git a/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/scaladsl/EventBridgePublisher.scala b/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/scaladsl/EventBridgePublisher.scala index 98412df37..1a0fa59b3 100644 --- a/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/scaladsl/EventBridgePublisher.scala +++ b/aws-event-bridge/src/main/scala/org/apache/pekko/stream/connectors/aws/eventbridge/scaladsl/EventBridgePublisher.scala @@ -17,11 +17,11 @@ import org.apache.pekko import pekko.stream.connectors.aws.eventbridge.EventBridgePublishSettings import pekko.stream.scaladsl.{ Flow, Keep, Sink } import pekko.{ Done, NotUsed } +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient import software.amazon.awssdk.services.eventbridge.model._ import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ /** * Scala API @@ -64,7 +64,7 @@ object EventBridgePublisher { settings: EventBridgePublishSettings)( implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] = Flow[PutEventsRequest] - .mapAsync(settings.concurrency)(eventBridgeClient.putEvents(_).toScala) + .mapAsync(settings.concurrency)(eventBridgeClient.putEvents(_).asScala) /** * Creates a [[pekko.stream.scaladsl.Flow Flow]] to publish messages to an EventBridge. 
diff --git a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala index cbcdd5ee2..10e53ad10 100644 --- a/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala +++ b/awslambda/src/main/scala/org/apache/pekko/stream/connectors/awslambda/scaladsl/AwsLambdaFlow.scala @@ -16,9 +16,9 @@ package org.apache.pekko.stream.connectors.awslambda.scaladsl import org.apache.pekko import pekko.NotUsed import pekko.stream.scaladsl.Flow +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse } import software.amazon.awssdk.services.lambda.LambdaAsyncClient -import scala.compat.java8.FutureConverters._ object AwsLambdaFlow { @@ -27,6 +27,6 @@ object AwsLambdaFlow { */ def apply( parallelism: Int)(implicit awsLambdaClient: LambdaAsyncClient): Flow[InvokeRequest, InvokeResponse, NotUsed] = - Flow[InvokeRequest].mapAsyncUnordered(parallelism)(awsLambdaClient.invoke(_).toScala) + Flow[InvokeRequest].mapAsyncUnordered(parallelism)(awsLambdaClient.invoke(_).asScala) } diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala index bee3d0bcc..36488a3e7 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala @@ -36,8 +36,8 @@ object AzureQueueSink { */ private[javadsl] def fromFunction[T](f: T => Unit): Sink[T, CompletionStage[Done]] = { import pekko.stream.connectors.azure.storagequeue.scaladsl.{ AzureQueueSink => AzureQueueSinkScalaDSL } - import scala.compat.java8.FutureConverters._ - AzureQueueSinkScalaDSL.fromFunction(f).mapMaterializedValue(_.toJava).asJava + import pekko.util.FutureConverters._ + AzureQueueSinkScalaDSL.fromFunction(f).mapMaterializedValue(_.asJava).asJava } } diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala index 9c89a51b4..b2099a0d2 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/settings.scala @@ -13,10 +13,11 @@ package org.apache.pekko.stream.connectors.azure.storagequeue +import org.apache.pekko.util.OptionConverters._ + import java.time.{ Duration => JavaDuration } import java.util.Optional -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration.{ Duration, FiniteDuration } /** @@ -51,7 +52,7 @@ final class AzureQueueSourceSettings private ( * Java API */ def getRetrieveRetryTimeout(): Optional[JavaDuration] = - retrieveRetryTimeout.map(d => JavaDuration.ofNanos(d.toNanos)).asJava + retrieveRetryTimeout.map(d => JavaDuration.ofNanos(d.toNanos)).toJava private def copy(batchSize: Int = batchSize, retrieveRetryTimeout: Option[FiniteDuration] = retrieveRetryTimeout) = new AzureQueueSourceSettings(initialVisibilityTimeout, batchSize, retrieveRetryTimeout) diff --git 
a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala index a872cc61c..70cb31077 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CassandraSessionSettings.scala @@ -15,10 +15,11 @@ package org.apache.pekko.stream.connectors.cassandra import java.util.concurrent.CompletionStage -import org.apache.pekko.Done +import org.apache.pekko +import pekko.Done +import pekko.util.FunctionConverters._ +import pekko.util.FutureConverters._ import com.datastax.oss.driver.api.core.CqlSession -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future @@ -39,7 +40,7 @@ class CassandraSessionSettings private (val configPath: String, * only execute the first. */ def withInit(value: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSessionSettings = - copy(init = Some(value.asScala.andThen(_.toScala))) + copy(init = Some(value.asScala.andThen(_.asScala))) /** * The `init` function will be performed once when the session is created, i.e. diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala index 486418908..369057af4 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/CqlSessionProvider.scala @@ -13,12 +13,13 @@ package org.apache.pekko.stream.connectors.cassandra -import org.apache.pekko.actor.{ ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem } +import org.apache.pekko +import pekko.actor.{ ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem } +import pekko.util.FutureConverters._ import com.datastax.oss.driver.api.core.CqlSession import com.typesafe.config.{ Config, ConfigFactory } import scala.collection.immutable -import scala.compat.java8.FutureConverters._ import scala.concurrent.{ ExecutionContext, Future } import scala.util.Failure @@ -59,7 +60,7 @@ class DefaultSessionProvider(system: ActorSystem, config: Config) extends CqlSes } else { val driverConfig = CqlSessionProvider.driverConfig(system, config) val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfig) - CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().toScala + CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala index 1be43ca71..23e6305e3 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/PekkoDiscoverySessionProvider.scala @@ -18,11 +18,11 @@ import pekko.ConfigurationException import pekko.actor.{ ActorSystem, ClassicActorSystemProvider } import pekko.discovery.Discovery import pekko.util.JavaDurationConverters._ +import pekko.util.FutureConverters._ import com.datastax.oss.driver.api.core.CqlSession import com.typesafe.config.{ 
Config, ConfigFactory } import scala.collection.immutable -import scala.compat.java8.FutureConverters._ import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ ExecutionContext, Future } @@ -72,7 +72,7 @@ private[cassandra] object PekkoDiscoverySessionProvider { basic.contact-points = [${contactPoints.mkString("\"", "\", \"", "\"")}] """).withFallback(CqlSessionProvider.driverConfig(system, config)) val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfigWithContactPoints) - CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().toScala + CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala index 0bf89fe7e..9fa9f8afc 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala @@ -19,8 +19,6 @@ import java.util.concurrent.{ CompletionStage, Executor } import java.util.function.{ Function => JFunction } import scala.annotation.varargs -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.ExecutionContext import org.apache.pekko import pekko.Done @@ -32,6 +30,8 @@ import pekko.stream.connectors.cassandra.CassandraServerMetaData import pekko.stream.connectors.cassandra.{ scaladsl, CqlSessionProvider } import pekko.stream.javadsl.Source import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import com.datastax.oss.driver.api.core.CqlSession import com.datastax.oss.driver.api.core.cql.BatchStatement import com.datastax.oss.driver.api.core.cql.PreparedStatement @@ -68,7 +68,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. executionContext, log, metricsCategory, - session => init.apply(session).toScala, + session => init.apply(session).asScala, () => onClose.run())) /** @@ -89,13 +89,13 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * Closes the underlying Cassandra session. * @param executor as this might be used after actor system termination, the actor systems dispatcher can't be used */ - def close(executor: Executor): CompletionStage[Done] = delegate.close(ExecutionContext.fromExecutor(executor)).toJava + def close(executor: Executor): CompletionStage[Done] = delegate.close(ExecutionContext.fromExecutor(executor)).asJava /** * Meta data about the Cassandra server, such as its version. */ def serverMetaData: CompletionStage[CassandraServerMetaData] = - delegate.serverMetaData.toJava + delegate.serverMetaData.asJava /** * The `Session` of the underlying @@ -104,7 +104,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * API exposed by this class. Be careful to not use blocking calls. */ def underlying(): CompletionStage[CqlSession] = - delegate.underlying().toJava + delegate.underlying().asJava /** * Execute CQL commands @@ -113,14 +113,14 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * The returned `CompletionStage` is completed when the command is done, or if the statement fails. 
*/ def executeDDL(stmt: String): CompletionStage[Done] = - delegate.executeDDL(stmt).toJava + delegate.executeDDL(stmt).asJava /** * Create a `PreparedStatement` that can be bound and used in * `executeWrite` or `select` multiple times. */ def prepare(stmt: String): CompletionStage[PreparedStatement] = - delegate.prepare(stmt).toJava + delegate.prepare(stmt).asJava /** * Execute several statements in a batch. First you must `prepare` the @@ -135,7 +135,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * successfully executed, or if it fails. */ def executeWriteBatch(batch: BatchStatement): CompletionStage[Done] = - delegate.executeWriteBatch(batch).toJava + delegate.executeWriteBatch(batch).asJava /** * Execute one statement. First you must `prepare` the @@ -150,7 +150,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * successfully executed, or if it fails. */ def executeWrite(stmt: Statement[_]): CompletionStage[Done] = - delegate.executeWrite(stmt).toJava + delegate.executeWrite(stmt).asJava /** * Prepare, bind and execute one statement in one go. @@ -164,7 +164,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. */ @varargs def executeWrite(stmt: String, bindValues: AnyRef*): CompletionStage[Done] = - delegate.executeWrite(stmt, bindValues: _*).toJava + delegate.executeWrite(stmt, bindValues: _*).asJava /** * Execute a select statement. First you must `prepare` the @@ -194,7 +194,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * this `Source` and then `run` the stream. */ def select(stmt: CompletionStage[Statement[_]]): Source[Row, NotUsed] = - delegate.select(stmt.toScala).asJava + delegate.select(stmt.asScala).asJava /** * Prepare, bind and execute a select statement in one go. @@ -222,7 +222,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * The returned `CompletionStage` is completed with the found rows. */ def selectAll(stmt: Statement[_]): CompletionStage[JList[Row]] = - delegate.selectAll(stmt).map(_.asJava).toJava + delegate.selectAll(stmt).map(_.asJava).asJava /** * Prepare, bind and execute a select statement in one go. Only use this method @@ -235,7 +235,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. */ @varargs def selectAll(stmt: String, bindValues: AnyRef*): CompletionStage[JList[Row]] = - delegate.selectAll(stmt, bindValues: _*).map(_.asJava).toJava + delegate.selectAll(stmt, bindValues: _*).map(_.asJava).asJava /** * Execute a select statement that returns one row. First you must `prepare` the @@ -248,7 +248,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. * if any. */ def selectOne(stmt: Statement[_]): CompletionStage[Optional[Row]] = - delegate.selectOne(stmt).map(_.asJava).toJava + delegate.selectOne(stmt).map(_.toJava).asJava /** * Prepare, bind and execute a select statement that returns one row. @@ -260,6 +260,6 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. 
*/ @varargs def selectOne(stmt: String, bindValues: AnyRef*): CompletionStage[Optional[Row]] = - delegate.selectOne(stmt, bindValues: _*).map(_.asJava).toJava + delegate.selectOne(stmt, bindValues: _*).map(_.toJava).asJava } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionRegistry.scala index f8be608e2..4843c8fa8 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionRegistry.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionRegistry.scala @@ -19,10 +19,9 @@ import org.apache.pekko import pekko.Done import pekko.actor.ClassicActorSystemProvider import pekko.stream.connectors.cassandra.{ scaladsl, CassandraSessionSettings } +import pekko.util.FutureConverters._ import com.datastax.oss.driver.api.core.CqlSession -import scala.compat.java8.FutureConverters._ - /** * This Cassandra session registry makes it possible to share Cassandra sessions between multiple use sites * in the same `ActorSystem` (important for the Cassandra Akka Persistence plugin where it is shared between journal, @@ -67,7 +66,7 @@ final class CassandraSessionRegistry private (delegate: scaladsl.CassandraSessio */ def sessionFor(configPath: String, init: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSession = - new CassandraSession(delegate.sessionFor(configPath, ses => init(ses).toScala)) + new CassandraSession(delegate.sessionFor(configPath, ses => init(ses).asScala)) /** * Get an existing session or start a new one with the given settings, diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala index a548fbf01..6d0fee6d2 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSession.scala @@ -20,6 +20,7 @@ import pekko.event.LoggingAdapter import pekko.stream.connectors.cassandra.{ CassandraMetricsRegistry, CassandraServerMetaData, CqlSessionProvider } import pekko.stream.scaladsl.{ Sink, Source } import pekko.stream.{ Materializer, SystemMaterializer } +import pekko.util.FutureConverters._ import pekko.util.OptionVal import pekko.{ Done, NotUsed } import com.datastax.oss.driver.api.core.CqlSession @@ -27,7 +28,6 @@ import com.datastax.oss.driver.api.core.cql._ import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException import scala.collection.immutable -import scala.compat.java8.FutureConverters._ import scala.concurrent.{ ExecutionContext, Future } import scala.util.control.NonFatal @@ -88,7 +88,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, def close(executionContext: ExecutionContext): Future[Done] = { implicit val ec: ExecutionContext = executionContext onClose() - _underlyingSession.map(_.closeAsync().toScala).map(_ => Done) + _underlyingSession.map(_.closeAsync().asScala).map(_ => Done) } /** @@ -132,7 +132,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, */ def executeDDL(stmt: String): Future[Done] = underlying().flatMap { cqlSession => - cqlSession.executeAsync(stmt).toScala.map(_ => Done) + cqlSession.executeAsync(stmt).asScala.map(_ => Done) } 
/** @@ -141,7 +141,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, */ def prepare(stmt: String): Future[PreparedStatement] = underlying().flatMap { cqlSession => - cqlSession.prepareAsync(stmt).toScala + cqlSession.prepareAsync(stmt).asScala } /** @@ -173,7 +173,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, */ def executeWrite(stmt: Statement[_]): Future[Done] = { underlying().flatMap { cqlSession => - cqlSession.executeAsync(stmt).toScala.map(_ => Done) + cqlSession.executeAsync(stmt).asScala.map(_ => Done) } } @@ -196,7 +196,7 @@ final class CassandraSession(system: pekko.actor.ActorSystem, */ @InternalApi private[pekko] def selectResultSet(stmt: Statement[_]): Future[AsyncResultSet] = { underlying().flatMap { s => - s.executeAsync(stmt).toScala + s.executeAsync(stmt).asScala } } diff --git a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala similarity index 94% rename from cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala rename to cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala index 498c66bf8..6fa00ffa1 100644 --- a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala @@ -11,7 +11,7 @@ * Copyright (C) since 2016 Lightbend Inc. */ -package docs.javadsl +package org.apache.pekko.stream.connectors.cassandra.javadsl import java.util import java.util.concurrent.CompletionStage @@ -28,11 +28,11 @@ import pekko.stream.javadsl.Sink import pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import pekko.stream.testkit.scaladsl.TestSink import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import com.datastax.oss.driver.api.core.cql.Row import scala.collection.immutable -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.Future import scala.concurrent.duration._ @@ -80,7 +80,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr // testing javadsl to prove delegation works lazy val session: javadsl.CassandraSession = javadslSessionRegistry.sessionFor(sessionSettings) - def await[T](cs: CompletionStage[T]): T = cs.toScala.futureValue + def await[T](cs: CompletionStage[T]): T = cs.asScala.futureValue "session" must { @@ -99,7 +99,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr } yield Done }.futureValue mustBe Done val sink: Sink[Row, CompletionStage[util.List[Row]]] = Sink.seq - val rows = session.select(s"SELECT * FROM $table").runWith(sink, materializer).toScala.futureValue + val rows = session.select(s"SELECT * FROM $table").runWith(sink, materializer).asScala.futureValue rows.asScala.map(_.getInt("id")) must contain theSameElementsAs data } @@ -141,7 +141,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr "selectOne empty" in { val row = await(session.selectOne(s"SELECT count FROM $dataTable WHERE partition = ? 
and key = ?", "A", "x")) - row.asScala mustBe empty + row.toScala mustBe empty } "create indexes" in { @@ -151,7 +151,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr val row = await( session.selectOne("SELECT * FROM system_schema.indexes WHERE table_name = ? ALLOW FILTERING", dataTableName)) - row.asScala.map(index => index.getString("table_name") -> index.getString("index_name")) mustBe Some( + row.toScala.map(index => index.getString("table_name") -> index.getString("index_name")) mustBe Some( dataTableName -> "count_idx") } diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala index 2f5789013..00674384a 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala @@ -20,6 +20,7 @@ import org.apache.pekko import pekko.Done import pekko.testkit.TestKitBase import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import com.datastax.oss.driver.api.core.cql._ import org.scalatest._ import org.scalatest.concurrent.{ PatienceConfiguration, ScalaFutures } @@ -28,7 +29,6 @@ import scala.collection.immutable import scala.concurrent.duration._ import scala.concurrent.{ Await, Future } import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ trait CassandraLifecycleBase { def lifecycleSession: CassandraSession @@ -68,7 +68,7 @@ trait CassandraLifecycleBase { def executeCql(statements: immutable.Seq[String]): Future[Done] = executeCql(lifecycleSession, statements) def executeCqlList(statements: java.util.List[String]): CompletionStage[Done] = - executeCql(lifecycleSession, statements.asScala.toList).toJava + executeCql(lifecycleSession, statements.asScala.toList).asJava def withSchemaMetadataDisabled(block: => Future[Done]): Future[Done] = { implicit val ec = lifecycleSession.ec diff --git a/contributor-advice.md b/contributor-advice.md index 628c4c09c..e98422fb2 100644 --- a/contributor-advice.md +++ b/contributor-advice.md @@ -45,7 +45,7 @@ Apache Pekko Connectors, same as Apache Pekko, aims to keep 100% feature parity 1. If the underlying Scala code requires an `ExecutionContext`, make the Java API take an `Executor` and use `ExecutionContext.fromExecutor(executor)` for conversion. -1. Make use of `scala-java8-compat` conversions, see [GitHub](https://github.com/scala/scala-java8-compat) (eg. `scala.compat.java8.FutureConverters` to translate Futures to `CompletionStage`s). +1. Make use of `org.apache.pekko.util` conversions (eg. `org.apache.pekko.util.FutureConverters` to translate Futures to `CompletionStage`s). 
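A minimal sketch of both guidelines above — take an `Executor` on the Java API, convert it with `ExecutionContext.fromExecutor`, and translate the resulting `Future` with `pekko.util.FutureConverters` (the names below are illustrative, not part of this patch):

    import java.util.concurrent.{ CompletionStage, Executor }
    import org.apache.pekko
    import pekko.Done
    import pekko.util.FutureConverters._
    import scala.concurrent.{ ExecutionContext, Future }

    object JavaApiExample {
      // Scala implementation the Java API delegates to.
      private def runScala()(implicit ec: ExecutionContext): Future[Done] = Future.successful(Done)

      // Java API: accept an Executor, convert it, and expose a CompletionStage.
      def run(executor: Executor): CompletionStage[Done] =
        runScala()(ExecutionContext.fromExecutor(executor)).asJava
    }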
### Overview of Scala types and their Java counterparts diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala index 14b6c36ab..866762a21 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/CouchbaseSessionRegistry.scala @@ -21,9 +21,9 @@ import pekko.dispatch.ExecutionContexts import pekko.stream.connectors.couchbase.impl.CouchbaseClusterRegistry import pekko.stream.connectors.couchbase.javadsl.{ CouchbaseSession => JCouchbaseSession } import pekko.stream.connectors.couchbase.scaladsl.CouchbaseSession +import pekko.util.FutureConverters._ import scala.annotation.tailrec -import scala.compat.java8.FutureConverters._ import scala.concurrent.{ Future, Promise } /** @@ -88,7 +88,7 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens def getSessionFor(settings: CouchbaseSessionSettings, bucketName: String): CompletionStage[JCouchbaseSession] = sessionFor(settings, bucketName) .map(_.asJava)(ExecutionContexts.parasitic) - .toJava + .asJava @tailrec private def startSession(key: SessionKey): Future[CouchbaseSession] = { diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionJavaAdapter.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionJavaAdapter.scala index 457172054..5a715e531 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionJavaAdapter.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/impl/CouchbaseSessionJavaAdapter.scala @@ -24,14 +24,14 @@ import pekko.stream.connectors.couchbase.javadsl import pekko.stream.connectors.couchbase.scaladsl import pekko.stream.javadsl.Source import pekko.{ Done, NotUsed } +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import com.couchbase.client.java.AsyncBucket import com.couchbase.client.java.document.json.JsonObject import com.couchbase.client.java.document.{ Document, JsonDocument } import com.couchbase.client.java.query.util.IndexInfo import com.couchbase.client.java.query.{ N1qlQuery, Statement } -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ duration, Future } @@ -46,18 +46,18 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co override def underlying: AsyncBucket = delegate.underlying - override def insert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.insertDoc(document).toJava + override def insert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.insertDoc(document).asJava - override def insertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.insertDoc(document).toJava + override def insertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.insertDoc(document).asJava override def insert( document: JsonDocument, writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] = - delegate.insert(document, writeSettings).toJava + delegate.insert(document, writeSettings).asJava override def insertDoc[T <: Document[_]]( document: T, - writeSettings: CouchbaseWriteSettings): CompletionStage[T] = 
delegate.insertDoc(document, writeSettings).toJava + writeSettings: CouchbaseWriteSettings): CompletionStage[T] = delegate.insertDoc(document, writeSettings).asJava override def get(id: String): CompletionStage[Optional[JsonDocument]] = futureOptToJava(delegate.get(id)) @@ -71,30 +71,30 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co def get[T <: Document[_]](id: String, timeout: Duration, documentClass: Class[T]): CompletionStage[Optional[T]] = futureOptToJava(delegate.get(id, FiniteDuration.apply(timeout.toNanos, duration.NANOSECONDS), documentClass)) - override def upsert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.upsert(document).toJava + override def upsert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.upsert(document).asJava - override def upsertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.upsertDoc(document).toJava + override def upsertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.upsertDoc(document).asJava override def upsert(document: JsonDocument, writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] = - delegate.upsert(document, writeSettings).toJava + delegate.upsert(document, writeSettings).asJava override def upsertDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): CompletionStage[T] = - delegate.upsertDoc(document, writeSettings).toJava + delegate.upsertDoc(document, writeSettings).asJava - override def replace(document: JsonDocument): CompletionStage[JsonDocument] = delegate.replace(document).toJava + override def replace(document: JsonDocument): CompletionStage[JsonDocument] = delegate.replace(document).asJava - override def replaceDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.replaceDoc(document).toJava + override def replaceDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.replaceDoc(document).asJava override def replace(document: JsonDocument, writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] = - delegate.replace(document, writeSettings).toJava + delegate.replace(document, writeSettings).asJava override def replaceDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): CompletionStage[T] = - delegate.replaceDoc(document, writeSettings).toJava + delegate.replaceDoc(document, writeSettings).asJava - override def remove(id: String): CompletionStage[Done] = delegate.remove(id).toJava + override def remove(id: String): CompletionStage[Done] = delegate.remove(id).asJava override def remove(id: String, writeSettings: CouchbaseWriteSettings): CompletionStage[Done] = - delegate.remove(id, writeSettings).toJava + delegate.remove(id, writeSettings).asJava override def streamedQuery(query: N1qlQuery): Source[JsonObject, pekko.NotUsed] = delegate.streamedQuery(query).asJava @@ -109,22 +109,22 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co futureOptToJava(delegate.singleResponseQuery(query)) override def counter(id: String, delta: Long, initial: Long): CompletionStage[Long] = - delegate.counter(id, delta, initial).toJava + delegate.counter(id, delta, initial).asJava override def counter( id: String, delta: Long, initial: Long, writeSettings: CouchbaseWriteSettings): CompletionStage[Long] = - delegate.counter(id, delta, initial, writeSettings).toJava + delegate.counter(id, delta, initial, writeSettings).asJava - override def close(): CompletionStage[Done] = delegate.close().toJava + override def close(): 
CompletionStage[Done] = delegate.close().asJava override def createIndex(indexName: String, ignoreIfExist: Boolean, fields: AnyRef*): CompletionStage[Boolean] = - delegate.createIndex(indexName, ignoreIfExist, fields).toJava + delegate.createIndex(indexName, ignoreIfExist, fields).asJava private def futureOptToJava[T](future: Future[Option[T]]): CompletionStage[Optional[T]] = - future.map(_.asJava)(ExecutionContexts.parasitic).toJava + future.map(_.toJava)(ExecutionContexts.parasitic).asJava def listIndexes(): Source[IndexInfo, NotUsed] = delegate.listIndexes().asJava diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala index 21418edfd..b907edbdf 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/javadsl/CouchbaseSession.scala @@ -24,13 +24,13 @@ import pekko.stream.connectors.couchbase.impl.CouchbaseSessionJavaAdapter import pekko.stream.connectors.couchbase.scaladsl.{ CouchbaseSession => ScalaDslCouchbaseSession } import pekko.stream.javadsl.Source import pekko.{ Done, NotUsed } +import pekko.util.FutureConverters._ import com.couchbase.client.java.document.json.JsonObject import com.couchbase.client.java.document.{ Document, JsonDocument } import com.couchbase.client.java.query.util.IndexInfo import com.couchbase.client.java.query.{ N1qlQuery, Statement } import com.couchbase.client.java.{ AsyncBucket, AsyncCluster, Bucket } -import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext /** @@ -51,7 +51,7 @@ object CouchbaseSession { .apply(settings, bucketName)(executionContext(executor)) .map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)( ExecutionContexts.parasitic) - .toJava + .asJava /** * Create a given bucket using a pre-existing cluster client, allowing for it to be shared among @@ -61,7 +61,7 @@ object CouchbaseSession { ScalaDslCouchbaseSession(client, bucketName)(executionContext(executor)) .map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)( ExecutionContexts.parasitic) - .toJava + .asJava /** * Connects to a Couchbase cluster by creating an `AsyncCluster`. 
@@ -70,7 +70,7 @@ object CouchbaseSession { def createClient(settings: CouchbaseSessionSettings, executor: Executor): CompletionStage[AsyncCluster] = ScalaDslCouchbaseSession .createClusterClient(settings)(executionContext(executor)) - .toJava + .asJava private def executionContext(executor: Executor): ExecutionContext = executor match { diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala index cb88379fa..0df164e88 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala @@ -19,6 +19,7 @@ import org.apache.pekko import pekko.actor.{ ActorSystem, ClassicActorSystemProvider } import pekko.annotation.InternalApi import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import com.couchbase.client.java.document.Document import com.couchbase.client.java.env.CouchbaseEnvironment import com.couchbase.client.java.{ PersistTo, ReplicateTo } @@ -26,7 +27,6 @@ import com.typesafe.config.Config import scala.collection.immutable import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ import scala.concurrent.duration._ /** @@ -216,7 +216,7 @@ final class CouchbaseSessionSettings private ( def withEnrichAsyncCs( value: java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]]) : CouchbaseSessionSettings = - copy(enrichAsync = (s: CouchbaseSessionSettings) => value.apply(s).toScala) + copy(enrichAsync = (s: CouchbaseSessionSettings) => value.apply(s).asScala) def withEnvironment(environment: CouchbaseEnvironment): CouchbaseSessionSettings = copy(environment = Some(environment)) diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala index 1d71ab707..cf4168c40 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala @@ -20,11 +20,11 @@ import pekko.annotation.InternalApi import pekko.discovery.Discovery import pekko.stream.connectors.couchbase.CouchbaseSessionSettings import pekko.util.JavaDurationConverters._ +import pekko.util.FunctionConverters._ +import pekko.util.FutureConverters._ import com.typesafe.config.Config import scala.collection.immutable -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration @@ -83,7 +83,7 @@ sealed class DiscoverySupport private { config: Config, system: ClassicActorSystemProvider) : java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]] = - nodes(config)(system).andThen(_.toJava).asJava + nodes(config)(system).andThen(_.asJava).asJava /** * Expects a `service` section in `pekko.connectors.couchbase.session` and reads the given service name's address diff --git a/dynamodb/src/main/scala/org/apache/pekko/stream/connectors/dynamodb/DynamoDbOp.scala b/dynamodb/src/main/scala/org/apache/pekko/stream/connectors/dynamodb/DynamoDbOp.scala index d77e749a2..9a0b900ba 100644 --- a/dynamodb/src/main/scala/org/apache/pekko/stream/connectors/dynamodb/DynamoDbOp.scala +++ 
b/dynamodb/src/main/scala/org/apache/pekko/stream/connectors/dynamodb/DynamoDbOp.scala @@ -15,6 +15,7 @@ package org.apache.pekko.stream.connectors.dynamodb import java.util.concurrent.CompletableFuture +import org.apache.pekko.util.FutureConverters._ import org.reactivestreams.Publisher import software.amazon.awssdk.core.async.SdkPublisher import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient @@ -26,7 +27,6 @@ import software.amazon.awssdk.services.dynamodb.paginators.{ ScanPublisher } -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future /** @@ -39,7 +39,7 @@ import scala.concurrent.Future */ sealed class DynamoDbOp[In <: DynamoDbRequest, Out <: DynamoDbResponse]( sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out]) { - def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] = sdkExecute(client)(request).toScala + def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] = sdkExecute(client)(request).asScala } /** diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchConnectionSettings.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchConnectionSettings.scala index 2147ba865..f953af64e 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchConnectionSettings.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ElasticsearchConnectionSettings.scala @@ -19,9 +19,10 @@ import pekko.http.scaladsl.model.HttpHeader import pekko.http.scaladsl.model.HttpHeader.ParsingResult import pekko.japi.Util import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters +import pekko.util.OptionConverters._ import javax.net.ssl.SSLContext -import scala.compat.java8.OptionConverters final class ElasticsearchConnectionSettings private ( val baseUrl: String, @@ -73,8 +74,8 @@ final class ElasticsearchConnectionSettings private ( None, OptionConverters.toScala(connectionContext.getEnabledCipherSuites).map(Util.immutableSeq(_)), OptionConverters.toScala(connectionContext.getEnabledProtocols).map(Util.immutableSeq(_)), - OptionConverters.toScala(connectionContext.getClientAuth), - OptionConverters.toScala(connectionContext.getSslParameters)) + connectionContext.getClientAuth.toScala, + connectionContext.getSslParameters.toScala) copy(connectionContext = Option(scalaContext)) } diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ReadResult.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ReadResult.scala index c50ee2c7c..3a47eb8bd 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ReadResult.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/ReadResult.scala @@ -15,8 +15,7 @@ package org.apache.pekko.stream.connectors.elasticsearch import org.apache.pekko import pekko.annotation.InternalApi - -import scala.compat.java8.OptionConverters._ +import pekko.util.OptionConverters._ /** * Stream element type emitted by Elasticsearch sources. 
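`Option`/`Optional` round-trips likewise move from `scala.compat.java8.OptionConverters` (`asJava`/`asScala`) to `pekko.util.OptionConverters` (`toJava`/`toScala`), as the `getVersion` hunk below shows. A standalone sketch of the renamed extension methods (illustrative values only):

    import java.util.Optional
    import org.apache.pekko.util.OptionConverters._

    object OptionConversionExample {
      val version: Option[Long] = Some(5L)
      val javaVersion: Optional[Long] = version.toJava      // was version.asJava
      val roundTripped: Option[Long] = javaVersion.toScala  // was javaVersion.asScala
    }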
@@ -29,7 +28,7 @@ final class ReadResult[T] @InternalApi private[elasticsearch] (val id: String, val version: Option[Long]) { /** Java API */ - def getVersion: java.util.Optional[Long] = version.asJava + def getVersion: java.util.Optional[Long] = version.toJava override def toString = s"""ReadResult(id=$id,source=$source,version=${version.getOrElse("")})""" diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala index 53507015a..f6e8aeb56 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteMessage.scala @@ -17,8 +17,7 @@ import org.apache.pekko import pekko.NotUsed import pekko.annotation.InternalApi import pekko.util.ccompat.JavaConverters._ - -import scala.compat.java8.OptionConverters._ +import pekko.util.OptionConverters._ /** * INTERNAL API @@ -153,7 +152,7 @@ final class WriteResult[T2, C2] @InternalApi private[elasticsearch] (val message val success: Boolean = error.isEmpty /** Java API: JSON structure of the Elasticsearch error. */ - def getError: java.util.Optional[String] = error.asJava + def getError: java.util.Optional[String] = error.toJava /** `reason` field value of the Elasticsearch error. */ def errorReason: Option[String] = { @@ -162,7 +161,7 @@ final class WriteResult[T2, C2] @InternalApi private[elasticsearch] (val message } /** Java API: `reason` field value from the Elasticsearch error */ - def getErrorReason: java.util.Optional[String] = errorReason.asJava + def getErrorReason: java.util.Optional[String] = errorReason.toJava override def toString = s"""WriteResult(message=$message,error=$error)""" diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/testkit/MessageFactory.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/testkit/MessageFactory.scala index 11f879cb2..0406fba07 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/testkit/MessageFactory.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/testkit/MessageFactory.scala @@ -16,8 +16,7 @@ package org.apache.pekko.stream.connectors.elasticsearch.testkit import org.apache.pekko import pekko.annotation.ApiMayChange import pekko.stream.connectors.elasticsearch.{ ReadResult, WriteMessage, WriteResult } - -import scala.compat.java8.OptionConverters._ +import pekko.util.OptionConverters._ object MessageFactory { @@ -45,7 +44,7 @@ object MessageFactory { version: java.util.Optional[Long]): ReadResult[T] = new ReadResult( id, source, - version.asScala) + version.toScala) @ApiMayChange def createWriteResult[T, PT]( message: WriteMessage[T, PT], @@ -61,6 +60,6 @@ object MessageFactory { message: WriteMessage[T, PT], error: java.util.Optional[String]): WriteResult[T, PT] = new WriteResult( message, - error.asScala) + error.toScala) } diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala index c459e6dda..d439c58d7 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala @@ -25,12 +25,11 @@ import 
pekko.stream.javadsl.Sink import pekko.util.ByteString import pekko.japi.function import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ - /** * Java API. */ @@ -76,7 +75,7 @@ object LogRotatorSink { sinkFactory: function.Function[C, Sink[ByteString, CompletionStage[R]]]) : javadsl.Sink[ByteString, CompletionStage[Done]] = { val t: C => scaladsl.Sink[ByteString, Future[R]] = path => - sinkFactory.apply(path).asScala.mapMaterializedValue(_.toScala) + sinkFactory.apply(path).asScala.mapMaterializedValue(_.asScala) new Sink( pekko.stream.connectors.file.scaladsl.LogRotatorSink .withSinkFactory(asScala[C](triggerGeneratorCreator), t) @@ -86,7 +85,7 @@ object LogRotatorSink { private def asScala[C]( f: function.Creator[function.Function[ByteString, Optional[C]]]): () => ByteString => Option[C] = () => { val fun = f.create() - elem => fun(elem).asScala + elem => fun(elem).toScala } } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala index a61c12238..a296fe3ae 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala @@ -24,12 +24,11 @@ import pekko.stream.connectors.ftp.impl._ import pekko.stream.javadsl.{ Sink, Source } import pekko.stream.{ IOResult, Materializer } import pekko.util.ByteString +import pekko.util.FunctionConverters._ import pekko.{ Done, NotUsed } import net.schmizz.sshj.SSHClient import org.apache.commons.net.ftp.{ FTPClient, FTPSClient } -import scala.compat.java8.FunctionConverters._ - @DoNotInherit sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { _: FtpSourceFactory[FtpClient, S] => @@ -300,7 +299,7 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false)) def ls(basePath: String, @@ -308,7 +307,7 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories)) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories)) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = fromPath(path, defaultSettings(host)) @@ -331,10 +330,10 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import pekko.util.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -353,8 +352,8 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import 
scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -362,16 +361,16 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import pekko.util.FunctionConverters._ + import pekko.util.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue[CompletionStage[IOResult]](func(_.toJava)) + .mapMaterializedValue[CompletionStage[IOResult]](func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } @@ -394,7 +393,7 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false)) def ls(basePath: String, @@ -402,7 +401,7 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories)) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories)) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = fromPath(path, defaultSettings(host)) @@ -425,10 +424,10 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import pekko.util.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -447,8 +446,8 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -456,16 +455,16 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import 
scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import pekko.util.FunctionConverters._ + import pekko.util.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } @@ -489,7 +488,7 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false)) def ls(basePath: String, @@ -497,7 +496,7 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories)) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories)) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = fromPath(path, defaultSettings(host)) @@ -520,10 +519,10 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import pekko.util.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -542,8 +541,8 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -551,16 +550,16 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import pekko.util.FunctionConverters._ + import pekko.util.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import pekko.util.FutureConverters._ + 
Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } diff --git a/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/Geode.java b/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/Geode.java index 62d4a166c..1a6f9a0bb 100644 --- a/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/Geode.java +++ b/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/Geode.java @@ -26,10 +26,9 @@ import org.apache.pekko.stream.javadsl.Keep; import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; +import org.apache.pekko.util.FutureConverters; import org.apache.geode.cache.client.ClientCacheFactory; -import scala.compat.java8.FutureConverters; - import java.util.concurrent.CompletionStage; /** Java API: Geode client without server event subscription. */ @@ -51,7 +50,7 @@ public Source> query(String query, PekkoPdxSerializ registerPDXSerializer(serializer, serializer.clazz()); return Source.fromGraph(new GeodeFiniteSourceStage(cache(), query)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } public Flow flow( diff --git a/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/GeodeWithPoolSubscription.java b/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/GeodeWithPoolSubscription.java index 7b175b200..4f7013b3e 100644 --- a/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/GeodeWithPoolSubscription.java +++ b/geode/src/main/java/org/apache/pekko/stream/connectors/geode/javadsl/GeodeWithPoolSubscription.java @@ -18,13 +18,12 @@ import org.apache.pekko.stream.connectors.geode.GeodeSettings; import org.apache.pekko.stream.connectors.geode.impl.stage.GeodeContinuousSourceStage; import org.apache.pekko.stream.javadsl.Source; +import org.apache.pekko.util.FutureConverters; import org.apache.geode.cache.client.ClientCacheFactory; import org.apache.geode.cache.query.CqException; import org.apache.geode.cache.query.CqQuery; import org.apache.geode.cache.query.QueryService; -import scala.compat.java8.FutureConverters; - import java.util.concurrent.CompletionStage; /** Java API: Geode client with server event subscription. Can build continuous sources. 
*/ @@ -47,7 +46,7 @@ public Source> continuousQuery( String queryName, String query, PekkoPdxSerializer serializer) { registerPDXSerializer(serializer, serializer.clazz()); return Source.fromGraph(new GeodeContinuousSourceStage(cache(), queryName, query)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } public boolean closeContinuousQuery(String name) throws CqException { diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala index 8a607a0d0..8f17097d0 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala @@ -20,10 +20,10 @@ import pekko.stream.javadsl.Source import com.google.cloud.bigquery.storage.v1.stream.ReadSession.TableReadOptions import pekko.stream.connectors.googlecloud.bigquery.storage.{ scaladsl => scstorage } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import com.google.cloud.bigquery.storage.v1.arrow.{ ArrowRecordBatch, ArrowSchema } import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters.FutureOps /** * Google BigQuery Storage Api Akka Stream operator factory using Arrow Format. @@ -67,7 +67,7 @@ object BigQueryArrowStorage { stream.asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readRecords(projectId: String, datasetId: String, @@ -109,7 +109,7 @@ object BigQueryArrowStorage { stream.map(_.asJava).asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readMerged(projectId: String, datasetId: String, @@ -151,7 +151,7 @@ object BigQueryArrowStorage { (stream._1, stream._2.asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def read( projectId: String, @@ -198,6 +198,6 @@ object BigQueryArrowStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala index 8fba3afe6..609132f41 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala @@ -18,11 +18,11 @@ import pekko.NotUsed import pekko.stream.connectors.googlecloud.bigquery.storage.{ scaladsl => scstorage, BigQueryRecord } import pekko.stream.javadsl.Source import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import com.google.cloud.bigquery.storage.v1.avro.{ AvroRows, AvroSchema } import com.google.cloud.bigquery.storage.v1.stream.ReadSession.TableReadOptions import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters.FutureOps /** * 
Google BigQuery Storage Api Akka Stream operator factory using Avro Format. @@ -66,7 +66,7 @@ object BigQueryAvroStorage { stream.asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readRecords(projectId: String, datasetId: String, @@ -108,7 +108,7 @@ object BigQueryAvroStorage { stream.map(_.asJava).asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readMerged(projectId: String, datasetId: String, @@ -147,7 +147,7 @@ object BigQueryAvroStorage { (stream._1, stream._2.asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def read(projectId: String, datasetId: String, @@ -189,6 +189,6 @@ object BigQueryAvroStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala index daeb3d134..58c653118 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala @@ -22,13 +22,13 @@ import pekko.stream.connectors.googlecloud.bigquery.storage.{ scaladsl => scstor import pekko.stream.javadsl.Source import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString +import pekko.util.FutureConverters._ import com.google.cloud.bigquery.storage.v1.DataFormat import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions import com.google.cloud.bigquery.storage.v1.storage.ReadRowsResponse import com.google.cloud.bigquery.storage.v1.stream.ReadSession import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters.FutureOps /** * Google BigQuery Storage Api Akka Stream operator factory. @@ -126,7 +126,7 @@ object BigQueryStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) /** * Create a source that contains a number of sources, one for each stream, or section of the table data. 
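In the FtpApi.scala hunks earlier, java.util.function.Predicate arguments are adapted with the asScala extension from pekko.util.FunctionConverters, replacing scala.compat.java8.FunctionConverters.asScalaFromPredicate. A minimal sketch with a made-up predicate:

    import java.util.function.Predicate
    import org.apache.pekko.util.FunctionConverters._

    val visible: Predicate[String] = (name: String) => !name.startsWith(".")
    val asScalaFn: String => Boolean = visible.asScala   // java Predicate -> Scala function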
@@ -216,6 +216,6 @@ object BigQueryStorage { scstorage.BigQueryStorage .createMergedStreams(projectId, datasetId, tableId, dataFormat, readOptions.map(_.asScala()), maxNumStreams)(um) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala index c0420c9aa..34942a587 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala @@ -39,14 +39,14 @@ import pekko.stream.{ scaladsl => ss } import pekko.util.ByteString import pekko.{ Done, NotUsed } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import java.time.Duration import java.util.concurrent.CompletionStage import java.{ lang, util } import scala.annotation.nowarn -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration.{ FiniteDuration, MILLISECONDS } /** @@ -67,7 +67,7 @@ object BigQuery extends Google { def listDatasets(maxResults: util.OptionalInt, all: util.Optional[lang.Boolean], filter: util.Map[String, String]): Source[Dataset, NotUsed] = - ScalaBigQuery.datasets(maxResults.asScala, all.asScala.map(_.booleanValue), filter.asScala.toMap).asJava + ScalaBigQuery.datasets(maxResults.toScala, all.toScala.map(_.booleanValue), filter.asScala.toMap).asJava /** * Returns the specified dataset. @@ -81,7 +81,7 @@ object BigQuery extends Google { def getDataset(datasetId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.dataset(datasetId)(system, settings).toJava + ScalaBigQuery.dataset(datasetId)(system, settings).asJava /** * Creates a new empty dataset. @@ -95,7 +95,7 @@ object BigQuery extends Google { def createDataset(datasetId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.createDataset(datasetId)(system, settings).toJava + ScalaBigQuery.createDataset(datasetId)(system, settings).asJava /** * Creates a new empty dataset. @@ -109,7 +109,7 @@ object BigQuery extends Google { def createDataset(dataset: Dataset, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.createDataset(dataset)(system, settings).toJava + ScalaBigQuery.createDataset(dataset)(system, settings).asJava /** * Deletes the dataset specified by the datasetId value. @@ -124,7 +124,7 @@ object BigQuery extends Google { deleteContents: Boolean, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Done] = - ScalaBigQuery.deleteDataset(datasetId, deleteContents)(system, settings).toJava + ScalaBigQuery.deleteDataset(datasetId, deleteContents)(system, settings).asJava /** * Lists all tables in the specified dataset. 
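Across the FTP, Geode and BigQuery storage hunks, Future-valued materialized values are exposed to the Java DSL with asJava from pekko.util.FutureConverters, replacing toJava from scala.compat.java8. A minimal sketch of the pattern, with Sink.ignore standing in for the connector stages:

    import java.util.concurrent.CompletionStage
    import scala.concurrent.Future
    import org.apache.pekko.Done
    import org.apache.pekko.stream.scaladsl.Sink
    import org.apache.pekko.util.FutureConverters._

    val scalaSink: Sink[Int, Future[Done]] = Sink.ignore
    // Future materialized value converted for the javadsl (was mapMaterializedValue(_.toJava))
    val javaSink: org.apache.pekko.stream.javadsl.Sink[Int, CompletionStage[Done]] =
      scalaSink.mapMaterializedValue(_.asJava).asJava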
@@ -135,7 +135,7 @@ object BigQuery extends Google { * @return a [[pekko.stream.javadsl.Source]] that emits each [[pekko.stream.connectors.googlecloud.bigquery.model.Table]] in the dataset and materializes a [[java.util.concurrent.CompletionStage]] containing the [[pekko.stream.connectors.googlecloud.bigquery.model.TableListResponse]] */ def listTables(datasetId: String, maxResults: util.OptionalInt): Source[Table, CompletionStage[TableListResponse]] = - ScalaBigQuery.tables(datasetId, maxResults.asScala).mapMaterializedValue(_.toJava).asJava + ScalaBigQuery.tables(datasetId, maxResults.toScala).mapMaterializedValue(_.asJava).asJava /** * Gets the specified table resource. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table. @@ -151,7 +151,7 @@ object BigQuery extends Google { tableId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Table] = - ScalaBigQuery.table(datasetId, tableId)(system, settings).toJava + ScalaBigQuery.table(datasetId, tableId)(system, settings).asJava /** * Creates a new, empty table in the dataset. @@ -181,7 +181,7 @@ object BigQuery extends Google { * @return a [[java.util.concurrent.CompletionStage]] containing the [[pekko.stream.connectors.googlecloud.bigquery.model.Table]] */ def createTable(table: Table, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Table] = - ScalaBigQuery.createTable(table)(system, settings).toJava + ScalaBigQuery.createTable(table)(system, settings).asJava /** * Deletes the specified table from the dataset. If the table contains data, all the data will be deleted. @@ -197,7 +197,7 @@ object BigQuery extends Google { tableId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Done] = - ScalaBigQuery.deleteTable(datasetId, tableId)(system, settings).toJava + ScalaBigQuery.deleteTable(datasetId, tableId)(system, settings).asJava /** * Lists the content of a table in rows. 
@@ -222,8 +222,8 @@ object BigQuery extends Google { : Source[Out, CompletionStage[TableDataListResponse[Out]]] = { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery - .tableData(datasetId, tableId, startIndex.asScala, maxResults.asScala, selectedFields.asScala.toList) - .mapMaterializedValue(_.toJava) + .tableData(datasetId, tableId, startIndex.toScala, maxResults.toScala, selectedFields.asScala.toList) + .mapMaterializedValue(_.asJava) .asJava } @@ -248,7 +248,7 @@ object BigQuery extends Google { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] ss.Flow[util.List[In]] .map(_.asScala.toList) - .to(ScalaBigQuery.insertAll[In](datasetId, tableId, retryPolicy, templateSuffix.asScala)) + .to(ScalaBigQuery.insertAll[In](datasetId, tableId, retryPolicy, templateSuffix.toScala)) .asJava[util.List[In]] } @@ -291,7 +291,7 @@ object BigQuery extends Google { useLegacySql: Boolean, unmarshaller: Unmarshaller[HttpEntity, QueryResponse[Out]]): Source[Out, CompletionStage[QueryResponse[Out]]] = { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] - ScalaBigQuery.query(query, dryRun, useLegacySql).mapMaterializedValue(_.toJava).asJava + ScalaBigQuery.query(query, dryRun, useLegacySql).mapMaterializedValue(_.asJava).asJava } /** @@ -314,7 +314,7 @@ object BigQuery extends Google { .query(query) .mapMaterializedValue { case (jobReference, queryResponse) => - Pair(jobReference.toJava, queryResponse.toJava) + Pair(jobReference.asJava, queryResponse.asJava) } .asJava } @@ -342,11 +342,11 @@ object BigQuery extends Google { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery .queryResults(jobId, - startIndex.asScala, - maxResults.asScala, - timeout.asScala.map(d => FiniteDuration(d.toMillis, MILLISECONDS)), - location.asScala) - .mapMaterializedValue(_.toJava) + startIndex.toScala, + maxResults.toScala, + timeout.toScala.map(d => FiniteDuration(d.toMillis, MILLISECONDS)), + location.toScala) + .mapMaterializedValue(_.asJava) .asJava } @@ -364,7 +364,7 @@ object BigQuery extends Google { location: util.Optional[String], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Job] = - ScalaBigQuery.job(jobId, location.asScala)(system, settings).toJava + ScalaBigQuery.job(jobId, location.toScala)(system, settings).asJava /** * Requests that a job be cancelled. @@ -380,7 +380,7 @@ object BigQuery extends Google { location: util.Optional[String], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[JobCancelResponse] = - ScalaBigQuery.cancelJob(jobId, location.asScala)(system, settings).toJava + ScalaBigQuery.cancelJob(jobId, location.toScala)(system, settings).asJava /** * Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[pekko.stream.connectors.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]]. 
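The BigQuery javadsl and model hunks also rely on pekko.util.OptionConverters for the primitive wrappers: OptionalInt/OptionalLong gain toScala, and Option[Int]/Option[Long] gain toJavaPrimitive, replacing asScala/asPrimitive from scala-java8-compat. A minimal sketch with illustrative values:

    import java.util.{ OptionalInt, OptionalLong }
    import org.apache.pekko.util.OptionConverters._

    val maxResults: Option[Int] = OptionalInt.of(100).toScala      // OptionalInt -> Option[Int] (was asScala)
    val totalRows: OptionalLong = Option(42L).toJavaPrimitive      // Option[Long] -> OptionalLong (was asPrimitive)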
@@ -416,7 +416,7 @@ object BigQuery extends Google { labels: util.Optional[util.Map[String, String]], marshaller: Marshaller[In, RequestEntity]): Flow[In, Job, NotUsed] = { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] - ScalaBigQuery.insertAllAsync[In](datasetId, tableId, labels.asScala.map(_.asScala.toMap)).asJava[In] + ScalaBigQuery.insertAllAsync[In](datasetId, tableId, labels.toScala.map(_.asScala.toMap)).asJava[In] } /** @@ -438,7 +438,7 @@ object BigQuery extends Google { unmarshaller: Unmarshaller[HttpEntity, Job]): Sink[ByteString, CompletionStage[Job]] = { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] - ScalaBigQuery.createLoadJob(job).mapMaterializedValue(_.toJava).asJava[ByteString] + ScalaBigQuery.createLoadJob(job).mapMaterializedValue(_.asJava).asJava[ByteString] } } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala index 555f6d307..a9323a7f7 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala @@ -17,11 +17,11 @@ import org.apache.pekko import pekko.stream.connectors.google.scaladsl.Paginated import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ import spray.json.{ JsonFormat, RootJsonFormat } import java.util import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ /** * Dataset resource model @@ -38,9 +38,9 @@ final case class Dataset private (datasetReference: DatasetReference, location: Option[String]) { def getDatasetReference = datasetReference - def getFriendlyName = friendlyName.asJava - def getLabels = labels.map(_.asJava).asJava - def getLocation = location.asJava + def getFriendlyName = friendlyName.toJava + def getLabels = labels.map(_.asJava).toJava + def getLocation = location.toJava def withDatasetReference(datasetReference: DatasetReference) = copy(datasetReference = datasetReference) @@ -48,15 +48,15 @@ final case class Dataset private (datasetReference: DatasetReference, def withFriendlyName(friendlyName: Option[String]) = copy(friendlyName = friendlyName) def withFriendlyName(friendlyName: util.Optional[String]) = - copy(friendlyName = friendlyName.asScala) + copy(friendlyName = friendlyName.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object Dataset { @@ -75,7 +75,7 @@ object Dataset { friendlyName: util.Optional[String], labels: util.Optional[util.Map[String, String]], location: util.Optional[String]) = - Dataset(datasetReference, friendlyName.asScala, labels.asScala.map(_.asScala.toMap), location.asScala) + Dataset(datasetReference, friendlyName.toScala, labels.toScala.map(_.asScala.toMap), location.toScala) implicit val format: RootJsonFormat[Dataset] = 
jsonFormat4(apply) } @@ -89,18 +89,18 @@ object Dataset { */ final case class DatasetReference private (datasetId: Option[String], projectId: Option[String]) { - def getDatasetId = datasetId.asJava - def getProjectId = projectId.asJava + def getDatasetId = datasetId.toJava + def getProjectId = projectId.toJava def withDatasetId(datasetId: Option[String]) = copy(datasetId = datasetId) def withDatasetId(datasetId: util.Optional[String]) = - copy(datasetId = datasetId.asScala) + copy(datasetId = datasetId.toScala) def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) } object DatasetReference { @@ -114,7 +114,7 @@ object DatasetReference { * @return a [[DatasetReference]] */ def create(datasetId: util.Optional[String], projectId: util.Optional[String]) = - DatasetReference(datasetId.asScala, projectId.asScala) + DatasetReference(datasetId.toScala, projectId.toScala) implicit val format: JsonFormat[DatasetReference] = jsonFormat2(apply) } @@ -128,18 +128,18 @@ object DatasetReference { */ final case class DatasetListResponse private (nextPageToken: Option[String], datasets: Option[Seq[Dataset]]) { - def getNextPageToken = nextPageToken.asJava - def getDatasets = datasets.map(_.asJava).asJava + def getNextPageToken = nextPageToken.toJava + def getDatasets = datasets.map(_.asJava).toJava def withNextPageToken(nextPageToken: Option[String]) = copy(nextPageToken = nextPageToken) def withNextPageToken(nextPageToken: util.Optional[String]) = - copy(nextPageToken = nextPageToken.asScala) + copy(nextPageToken = nextPageToken.toScala) def withDatasets(datasets: Option[Seq[Dataset]]) = copy(datasets = datasets) def withDatasets(datasets: util.Optional[util.List[Dataset]]) = - copy(datasets = datasets.asScala.map(_.asScala.toList)) + copy(datasets = datasets.toScala.map(_.asScala.toList)) } object DatasetListResponse { @@ -153,7 +153,7 @@ object DatasetListResponse { * @return a [[DatasetListResponse]] */ def create(nextPageToken: util.Optional[String], datasets: util.Optional[util.List[Dataset]]) = - DatasetListResponse(nextPageToken.asScala, datasets.asScala.map(_.asScala.toList)) + DatasetListResponse(nextPageToken.toScala, datasets.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[DatasetListResponse] = jsonFormat2(apply) implicit val paginated: Paginated[DatasetListResponse] = _.nextPageToken diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala index e041c2cad..af887e7b5 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala @@ -13,14 +13,15 @@ package org.apache.pekko.stream.connectors.googlecloud.bigquery.model -import org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ +import org.apache.pekko +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ +import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.JsonFormat import java.util import 
scala.annotation.nowarn -import scala.compat.java8.OptionConverters._ /** * ErrorProto model @@ -39,24 +40,24 @@ final case class ErrorProto private (reason: Option[String], location: Option[St @JsonProperty(value = "message") message: String) = this(Option(reason), Option(location), Option(message)) - def getReason = reason.asJava - def getLocation = location.asJava - def getMessage = message.asJava + def getReason = reason.toJava + def getLocation = location.toJava + def getMessage = message.toJava def withReason(reason: Option[String]) = copy(reason = reason) def withReason(reason: util.Optional[String]) = - copy(reason = reason.asScala) + copy(reason = reason.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) def withMessage(message: Option[String]) = copy(message = message) def withMessage(message: util.Optional[String]) = - copy(message = message.asScala) + copy(message = message.toScala) } object ErrorProto { @@ -71,7 +72,7 @@ object ErrorProto { * @return an [[ErrorProto]] */ def create(reason: util.Optional[String], location: util.Optional[String], message: util.Optional[String]) = - ErrorProto(reason.asScala, location.asScala, message.asScala) + ErrorProto(reason.toScala, location.toScala, message.toScala) implicit val format: JsonFormat[ErrorProto] = jsonFormat3(apply) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala index e03389f4f..4799c0fe1 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala @@ -16,6 +16,7 @@ package org.apache.pekko.stream.connectors.googlecloud.bigquery.model import org.apache.pekko import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.{ JsonFormat, RootJsonFormat } @@ -23,7 +24,6 @@ import java.util import scala.annotation.nowarn import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ /** * Job model @@ -37,24 +37,24 @@ final case class Job private (configuration: Option[JobConfiguration], jobReference: Option[JobReference], status: Option[JobStatus]) { - def getConfiguration = configuration.asJava - def getJobReference = jobReference.asJava - def getStatus = status.asJava + def getConfiguration = configuration.toJava + def getJobReference = jobReference.toJava + def getStatus = status.toJava def withConfiguration(configuration: Option[JobConfiguration]) = copy(configuration = configuration) def withConfiguration(configuration: util.Optional[JobConfiguration]) = - copy(configuration = configuration.asScala) + copy(configuration = configuration.toScala) def withJobReference(jobReference: Option[JobReference]) = copy(jobReference = jobReference) def withJobReference(jobReference: util.Optional[JobReference]) = - copy(jobReference = jobReference.asScala) + copy(jobReference = jobReference.toScala) def withStatus(status: Option[JobStatus]) = copy(status = status) def 
withStatus(status: util.Optional[JobStatus]) = - copy(status = status.asScala) + copy(status = status.toScala) } object Job { @@ -71,7 +71,7 @@ object Job { def create(configuration: util.Optional[JobConfiguration], jobReference: util.Optional[JobReference], status: util.Optional[JobStatus]) = - Job(configuration.asScala, jobReference.asScala, status.asScala) + Job(configuration.toScala, jobReference.toScala, status.toScala) implicit val format: RootJsonFormat[Job] = jsonFormat3(apply) } @@ -84,18 +84,18 @@ object Job { * @param labels the labels associated with this job */ final case class JobConfiguration private (load: Option[JobConfigurationLoad], labels: Option[Map[String, String]]) { - def getLoad = load.asJava - def getLabels = labels.asJava + def getLoad = load.toJava + def getLabels = labels.toJava def withLoad(load: Option[JobConfigurationLoad]) = copy(load = load) def withLoad(load: util.Optional[JobConfigurationLoad]) = - copy(load = load.asScala) + copy(load = load.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) } object JobConfiguration { @@ -118,7 +118,7 @@ object JobConfiguration { * @return a [[JobConfiguration]] */ def create(load: util.Optional[JobConfigurationLoad]) = - JobConfiguration(load.asScala) + JobConfiguration(load.toScala) /** * Java API: JobConfiguration model @@ -129,7 +129,7 @@ object JobConfiguration { * @return a [[JobConfiguration]] */ def create(load: util.Optional[JobConfigurationLoad], labels: util.Optional[util.Map[String, String]]) = - JobConfiguration(load.asScala, labels.asScala.map(_.asScala.toMap)) + JobConfiguration(load.toScala, labels.toScala.map(_.asScala.toMap)) implicit val format: JsonFormat[JobConfiguration] = jsonFormat2(apply) } @@ -150,36 +150,36 @@ final case class JobConfigurationLoad private (schema: Option[TableSchema], writeDisposition: Option[WriteDisposition], sourceFormat: Option[SourceFormat]) { - def getSchema = schema.asJava - def getDestinationTable = destinationTable.asJava - def getCreateDisposition = createDisposition.asJava - def getWriteDisposition = writeDisposition.asJava - def getSourceFormat = sourceFormat.asJava + def getSchema = schema.toJava + def getDestinationTable = destinationTable.toJava + def getCreateDisposition = createDisposition.toJava + def getWriteDisposition = writeDisposition.toJava + def getSourceFormat = sourceFormat.toJava def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withDestinationTable(destinationTable: Option[TableReference]) = copy(destinationTable = destinationTable) def withDestinationTable(destinationTable: util.Optional[TableReference]) = - copy(destinationTable = destinationTable.asScala) + copy(destinationTable = destinationTable.toScala) def withCreateDisposition(createDisposition: Option[CreateDisposition]) = copy(createDisposition = createDisposition) def withCreateDisposition(createDisposition: util.Optional[CreateDisposition]) = - copy(createDisposition = createDisposition.asScala) + copy(createDisposition = createDisposition.toScala) def withWriteDisposition(writeDisposition: Option[WriteDisposition]) = copy(writeDisposition = writeDisposition) def withWriteDisposition(writeDisposition: util.Optional[WriteDisposition]) = - 
copy(writeDisposition = writeDisposition.asScala) + copy(writeDisposition = writeDisposition.toScala) def withSourceFormat(sourceFormat: Option[SourceFormat]) = copy(sourceFormat = sourceFormat) def withSourceFormat(sourceFormat: util.Optional[SourceFormat]) = - copy(sourceFormat = sourceFormat.asScala) + copy(sourceFormat = sourceFormat.toScala) } object JobConfigurationLoad { @@ -201,11 +201,11 @@ object JobConfigurationLoad { writeDisposition: util.Optional[WriteDisposition], sourceFormat: util.Optional[SourceFormat]) = JobConfigurationLoad( - schema.asScala, - destinationTable.asScala, - createDisposition.asScala, - writeDisposition.asScala, - sourceFormat.asScala) + schema.toScala, + destinationTable.toScala, + createDisposition.toScala, + writeDisposition.toScala, + sourceFormat.toScala) implicit val configurationLoadFormat: JsonFormat[JobConfigurationLoad] = jsonFormat5(apply) } @@ -278,24 +278,24 @@ final case class JobReference private (projectId: Option[String], jobId: Option[ @JsonProperty("location") location: String) = this(Option(projectId), Option(jobId), Option(location)) - def getProjectId = projectId.asJava - def getJobId = jobId.asJava - def getLocation = location.asJava + def getProjectId = projectId.toJava + def getJobId = jobId.toJava + def getLocation = location.toJava def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) def withJobId(jobId: Option[String]) = copy(jobId = jobId) def withJobId(jobId: util.Optional[String]) = - copy(jobId = jobId.asScala) + copy(jobId = jobId.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object JobReference { @@ -310,7 +310,7 @@ object JobReference { * @return a [[JobReference]] */ def create(projectId: util.Optional[String], jobId: util.Optional[String], location: util.Optional[String]) = - JobReference(projectId.asScala, jobId.asScala, location.asScala) + JobReference(projectId.toScala, jobId.toScala, location.toScala) implicit val format: JsonFormat[JobReference] = jsonFormat3(apply) } @@ -325,19 +325,19 @@ object JobReference { */ final case class JobStatus private (errorResult: Option[ErrorProto], errors: Option[Seq[ErrorProto]], state: JobState) { - def getErrorResult = errorResult.asJava - def getErrors = errors.map(_.asJava).asJava + def getErrorResult = errorResult.toJava + def getErrors = errors.map(_.asJava).toJava def getState = state def withErrorResult(errorResult: Option[ErrorProto]) = copy(errorResult = errorResult) def withErrorResult(errorResult: util.Optional[ErrorProto]) = - copy(errorResult = errorResult.asScala) + copy(errorResult = errorResult.toScala) def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) def withState(state: JobState) = copy(state = state) @@ -355,7 +355,7 @@ object JobStatus { * @return a [[JobStatus]] */ def create(errorResult: util.Optional[ErrorProto], errors: util.Optional[util.List[ErrorProto]], state: JobState) = - JobStatus(errorResult.asScala, errors.asScala.map(_.asScala.toList), state) + JobStatus(errorResult.toScala, errors.toScala.map(_.asScala.toList), state) implicit val format: 
JsonFormat[JobStatus] = jsonFormat3(apply) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala index 9b940d0ac..5e66ee3cd 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala @@ -19,6 +19,7 @@ import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJ import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRootJsonReader import pekko.util.ccompat.JavaConverters._ import pekko.util.JavaDurationConverters._ +import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonIgnoreProperties, JsonProperty } import spray.json.{ RootJsonFormat, RootJsonReader } @@ -28,7 +29,6 @@ import java.{ lang, util } import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration.FiniteDuration /** @@ -58,15 +58,15 @@ final case class QueryRequest private (query: String, requestId: Option[String]) { def getQuery = query - def getMaxResults = maxResults.asPrimitive - def getDefaultDataset = defaultDataset.asJava - def getTimeout = timeout.map(_.asJava).asJava - def getDryRun = dryRun.map(lang.Boolean.valueOf).asJava - def getUseLegacySql = useLegacySql.map(lang.Boolean.valueOf).asJava - def getRequestId = requestId.asJava - def getLocation = location.asJava - def getMaximumBytesBilled = maximumBytesBilled.asJava - def getLabels = labels.asJava + def getMaxResults = maxResults.toJavaPrimitive + def getDefaultDataset = defaultDataset.toJava + def getTimeout = timeout.map(_.asJava).toJava + def getDryRun = dryRun.map(lang.Boolean.valueOf).toJava + def getUseLegacySql = useLegacySql.map(lang.Boolean.valueOf).toJava + def getRequestId = requestId.toJava + def getLocation = location.toJava + def getMaximumBytesBilled = maximumBytesBilled.toJava + def getLabels = labels.toJava def withQuery(query: String) = copy(query = query) @@ -74,47 +74,47 @@ final case class QueryRequest private (query: String, def withMaxResults(maxResults: Option[Int]) = copy(maxResults = maxResults) def withMaxResults(maxResults: util.OptionalInt) = - copy(maxResults = maxResults.asScala) + copy(maxResults = maxResults.toScala) def withDefaultDataset(defaultDataset: Option[DatasetReference]) = copy(defaultDataset = defaultDataset) def withDefaultDataset(defaultDataset: util.Optional[DatasetReference]) = - copy(defaultDataset = defaultDataset.asScala) + copy(defaultDataset = defaultDataset.toScala) def withTimeout(timeout: Option[FiniteDuration]) = copy(timeout = timeout) def withTimeout(timeout: util.Optional[Duration]) = - copy(timeout = timeout.asScala.map(_.asScala)) + copy(timeout = timeout.toScala.map(_.asScala)) def withDryRun(dryRun: Option[Boolean]) = copy(dryRun = dryRun) def withDryRun(dryRun: util.Optional[lang.Boolean]) = - copy(dryRun = dryRun.asScala.map(_.booleanValue)) + copy(dryRun = dryRun.toScala.map(_.booleanValue)) def withUseLegacySql(useLegacySql: Option[Boolean]) = copy(useLegacySql = useLegacySql) def withUseLegacySql(useLegacySql: util.Optional[lang.Boolean]) = - copy(useLegacySql = 
useLegacySql.asScala.map(_.booleanValue)) + copy(useLegacySql = useLegacySql.toScala.map(_.booleanValue)) def withRequestId(requestId: Option[String]) = copy(requestId = requestId) def withRequestId(requestId: util.Optional[String]) = - copy(requestId = requestId.asScala) + copy(requestId = requestId.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) def withMaximumBytesBilled(maximumBytesBilled: Option[Long]) = copy(maximumBytesBilled = maximumBytesBilled) def withMaximumBytesBilled(maximumBytesBilled: util.OptionalLong) = - copy(maximumBytesBilled = maximumBytesBilled.asScala) + copy(maximumBytesBilled = maximumBytesBilled.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) } object QueryRequest { @@ -150,15 +150,15 @@ object QueryRequest { requestId: util.Optional[String]) = QueryRequest( query, - maxResults.asScala, - defaultDataset.asScala, - timeout.asScala.map(_.asScala), - dryRun.asScala.map(_.booleanValue), - useLegacySql.asScala.map(_.booleanValue), + maxResults.toScala, + defaultDataset.toScala, + timeout.toScala.map(_.asScala), + dryRun.toScala.map(_.booleanValue), + useLegacySql.toScala.map(_.booleanValue), None, None, None, - requestId.asScala) + requestId.toScala) implicit val format: RootJsonFormat[QueryRequest] = jsonFormat( apply, @@ -227,21 +227,21 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], Option(cacheHit).map(_.booleanValue), Option(numDmlAffectedRows).map(_.toLong)) - def getSchema = schema.asJava + def getSchema = schema.toJava def getJobReference = jobReference - def getTotalRows = totalRows.asPrimitive - def getPageToken = pageToken.asJava - def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).asJava - def getTotalBytesProcessed = totalBytesProcessed.asPrimitive + def getTotalRows = totalRows.toJavaPrimitive + def getPageToken = pageToken.toJava + def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava + def getTotalBytesProcessed = totalBytesProcessed.toJavaPrimitive def getJobComplete = jobComplete - def getErrors = errors.map(_.asJava).asJava - def getCacheHit = cacheHit.map(lang.Boolean.valueOf).asJava - def getNumDmlAffectedRows = numDmlAffectedRows.asPrimitive + def getErrors = errors.map(_.asJava).toJava + def getCacheHit = cacheHit.map(lang.Boolean.valueOf).toJava + def getNumDmlAffectedRows = numDmlAffectedRows.toJavaPrimitive def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withJobReference(jobReference: JobReference) = copy(jobReference = jobReference) @@ -249,22 +249,22 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], def withTotalRows(totalRows: Option[Long]) = copy(totalRows = totalRows) def withTotalRows(totalRows: util.OptionalLong) = - copy(totalRows = totalRows.asScala) + copy(totalRows = totalRows.toScala) def withPageToken(pageToken: Option[String]) = copy(pageToken = pageToken) def withPageToken(pageToken: util.Optional[String]) = - copy(pageToken = pageToken.asScala) + copy(pageToken = pageToken.toScala) def withRows[S >: T](rows: 
Option[Seq[S]]) = copy(rows = rows) def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = - copy(rows = rows.asScala.map(_.asScala.toList)) + copy(rows = rows.toScala.map(_.asScala.toList)) def withTotalBytesProcessed(totalBytesProcessed: Option[Long]) = copy(totalBytesProcessed = totalBytesProcessed) def withTotalBytesProcessed(totalBytesProcessed: util.OptionalLong) = - copy(totalBytesProcessed = totalBytesProcessed.asScala) + copy(totalBytesProcessed = totalBytesProcessed.toScala) def withJobComplete(jobComplete: Boolean) = copy(jobComplete = jobComplete) @@ -272,17 +272,17 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) def withCacheHit(cacheHit: Option[Boolean]) = copy(cacheHit = cacheHit) def withCacheHit(cacheHit: util.Optional[lang.Boolean]) = - copy(cacheHit = cacheHit.asScala.map(_.booleanValue)) + copy(cacheHit = cacheHit.toScala.map(_.booleanValue)) def withNumDmlAffectedRows(numDmlAffectedRows: Option[Long]) = copy(numDmlAffectedRows = numDmlAffectedRows) def withNumDmlAffectedRows(numDmlAffectedRows: util.OptionalLong) = - copy(numDmlAffectedRows = numDmlAffectedRows.asScala) + copy(numDmlAffectedRows = numDmlAffectedRows.toScala) } object QueryResponse { @@ -316,16 +316,16 @@ object QueryResponse { cacheHit: util.Optional[lang.Boolean], numDmlAffectedRows: util.OptionalLong) = QueryResponse[T]( - schema.asScala, + schema.toScala, jobReference, - totalRows.asScala, - pageToken.asScala, - rows.asScala.map(_.asScala.toList), - totalBytesProcessed.asScala, + totalRows.toScala, + pageToken.toScala, + rows.toScala.map(_.asScala.toList), + totalBytesProcessed.toScala, jobComplete, - errors.asScala.map(_.asScala.toList), - cacheHit.asScala.map(_.booleanValue), - numDmlAffectedRows.asScala) + errors.toScala.map(_.asScala.toList), + cacheHit.toScala.map(_.booleanValue), + numDmlAffectedRows.toScala) implicit def reader[T <: AnyRef]( implicit reader: BigQueryRootJsonReader[T]): RootJsonReader[QueryResponse[T]] = { diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala index 06441bf9c..b2b0107e4 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala @@ -18,6 +18,7 @@ import pekko.stream.connectors.google.scaladsl.Paginated import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.{ BigQueryRootJsonReader, BigQueryRootJsonWriter } import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.JsonInclude.Include import com.fasterxml.jackson.annotation._ import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader, RootJsonWriter } @@ -27,7 +28,6 @@ import java.{ lang, util } import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable.Seq -import 
scala.compat.java8.OptionConverters._ /** * TableDataListResponse model @@ -49,8 +49,8 @@ final case class TableDataListResponse[+T] private (totalRows: Long, pageToken: this(totalRows.toLong, Option(pageToken), Option(rows).map(_.asScala.toList)) def getTotalRows = totalRows - def getPageToken = pageToken.asJava - def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).asJava + def getPageToken = pageToken.toJava + def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava def withTotalRows(totalRows: Long) = copy(totalRows = totalRows) @@ -58,12 +58,12 @@ final case class TableDataListResponse[+T] private (totalRows: Long, pageToken: def withPageToken(pageToken: Option[String]) = copy(pageToken = pageToken) def withPageToken(pageToken: util.Optional[String]) = - copy(pageToken = pageToken.asScala) + copy(pageToken = pageToken.toScala) def withRows[S >: T](rows: Option[Seq[S]]) = copy(rows = rows) def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = - copy(rows = rows.asScala.map(_.asScala.toList)) + copy(rows = rows.toScala.map(_.asScala.toList)) } object TableDataListResponse { @@ -79,7 +79,7 @@ object TableDataListResponse { * @return a [[TableDataListResponse]] */ def create[T](totalRows: Long, pageToken: util.Optional[String], rows: util.Optional[util.List[T]]) = - TableDataListResponse(totalRows, pageToken.asScala, rows.asScala.map(_.asScala.toList)) + TableDataListResponse(totalRows, pageToken.toScala, rows.toScala.map(_.asScala.toList)) implicit def reader[T <: AnyRef]( implicit reader: BigQueryRootJsonReader[T]): RootJsonReader[TableDataListResponse[T]] = { @@ -105,9 +105,9 @@ final case class TableDataInsertAllRequest[+T] private (skipInvalidRows: Option[ templateSuffix: Option[String], rows: Seq[Row[T]]) { - @JsonIgnore def getSkipInvalidRows = skipInvalidRows.map(lang.Boolean.valueOf).asJava - @JsonIgnore def getIgnoreUnknownValues = ignoreUnknownValues.map(lang.Boolean.valueOf).asJava - @JsonIgnore def getTemplateSuffix = templateSuffix.asJava + @JsonIgnore def getSkipInvalidRows = skipInvalidRows.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getIgnoreUnknownValues = ignoreUnknownValues.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getTemplateSuffix = templateSuffix.toJava def getRows: util.List[Row[T] @uncheckedVariance] = rows.asJava @nowarn("msg=never used") @@ -123,17 +123,17 @@ final case class TableDataInsertAllRequest[+T] private (skipInvalidRows: Option[ def withSkipInvalidRows(skipInvalidRows: Option[Boolean]) = copy(skipInvalidRows = skipInvalidRows) def withSkipInvalidRows(skipInvalidRows: util.Optional[lang.Boolean]) = - copy(skipInvalidRows = skipInvalidRows.asScala.map(_.booleanValue)) + copy(skipInvalidRows = skipInvalidRows.toScala.map(_.booleanValue)) def withIgnoreUnknownValues(ignoreUnknownValues: Option[Boolean]) = copy(ignoreUnknownValues = ignoreUnknownValues) def withIgnoreUnknownValues(ignoreUnknownValues: util.Optional[lang.Boolean]) = - copy(ignoreUnknownValues = ignoreUnknownValues.asScala.map(_.booleanValue)) + copy(ignoreUnknownValues = ignoreUnknownValues.toScala.map(_.booleanValue)) def withTemplateSuffix(templateSuffix: Option[String]) = copy(templateSuffix = templateSuffix) def withTemplateSuffix(templateSuffix: util.Optional[String]) = - copy(templateSuffix = templateSuffix.asScala) + copy(templateSuffix = templateSuffix.toScala) def withRows[S >: T](rows: Seq[Row[S]]) = copy(rows = rows) @@ -159,9 +159,9 @@ object TableDataInsertAllRequest { templateSuffix: 
util.Optional[String], rows: util.List[Row[T]]) = TableDataInsertAllRequest( - skipInvalidRows.asScala.map(_.booleanValue), - ignoreUnknownValues.asScala.map(_.booleanValue), - templateSuffix.asScala, + skipInvalidRows.toScala.map(_.booleanValue), + ignoreUnknownValues.toScala.map(_.booleanValue), + templateSuffix.toScala, rows.asScala.toList) implicit def writer[T]( @@ -182,13 +182,13 @@ object TableDataInsertAllRequest { */ final case class Row[+T] private (insertId: Option[String], json: T) { - def getInsertId = insertId.asJava + def getInsertId = insertId.toJava def getJson = json def withInsertId(insertId: Option[String]) = copy(insertId = insertId) def withInsertId(insertId: util.Optional[String]) = - copy(insertId = insertId.asScala) + copy(insertId = insertId.toScala) def withJson[U >: T](json: U): Row[U] = copy(json = json) @@ -206,7 +206,7 @@ object Row { * @return a [[Row]] */ def create[T](insertId: util.Optional[String], json: T) = - Row(insertId.asScala, json) + Row(insertId.toScala, json) } /** @@ -214,13 +214,13 @@ object Row { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ final case class TableDataInsertAllResponse private (insertErrors: Option[Seq[InsertError]]) { - def getInsertErrors = insertErrors.map(_.asJava).asJava + def getInsertErrors = insertErrors.map(_.asJava).toJava def withInsertErrors(insertErrors: Option[Seq[InsertError]]) = copy(insertErrors = insertErrors) def withInsertErrors(insertErrors: util.Optional[util.List[InsertError]]) = - copy(insertErrors = insertErrors.asScala.map(_.asScala.toList)) + copy(insertErrors = insertErrors.toScala.map(_.asScala.toList)) } object TableDataInsertAllResponse { @@ -230,7 +230,7 @@ object TableDataInsertAllResponse { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ def create(insertErrors: util.Optional[util.List[InsertError]]) = - TableDataInsertAllResponse(insertErrors.asScala.map(_.asScala.toList)) + TableDataInsertAllResponse(insertErrors.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[TableDataInsertAllResponse] = jsonFormat1(apply) @@ -242,7 +242,7 @@ object TableDataInsertAllResponse { */ final case class InsertError private (index: Int, errors: Option[Seq[ErrorProto]]) { def getIndex = index - def getErrors = errors.map(_.asJava).asJava + def getErrors = errors.map(_.asJava).toJava def withIndex(index: Int) = copy(index = index) @@ -250,7 +250,7 @@ final case class InsertError private (index: Int, errors: Option[Seq[ErrorProto] def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) } object InsertError { @@ -260,7 +260,7 @@ object InsertError { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ def create(index: Int, errors: util.Optional[util.List[ErrorProto]]) = - InsertError(index, errors.asScala.map(_.asScala.toList)) + InsertError(index, errors.toScala.map(_.asScala.toList)) implicit val format: JsonFormat[InsertError] = jsonFormat2(apply) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala 
b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala index 28644a163..9d4427ae1 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala @@ -17,6 +17,7 @@ import org.apache.pekko import pekko.stream.connectors.google.scaladsl.Paginated import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonProperty } import spray.json.{ JsonFormat, RootJsonFormat } @@ -25,7 +26,6 @@ import java.util import scala.annotation.nowarn import scala.annotation.varargs import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ /** * Table resource model @@ -44,10 +44,10 @@ final case class Table private (tableReference: TableReference, location: Option[String]) { def getTableReference = tableReference - def getLabels = labels.map(_.asJava).asJava - def getSchema = schema.asJava - def getNumRows = numRows.asPrimitive - def getLocation = location.asJava + def getLabels = labels.map(_.asJava).toJava + def getSchema = schema.toJava + def getNumRows = numRows.toJavaPrimitive + def getLocation = location.toJava def withTableReference(tableReference: TableReference) = copy(tableReference = tableReference) @@ -55,22 +55,22 @@ final case class Table private (tableReference: TableReference, def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withNumRows(numRows: Option[Long]) = copy(numRows = numRows) def withNumRows(numRows: util.OptionalLong) = - copy(numRows = numRows.asScala) + copy(numRows = numRows.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object Table { @@ -93,10 +93,10 @@ object Table { location: util.Optional[String]) = Table( tableReference, - labels.asScala.map(_.asScala.toMap), - schema.asScala, - numRows.asScala, - location.asScala) + labels.toScala.map(_.asScala.toMap), + schema.toScala, + numRows.toScala, + location.toScala) implicit val format: RootJsonFormat[Table] = jsonFormat5(apply) } @@ -111,14 +111,14 @@ object Table { */ final case class TableReference private (projectId: Option[String], datasetId: String, tableId: Option[String]) { - def getProjectId = projectId.asJava + def getProjectId = projectId.toJava def getDatasetId = datasetId def getTableId = tableId def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) def withDatasetId(datasetId: String) = copy(datasetId = datasetId) @@ -126,7 +126,7 @@ final case class TableReference private (projectId: Option[String], datasetId: S def withTableId(tableId: Option[String]) = copy(tableId = 
tableId) def withTableId(tableId: util.Optional[String]) = - copy(tableId = tableId.asScala) + copy(tableId = tableId.toScala) } object TableReference { @@ -141,7 +141,7 @@ object TableReference { * @return a [[TableReference]] */ def create(projectId: util.Optional[String], datasetId: String, tableId: util.Optional[String]) = - TableReference(projectId.asScala, datasetId, tableId.asScala) + TableReference(projectId.toScala, datasetId, tableId.toScala) implicit val referenceFormat: JsonFormat[TableReference] = jsonFormat3(apply) } @@ -219,8 +219,8 @@ final case class TableFieldSchema private (name: String, def getName = name def getType = `type` - def getMode = mode.asJava - def getFields = fields.map(_.asJava).asJava + def getMode = mode.toJava + def getFields = fields.map(_.asJava).toJava def withName(name: String) = copy(name = name) @@ -231,12 +231,12 @@ final case class TableFieldSchema private (name: String, def withMode(mode: Option[TableFieldSchemaMode]) = copy(mode = mode) def withMode(mode: util.Optional[TableFieldSchemaMode]) = - copy(mode = mode.asScala) + copy(mode = mode.toScala) def withFields(fields: Option[Seq[TableFieldSchema]]) = copy(fields = fields) def withFields(fields: util.Optional[util.List[TableFieldSchema]]) = - copy(fields = fields.asScala.map(_.asScala.toList)) + copy(fields = fields.toScala.map(_.asScala.toList)) } object TableFieldSchema { @@ -255,7 +255,7 @@ object TableFieldSchema { `type`: TableFieldSchemaType, mode: util.Optional[TableFieldSchemaMode], fields: util.Optional[util.List[TableFieldSchema]]) = - TableFieldSchema(name, `type`, mode.asScala, fields.asScala.map(_.asScala.toList)) + TableFieldSchema(name, `type`, mode.toScala, fields.toScala.map(_.asScala.toList)) /** * A field in TableSchema @@ -272,7 +272,7 @@ object TableFieldSchema { `type`: TableFieldSchemaType, mode: util.Optional[TableFieldSchemaMode], fields: TableFieldSchema*) = - TableFieldSchema(name, `type`, mode.asScala, if (fields.nonEmpty) Some(fields.toList) else None) + TableFieldSchema(name, `type`, mode.toScala, if (fields.nonEmpty) Some(fields.toList) else None) implicit val format: JsonFormat[TableFieldSchema] = lazyFormat( jsonFormat(apply, "name", "type", "mode", "fields")) @@ -360,16 +360,16 @@ final case class TableListResponse private (nextPageToken: Option[String], tables: Option[Seq[Table]], totalItems: Option[Int]) { - def getNextPageToken = nextPageToken.asJava - def getTables = tables.map(_.asJava).asJava - def getTotalItems = totalItems.asPrimitive + def getNextPageToken = nextPageToken.toJava + def getTables = tables.map(_.asJava).toJava + def getTotalItems = totalItems.toJavaPrimitive def withNextPageToken(nextPageToken: util.Optional[String]) = - copy(nextPageToken = nextPageToken.asScala) + copy(nextPageToken = nextPageToken.toScala) def withTables(tables: util.Optional[util.List[Table]]) = - copy(tables = tables.asScala.map(_.asScala.toList)) + copy(tables = tables.toScala.map(_.asScala.toList)) def withTotalItems(totalItems: util.OptionalInt) = - copy(totalItems = totalItems.asScala) + copy(totalItems = totalItems.toScala) } object TableListResponse { @@ -386,7 +386,7 @@ object TableListResponse { def createTableListResponse(nextPageToken: util.Optional[String], tables: util.Optional[util.List[Table]], totalItems: util.OptionalInt) = - TableListResponse(nextPageToken.asScala, tables.asScala.map(_.asScala.toList), totalItems.asScala) + TableListResponse(nextPageToken.toScala, tables.toScala.map(_.asScala.toList), totalItems.toScala) implicit val format: 
RootJsonFormat[TableListResponse] = jsonFormat3(apply) implicit val paginated: Paginated[TableListResponse] = _.nextPageToken diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/javadsl/GooglePubSub.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/javadsl/GooglePubSub.scala index 58e92d43f..fa88138ca 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/javadsl/GooglePubSub.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/javadsl/GooglePubSub.scala @@ -20,9 +20,9 @@ import pekko.stream.connectors.googlecloud.pubsub.{ AcknowledgeRequest, PubSubCo import pekko.stream.javadsl.{ Flow, FlowWithContext, Sink, Source } import pekko.{ Done, NotUsed } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future /** @@ -114,6 +114,6 @@ object GooglePubSub { def acknowledge(subscription: String, config: PubSubConfig): Sink[AcknowledgeRequest, CompletionStage[Done]] = GPubSub .acknowledge(subscription, config) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Owner.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Owner.scala index 322bf64e3..022ccaef2 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Owner.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/Owner.scala @@ -14,14 +14,14 @@ package org.apache.pekko.stream.connectors.googlecloud.storage import java.util.Optional -import scala.compat.java8.OptionConverters._ +import org.apache.pekko.util.OptionConverters._ final class Owner private (entity: String, entityId: Option[String]) { def withEntity(entity: String): Owner = copy(entity = entity) def withEntityId(entityId: String): Owner = copy(entityId = Option(entityId)) /** Java API */ - def getEntityId: Optional[String] = entityId.asJava + def getEntityId: Optional[String] = entityId.toJava private def copy(entity: String = entity, entityId: Option[String] = entityId): Owner = new Owner(entity, entityId) @@ -38,5 +38,5 @@ object Owner { /** Java API */ def create(entity: String, entityId: Optional[String]): Owner = - new Owner(entity, entityId.asScala) + new Owner(entity, entityId.toScala) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala index 2810fed9a..323e11e65 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/StorageObject.scala @@ -19,7 +19,7 @@ import java.util.Optional import org.apache.pekko import pekko.http.scaladsl.model.ContentType import pekko.util.ccompat.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import pekko.util.OptionConverters._ /** * Represents an object within Google Cloud Storage. 
@@ -98,24 +98,24 @@ final class StorageObject private ( /** Java API */ def getContentType: pekko.http.javadsl.model.ContentType = contentType.asInstanceOf[ContentType] - def getTimeDeleted: Optional[OffsetDateTime] = timeDeleted.asJava - def getContentDisposition: Optional[String] = contentDisposition.asJava - def getContentEncoding: Optional[String] = contentEncoding.asJava - def getContentLanguage: Optional[String] = contentLanguage.asJava - def getTemporaryHold: Optional[Boolean] = temporaryHold.asJava - def getEventBasedHold: Optional[Boolean] = eventBasedHold.asJava - def getRetentionExpirationTime: Optional[OffsetDateTime] = retentionExpirationTime.asJava - def getCacheControl: Optional[String] = cacheControl.asJava - def getMetadata: Optional[java.util.Map[String, String]] = metadata.map(_.asJava).asJava - def getComponentCount: Optional[Integer] = componentCount.map(Int.box).asJava - def getKmsKeyName: Optional[String] = kmsKeyName.asJava - def getCustomerEncryption: Optional[CustomerEncryption] = customerEncryption.asJava - def getOwner: Optional[Owner] = owner.asJava - def getAcl: Optional[java.util.List[ObjectAccessControls]] = acl.map(_.asJava).asJava - def getCustomTime: Optional[OffsetDateTime] = customTime.asJava - def getMaybeMd5Hash: Optional[String] = maybeMd5Hash.asJava - def getMaybeCrc32c: Optional[String] = maybeCrc32c.asJava - def getMaybeStorageClass: Optional[String] = maybeStorageClass.asJava + def getTimeDeleted: Optional[OffsetDateTime] = timeDeleted.toJava + def getContentDisposition: Optional[String] = contentDisposition.toJava + def getContentEncoding: Optional[String] = contentEncoding.toJava + def getContentLanguage: Optional[String] = contentLanguage.toJava + def getTemporaryHold: Optional[Boolean] = temporaryHold.toJava + def getEventBasedHold: Optional[Boolean] = eventBasedHold.toJava + def getRetentionExpirationTime: Optional[OffsetDateTime] = retentionExpirationTime.toJava + def getCacheControl: Optional[String] = cacheControl.toJava + def getMetadata: Optional[java.util.Map[String, String]] = metadata.map(_.asJava).toJava + def getComponentCount: Optional[Integer] = componentCount.map(Int.box).toJava + def getKmsKeyName: Optional[String] = kmsKeyName.toJava + def getCustomerEncryption: Optional[CustomerEncryption] = customerEncryption.toJava + def getOwner: Optional[Owner] = owner.toJava + def getAcl: Optional[java.util.List[ObjectAccessControls]] = acl.map(_.asJava).toJava + def getCustomTime: Optional[OffsetDateTime] = customTime.toJava + def getMaybeMd5Hash: Optional[String] = maybeMd5Hash.toJava + def getMaybeCrc32c: Optional[String] = maybeCrc32c.toJava + def getMaybeStorageClass: Optional[String] = maybeStorageClass.toJava def withKind(value: String): StorageObject = copy(kind = value) def withId(value: String): StorageObject = copy(id = value) @@ -466,23 +466,23 @@ object StorageObject { selfLink, updated, timeCreated, - timeDeleted.asScala, + timeDeleted.toScala, storageClass, Option(storageClass), - contentDisposition.asScala, - contentEncoding.asScala, - contentLanguage.asScala, + contentDisposition.toScala, + contentEncoding.toScala, + contentLanguage.toScala, metageneration, - temporaryHold.asScala, - eventBasedHold.asScala, - retentionExpirationTime.asScala, + temporaryHold.toScala, + eventBasedHold.toScala, + retentionExpirationTime.toScala, timeStorageClassUpdated, - cacheControl.asScala, - customTime.asScala, - metadata.asScala, - componentCount.asScala, - kmsKeyName.asScala, - customerEncryption.asScala, - owner.asScala, - 
acl.asScala) + cacheControl.toScala, + customTime.toScala, + metadata.toScala, + componentCount.toScala, + kmsKeyName.toScala, + customerEncryption.toScala, + owner.toScala, + acl.toScala) } diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala index a814fa0d9..38c9bb46c 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/javadsl/GCStorage.scala @@ -26,11 +26,10 @@ import pekko.stream.javadsl.{ RunnableGraph, Sink, Source } import pekko.stream.{ Attributes, Materializer } import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString +import pekko.util.FutureConverters._ +import pekko.util.OptionConverters._ import pekko.{ Done, NotUsed } -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ - /** * Java API * @@ -55,8 +54,8 @@ object GCStorage { attributes: Attributes): CompletionStage[Optional[Bucket]] = GCStorageStream .getBucket(bucketName)(materializer, attributes) - .map(_.asJava)(materializer.executionContext) - .toJava + .map(_.toJava)(materializer.executionContext) + .asJava /** * Gets information on a bucket @@ -71,8 +70,8 @@ object GCStorage { def getBucket(bucketName: String, system: ActorSystem, attributes: Attributes): CompletionStage[Optional[Bucket]] = GCStorageStream .getBucket(bucketName)(Materializer.matFromSystem(system), attributes) - .map(_.asJava)(system.dispatcher) - .toJava + .map(_.toJava)(system.dispatcher) + .asJava /** * Gets information on a bucket @@ -83,7 +82,7 @@ object GCStorage { * @return a `Source` containing `Bucket` if it exists */ def getBucketSource(bucketName: String): Source[Optional[Bucket], NotUsed] = - GCStorageStream.getBucketSource(bucketName).map(_.asJava).asJava + GCStorageStream.getBucketSource(bucketName).map(_.toJava).asJava /** * Creates a new bucket @@ -100,7 +99,7 @@ object GCStorage { location: String, materializer: Materializer, attributes: Attributes): CompletionStage[Bucket] = - GCStorageStream.createBucket(bucketName, location)(materializer, attributes).toJava + GCStorageStream.createBucket(bucketName, location)(materializer, attributes).asJava /** * Creates a new bucket @@ -115,7 +114,7 @@ object GCStorage { location: String, system: ActorSystem, attributes: Attributes): CompletionStage[Bucket] = - GCStorageStream.createBucket(bucketName, location)(Materializer.matFromSystem(system), attributes).toJava + GCStorageStream.createBucket(bucketName, location)(Materializer.matFromSystem(system), attributes).asJava /** * Creates a new bucket @@ -140,7 +139,7 @@ object GCStorage { */ @deprecated("pass in the actor system instead of the materializer", "3.0.0") def deleteBucket(bucketName: String, materializer: Materializer, attributes: Attributes): CompletionStage[Done] = - GCStorageStream.deleteBucket(bucketName)(materializer, attributes).toJava + GCStorageStream.deleteBucket(bucketName)(materializer, attributes).asJava /** * Deletes bucket @@ -151,7 +150,7 @@ object GCStorage { * @return a `CompletionStage` of `Done` on successful deletion */ def deleteBucket(bucketName: String, system: ActorSystem, attributes: Attributes): CompletionStage[Done] = - GCStorageStream.deleteBucket(bucketName)(Materializer.matFromSystem(system), 
attributes).toJava + GCStorageStream.deleteBucket(bucketName)(Materializer.matFromSystem(system), attributes).asJava /** * Deletes bucket @@ -174,7 +173,7 @@ object GCStorage { * @return a `Source` containing `StorageObject` if it exists */ def getObject(bucket: String, objectName: String): Source[Optional[StorageObject], NotUsed] = - GCStorageStream.getObject(bucket, objectName).map(_.asJava).asJava + GCStorageStream.getObject(bucket, objectName).map(_.toJava).asJava /** * Get storage object @@ -187,7 +186,7 @@ object GCStorage { * @return a `Source` containing `StorageObject` if it exists */ def getObject(bucket: String, objectName: String, generation: Long): Source[Optional[StorageObject], NotUsed] = - GCStorageStream.getObject(bucket, objectName, Option(generation)).map(_.asJava).asJava + GCStorageStream.getObject(bucket, objectName, Option(generation)).map(_.toJava).asJava /** * Deletes object in bucket @@ -261,7 +260,7 @@ object GCStorage { * Otherwise [[scala.Option Option]] will contain a source of object's data. */ def download(bucket: String, objectName: String): Source[Optional[Source[ByteString, NotUsed]], NotUsed] = - GCStorageStream.download(bucket, objectName).map(_.map(_.asJava).asJava).asJava + GCStorageStream.download(bucket, objectName).map(_.map(_.asJava).toJava).asJava /** * Downloads object from bucket. @@ -277,7 +276,7 @@ object GCStorage { def download(bucket: String, objectName: String, generation: Long): Source[Optional[Source[ByteString, NotUsed]], NotUsed] = - GCStorageStream.download(bucket, objectName, Option(generation)).map(_.map(_.asJava).asJava).asJava + GCStorageStream.download(bucket, objectName, Option(generation)).map(_.map(_.asJava).toJava).asJava /** * Uploads object, use this for small files and `resumableUpload` for big ones @@ -349,7 +348,7 @@ object GCStorage { chunkSize, metadata.map(_.asScala.toMap)) .asJava - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } /** @@ -368,7 +367,7 @@ object GCStorage { GCStorageStream .resumableUpload(bucket, objectName, contentType.asInstanceOf[ScalaContentType]) .asJava - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Rewrites object to wanted destination by making multiple requests. @@ -388,7 +387,7 @@ object GCStorage { RunnableGraph .fromGraph( GCStorageStream.rewrite(sourceBucket, sourceObjectName, destinationBucket, destinationObjectName)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Deletes folder and its content. 
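The GCStorage and BigQuery hunks above all apply the same substitution: the scala-java8-compat extension methods give way to the pekko.util shims, which are assumed here to mirror scala.jdk.FutureConverters and scala.jdk.OptionConverters (Future gains .asJava/.asScala, while Option and Optional gain .toJava/.toScala). A minimal sketch of that mapping follows, illustrative only and not part of the applied changes; the object name is hypothetical.

import java.util.Optional
import java.util.concurrent.CompletionStage
import scala.concurrent.Future
import org.apache.pekko.util.FutureConverters._ // replaces scala.compat.java8.FutureConverters._
import org.apache.pekko.util.OptionConverters._ // replaces scala.compat.java8.OptionConverters._

object ConverterMigrationSketch {
  // Future -> CompletionStage: the old `.toJava` becomes `.asJava`
  def complete(result: Future[String]): CompletionStage[String] = result.asJava

  // Option -> Optional: the old `.asJava` becomes `.toJava`
  def toOptional(name: Option[String]): Optional[String] = name.toJava

  // Optional -> Option: the old `.asScala` becomes `.toScala`
  def toOption(name: Optional[String]): Option[String] = name.toScala
}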
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala index 384d9a8f6..c0e98af1e 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala @@ -26,11 +26,11 @@ import pekko.stream.connectors.google.auth.Credentials import pekko.stream.connectors.google.http.{ ForwardProxyHttpsContext, ForwardProxyPoolSettings } import pekko.stream.connectors.google.implicits._ import pekko.util.JavaDurationConverters._ +import pekko.util.OptionConverters._ import com.typesafe.config.Config import java.time import java.util.Optional -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration._ object GoogleSettings { @@ -131,7 +131,7 @@ object RequestSettings { chunkSize: Int, retrySettings: RetrySettings, forwardProxy: Optional[ForwardProxy]) = - apply(userIp.asScala, quotaUser.asScala, prettyPrint, chunkSize, retrySettings, forwardProxy.asScala) + apply(userIp.toScala, quotaUser.toScala, prettyPrint, chunkSize, retrySettings, forwardProxy.toScala) } final case class RequestSettings @InternalApi private ( @@ -146,8 +146,8 @@ final case class RequestSettings @InternalApi private ( (uploadChunkSize >= (256 * 1024)) & (uploadChunkSize % (256 * 1024) == 0), "Chunk size must be a multiple of 256 KiB") - def getUserIp = userIp.asJava - def getQuotaUser = quotaUser.asJava + def getUserIp = userIp.toJava + def getQuotaUser = quotaUser.toJava def getPrettyPrint = prettyPrint def getUploadChunkSize = uploadChunkSize def getRetrySettings = retrySettings @@ -156,11 +156,11 @@ final case class RequestSettings @InternalApi private ( def withUserIp(userIp: Option[String]) = copy(userIp = userIp) def withUserIp(userIp: Optional[String]) = - copy(userIp = userIp.asScala) + copy(userIp = userIp.toScala) def withQuotaUser(quotaUser: Option[String]) = copy(quotaUser = quotaUser) def withQuotaUser(quotaUser: Optional[String]) = - copy(quotaUser = quotaUser.asScala) + copy(quotaUser = quotaUser.toScala) def withPrettyPrint(prettyPrint: Boolean) = copy(prettyPrint = prettyPrint) def withUploadChunkSize(uploadChunkSize: Int) = @@ -170,7 +170,7 @@ final case class RequestSettings @InternalApi private ( def withForwardProxy(forwardProxy: Option[ForwardProxy]) = copy(forwardProxy = forwardProxy) def withForwardProxy(forwardProxy: Optional[ForwardProxy]) = - copy(forwardProxy = forwardProxy.asScala) + copy(forwardProxy = forwardProxy.toScala) // Cache query string private[google] def query = @@ -262,7 +262,7 @@ object ForwardProxy { credentials: Optional[jm.headers.BasicHttpCredentials], trustPem: Optional[String], system: ClassicActorSystemProvider) = - apply(scheme, host, port, credentials.asScala.map(_.asInstanceOf[BasicHttpCredentials]), trustPem.asScala)(system) + apply(scheme, host, port, credentials.toScala.map(_.asInstanceOf[BasicHttpCredentials]), trustPem.toScala)(system) def create(connectionContext: jh.HttpConnectionContext, poolSettings: jh.settings.ConnectionPoolSettings) = apply(connectionContext.asInstanceOf[HttpsConnectionContext], poolSettings.asInstanceOf[ConnectionPoolSettings]) diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala index a06078e98..8000dcd7d 100644 --- 
a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala @@ -23,9 +23,9 @@ import pekko.stream.connectors.google.GoogleSettings import pekko.stream.connectors.google.scaladsl.{ Google => ScalaGoogle } import pekko.stream.javadsl.{ Sink, Source } import pekko.util.ByteString +import pekko.util.FutureConverters._ import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters._ import scala.language.implicitConversions /** @@ -47,7 +47,7 @@ private[connectors] trait Google { unmarshaller: Unmarshaller[HttpResponse, T], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[T] = - ScalaGoogle.singleRequest[T](request)(unmarshaller.asScala, system, settings).toJava + ScalaGoogle.singleRequest[T](request)(unmarshaller.asScala, system, settings).asJava /** * Makes a series of requests to page through a resource. Authentication is handled automatically. @@ -75,7 +75,7 @@ private[connectors] trait Google { final def resumableUpload[Out]( request: HttpRequest, unmarshaller: Unmarshaller[HttpResponse, Out]): Sink[ByteString, CompletionStage[Out]] = - ScalaGoogle.resumableUpload(request)(unmarshaller.asScala).mapMaterializedValue(_.toJava).asJava + ScalaGoogle.resumableUpload(request)(unmarshaller.asScala).mapMaterializedValue(_.asJava).asJava private implicit def requestAsScala(request: HttpRequest): sm.HttpRequest = request.asInstanceOf[sm.HttpRequest] } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Paginated.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Paginated.scala index 2e892104c..2c5104acd 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Paginated.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Paginated.scala @@ -13,10 +13,11 @@ package org.apache.pekko.stream.connectors.google.javadsl -import org.apache.pekko.stream.connectors.google.scaladsl +import org.apache.pekko +import pekko.stream.connectors.google.scaladsl +import pekko.util.OptionConverters._ import java.util -import scala.compat.java8.OptionConverters._ /** * Models a paginated resource @@ -31,6 +32,6 @@ trait Paginated { private[connectors] object Paginated { implicit object paginatedIsPaginated extends scaladsl.Paginated[Paginated] { - override def pageToken(paginated: Paginated): Option[String] = paginated.getPageToken.asScala + override def pageToken(paginated: Paginated): Option[String] = paginated.getPageToken.toScala } } diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala index 8f0e95c47..2afbd2e80 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/FcmSettings.scala @@ -17,10 +17,10 @@ import org.apache.pekko import pekko.actor.ClassicActorSystemProvider import pekko.http.scaladsl.model.headers.BasicHttpCredentials import pekko.stream.connectors.google.{ ForwardProxy => CommonForwardProxy } +import pekko.util.OptionConverters._ import java.util.Objects import scala.annotation.nowarn -import scala.compat.java8.OptionConverters._ @nowarn("msg=deprecated") final class 
FcmSettings private ( @@ -242,9 +242,9 @@ final class ForwardProxy private (val host: String, def getPort: Int = port /** Java API */ - def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.asJava + def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.toJava - def getForwardProxyTrustPem: java.util.Optional[ForwardProxyTrustPem] = trustPem.asJava + def getForwardProxyTrustPem: java.util.Optional[ForwardProxyTrustPem] = trustPem.toJava def withHost(host: String) = copy(host = host) def withPort(port: Int) = copy(port = port) diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala index 6ac0c875e..076ecf5eb 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/HTableSettings.scala @@ -18,8 +18,9 @@ import org.apache.hadoop.hbase.TableName import org.apache.hadoop.hbase.client.Mutation import scala.collection.immutable -import org.apache.pekko.util.ccompat.JavaConverters._ -import scala.compat.java8.FunctionConverters._ +import org.apache.pekko +import pekko.util.ccompat.JavaConverters._ +import pekko.util.FunctionConverters._ final class HTableSettings[T] private (val conf: Configuration, val tableName: TableName, diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/javadsl/HTableStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/javadsl/HTableStage.scala index 23a3bef72..50671f900 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/javadsl/HTableStage.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/javadsl/HTableStage.scala @@ -20,10 +20,9 @@ import pekko.stream.connectors.hbase.HTableSettings import pekko.stream.connectors.hbase.impl.{ HBaseFlowStage, HBaseSourceStage } import pekko.stream.scaladsl.{ Flow, Keep, Sink, Source } import pekko.{ Done, NotUsed } +import pekko.util.FutureConverters._ import org.apache.hadoop.hbase.client.{ Result, Scan } -import scala.compat.java8.FutureConverters._ - object HTableStage { /** @@ -31,7 +30,7 @@ object HTableStage { * HBase mutations for every incoming element are derived from the converter functions defined in the config. */ def sink[A](config: HTableSettings[A]): pekko.stream.javadsl.Sink[A, CompletionStage[Done]] = - Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).mapMaterializedValue(toJava).asJava + Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).mapMaterializedValue(asJava).asJava /** * Writes incoming element to HBase. 
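The HBase hunks above lean on two further points: pekko.util.FunctionConverters._ takes over from scala.compat.java8.FunctionConverters._ for java.util.function interop, and mapMaterializedValue(asJava) assumes the converter is also exposed as a plain function on the FutureConverters object (the FutureConverters::asJava method references in the Pravega sources and the static asJava calls in the IronMQ test further below rely on the same function). A short sketch under those assumptions, with hypothetical names, not part of the applied changes.

import java.util.concurrent.CompletionStage
import scala.concurrent.Future
import org.apache.pekko.Done
import org.apache.pekko.util.FunctionConverters._ // replaces scala.compat.java8.FunctionConverters._
import org.apache.pekko.util.FutureConverters.asJava // plain function form, no lambda needed

object CompatUsageSketch {
  // java.util.function.Function <-> Scala function, mirroring scala.jdk.FunctionConverters
  val javaLength: java.util.function.Function[String, Integer] =
    ((s: String) => Integer.valueOf(s.length)).asJava
  val scalaLength: String => Integer = javaLength.asScala

  // Eta-expanded converter, as in `.mapMaterializedValue(asJava)` above
  val liftDone: Future[Done] => CompletionStage[Done] = asJava
}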
diff --git a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala index 0cf4d3073..cb1548630 100644 --- a/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala +++ b/hdfs/src/main/scala/org/apache/pekko/stream/connectors/hdfs/javadsl/HdfsSource.scala @@ -21,12 +21,11 @@ import pekko.japi.Pair import pekko.stream.connectors.hdfs.scaladsl.{ HdfsSource => ScalaHdfsSource } import pekko.stream.{ javadsl, IOResult } import pekko.util.ByteString +import pekko.util.FutureConverters._ import org.apache.hadoop.fs.{ FileSystem, Path } import org.apache.hadoop.io.Writable import org.apache.hadoop.io.compress.CompressionCodec -import scala.compat.java8.FutureConverters._ - object HdfsSource { /** @@ -38,7 +37,7 @@ object HdfsSource { def data( fs: FileSystem, path: Path): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.data(fs, path).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.data(fs, path).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -51,7 +50,7 @@ object HdfsSource { fs: FileSystem, path: Path, chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.data(fs, path, chunkSize).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.data(fs, path, chunkSize).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -64,7 +63,7 @@ object HdfsSource { fs: FileSystem, path: Path, codec: CompressionCodec): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.compressed(fs, path, codec).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.compressed(fs, path, codec).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -79,7 +78,7 @@ object HdfsSource { path: Path, codec: CompressionCodec, chunkSize: Int = 8192): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.compressed(fs, path, codec, chunkSize).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.compressed(fs, path, codec, chunkSize).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[(K, V]] diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala index 60e8f7b4c..6f0650b17 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/IronMqProducer.scala @@ -22,8 +22,7 @@ import pekko.stream.connectors.ironmq._ import pekko.stream.javadsl.{ Flow, Sink } import pekko.stream.scaladsl.{ Flow => ScalaFlow, Keep } import pekko.stream.connectors.ironmq.scaladsl.{ IronMqProducer => ScalaIronMqProducer } - -import scala.compat.java8.FutureConverters +import pekko.util.FutureConverters object IronMqProducer { @@ -37,7 +36,7 @@ object IronMqProducer { .asInstanceOf[Flow[PushMessage, String, NotUsed]] def sink(queueName: String, settings: IronMqSettings): Sink[PushMessage, CompletionStage[Done]] = - ScalaIronMqProducer.sink(queueName, settings).mapMaterializedValue(_.toJava).asJava + ScalaIronMqProducer.sink(queueName, settings).mapMaterializedValue(_.asJava).asJava def atLeastOnceFlow[C1 <: Committable]( queueName: 
String, diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala index 11b073a1f..d4f97216b 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/javadsl/package.scala @@ -22,7 +22,7 @@ import pekko.stream.connectors.ironmq.scaladsl.{ CommittableMessage => ScalaCommittableMessage } -import scala.compat.java8.FutureConverters +import pekko.util.FutureConverters import scala.concurrent.Future /** @@ -35,26 +35,26 @@ package object javadsl { private[javadsl] implicit class RichScalaCommittableMessage(cm: ScalaCommittableMessage) { def asJava: CommittableMessage = new CommittableMessage { override def message: Message = cm.message - override def commit(): CompletionStage[Done] = cm.commit().toJava + override def commit(): CompletionStage[Done] = cm.commit().asJava } } private[javadsl] implicit class RichScalaCommittable(cm: ScalaCommittable) { def asJava: Committable = new Committable { - override def commit(): CompletionStage[Done] = cm.commit().toJava + override def commit(): CompletionStage[Done] = cm.commit().asJava } } private[javadsl] implicit class RichCommittableMessage(cm: CommittableMessage) { def asScala: ScalaCommittableMessage = new ScalaCommittableMessage { override def message: Message = cm.message - override def commit(): Future[Done] = cm.commit().toScala + override def commit(): Future[Done] = cm.commit().asScala } } private[javadsl] implicit class RichCommittable(cm: Committable) { def asScala: ScalaCommittable = new ScalaCommittable { - override def commit(): Future[Done] = cm.commit().toScala + override def commit(): Future[Done] = cm.commit().asScala } } diff --git a/ironmq/src/test/java/org/apache/pekko/stream/connectors/ironmq/UnitTest.java b/ironmq/src/test/java/org/apache/pekko/stream/connectors/ironmq/UnitTest.java index 2b41d2c54..89c1bafcc 100644 --- a/ironmq/src/test/java/org/apache/pekko/stream/connectors/ironmq/UnitTest.java +++ b/ironmq/src/test/java/org/apache/pekko/stream/connectors/ironmq/UnitTest.java @@ -29,8 +29,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.apache.pekko.util.FutureConverters.*; import static scala.collection.JavaConverters.*; -import static scala.compat.java8.FutureConverters.*; public abstract class UnitTest { @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); @@ -87,7 +87,7 @@ protected String givenQueue() { protected String givenQueue(String name) { try { - return toJava(ironMqClient.createQueue(name, system.dispatcher())) + return asJava(ironMqClient.createQueue(name, system.dispatcher())) .toCompletableFuture() .get(); } catch (Exception e) { @@ -103,7 +103,7 @@ protected Message.Ids givenMessages(String queueName, int n) { .collect(Collectors.toList()); try { - return toJava( + return asJava( ironMqClient.pushMessages( queueName, asScalaBufferConverter(messages).asScala().toSeq(), diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Destinations.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Destinations.scala index a87f3aae9..b4002ce9e 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Destinations.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/Destinations.scala @@ -14,7 +14,7 @@ package org.apache.pekko.stream.connectors.jms import 
javax.jms -import scala.compat.java8.FunctionConverters._ +import org.apache.pekko.util.FunctionConverters._ /** * A destination to send to/receive from. diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala index 43dd507b8..c8671f113 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/JmsMessages.scala @@ -20,7 +20,7 @@ import pekko.NotUsed import pekko.stream.connectors.jms.impl.JmsMessageReader._ import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString -import scala.compat.java8.OptionConverters._ +import pekko.util.OptionConverters._ /** * Base interface for messages handled by JmsProducers. Sub-classes support pass-through or use [[pekko.NotUsed]] as type for pass-through. @@ -47,7 +47,7 @@ sealed trait JmsEnvelope[+PassThrough] { /** * Java API. */ - def getDestination: java.util.Optional[Destination] = destination.asJava + def getDestination: java.util.Optional[Destination] = destination.toJava def passThrough: PassThrough diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/javadsl/JmsProducer.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/javadsl/JmsProducer.scala index 1244720be..739814a5d 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/javadsl/JmsProducer.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/javadsl/JmsProducer.scala @@ -21,10 +21,9 @@ import pekko.stream.javadsl.Source import pekko.stream.scaladsl.{ Flow, Keep } import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString +import pekko.util.FutureConverters._ import pekko.{ Done, NotUsed } -import scala.compat.java8.FutureConverters - /** * Factory methods to create JMS producers. 
*/ @@ -59,7 +58,7 @@ object JmsProducer { settings: JmsProducerSettings): pekko.stream.javadsl.Sink[R, CompletionStage[Done]] = pekko.stream.connectors.jms.scaladsl.JmsProducer .sink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -68,7 +67,7 @@ object JmsProducer { def textSink(settings: JmsProducerSettings): pekko.stream.javadsl.Sink[String, CompletionStage[Done]] = pekko.stream.connectors.jms.scaladsl.JmsProducer .textSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -78,7 +77,7 @@ object JmsProducer { settings: JmsProducerSettings): pekko.stream.javadsl.Sink[Array[Byte], CompletionStage[Done]] = pekko.stream.connectors.jms.scaladsl.JmsProducer .bytesSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -88,7 +87,7 @@ object JmsProducer { settings: JmsProducerSettings): pekko.stream.javadsl.Sink[ByteString, CompletionStage[Done]] = pekko.stream.connectors.jms.scaladsl.JmsProducer .byteStringSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -100,7 +99,7 @@ object JmsProducer { val scalaSink = pekko.stream.connectors.jms.scaladsl.JmsProducer .mapSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) val javaToScalaConversion = Flow.fromFunction((javaMap: java.util.Map[String, Any]) => javaMap.asScala.toMap) javaToScalaConversion.toMat(scalaSink)(Keep.right).asJava @@ -113,7 +112,7 @@ object JmsProducer { settings: JmsProducerSettings): pekko.stream.javadsl.Sink[java.io.Serializable, CompletionStage[Done]] = pekko.stream.connectors.jms.scaladsl.JmsProducer .objectSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava private def toProducerStatus(scalaStatus: scaladsl.JmsProducerStatus) = new JmsProducerStatus { diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala index 436667fde..56db231cd 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/impl/KinesisSourceStage.scala @@ -22,14 +22,13 @@ import pekko.stream.stage.GraphStageLogic.StageActor import pekko.stream.stage._ import pekko.stream.{ Attributes, Outlet, SourceShape } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.kinesis.KinesisAsyncClient import software.amazon.awssdk.services.kinesis.model._ import scala.collection.mutable import scala.util.{ Failure, Success, Try } -import scala.compat.java8.FutureConverters._ - /** * Internal API */ @@ -154,7 +153,7 @@ private[kinesis] class KinesisSourceStage(shardSettings: ShardSettings, amazonKi amazonKinesisAsync .getRecords( GetRecordsRequest.builder().limit(limit).shardIterator(currentShardIterator).build()) - .toScala + .asScala .onComplete(handleGetRecords)(parasitic) private[this] def requestShardIterator(): Unit = { @@ -177,7 +176,7 @@ private[kinesis] class KinesisSourceStage(shardSettings: ShardSettings, amazonKi amazonKinesisAsync .getShardIterator(request) - .toScala + .asScala .onComplete(handleGetShardIterator)(parasitic) } diff --git 
a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisSchedulerSource.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisSchedulerSource.scala index 97d2bf2d5..6e0fb87ea 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisSchedulerSource.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisSchedulerSource.scala @@ -19,11 +19,11 @@ import org.apache.pekko import pekko.NotUsed import pekko.stream.connectors.kinesis.{ scaladsl, CommittableRecord, _ } import pekko.stream.javadsl.{ Flow, Sink, Source, SubSource } +import pekko.util.FutureConverters._ import software.amazon.kinesis.coordinator.Scheduler import software.amazon.kinesis.processor.ShardRecordProcessorFactory import software.amazon.kinesis.retrieval.KinesisClientRecord -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future object KinesisSchedulerSource { @@ -37,7 +37,7 @@ object KinesisSchedulerSource { settings: KinesisSchedulerSourceSettings): Source[CommittableRecord, CompletionStage[Scheduler]] = scaladsl.KinesisSchedulerSource .apply(schedulerBuilder.build, settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava def createSharded( diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala index 0f4781a29..44b0b18ff 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala @@ -24,6 +24,7 @@ import pekko.stream.connectors.kinesis.KinesisErrors.FailurePublishingRecords import pekko.stream.scaladsl.{ Flow, FlowWithContext } import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString +import pekko.util.FutureConverters._ import software.amazon.awssdk.core.SdkBytes import software.amazon.awssdk.services.kinesis.KinesisAsyncClient import software.amazon.awssdk.services.kinesis.model.{ @@ -35,7 +36,6 @@ import software.amazon.awssdk.services.kinesis.model.{ import scala.collection.immutable.Queue import scala.concurrent.duration._ -import scala.compat.java8.FutureConverters._ import scala.util.{ Failure, Success, Try } object KinesisFlow { @@ -95,7 +95,7 @@ object KinesisFlow { kinesisClient .putRecords( PutRecordsRequest.builder().streamName(streamName).records(entries.map(_._1).asJavaCollection).build) - .toScala + .asScala .transform(handleBatch(entries))(parasitic)) .mapConcat(identity) } diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala index d8a97b485..e7790fd5a 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala @@ -20,6 +20,7 @@ import pekko.stream.ThrottleMode import pekko.stream.connectors.kinesisfirehose.KinesisFirehoseFlowSettings import pekko.stream.connectors.kinesisfirehose.KinesisFirehoseErrors.FailurePublishingRecords import pekko.stream.scaladsl.Flow +import pekko.util.FutureConverters._ import pekko.util.ccompat.JavaConverters._ import 
software.amazon.awssdk.services.firehose.FirehoseAsyncClient import software.amazon.awssdk.services.firehose.model.{ PutRecordBatchRequest, PutRecordBatchResponseEntry, Record } @@ -27,8 +28,6 @@ import software.amazon.awssdk.services.firehose.model.{ PutRecordBatchRequest, P import scala.collection.immutable.Queue import scala.concurrent.duration._ -import scala.compat.java8.FutureConverters._ - object KinesisFirehoseFlow { def apply(streamName: String, settings: KinesisFirehoseFlowSettings = KinesisFirehoseFlowSettings.Defaults)( implicit kinesisClient: FirehoseAsyncClient): Flow[Record, PutRecordBatchResponseEntry, NotUsed] = @@ -44,7 +43,7 @@ object KinesisFirehoseFlow { .deliveryStreamName(streamName) .records(records.asJavaCollection) .build()) - .toScala + .asScala .transform(identity, FailurePublishingRecords(_))(parasitic)) .mapConcat(_.requestResponses.asScala.toIndexedSeq) diff --git a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduTableSettings.scala b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduTableSettings.scala index c58d0940b..01bb0792c 100644 --- a/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduTableSettings.scala +++ b/kudu/src/main/scala/org/apache/pekko/stream/connectors/kudu/KuduTableSettings.scala @@ -14,7 +14,7 @@ package org.apache.pekko.stream.connectors.kudu import org.apache.kudu.client.PartialRow -import scala.compat.java8.FunctionConverters._ +import org.apache.pekko.util.FunctionConverters._ final class KuduTableSettings[T] private (val tableName: String, val schema: org.apache.kudu.Schema, diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala index 6c01c9799..3e01d5c2f 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala @@ -25,10 +25,10 @@ import pekko.japi.{ Pair => AkkaPair } import pekko.stream.connectors.mqtt.streaming.Connect.ProtocolLevel import pekko.util.ccompat.JavaConverters._ import pekko.util.{ ByteIterator, ByteString, ByteStringBuilder } +import pekko.util.OptionConverters._ import scala.annotation.tailrec import scala.concurrent.duration._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.{ ExecutionContext, Promise } /** @@ -1111,13 +1111,13 @@ final case class Command[A](command: ControlPacket, completed: Option[Promise[Do def this(command: ControlPacket, completed: Optional[CompletionStage[Done]], carry: Optional[A]) = this( command, - completed.asScala.map { f => + completed.toScala.map { f => val p = Promise[Done]() p.future .foreach(f.toCompletableFuture.complete)(ExecutionContext.fromExecutorService(ForkJoinPool.commonPool())) p }, - carry.asScala) + carry.toScala) /** * Send a command to an MQTT session @@ -1175,7 +1175,7 @@ final case class Event[A](event: ControlPacket, carry: Option[A]) { * @param carry The data to carry though */ def this(event: ControlPacket, carry: Optional[A]) = - this(event, carry.asScala) + this(event, carry.toScala) /** * Receive an event from a MQTT session diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttFlow.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttFlow.scala index b645a91fe..b6bd74190 100644 --- 
a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttFlow.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttFlow.scala @@ -19,8 +19,7 @@ import org.apache.pekko import pekko.Done import pekko.stream.connectors.mqtt._ import pekko.stream.javadsl.Flow - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ /** * Java API @@ -43,7 +42,7 @@ object MqttFlow { defaultQos: MqttQoS): Flow[MqttMessage, MqttMessage, CompletionStage[Done]] = scaladsl.MqttFlow .atMostOnce(settings, subscriptions, bufferSize, defaultQos) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -62,7 +61,7 @@ object MqttFlow { scaladsl.MqttFlow .atLeastOnce(settings, subscriptions, bufferSize, defaultQos) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -81,6 +80,6 @@ object MqttFlow { scaladsl.MqttFlow .atLeastOnceWithAckForJava(settings, subscriptions, bufferSize, defaultQos) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttMessageWithAck.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttMessageWithAck.scala index 7a1542214..84035bb2b 100644 --- a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttMessageWithAck.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttMessageWithAck.scala @@ -20,8 +20,7 @@ import pekko.Done import pekko.annotation.InternalApi import pekko.stream.connectors.mqtt.MqttMessage import pekko.stream.connectors.mqtt.scaladsl - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ /** * Java API @@ -50,7 +49,7 @@ sealed trait MqttMessageWithAck { private[javadsl] object MqttMessageWithAck { def toJava(cm: scaladsl.MqttMessageWithAck): MqttMessageWithAck = new MqttMessageWithAck { override val message: MqttMessage = cm.message - override def ack(): CompletionStage[Done] = cm.ack().toJava + override def ack(): CompletionStage[Done] = cm.ack().asJava } } diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttSource.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttSource.scala index 6128f3f79..d7b48ebbb 100644 --- a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttSource.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/javadsl/MqttSource.scala @@ -19,8 +19,7 @@ import org.apache.pekko import pekko.Done import pekko.stream.connectors.mqtt._ import pekko.stream.javadsl.Source - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ /** * Java API @@ -41,7 +40,7 @@ object MqttSource { bufferSize: Int): Source[MqttMessage, CompletionStage[Done]] = scaladsl.MqttSource .atMostOnce(settings, subscriptions, bufferSize) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -57,6 +56,6 @@ object MqttSource { scaladsl.MqttSource .atLeastOnce(settings, subscriptions, bufferSize) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/scaladsl/MqttMessageWithAck.scala b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/scaladsl/MqttMessageWithAck.scala index 5712d304c..fb2ecb625 100644 --- 
a/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/scaladsl/MqttMessageWithAck.scala +++ b/mqtt/src/main/scala/org/apache/pekko/stream/connectors/mqtt/scaladsl/MqttMessageWithAck.scala @@ -17,8 +17,8 @@ import org.apache.pekko import pekko.Done import pekko.annotation.InternalApi import pekko.stream.connectors.mqtt.MqttMessage +import pekko.util.FutureConverters._ -import scala.compat.java8.FutureConverters import scala.concurrent.Future /** @@ -54,6 +54,6 @@ private[scaladsl] object MqttMessageWithAck { * * @return a future indicating, if the acknowledge reached MQTT */ - override def ack(): Future[Done] = FutureConverters.toScala(e.ack()) + override def ack(): Future[Done] = e.ack().asScala } } diff --git a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/Pravega.java b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/Pravega.java index aad97f9d9..7596b28d0 100644 --- a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/Pravega.java +++ b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/Pravega.java @@ -25,10 +25,10 @@ import org.apache.pekko.stream.javadsl.Keep; import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; +import org.apache.pekko.util.FutureConverters; import io.pravega.client.ClientConfig; import io.pravega.client.stream.ReaderGroup; -import scala.compat.java8.FutureConverters; import java.util.concurrent.CompletionStage; import org.apache.pekko.stream.connectors.pravega.impl.PravegaFlow; @@ -49,7 +49,7 @@ public static PravegaReaderGroupManager readerGroup(String scope, ClientConfig c public static Source, CompletionStage> source( ReaderGroup readerGroup, ReaderSettings readerSettings) { return Source.fromGraph(new PravegaSource<>(readerGroup, readerSettings)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } /** Incoming messages are written to Pravega stream and emitted unchanged. */ diff --git a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java index 72744f3b6..5b2b0f009 100644 --- a/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java +++ b/pravega/src/main/java/org/apache/pekko/stream/connectors/pravega/javadsl/PravegaTable.java @@ -25,8 +25,8 @@ import org.apache.pekko.stream.javadsl.Keep; import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; +import org.apache.pekko.util.FutureConverters; -import scala.compat.java8.FutureConverters; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.function.Function; @@ -34,8 +34,7 @@ import io.pravega.client.tables.TableKey; -import scala.compat.java8.functionConverterImpls.FromJavaFunction; -import scala.compat.java8.OptionConverters; +import org.apache.pekko.util.OptionConverters; import scala.Option; @@ -68,7 +67,7 @@ public static Sink, CompletionStage> sink( public static Source, CompletionStage> source( String scope, String tableName, TableReaderSettings tableReaderSettings) { return Source.fromGraph(new PravegaTableSource(scope, tableName, tableReaderSettings)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } /** A flow from key to and Option[value]. 
*/ public static Flow, NotUsed> readFlow( diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala index 4df8206d0..e0c65f27e 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaFlow.scala @@ -19,10 +19,10 @@ import pekko.annotation.InternalApi import pekko.event.Logging import pekko.stream.stage.{ AsyncCallback, GraphStage, GraphStageLogic, InHandler, OutHandler, StageLogging } import pekko.stream.{ Attributes, FlowShape, Inlet, Outlet } +import pekko.util.FutureConverters._ import io.pravega.client.stream.EventStreamWriter import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import pekko.stream.connectors.pravega.WriterSettings @@ -65,7 +65,7 @@ import scala.util.{ Failure, Success, Try } } def handleSentEvent(completableFuture: CompletableFuture[Void], msg: A): Unit = - completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => semaphore.acquire() asyncPushback.invoke((t, msg)) } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala index 5b722f42f..612f41f06 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableReadFlow.scala @@ -22,9 +22,9 @@ import pekko.stream.stage.{ AsyncCallback, GraphStage, GraphStageLogic, InHandle import pekko.stream.{ Attributes, FlowShape, Inlet, Outlet } import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import pekko.stream.connectors.pravega.TableSettings +import pekko.util.FutureConverters._ import scala.util.{ Failure, Try } import io.pravega.client.tables.KeyValueTable @@ -92,7 +92,7 @@ import scala.util.Success } def handleSentEvent(completableFuture: CompletableFuture[TableEntry]): Unit = - completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => asyncMessageSendCallback.invokeWithFeedback(t) } diff --git a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala index 0542a1310..2a8cd95d8 100644 --- a/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala +++ b/pravega/src/main/scala/org/apache/pekko/stream/connectors/pravega/impl/PravegaTableWriteFlow.scala @@ -19,9 +19,9 @@ import pekko.annotation.InternalApi import pekko.event.Logging import pekko.stream.stage.{ AsyncCallback, GraphStage, GraphStageLogic, InHandler, OutHandler, StageLogging } import pekko.stream.{ Attributes, FlowShape, Inlet, Outlet } +import pekko.util.FutureConverters._ import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import pekko.stream.connectors.pravega.TableWriterSettings @@ -92,7 +92,7 @@ import io.pravega.client.tables.TableKey } def handleSentEvent(completableFuture: CompletableFuture[Version], msg: 
KVPair): Unit = - completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => asyncPushback.invokeWithFeedback((t, msg)) } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index f262215c0..8c1cd2342 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -18,7 +18,7 @@ object Dependencies { val Scala212 = "2.12.17" val ScalaVersions = Seq(Scala213, Scala212) - val PekkoVersion = "0.0.0+26629-321c5721-SNAPSHOT" + val PekkoVersion = "0.0.0+26656-898c6970-SNAPSHOT" val AkkaBinaryVersion = "2.6" val InfluxDBJavaVersion = "2.15" @@ -27,7 +27,7 @@ object Dependencies { val AwsSpiPekkoHttpVersion = "0.1.0-SNAPSHOT" // Sync with plugins.sbt val PekkoGrpcBinaryVersion = "2.1" - val PekkoHttpVersion = "0.0.0+4338-c98db6bd-SNAPSHOT" + val PekkoHttpVersion = "0.0.0+4345-fa1cb9cb-SNAPSHOT" val AkkaHttpBinaryVersion = "10.2" val ScalaTestVersion = "3.2.11" val TestContainersScalaTestVersion = "0.40.3" @@ -134,7 +134,7 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test, - "org.apache.pekko" %% "pekko-connectors-kafka" % "0.0.0+1715-d36a8e1a-SNAPSHOT" % Test, + "org.apache.pekko" %% "pekko-connectors-kafka" % "0.0.0+1728-e2c660ef-SNAPSHOT" % Test, "junit" % "junit" % "4.13.2" % Test, // Eclipse Public License 1.0 "org.scalatest" %% "scalatest" % "3.2.11" % Test // ApacheV2 )) diff --git a/project/plugins.sbt b/project/plugins.sbt index 35dcf000a..021cd26bb 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -26,6 +26,6 @@ addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") // Pekko gRPC -- sync with version in Dependencies.scala:19 -addSbtPlugin("org.apache.pekko" % "sbt-pekko-grpc" % "0.0.0-13-b6210989-SNAPSHOT") +addSbtPlugin("org.apache.pekko" % "sbt-pekko-grpc" % "0.0.0-28-e757bd9d-SNAPSHOT") // templating addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/javadsl/Reference.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/javadsl/Reference.scala index 864b5e943..bb368f02a 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/javadsl/Reference.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/javadsl/Reference.scala @@ -36,8 +36,8 @@ object Reference { * Call Scala source factory and convert both: the source and materialized values to Java classes. 
*/ def source(settings: SourceSettings): Source[ReferenceReadResult, CompletionStage[Done]] = { - import scala.compat.java8.FutureConverters._ - scaladsl.Reference.source(settings).mapMaterializedValue(_.toJava).asJava + import org.apache.pekko.util.FutureConverters._ + scaladsl.Reference.source(settings).mapMaterializedValue(_.asJava).asJava } /** diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/model.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/model.scala index 7044f66cf..8e5e7ce9d 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/model.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/model.scala @@ -20,9 +20,9 @@ import pekko.annotation.InternalApi import pekko.util.ccompat._ import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString +import pekko.util.OptionConverters._ import scala.collection.immutable -import scala.compat.java8.OptionConverters._ import scala.util.{ Success, Try } /** @@ -54,7 +54,7 @@ final class ReferenceReadResult @InternalApi private[reference] ( * otherwise return empty Optional. */ def getBytesRead(): OptionalInt = - bytesRead.toOption.asPrimitive + bytesRead.toOption.toJavaPrimitive /** * Java API @@ -63,7 +63,7 @@ final class ReferenceReadResult @InternalApi private[reference] ( * otherwise return empty Optional. */ def getBytesReadFailure(): Optional[Throwable] = - bytesRead.failed.toOption.asJava + bytesRead.failed.toOption.toJava override def toString: String = s"ReferenceReadMessage(data=$data, bytesRead=$bytesRead)" diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/settings.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/settings.scala index 6f4c725b2..38957e847 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/settings.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/settings.scala @@ -13,13 +13,14 @@ package org.apache.pekko.stream.connectors.reference +import org.apache.pekko.util.FunctionConverters._ +import org.apache.pekko.util.OptionConverters._ + // rename Java imports if the name clashes with the Scala name import java.time.{ Duration => JavaDuration } import java.util.Optional import java.util.function.Predicate -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration._ /** @@ -68,7 +69,7 @@ final class SourceSettings private ( * A separate getter for Java API that converts Scala Option to Java Optional. 
*/ def getTraceId(): Optional[String] = - traceId.asJava + traceId.toJava /** * Java API diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala index ee649545e..68db4612f 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala @@ -31,9 +31,8 @@ import pekko.stream.connectors.s3.impl._ import pekko.stream.javadsl.{ RunnableGraph, Sink, Source } import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString - -import scala.compat.java8.OptionConverters._ -import scala.compat.java8.FutureConverters._ +import pekko.util.OptionConverters._ +import pekko.util.FutureConverters._ /** * Java API @@ -338,7 +337,7 @@ object S3 { (pekko.stream.scaladsl.Source[ByteString, M], ObjectMetadata)], NotUsed]) : Source[Optional[JPair[Source[ByteString, M], ObjectMetadata]], NotUsed] = download.map { - _.map { case (stream, meta) => JPair(stream.asJava, meta) }.asJava + _.map { case (stream, meta) => JPair(stream.asJava, meta) }.toJava }.asJava /** @@ -663,7 +662,7 @@ object S3 { prefix: Optional[String], s3Headers: S3Headers): Source[ListBucketResultContents, NotUsed] = S3Stream - .listBucket(bucket, prefix.asScala, s3Headers) + .listBucket(bucket, prefix.toScala, s3Headers) .asJava /** @@ -683,7 +682,7 @@ object S3 { delimiter: String, prefix: Optional[String]): Source[ListBucketResultContents, NotUsed] = scaladsl.S3 - .listBucket(bucket, delimiter, prefix.asScala) + .listBucket(bucket, delimiter, prefix.toScala) .asJava /** @@ -705,7 +704,7 @@ object S3 { prefix: Optional[String], s3Headers: S3Headers): Source[ListBucketResultContents, NotUsed] = scaladsl.S3 - .listBucket(bucket, delimiter, prefix.asScala, s3Headers) + .listBucket(bucket, delimiter, prefix.toScala, s3Headers) .asJava /** @@ -730,7 +729,7 @@ object S3 { s3Headers: S3Headers): Source[pekko.japi.Pair[java.util.List[ListBucketResultContents], java.util.List[ ListBucketResultCommonPrefixes]], NotUsed] = S3Stream - .listBucketAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listBucketAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (contents, commonPrefixes) => pekko.japi.Pair(contents.asJava, commonPrefixes.asJava) } @@ -757,7 +756,7 @@ object S3 { def listMultipartUpload(bucket: String, prefix: Optional[String], s3Headers: S3Headers): Source[ListMultipartUploadResultUploads, NotUsed] = - scaladsl.S3.listMultipartUpload(bucket, prefix.asScala, s3Headers).asJava + scaladsl.S3.listMultipartUpload(bucket, prefix.toScala, s3Headers).asJava /** * Will return in progress or aborted multipart uploads with optional prefix and delimiter. This will automatically page through all keys with the given parameters. 
@@ -775,7 +774,7 @@ object S3 { s3Headers: S3Headers = S3Headers.empty): Source[pekko.japi.Pair[java.util.List[ListMultipartUploadResultUploads], java.util.List[CommonPrefixes]], NotUsed] = S3Stream - .listMultipartUploadAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listMultipartUploadAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (uploads, commonPrefixes) => pekko.japi.Pair(uploads.asJava, commonPrefixes.asJava) } @@ -821,7 +820,7 @@ object S3 { : Source[pekko.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[ DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersions(bucket, prefix.asScala, S3Headers.empty) + .listObjectVersions(bucket, prefix.toScala, S3Headers.empty) .map { case (versions, markers) => pekko.japi.Pair(versions.asJava, markers.asJava) } @@ -842,7 +841,7 @@ object S3 { s3Headers: S3Headers): Source[pekko.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[ DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersions(bucket, prefix.asScala, s3Headers) + .listObjectVersions(bucket, prefix.toScala, s3Headers) .map { case (versions, markers) => pekko.japi.Pair(versions.asJava, markers.asJava) } @@ -865,7 +864,7 @@ object S3 { s3Headers: S3Headers): Source[pekko.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[ DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersionsAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listObjectVersionsAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (versions, markers, _) => pekko.japi.Pair(versions.asJava, markers.asJava) @@ -910,7 +909,7 @@ object S3 { s3Headers: S3Headers): Sink[ByteString, CompletionStage[MultipartUploadResult]] = S3Stream .multipartUpload(S3Location(bucket, key), contentType.asInstanceOf[ScalaContentType], s3Headers) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -976,7 +975,7 @@ object S3 { contentType.asInstanceOf[ScalaContentType], s3Headers) .contramap[JPair[ByteString, C]](_.toScala) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -1061,7 +1060,7 @@ object S3 { previousParts.asScala.toList, contentType.asInstanceOf[ScalaContentType], s3Headers) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } @@ -1150,7 +1149,7 @@ object S3 { contentType.asInstanceOf[ScalaContentType], s3Headers) .contramap[JPair[ByteString, C]](_.toScala) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } @@ -1267,7 +1266,7 @@ object S3 { .completeMultipartUpload(S3Location(bucket, key), uploadId, parts.asScala.toList, s3Headers)( SystemMaterializer(system).materializer, attributes) - .toJava + .asJava /** * Copy a S3 Object by making multiple requests. @@ -1298,7 +1297,7 @@ object S3 { contentType.asInstanceOf[ScalaContentType], s3Headers) } - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Copy a S3 Object by making multiple requests. 
@@ -1420,7 +1419,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[Done] = - S3Stream.makeBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.makeBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Create new bucket with a given name @@ -1471,7 +1470,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[Done] = - S3Stream.deleteBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.deleteBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Delete bucket with a given name @@ -1533,7 +1532,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[BucketAccess] = - S3Stream.checkIfBucketExists(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.checkIfBucketExists(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Checks whether the bucket exits and user has rights to perform ListBucket operation @@ -1599,7 +1598,7 @@ object S3 { implicit system: ClassicActorSystemProvider, attributes: Attributes): CompletionStage[Done] = S3Stream .deleteUpload(bucketName, key, uploadId, s3Headers)(SystemMaterializer(system).materializer, attributes) - .toJava + .asJava /** * Delete all existing parts for a specific upload @@ -1663,7 +1662,7 @@ object S3 { implicit system: ClassicActorSystemProvider, attributes: Attributes): CompletionStage[Done] = S3Stream .putBucketVersioning(bucketName, bucketVersioning, s3Headers)(SystemMaterializer(system).materializer, attributes) - .toJava + .asJava /** * Sets the versioning state of an existing bucket. 
@@ -1718,7 +1717,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[BucketVersioningResult] = - S3Stream.getBucketVersioning(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.getBucketVersioning(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Gets the versioning of an existing bucket diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala index aab05f161..ff6752619 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/model.scala @@ -20,11 +20,11 @@ import pekko.http.scaladsl.model.{ DateTime, HttpHeader, IllegalUriException, Ur import pekko.http.scaladsl.model.headers._ import pekko.stream.connectors.s3.AccessStyle.PathAccessStyle import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ import scala.annotation.nowarn import scala.collection.immutable.Seq import scala.collection.immutable -import scala.compat.java8.OptionConverters._ final class MFA private (val serialNumber: String, val tokenCode: String) { @@ -138,7 +138,7 @@ final class BucketVersioningResult private (val status: Option[BucketVersioningS def getStatus: Option[BucketVersioningStatus] = status /** Java API */ - def getMfaDelete: java.util.Optional[Boolean] = mfaDelete.asJava + def getMfaDelete: java.util.Optional[Boolean] = mfaDelete.toJava def withStatus(value: BucketVersioningStatus): BucketVersioningResult = copy(status = Some(value)) @@ -188,17 +188,17 @@ object BucketVersioningResult { def create(status: java.util.Optional[BucketVersioningStatus], mfaDelete: java.util.Optional[Boolean]) : BucketVersioningResult = - apply(status.asScala, mfaDelete.asScala) + apply(status.toScala, mfaDelete.toScala) } final class BucketVersioning private (val status: Option[BucketVersioningStatus], val mfaDelete: Option[MFAStatus]) { /** Java API */ - def getStatus: java.util.Optional[BucketVersioningStatus] = status.asJava + def getStatus: java.util.Optional[BucketVersioningStatus] = status.toJava /** Java API */ - def getMfaDelete: java.util.Optional[MFAStatus] = mfaDelete.asJava + def getMfaDelete: java.util.Optional[MFAStatus] = mfaDelete.toJava /** Java API */ def getBucketVersioningEnabled: Boolean = bucketVersioningEnabled @@ -247,7 +247,7 @@ object BucketVersioning { def create( status: java.util.Optional[BucketVersioningStatus], mfaDelete: java.util.Optional[MFAStatus]): BucketVersioning = - apply(status.asScala, mfaDelete.asScala) + apply(status.toScala, mfaDelete.toScala) } @@ -440,7 +440,7 @@ final class MultipartUploadResult private ( def getETag: String = eTag /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withLocation(value: Uri): MultipartUploadResult = copy(location = value) def withBucket(value: String): MultipartUploadResult = copy(bucket = value) @@ -524,7 +524,7 @@ object MultipartUploadResult { bucket, key, eTag, - versionId.asScala) + versionId.toScala) } final class AWSIdentity private (val id: String, val displayName: String) { @@ -585,10 +585,10 @@ final class ListMultipartUploadResultUploads private (val key: String, def getUploadId: String = uploadId /** Java API */ - def getInitiator: Optional[AWSIdentity] = initiator.asJava + def getInitiator: Optional[AWSIdentity] = 
initiator.toJava /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ def getStorageClass: String = storageClass @@ -661,7 +661,7 @@ object ListMultipartUploadResultUploads { owner: Optional[AWSIdentity], storageClass: String, initiated: Instant): ListMultipartUploadResultUploads = - apply(key, uploadId, initiator.asScala, owner.asScala, storageClass, initiated) + apply(key, uploadId, initiator.toScala, owner.toScala, storageClass, initiated) } final class ListObjectVersionsResultVersions private (val eTag: String, @@ -686,7 +686,7 @@ final class ListObjectVersionsResultVersions private (val eTag: String, def getLastModified: Instant = lastModified /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ def getSize: Long = size @@ -695,7 +695,7 @@ final class ListObjectVersionsResultVersions private (val eTag: String, def getStorageClass: String = storageClass /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withETag(value: String): ListObjectVersionsResultVersions = copy(eTag = value) @@ -797,7 +797,7 @@ object ListObjectVersionsResultVersions { size: Long, storageClass: String, versionId: Optional[String]): ListObjectVersionsResultVersions = - apply(eTag, isLatest, key, lastModified, owner.asScala, size, storageClass, versionId.asScala) + apply(eTag, isLatest, key, lastModified, owner.toScala, size, storageClass, versionId.toScala) } final class DeleteMarkers private (val isLatest: Boolean, @@ -816,10 +816,10 @@ final class DeleteMarkers private (val isLatest: Boolean, def getLastModified: Instant = lastModified /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withIsLatest(value: Boolean): DeleteMarkers = copy(isLatest = value) @@ -892,7 +892,7 @@ object DeleteMarkers { lastModified: Instant, owner: Optional[AWSIdentity], versionId: Optional[String]): DeleteMarkers = - apply(isLatest, key, lastModified, owner.asScala, versionId.asScala) + apply(isLatest, key, lastModified, owner.toScala, versionId.toScala) } final class CommonPrefixes private (val prefix: String) { @@ -1329,7 +1329,7 @@ final class ObjectMetadata private ( * as calculated by Amazon S3. */ lazy val getETag: Optional[String] = - eTag.asJava + eTag.toJava /** *
@@ -1439,7 +1439,7 @@ final class ObjectMetadata private ( * @see ObjectMetadata#setContentType(String) */ def getContentType: Optional[String] = - contentType.asJava + contentType.toJava /** * Gets the value of the Last-Modified header, indicating the date @@ -1479,7 +1479,7 @@ final class ObjectMetadata private ( * Gets the optional Cache-Control header */ def getCacheControl: Optional[String] = - cacheControl.asJava + cacheControl.toJava /** * Gets the value of the version id header. The version id will only be available @@ -1499,7 +1499,7 @@ final class ObjectMetadata private ( * * @return optional version id of the object */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava } object ObjectMetadata { diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala index 11295e532..82223cc0c 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/settings.scala @@ -22,13 +22,13 @@ import org.apache.pekko import pekko.actor.{ ActorSystem, ClassicActorSystemProvider } import pekko.http.scaladsl.model.Uri import pekko.stream.connectors.s3.AccessStyle.{ PathAccessStyle, VirtualHostAccessStyle } +import pekko.util.OptionConverters._ import com.typesafe.config.Config import org.slf4j.LoggerFactory import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region import software.amazon.awssdk.regions.providers._ -import scala.compat.java8.OptionConverters._ import scala.concurrent.duration._ import scala.util.Try @@ -144,7 +144,7 @@ final class ForwardProxy private (val scheme: String, def getPort: Int = port /** Java API */ - def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.asJava + def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.toJava def withScheme(value: String): ForwardProxy = copy(scheme = value) def withHost(host: String): ForwardProxy = copy(host = host) @@ -185,7 +185,7 @@ object ForwardProxy { /** Java API */ def create(host: String, port: Int, credentials: Optional[ForwardProxyCredentials]): ForwardProxy = - apply(host, port, credentials.asScala) + apply(host, port, credentials.toScala) /** Use an HTTP proxy. 
*/ def http(host: String, port: Int): ForwardProxy = new ForwardProxy("http", host, port, credentials = None) @@ -376,13 +376,13 @@ final class S3Settings private ( def pathStyleAccess: Boolean = accessStyle == PathAccessStyle /** Java API */ - def getEndpointUrl: java.util.Optional[String] = endpointUrl.asJava + def getEndpointUrl: java.util.Optional[String] = endpointUrl.toJava /** Java API */ def getListBucketApiVersion: ApiVersion = listBucketApiVersion /** Java API */ - def getForwardProxy: java.util.Optional[ForwardProxy] = forwardProxy.asJava + def getForwardProxy: java.util.Optional[ForwardProxy] = forwardProxy.toJava /** Java API */ def getAccessStyle: AccessStyle = accessStyle @@ -701,7 +701,7 @@ sealed trait BufferType { def path: Option[Path] /** Java API */ - def getPath: java.util.Optional[Path] = path.asJava + def getPath: java.util.Optional[Path] = path.toJava } case object MemoryBufferType extends BufferType { diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala index 7a5e8e0ba..85adbdc36 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala @@ -22,6 +22,7 @@ import pekko.http.scaladsl.model.{ HttpMethods, HttpRequest } import pekko.stream.connectors.testkit.scaladsl.LogCapturing import pekko.stream.scaladsl.Sink import pekko.testkit.TestKit +import pekko.util.OptionConverters._ import org.scalatest.BeforeAndAfterAll import org.scalatest.OptionValues._ import org.scalatest.concurrent.ScalaFutures @@ -31,8 +32,6 @@ import org.scalatest.time.{ Millis, Seconds, Span } import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region -import scala.compat.java8.OptionConverters._ - class SignerSpec(_system: ActorSystem) extends TestKit(_system) with AnyFlatSpecLike @@ -139,7 +138,7 @@ class SignerSpec(_system: ActorSystem) val srFuture = Signer.signedRequest(req, signingKey(date), signAnonymousRequests = true).runWith(Sink.head) whenReady(srFuture) { signedRequest => - signedRequest.getHeader("Authorization").asScala.value shouldEqual RawHeader( + signedRequest.getHeader("Authorization").toScala.value shouldEqual RawHeader( "Authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date, Signature=dd479fa8a80364edf2119ec24bebde66712ee9c9cb2b0d92eb3ab9ccdc0c3947") } diff --git a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala index 9066d5896..e471e6470 100644 --- a/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala +++ b/slick/src/main/scala/org/apache/pekko/stream/connectors/slick/javadsl/Slick.scala @@ -25,14 +25,14 @@ import pekko.NotUsed import pekko.japi.function.Function2 import pekko.stream.connectors.slick.scaladsl.{ Slick => ScalaSlick } import pekko.stream.javadsl._ +import pekko.util.FunctionConverters._ +import pekko.util.FutureConverters._ import slick.dbio.DBIO import slick.jdbc.GetResult import slick.jdbc.SQLActionBuilder import slick.jdbc.SetParameter import slick.jdbc.SimpleJdbcAction -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext object Slick { @@ -307,7 
+307,7 @@ object Slick { toStatement: JFunction[T, String]): Sink[T, CompletionStage[Done]] = ScalaSlick .sink[T](parallelism, toDBIO(toStatement))(session) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -330,7 +330,7 @@ object Slick { toStatement: Function2[T, Connection, PreparedStatement]): Sink[T, CompletionStage[Done]] = ScalaSlick .sink[T](parallelism, toDBIO(toStatement))(session) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** diff --git a/sns/src/main/scala/org/apache/pekko/stream/connectors/sns/scaladsl/SnsPublisher.scala b/sns/src/main/scala/org/apache/pekko/stream/connectors/sns/scaladsl/SnsPublisher.scala index f18dab6a7..e0607998c 100644 --- a/sns/src/main/scala/org/apache/pekko/stream/connectors/sns/scaladsl/SnsPublisher.scala +++ b/sns/src/main/scala/org/apache/pekko/stream/connectors/sns/scaladsl/SnsPublisher.scala @@ -17,13 +17,12 @@ import org.apache.pekko import pekko.stream.connectors.sns.SnsPublishSettings import pekko.stream.scaladsl.{ Flow, Keep, Sink } import pekko.{ Done, NotUsed } +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sns.SnsAsyncClient import software.amazon.awssdk.services.sns.model.{ PublishRequest, PublishResponse } import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ - /** * Scala API * Amazon SNS publisher factory. @@ -56,7 +55,7 @@ object SnsPublisher { implicit snsClient: SnsAsyncClient): Flow[PublishRequest, PublishResponse, NotUsed] = { require(snsClient != null, "The `SnsAsyncClient` passed in may not be null.") Flow[PublishRequest] - .mapAsyncUnordered(settings.concurrency)(snsClient.publish(_).toScala) + .mapAsyncUnordered(settings.concurrency)(snsClient.publish(_).asScala) } /** diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsAckSink.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsAckSink.scala index 3115048df..b664c2809 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsAckSink.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsAckSink.scala @@ -19,10 +19,9 @@ import org.apache.pekko import pekko.Done import pekko.stream.connectors.sqs.{ MessageAction, SqsAckGroupedSettings, SqsAckSettings } import pekko.stream.javadsl.Sink +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sqs.SqsAsyncClient -import scala.compat.java8.FutureConverters.FutureOps - /** * Java API to create acknowledging sinks. 
*/ @@ -36,7 +35,7 @@ object SqsAckSink { sqsClient: SqsAsyncClient): Sink[MessageAction, CompletionStage[Done]] = pekko.stream.connectors.sqs.scaladsl.SqsAckSink .apply(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -47,6 +46,6 @@ object SqsAckSink { sqsClient: SqsAsyncClient): Sink[MessageAction, CompletionStage[Done]] = pekko.stream.connectors.sqs.scaladsl.SqsAckSink .grouped(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsPublishSink.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsPublishSink.scala index df2ee97c9..76db7cb78 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsPublishSink.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/javadsl/SqsPublishSink.scala @@ -21,11 +21,10 @@ import pekko.stream.connectors.sqs._ import pekko.stream.javadsl.Sink import pekko.stream.scaladsl.{ Flow, Keep } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model.SendMessageRequest -import scala.compat.java8.FutureConverters.FutureOps - /** * Java API to create SQS Sinks. */ @@ -37,7 +36,7 @@ object SqsPublishSink { def create(queueUrl: String, settings: SqsPublishSettings, sqsClient: SqsAsyncClient): Sink[String, CompletionStage[Done]] = - scaladsl.SqsPublishSink.apply(queueUrl, settings)(sqsClient).mapMaterializedValue(_.toJava).asJava + scaladsl.SqsPublishSink.apply(queueUrl, settings)(sqsClient).mapMaterializedValue(_.asJava).asJava /** * creates a [[pekko.stream.javadsl.Sink Sink]] to publish messages to a SQS queue using an [[software.amazon.awssdk.services.sqs.SqsAsyncClient SqsAsyncClient]] @@ -47,7 +46,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .messageSink(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -57,7 +56,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .messageSink(settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -67,7 +66,7 @@ object SqsPublishSink { def grouped(queueUrl: String, settings: SqsPublishGroupedSettings, sqsClient: SqsAsyncClient): Sink[String, CompletionStage[Done]] = - scaladsl.SqsPublishSink.grouped(queueUrl, settings)(sqsClient).mapMaterializedValue(_.toJava).asJava + scaladsl.SqsPublishSink.grouped(queueUrl, settings)(sqsClient).mapMaterializedValue(_.asJava).asJava /** * creates a [[pekko.stream.javadsl.Sink Sink]] that groups messages and publishes them in batches to a SQS queue using an [[software.amazon.awssdk.services.sqs.SqsAsyncClient SqsAsyncClient]] @@ -78,7 +77,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .groupedMessageSink(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -91,7 +90,7 @@ object SqsPublishSink { Flow[java.lang.Iterable[String]] .map(_.asScala) .toMat(scaladsl.SqsPublishSink.batch(queueUrl, settings)(sqsClient))(Keep.right) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) 
.asJava /** @@ -104,6 +103,6 @@ object SqsPublishSink { Flow[java.lang.Iterable[SendMessageRequest]] .map(_.asScala) .toMat(scaladsl.SqsPublishSink.batchedMessageSink(queueUrl, settings)(sqsClient))(Keep.right) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsAckFlow.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsAckFlow.scala index 9f5725a84..d66457039 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsAckFlow.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsAckFlow.scala @@ -26,11 +26,11 @@ import pekko.stream.connectors.sqs.SqsAckResultEntry._ import pekko.stream.connectors.sqs._ import pekko.stream.scaladsl.{ Flow, GraphDSL, Merge, Partition } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model._ import scala.collection.immutable -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future /** @@ -57,7 +57,7 @@ object SqsAckFlow { sqsClient .deleteMessage(request) - .toScala + .asScala .map(resp => new SqsDeleteResult(messageAction, resp))(parasitic) case messageAction: MessageAction.ChangeMessageVisibility => @@ -71,7 +71,7 @@ object SqsAckFlow { sqsClient .changeMessageVisibility(request) - .toScala + .asScala .map(resp => new SqsChangeMessageVisibilityResult(messageAction, resp))(parasitic) case messageAction: MessageAction.Ignore => @@ -135,7 +135,7 @@ object SqsAckFlow { case (actions: immutable.Seq[Delete], request) => sqsClient .deleteMessageBatch(request) - .toScala + .asScala .map { case response if response.failed().isEmpty => val responseMetadata = response.responseMetadata() @@ -188,7 +188,7 @@ object SqsAckFlow { case (actions, request) => sqsClient .changeMessageVisibilityBatch(request) - .toScala + .asScala .map { case response if response.failed().isEmpty => val responseMetadata = response.responseMetadata() diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsPublishFlow.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsPublishFlow.scala index 21659c56c..6d7a705b7 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsPublishFlow.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsPublishFlow.scala @@ -22,11 +22,10 @@ import pekko.dispatch.ExecutionContexts.parasitic import pekko.stream.connectors.sqs._ import pekko.stream.scaladsl.{ Flow, Source } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model._ -import scala.compat.java8.FutureConverters._ - /** * Scala API to create publishing SQS flows. 
*/ @@ -59,7 +58,7 @@ object SqsPublishFlow { .mapAsync(settings.maxInFlight) { req => sqsClient .sendMessage(req) - .toScala + .asScala .map(req -> _)(parasitic) } .map { case (request, response) => new SqsPublishResult(request, response) } @@ -107,7 +106,7 @@ object SqsPublishFlow { case (requests, batchRequest) => sqsClient .sendMessageBatch(batchRequest) - .toScala + .asScala .map { case response if response.failed().isEmpty => val responseMetadata = response.responseMetadata() diff --git a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala index 1fa342b6e..443b4651b 100644 --- a/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala +++ b/sqs/src/main/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSource.scala @@ -20,11 +20,10 @@ import pekko.stream.connectors.sqs.SqsSourceSettings import pekko.stream.connectors.sqs.impl.BalancingMapAsync import pekko.stream.scaladsl.{ Flow, Source } import pekko.util.ccompat.JavaConverters._ +import pekko.util.FutureConverters._ import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model._ -import scala.compat.java8.FutureConverters._ - /** * Scala API to create SQS sources. */ @@ -63,11 +62,11 @@ object SqsSource { private def resolveHandler(parallelism: Int)(implicit sqsClient: SqsAsyncClient) = if (parallelism == 1) { - Flow[ReceiveMessageRequest].mapAsyncUnordered(parallelism)(sqsClient.receiveMessage(_).toScala) + Flow[ReceiveMessageRequest].mapAsyncUnordered(parallelism)(sqsClient.receiveMessage(_).asScala) } else { BalancingMapAsync[ReceiveMessageRequest, ReceiveMessageResponse]( parallelism, - sqsClient.receiveMessage(_).toScala, + sqsClient.receiveMessage(_).asScala, (response, _) => if (response.messages().isEmpty) 1 else parallelism) } } diff --git a/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala b/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala index 0f3a3695e..4383c09cb 100644 --- a/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala +++ b/sqs/src/test/scala/org/apache/pekko/stream/connectors/sqs/scaladsl/SqsSourceMockSpec.scala @@ -19,6 +19,7 @@ import org.apache.pekko import pekko.stream.connectors.sqs.SqsSourceSettings import pekko.stream.connectors.testkit.scaladsl.LogCapturing import pekko.stream.testkit.scaladsl.TestSink +import pekko.util.FutureConverters._ import org.mockito.ArgumentMatchers._ import org.mockito.Mockito.{ atMost => atMostTimes, _ } import org.mockito.invocation.InvocationOnMock @@ -29,7 +30,6 @@ import org.scalatestplus.mockito.MockitoSugar.mock import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model.{ Message, ReceiveMessageRequest, ReceiveMessageResponse } -import scala.compat.java8.FutureConverters._ import scala.concurrent.Future import scala.concurrent.duration._ @@ -83,7 +83,7 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex .messages(defaultMessages: _*) .build()) }(system.dispatcher) - .toJava + .asJava .toCompletableFuture }) @@ -124,7 +124,7 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex .messages(List.empty[Message]: _*) .build()) }(system.dispatcher) - .toJava + .asJava .toCompletableFuture } else { CompletableFuture.completedFuture( diff --git 
a/sse/src/main/scala/org/apache/pekko/stream/connectors/sse/javadsl/EventSource.scala b/sse/src/main/scala/org/apache/pekko/stream/connectors/sse/javadsl/EventSource.scala index edfc7e64d..b269c9b26 100644 --- a/sse/src/main/scala/org/apache/pekko/stream/connectors/sse/javadsl/EventSource.scala +++ b/sse/src/main/scala/org/apache/pekko/stream/connectors/sse/javadsl/EventSource.scala @@ -21,15 +21,14 @@ import pekko.http.scaladsl.model.{ HttpResponse => SHttpResponse } import pekko.stream.Materializer import pekko.stream.javadsl.Source import pekko.http.javadsl.model.sse.ServerSentEvent +import pekko.util.FutureConverters +import pekko.util.OptionConverters import java.util.Optional import java.util.concurrent.CompletionStage import java.util.function.{ Function => JFunction } import pekko.actor.ClassicActorSystemProvider -import scala.compat.java8.FutureConverters -import scala.compat.java8.OptionConverters - /** * This stream processing stage establishes a continuous source of server-sent events from the given URI. * @@ -92,8 +91,8 @@ object EventSource { scaladsl .EventSource( uri.asScala, - send(_).toScala.map(_.asInstanceOf[SHttpResponse])(system.classicSystem.dispatcher), - lastEventId.asScala)(system) + send(_).asScala.map(_.asInstanceOf[SHttpResponse])(system.classicSystem.dispatcher), + lastEventId.toScala)(system) .map(v => v: ServerSentEvent) eventSource.asJava } @@ -115,8 +114,8 @@ object EventSource { scaladsl .EventSource( uri.asScala, - send(_).toScala.map(_.asInstanceOf[SHttpResponse])(mat.executionContext), - lastEventId.asScala)(mat.system) + send(_).asScala.map(_.asInstanceOf[SHttpResponse])(mat.executionContext), + lastEventId.toScala)(mat.system) .map(v => v: ServerSentEvent) eventSource.asJava } diff --git a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/javadsl/Udp.scala b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/javadsl/Udp.scala index fed85b0ef..86a5e3743 100644 --- a/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/javadsl/Udp.scala +++ b/udp/src/main/scala/org/apache/pekko/stream/connectors/udp/javadsl/Udp.scala @@ -24,8 +24,7 @@ import pekko.stream.connectors.udp.Datagram import pekko.stream.javadsl.{ Flow, Sink } import pekko.stream.connectors.udp.scaladsl import pekko.util.ccompat.JavaConverters._ - -import scala.compat.java8.FutureConverters._ +import pekko.util.FutureConverters._ object Udp { import java.lang.{ Iterable => JIterable } @@ -121,7 +120,7 @@ object Udp { */ def bindFlow(localAddress: InetSocketAddress, system: ActorSystem): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] = - scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.toJava).asJava + scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.asJava).asJava /** * Creates a flow that upon materialization binds to the given `localAddress`. All incoming @@ -133,7 +132,7 @@ object Udp { */ def bindFlow(localAddress: InetSocketAddress, system: ClassicActorSystemProvider): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] = - scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.toJava).asJava + scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.asJava).asJava /** * Creates a flow that upon materialization binds to the given `localAddress`. 
All incoming @@ -147,7 +146,7 @@ object Udp { def bindFlow(localAddress: InetSocketAddress, options: JIterable[SocketOption], system: ActorSystem): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] = - scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.toJava).asJava + scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.asJava).asJava /** * Creates a flow that upon materialization binds to the given `localAddress`. All incoming @@ -161,5 +160,5 @@ object Udp { def bindFlow(localAddress: InetSocketAddress, options: JIterable[SocketOption], system: ClassicActorSystemProvider): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] = - scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.toJava).asJava + scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.asJava).asJava } diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala index 7256a0254..a08c76ce9 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/javadsl/UnixDomainSocket.scala @@ -18,14 +18,14 @@ import java.nio.file.Path import java.util.Optional import java.util.concurrent.CompletionStage -import scala.compat.java8.OptionConverters._ -import scala.compat.java8.FutureConverters._ import org.apache.pekko import pekko.NotUsed import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } import pekko.stream.javadsl.{ Flow, Source } import pekko.stream.Materializer import pekko.util.ByteString +import pekko.util.OptionConverters._ +import pekko.util.FutureConverters._ import scala.concurrent.duration.Duration @@ -47,7 +47,7 @@ object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdPr * * The produced [[java.util.concurrent.CompletionStage]] is fulfilled when the unbinding has been completed. */ - def unbind(): CompletionStage[Unit] = delegate.unbind().toJava + def unbind(): CompletionStage[Unit] = delegate.unbind().asJava } /** @@ -146,7 +146,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends pekko.actor.Ex delegate .bind(path, backlog, halfClose) .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).toJava)) + .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).asJava)) /** * Creates a [[UnixDomainSocket.ServerBinding]] without specifying options. @@ -161,7 +161,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends pekko.actor.Ex delegate .bind(path) .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).toJava)) + .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).asJava)) /** * Creates an [[UnixDomainSocket.OutgoingConnection]] instance representing a prospective UnixDomainSocket client connection to the given endpoint. 
@@ -190,8 +190,8 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends pekko.actor.Ex connectTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = Flow.fromGraph( delegate - .outgoingConnection(remoteAddress, localAddress.asScala, halfClose, connectTimeout) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).toJava)) + .outgoingConnection(remoteAddress, localAddress.toScala, halfClose, connectTimeout) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).asJava)) /** * Creates an [[UnixDomainSocket.OutgoingConnection]] without specifying options. @@ -207,6 +207,6 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends pekko.actor.Ex Flow.fromGraph( delegate .outgoingConnection(new UnixSocketAddress(path)) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).toJava)) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).asJava)) } diff --git a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala index d0c68fcda..1063825e6 100644 --- a/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala +++ b/xml/src/main/scala/org/apache/pekko/stream/connectors/xml/model.scala @@ -15,9 +15,9 @@ package org.apache.pekko.stream.connectors.xml import java.util.Optional -import org.apache.pekko.util.ccompat.JavaConverters._ - -import scala.compat.java8.OptionConverters._ +import org.apache.pekko +import pekko.util.ccompat.JavaConverters._ +import pekko.util.OptionConverters._ /** * XML parsing events emitted by the parser flow. These roughly correspond to Java XMLEvent types. @@ -54,7 +54,7 @@ case object EndDocument extends ParseEvent { final case class Namespace(uri: String, prefix: Option[String] = None) { /** Java API */ - def getPrefix(): java.util.Optional[String] = prefix.asJava + def getPrefix(): java.util.Optional[String] = prefix.toJava } object Namespace { @@ -63,7 +63,7 @@ object Namespace { * Java API */ def create(uri: String, prefix: Optional[String]) = - Namespace(uri, prefix.asScala) + Namespace(uri, prefix.toScala) } @@ -73,10 +73,10 @@ final case class Attribute(name: String, namespace: Option[String] = None) { /** Java API */ - def getPrefix(): java.util.Optional[String] = prefix.asJava + def getPrefix(): java.util.Optional[String] = prefix.toJava /** Java API */ - def getNamespace(): java.util.Optional[String] = namespace.asJava + def getNamespace(): java.util.Optional[String] = namespace.toJava } object Attribute { @@ -85,7 +85,7 @@ object Attribute { * Java API */ def create(name: String, value: String, prefix: Optional[String], namespace: Optional[String]) = - Attribute(name, value, prefix.asScala, namespace.asScala) + Attribute(name, value, prefix.toScala, namespace.toScala) /** * Java API @@ -109,10 +109,10 @@ final case class StartElement(localName: String, def getAttributes(): java.util.Map[String, String] = attributes.asJava /** Java API */ - def getPrefix(): java.util.Optional[String] = prefix.asJava + def getPrefix(): java.util.Optional[String] = prefix.toJava /** Java API */ - def getNamespace(): java.util.Optional[String] = namespace.asJava + def getNamespace(): java.util.Optional[String] = namespace.toJava /** Java API */ def getNamespaceCtx(): java.util.List[Namespace] = namespaceCtx.asJava @@ -144,8 +144,8 @@ object StartElement { namespaceCtx: java.util.List[Namespace]): StartElement = new StartElement(localName, attributesList.asScala.toList, - 
prefix.asScala, - namespace.asScala, + prefix.toScala, + namespace.toScala, namespaceCtx.asScala.toList) /** @@ -155,7 +155,7 @@ object StartElement { attributesList: java.util.List[Attribute], prefix: Optional[String], namespace: Optional[String]): StartElement = - new StartElement(localName, attributesList.asScala.toList, prefix.asScala, namespace.asScala, List.empty[Namespace]) + new StartElement(localName, attributesList.asScala.toList, prefix.toScala, namespace.toScala, List.empty[Namespace]) /** * Java API @@ -205,10 +205,10 @@ final case class ProcessingInstruction(target: Option[String], data: Option[Stri val marker = ParseEventMarker.XMLProcessingInstruction /** Java API */ - def getTarget(): java.util.Optional[String] = target.asJava + def getTarget(): java.util.Optional[String] = target.toJava /** Java API */ - def getData(): java.util.Optional[String] = data.asJava + def getData(): java.util.Optional[String] = data.toJava } object ProcessingInstruction { @@ -217,7 +217,7 @@ object ProcessingInstruction { * Java API */ def create(target: Optional[String], data: Optional[String]) = - ProcessingInstruction(target.asScala, data.asScala) + ProcessingInstruction(target.toScala, data.toScala) } final case class Comment(text: String) extends ParseEvent {